aury-boot 0.0.34__py3-none-any.whl → 0.0.36__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aury/boot/_version.py +2 -2
- aury/boot/application/app/base.py +4 -10
- aury/boot/application/config/settings.py +18 -16
- aury/boot/commands/scheduler.py +6 -3
- aury/boot/commands/server/app.py +8 -5
- aury/boot/commands/templates/project/main.py.tpl +5 -3
- aury/boot/commands/worker.py +6 -3
- aury/boot/common/logging/__init__.py +2 -4
- aury/boot/common/logging/setup.py +51 -50
- aury/boot/infrastructure/monitoring/alerting/rules.py +2 -2
- aury/boot/infrastructure/monitoring/tracing/processor.py +17 -1
- aury/boot/infrastructure/mq/backends/__init__.py +2 -0
- aury/boot/infrastructure/mq/backends/redis_stream.py +428 -0
- aury/boot/infrastructure/mq/base.py +1 -0
- aury/boot/infrastructure/mq/manager.py +3 -0
- {aury_boot-0.0.34.dist-info → aury_boot-0.0.36.dist-info}/METADATA +1 -1
- {aury_boot-0.0.34.dist-info → aury_boot-0.0.36.dist-info}/RECORD +19 -18
- {aury_boot-0.0.34.dist-info → aury_boot-0.0.36.dist-info}/WHEEL +0 -0
- {aury_boot-0.0.34.dist-info → aury_boot-0.0.36.dist-info}/entry_points.txt +0 -0
aury/boot/_version.py
CHANGED

@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.0.34'
-__version_tuple__ = version_tuple = (0, 0, 34)
+__version__ = version = '0.0.36'
+__version_tuple__ = version_tuple = (0, 0, 36)
 
 __commit_id__ = commit_id = None

aury/boot/application/app/base.py
CHANGED

@@ -280,7 +280,7 @@ class FoundationApp(FastAPI):
         title: str = "Aury Service",
         version: str = "1.0.0",
         description: str | None = None,
-
+        logger_levels: list[tuple[str, str]] | None = None,
         **kwargs: Any,
     ) -> None:
         """Initialize the application.

@@ -290,9 +290,8 @@ class FoundationApp(FastAPI):
             title: Application title
             version: Application version
             description: Application description
-
-
-            Can also be set via the LOG__INTERCEPT_LOGGERS configuration.
+            logger_levels: Loggers to pin to a specific level, format: [("name", "LEVEL"), ...]
+                e.g. [("sse_starlette", "WARNING"), ("httpx", "INFO")]
             **kwargs: Additional arguments passed through to FastAPI
         """
         # Load configuration

@@ -304,11 +303,6 @@ class FoundationApp(FastAPI):
         frame = sys._getframe(1)
         self._caller_module = frame.f_globals.get("__name__", "__main__")
 
-        # Merge intercept_loggers: argument + configuration
-        merged_intercept = list(config.log.intercept_loggers)
-        if intercept_loggers:
-            merged_intercept.extend(intercept_loggers)
-
         # Initialize logging (must happen before anything else)
         setup_logging(
             log_level=config.log.level,

@@ -318,7 +312,7 @@ class FoundationApp(FastAPI):
             retention_days=config.log.retention_days,
             enable_file_rotation=config.log.enable_file_rotation,
             enable_console=config.log.enable_console,
-
+            logger_levels=logger_levels,
         )
 
         # Register the access log (HTTP request log)

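The net effect of these hunks is that FoundationApp no longer merges an intercept list from config; it simply forwards an optional logger_levels list to setup_logging(). A minimal usage sketch (title and level values are illustrative; config handling is omitted):

from aury.boot.application.app.base import FoundationApp

# Sketch only: per-logger level overrides are passed straight through to
# setup_logging(); each tuple is (standard logging logger name, level name).
app = FoundationApp(
    title="demo-service",
    version="0.1.0",
    logger_levels=[("sse_starlette", "WARNING"), ("httpx", "INFO")],
)
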
aury/boot/application/config/settings.py
CHANGED

@@ -401,14 +401,6 @@ class LogSettings(BaseModel):
         default=False,
         description="Whether to record WebSocket message content (mind performance and sensitive data)"
     )
-    intercept_loggers: list[str] = Field(
-        default_factory=list,
-        description=(
-            "Additional standard logging logger names to be taken over by loguru. "
-            "The framework already intercepts uvicorn, uvicorn.error, uvicorn.access and sqlalchemy.engine; "
-            "names configured here are appended to the default list."
-        ),
-    )
 
 
 class ServiceSettings(BaseModel):

@@ -538,24 +530,25 @@ class MessageQueueSettings(BaseModel):
     """Message queue configuration.
 
     Environment variable format: MQ__{FIELD}
-    Examples:
+    Examples: MQ__BACKEND, MQ__URL, MQ__BROKER_URL
 
     Difference from Task (the task queue):
     - Task: based on Dramatiq, for asynchronous task processing (API + Worker mode)
     - MQ: general-purpose message queue, for inter-service communication and event-driven architectures
 
     Supported backends:
-    -
-    -
+    - redis: redis://localhost:6379/0
+    - redis_stream: redis://localhost:6379/0
+    - rabbitmq: amqp://guest:guest@localhost:5672//
     """
 
-
-        default=
-        description="
+    backend: str = Field(
+        default="",
+        description="Message queue backend (redis/redis_stream/rabbitmq); an empty string disables MQ"
     )
-
+    url: str | None = Field(
         default=None,
-        description="
+        description="Connection URL"
     )
     default_queue: str = Field(
         default="default",

@@ -1009,6 +1002,7 @@ class BaseConfig(BaseSettings):
     # ========== Async & events ==========
     task: TaskSettings = Field(default_factory=TaskSettings)
     event: EventSettings = Field(default_factory=EventSettings)
+    mq: MessageQueueSettings = Field(default_factory=MessageQueueSettings)
 
     # ========== Microservice communication ==========
     # RPC client configuration (for calling other services)

@@ -1116,10 +1110,18 @@ class BaseConfig(BaseSettings):
         """Get all message queue instance configurations.
 
         Parses MQ__{INSTANCE}__{FIELD}-style configuration from environment variables.
+        If no multi-instance configuration is present, returns a "default" instance converted from the single-instance settings.
         """
         if self._mqs is None:
             loader = MultiInstanceConfigLoader("MQ", MQInstanceConfig)
             self._mqs = loader.load()
+            if not self._mqs and self.mq.backend:
+                self._mqs = {
+                    "default": MQInstanceConfig(
+                        backend=self.mq.backend,
+                        url=self.mq.url,
+                    )
+                }
         return self._mqs
 
     def get_events(self) -> dict[str, EventInstanceConfig]:

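The get_mqs() change adds a single-instance fallback: when no MQ__{INSTANCE}__{FIELD} variables are present but the flat MQ__BACKEND is set, a synthetic "default" instance is built from the flat settings. A standalone sketch of that fallback logic, with stand-in dataclasses for MessageQueueSettings and MQInstanceConfig:

from dataclasses import dataclass

@dataclass
class MQSettings:
    """Stand-in for MessageQueueSettings (the flat MQ__* settings)."""
    backend: str = ""
    url: str | None = None

@dataclass
class MQInstance:
    """Stand-in for MQInstanceConfig."""
    backend: str
    url: str | None

def resolve_mq_instances(mq: MQSettings, multi: dict[str, MQInstance]) -> dict[str, MQInstance]:
    # Mirrors the fallback added to BaseConfig.get_mqs(): synthesize a "default"
    # instance from the flat settings when no multi-instance config was loaded.
    if not multi and mq.backend:
        return {"default": MQInstance(backend=mq.backend, url=mq.url)}
    return multi

print(resolve_mq_instances(MQSettings(backend="redis_stream", url="redis://localhost:6379/0"), {}))
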
aury/boot/commands/scheduler.py
CHANGED

@@ -84,9 +84,12 @@ def run_scheduler(
     module = __import__(module_path, fromlist=[app_name])
     application = getattr(module, app_name)
 
-    #
-    from aury.boot.common.logging import
-
+    # Set up logging (must happen before anything else)
+    from aury.boot.common.logging import setup_logging
+    setup_logging(
+        log_level=getattr(application, "_config", None) and application._config.log.level or "INFO",
+        service_type="scheduler",
+    )
 
     # Get the scheduler
     from aury.boot.infrastructure.scheduler import SchedulerManager

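The log_level expression above leans on and/or short-circuiting: it falls back to "INFO" whenever the loaded application has no _config attribute (or the resolved level is falsy). A tiny standalone illustration of the same pattern:

class _App:
    """Stand-in for the application object loaded by run_scheduler()."""

_app = _App()  # no _config attribute set

# Same expression shape as in run_scheduler(): falls back to "INFO".
level = getattr(_app, "_config", None) and _app._config.log.level or "INFO"
print(level)  # -> INFO
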
aury/boot/commands/server/app.py
CHANGED

@@ -9,6 +9,7 @@ import sys
 from typing import TYPE_CHECKING
 
 import typer
+import uvicorn
 
 if TYPE_CHECKING:
     from aury.boot.application.app.base import FoundationApp

@@ -416,7 +417,6 @@ def dev(
     os_module.environ["AURIMYTH_RELOAD"] = "1"
 
     # In hot-reload mode, call uvicorn directly and pass the app as a string path
-    import uvicorn
     uvicorn.run(
         app=app_module_path,
         host=server_host,

@@ -500,19 +500,22 @@ def prod(
     typer.echo(" Hot reload: ❌")
     typer.echo(" Debug mode: ❌")
 
+    # Resolve the app module path (multi-process mode needs the string form)
+    app_module_path = app_path or _detect_app_module()
+    typer.echo(f" App module: {app_module_path}")
+
     try:
-
-
+        # Multi-process mode must use a string path, otherwise worker processes cannot re-import the app
+        uvicorn.run(
+            app=app_module_path,
             host=server_host,
             port=server_port,
             workers=server_workers,
             reload=False,
             loop="auto",
             http="auto",
-            debug=False,
             access_log=True,
         )
-        server.run()
     except KeyboardInterrupt:
         typer.echo("\n👋 Server stopped")
     except Exception as e:

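The prod command now resolves an import-string app path and hands it to uvicorn.run() directly; with workers greater than one, each worker process re-imports the application from that string, which an in-process app object cannot provide. A minimal standalone sketch ("main:app" is an illustrative module path):

import uvicorn

if __name__ == "__main__":
    uvicorn.run(
        app="main:app",   # import string, so worker subprocesses can re-import it
        host="0.0.0.0",
        port=8000,
        workers=4,
        reload=False,
    )
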
aury/boot/commands/templates/project/main.py.tpl
CHANGED

@@ -25,15 +25,17 @@ config = AppConfig()
 # - HEALTH_CHECK_PATH: health check path (default /api/health)
 # - HEALTH_CHECK_ENABLED: whether enabled (default true)
 #
-#
-#
+# Logging:
+# The framework automatically takes over all logging globally; no configuration is needed
+# To see TRACE-level logs, set LOG__LEVEL=TRACE
+# To silence DEBUG logs from specific libraries, use the logger_levels parameter
 #
 app = FoundationApp(
     title="{project_name}",
     version="0.1.0",
     description="{project_name} - built on Aury Boot",
     config=config,
-
+    # logger_levels=[("sse_starlette", "WARNING")],  # Optional: set log levels for specific libraries
 )
 
 # Register API routes

aury/boot/commands/worker.py
CHANGED

@@ -100,9 +100,12 @@ def run_worker(
     module = __import__(module_path, fromlist=[app_name])
     application = getattr(module, app_name)
 
-    #
-    from aury.boot.common.logging import
-
+    # Set up logging (must happen before anything else)
+    from aury.boot.common.logging import setup_logging
+    setup_logging(
+        log_level=getattr(application, "_config", None) and application._config.log.level or "INFO",
+        service_type="worker",
+    )
 
     # Try to import dramatiq
     try:

aury/boot/common/logging/__init__.py
CHANGED

@@ -41,14 +41,13 @@ from aury.boot.common.logging.format import (
     log_exception,
 )
 from aury.boot.common.logging.setup import (
-
+    TRACE,
     register_log_sink,
-    setup_intercept,
     setup_logging,
 )
 
 __all__ = [
-    "
+    "TRACE",
     "ServiceContext",
     "format_exception_java_style",
     "get_class_logger",

@@ -61,7 +60,6 @@ __all__ = [
     "register_log_sink",
     "set_service_context",
     "set_trace_id",
-    "setup_intercept",
     "setup_logging",
 ]
 

aury/boot/common/logging/setup.py
CHANGED

@@ -11,6 +11,25 @@ from typing import Any
 
 from loguru import logger
 
+# =============================================================================
+# TRACE level support
+# =============================================================================
+# Standard logging has no TRACE level, so it has to be added manually.
+# TRACE (5) < DEBUG (10); used for ultra-fine-grained debugging (e.g. every streaming chunk)
+TRACE = 5
+logging.addLevelName(TRACE, "TRACE")
+
+
+def _add_trace_method() -> None:
+    """Add a trace() method to the standard logging.Logger."""
+    def trace(self: logging.Logger, msg: str, *args: Any, **kwargs: Any) -> None:
+        if self.isEnabledFor(TRACE):
+            self._log(TRACE, msg, args, **kwargs)
+
+    logging.Logger.trace = trace  # type: ignore[attr-defined]
+
+_add_trace_method()
+
 from aury.boot.common.logging.context import (
     ServiceContext,
     _to_service_context,

@@ -89,31 +108,27 @@ def register_log_sink(
     logger.debug(f"Registered log sink: {name} (filter_key={filter_key})")
 
 
-# Standard logging loggers intercepted by default
-# - uvicorn: Uvicorn server logs
-# - uvicorn.error: Uvicorn error logs
-# - sqlalchemy.engine: SQLAlchemy SQL statement logs
-# Note: uvicorn.access is not intercepted, because the framework has its own RequestLoggingMiddleware
-DEFAULT_INTERCEPT_LOGGERS = [
-    "uvicorn",
-    "uvicorn.error",
-    "sqlalchemy.engine",
-]
-
 
 class _InterceptHandler(logging.Handler):
     """Handler that forwards standard logging records to loguru."""
 
     def emit(self, record: logging.LogRecord) -> None:
         # Get the corresponding loguru level
+        # loguru natively supports TRACE; standard logging's TRACE (5) is mapped automatically
         try:
             level = logger.level(record.levelname).name
         except ValueError:
             level = record.levelno
 
         # Find the caller's frame depth
-
-
+        # Skip every frame that belongs to the logging module or this file
+        frame = logging.currentframe()
+        depth = 0
+        while frame is not None:
+            filename = frame.f_code.co_filename
+            # Skip the logging module, this file, and loguru internals
+            if "logging" not in filename and "loguru" not in filename:
+                break
             frame = frame.f_back
             depth += 1
 

@@ -122,35 +137,22 @@ class _InterceptHandler(logging.Handler):
         )
 
 
-def
-    """
-
-
-
-    std_logger.handlers = [handler]
-    std_logger.setLevel(logging.DEBUG)
-    std_logger.propagate = False
-
-
-def setup_intercept(logger_names: list[str] | None = None) -> None:
-    """Intercept standard logging loggers and forward them to loguru.
-
-    For standalone scripts / CLI entry points (when FoundationApp is not used).
-    FoundationApp calls this automatically; no manual call is needed.
-
+def _setup_global_intercept(logger_levels: list[tuple[str, str]] | None = None) -> None:
+    """Globally take over all standard logging and forward it to loguru.
+
+    This way any library that uses logging.getLogger() is taken over automatically.
+
     Args:
-
-
-
-    Usage example::
-
-        from aury.boot.common.logging import setup_logging, setup_intercept
-
-        setup_logging(log_level="DEBUG")
-        setup_intercept(["my_package", "third_party_lib"])
+        logger_levels: Loggers to pin to a specific level, format: [("name", "LEVEL"), ...]
+            e.g. [("sse_starlette", "WARNING"), ("httpx", "INFO")]
     """
-
-
+    logging.root.handlers = [_InterceptHandler()]
+    logging.root.setLevel(TRACE)  # accept every level, including TRACE
+
+    # Set specific levels for the requested loggers
+    if logger_levels:
+        for name, level in logger_levels:
+            logging.getLogger(name).setLevel(level.upper())
 
 
 def setup_logging(

@@ -162,7 +164,7 @@ def setup_logging(
     retention_days: int = 7,
     rotation_size: str = "50 MB",
     enable_console: bool = True,
-
+    logger_levels: list[tuple[str, str]] | None = None,
 ) -> None:
     """Configure logging.
 

@@ -177,7 +179,7 @@ def setup_logging(
     Additional log files (such as access.log) can be registered via register_log_sink().
 
     Args:
-        log_level: Log level (DEBUG/INFO/WARNING/ERROR/CRITICAL)
+        log_level: Log level (DEBUG/INFO/WARNING/ERROR/CRITICAL/TRACE)
         log_dir: Log directory (default: ./logs)
         service_type: Service type (app/scheduler/worker)
        enable_file_rotation: Whether to enable log rotation

@@ -185,8 +187,8 @@ def setup_logging(
         retention_days: Days to retain logs (default: 7)
         rotation_size: Maximum size per file (default: 50 MB)
         enable_console: Whether to log to the console
-
-
+        logger_levels: Loggers to pin to a specific level, format: [("name", "LEVEL"), ...]
+            e.g. [("sse_starlette", "WARNING"), ("httpx", "INFO")]
     """
     log_level = log_level.upper()
     log_dir = log_dir or "logs"

@@ -273,16 +275,15 @@ def setup_logging(
         filter=lambda record, c=ctx: record["extra"].get("service") == c,
     )
 
-    #
-
-
+    # Globally intercept standard logging records and forward them to loguru
+    # Every library that uses logging.getLogger() is taken over automatically
+    _setup_global_intercept(logger_levels=logger_levels)
 
-    logger.info
+    logger.info
 
 
 __all__ = [
-    "
+    "TRACE",
     "register_log_sink",
-    "setup_intercept",
     "setup_logging",
 ]

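Taken together, the setup.py changes add a numeric TRACE level (5), patch logging.Logger with a trace() method, and replace the per-logger intercept list with a root-level handler plus optional per-logger thresholds. A usage sketch from the caller's side (the logger names are illustrative):

import logging

from aury.boot.common.logging import TRACE, setup_logging

setup_logging(
    log_level="TRACE",
    # Keep noisy libraries above TRACE while the application logs everything.
    logger_levels=[("httpx", "INFO"), ("sse_starlette", "WARNING")],
)

std_logger = logging.getLogger("my_app.stream")
std_logger.trace("per-chunk detail")         # method added by _add_trace_method()
std_logger.log(TRACE, "equivalent explicit call")
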
aury/boot/infrastructure/monitoring/alerting/rules.py
CHANGED

@@ -107,14 +107,14 @@ class AlertRule:
         # Check the path
         if self._path_regex:
             endpoint = event.metadata.get("endpoint", "")
-            if not self._path_regex.
+            if not self._path_regex.fullmatch(endpoint):
                 return False
 
         # Check excluded paths
         if self._exclude_regexes:
             endpoint = event.metadata.get("endpoint", "")
             for exclude_regex in self._exclude_regexes:
-                if exclude_regex.
+                if exclude_regex.fullmatch(endpoint):
                     return False  # Matched an exclusion rule; do not trigger an alert
 
         # Check thresholds (for slow_* types)

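Both checks now use Pattern.fullmatch(), so a rule only fires (or excludes) when the whole endpoint matches, not just a prefix. A standalone illustration with a hypothetical pattern:

import re

pattern = re.compile(r"/api/health")

print(bool(pattern.match("/api/health/db")))      # True  - matches at the start only
print(bool(pattern.fullmatch("/api/health/db")))  # False - the whole string must match
print(bool(pattern.fullmatch("/api/health")))     # True
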
aury/boot/infrastructure/monitoring/tracing/processor.py
CHANGED

@@ -181,7 +181,23 @@ class AlertingSpanProcessor:
             or name
         )
 
-
+        # Check every possible path source
+        paths_to_check = [path]
+
+        # Also check the path embedded in the span name (it may include the HTTP method and a suffix),
+        # e.g. "GET /api/v1/spaces/{space_id}/subscribe http receive"
+        if name and name != path:
+            # Try to extract the path part from the span name
+            parts = name.split()
+            for part in parts:
+                if part.startswith("/"):
+                    paths_to_check.append(part)
+
+        for p in paths_to_check:
+            if any(regex.fullmatch(p) for regex in self._exclude_regexes):
+                return True
+
+        return False
 
     def _emit_slow_alert(
         self,

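The new block also scans the span name for path-like tokens, since names such as "GET /api/v1/... http receive" embed the route next to the HTTP method. A standalone sketch of that extraction (the pattern and span name are illustrative):

import re

exclude_regexes = [re.compile(r"/api/v1/spaces/\{space_id\}/subscribe")]

path = ""  # metadata may not carry the endpoint
name = "GET /api/v1/spaces/{space_id}/subscribe http receive"

paths_to_check = [path]
if name and name != path:
    paths_to_check += [part for part in name.split() if part.startswith("/")]

print(any(r.fullmatch(p) for r in exclude_regexes for p in paths_to_check))  # True
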
aury/boot/infrastructure/mq/backends/redis_stream.py
ADDED

@@ -0,0 +1,428 @@
+"""Redis Stream message queue backend.
+
+Implements a message queue with consumer-group support on top of Redis Stream.
+Compared with a Redis List it offers stronger persistence and delivery guarantees.
+"""
+
+from __future__ import annotations
+
+import asyncio
+from collections.abc import Callable
+import json
+from typing import TYPE_CHECKING, Any
+
+from aury.boot.common.logging import logger
+
+from ..base import IMQ, MQMessage
+
+if TYPE_CHECKING:
+    from aury.boot.infrastructure.clients.redis import RedisClient
+
+
+class RedisStreamMQ(IMQ):
+    """Redis Stream message queue implementation.
+
+    Uses Redis Stream (XADD/XREADGROUP/XACK) to implement a reliable message queue.
+
+    Features:
+    - consumer groups for multi-instance consumption
+    - message persistence (together with AOF)
+    - message acknowledgement
+    - message replay support
+    """
+
+    def __init__(
+        self,
+        url: str | None = None,
+        *,
+        redis_client: RedisClient | None = None,
+        prefix: str = "stream:",
+        consumer_group: str = "default",
+        consumer_name: str | None = None,
+        max_len: int | None = None,
+    ) -> None:
+        """Initialize the Redis Stream message queue.
+
+        Args:
+            url: Redis connection URL (required when redis_client is None)
+            redis_client: RedisClient instance (optional, takes precedence)
+            prefix: queue name prefix
+            consumer_group: consumer group name
+            consumer_name: consumer name (auto-generated by default)
+            max_len: maximum stream length (optional, used for automatic trimming)
+
+        Raises:
+            ValueError: when both url and redis_client are None
+        """
+        if redis_client is None and url is None:
+            raise ValueError("The Redis Stream message queue requires either a url or a redis_client argument")
+
+        self._url = url
+        self._client = redis_client
+        self._prefix = prefix
+        self._consumer_group = consumer_group
+        self._consumer_name = consumer_name or f"consumer-{id(self)}"
+        self._max_len = max_len
+        self._consuming = False
+        self._owns_client = False
+
+    async def _ensure_client(self) -> None:
+        """Ensure the Redis client is initialized."""
+        if self._client is None and self._url:
+            from aury.boot.infrastructure.clients.redis import RedisClient
+            # Create a standalone instance (do not use get_instance, to avoid conflicts with the global instance)
+            self._client = RedisClient(name=f"mq-{id(self)}")
+            self._client.configure(url=self._url)
+            await self._client.initialize()
+            self._owns_client = True
+
+    def _stream_key(self, queue: str) -> str:
+        """Get the Redis key of the stream."""
+        return f"{self._prefix}{queue}"
+
+    async def _ensure_group(self, queue: str) -> None:
+        """Ensure the consumer group exists."""
+        stream_key = self._stream_key(queue)
+        try:
+            await self._client.connection.xgroup_create(
+                stream_key,
+                self._consumer_group,
+                id="0",
+                mkstream=True,
+            )
+            logger.debug(f"Created consumer group: {self._consumer_group} on {stream_key}")
+        except Exception as e:
+            # The group already exists; ignore
+            if "BUSYGROUP" not in str(e):
+                raise
+
+    async def send(self, queue: str, message: MQMessage) -> str:
+        """Send a message to the stream.
+
+        Uses the XADD command, with optional MAXLEN auto-trimming.
+        """
+        await self._ensure_client()
+        message.queue = queue
+
+        # Serialize the message
+        data = {
+            "payload": json.dumps(message.to_dict()),
+        }
+
+        stream_key = self._stream_key(queue)
+
+        # XADD with optional MAXLEN
+        if self._max_len:
+            msg_id = await self._client.connection.xadd(
+                stream_key,
+                data,
+                maxlen=self._max_len,
+                approximate=True,  # ~ approximate trimming, better performance
+            )
+        else:
+            msg_id = await self._client.connection.xadd(stream_key, data)
+
+        logger.debug(f"Sent message to stream: {stream_key}, id={msg_id}")
+        return message.id
+
+    async def receive(
+        self,
+        queue: str,
+        timeout: float | None = None,
+    ) -> MQMessage | None:
+        """Receive a message from the stream (without a consumer group).
+
+        For simple scenarios; reads the latest message directly with XREAD.
+        """
+        await self._ensure_client()
+
+        stream_key = self._stream_key(queue)
+        timeout_ms = int(timeout * 1000) if timeout else 0
+
+        result = await self._client.connection.xread(
+            streams={stream_key: "$"},
+            count=1,
+            block=timeout_ms,
+        )
+
+        if not result:
+            return None
+
+        # Parse the result: [[stream_key, [(msg_id, data)]]]
+        for stream, messages in result:
+            for msg_id, data in messages:
+                try:
+                    payload = data.get(b"payload") or data.get("payload")
+                    if isinstance(payload, bytes):
+                        payload = payload.decode()
+                    msg_dict = json.loads(payload)
+                    message = MQMessage.from_dict(msg_dict)
+                    message._stream_id = msg_id  # keep the stream ID for ACK
+                    return message
+                except (json.JSONDecodeError, KeyError) as e:
+                    logger.error(f"Failed to parse message: {e}")
+                    return None
+
+        return None
+
+    async def receive_group(
+        self,
+        queue: str,
+        timeout: float | None = None,
+    ) -> MQMessage | None:
+        """Receive a message from the stream (using a consumer group).
+
+        Uses XREADGROUP to read from the consumer group, supporting multi-instance consumption.
+        """
+        await self._ensure_client()
+        await self._ensure_group(queue)
+
+        stream_key = self._stream_key(queue)
+        timeout_ms = int(timeout * 1000) if timeout else 0
+
+        result = await self._client.connection.xreadgroup(
+            groupname=self._consumer_group,
+            consumername=self._consumer_name,
+            streams={stream_key: ">"},  # > means read only new messages
+            count=1,
+            block=timeout_ms,
+        )
+
+        if not result:
+            return None
+
+        # Parse the result
+        for stream, messages in result:
+            for msg_id, data in messages:
+                try:
+                    payload = data.get(b"payload") or data.get("payload")
+                    if isinstance(payload, bytes):
+                        payload = payload.decode()
+                    msg_dict = json.loads(payload)
+                    message = MQMessage.from_dict(msg_dict)
+                    message._stream_id = msg_id  # keep for ACK
+                    message.queue = queue
+                    return message
+                except (json.JSONDecodeError, KeyError) as e:
+                    logger.error(f"Failed to parse message: {e}")
+                    # ACK the corrupted message so it does not block the group
+                    await self._client.connection.xack(
+                        stream_key, self._consumer_group, msg_id
+                    )
+                    return None
+
+        return None
+
+    async def ack(self, message: MQMessage) -> None:
+        """Acknowledge that the message has been processed."""
+        if not message.queue:
+            return
+
+        stream_id = getattr(message, "_stream_id", None)
+        if stream_id:
+            stream_key = self._stream_key(message.queue)
+            await self._client.connection.xack(
+                stream_key, self._consumer_group, stream_id
+            )
+            logger.debug(f"ACKed message: {stream_id}")
+
+    async def nack(self, message: MQMessage, requeue: bool = True) -> None:
+        """Reject a message.
+
+        Redis Stream has no native NACK; it is implemented by re-sending.
+        """
+        if not message.queue:
+            return
+
+        stream_id = getattr(message, "_stream_id", None)
+        if stream_id:
+            stream_key = self._stream_key(message.queue)
+            # ACK the original message first
+            await self._client.connection.xack(
+                stream_key, self._consumer_group, stream_id
+            )
+
+        if requeue and message.retry_count < message.max_retries:
+            # Re-send the message
+            message.retry_count += 1
+            await self.send(message.queue, message)
+            logger.debug(f"NACK requeued: {message.id}, retry={message.retry_count}")
+
+    async def consume(
+        self,
+        queue: str,
+        handler: Callable[[MQMessage], Any],
+        *,
+        prefetch: int = 1,
+    ) -> None:
+        """Consume messages from the queue (using a consumer group)."""
+        self._consuming = True
+        await self._ensure_group(queue)
+        logger.info(f"Started consuming stream: {queue}, group={self._consumer_group}")
+
+        while self._consuming:
+            try:
+                message = await self.receive_group(queue, timeout=1.0)
+                if message is None:
+                    continue
+
+                try:
+                    result = handler(message)
+                    if asyncio.iscoroutine(result):
+                        await result
+                    await self.ack(message)
+                except Exception as e:
+                    logger.error(f"Failed to handle message: {e}")
+                    await self.nack(message, requeue=True)
+
+            except Exception as e:
+                logger.error(f"Error while consuming messages: {e}")
+                await asyncio.sleep(1)
+
+    async def read_all(
+        self,
+        queue: str,
+        start: str = "-",
+        end: str = "+",
+        count: int | None = None,
+    ) -> list[MQMessage]:
+        """Read all messages in the stream (used for compaction).
+
+        Uses XRANGE to read messages in the given range.
+
+        Args:
+            queue: queue name
+            start: start ID ("-" means the earliest)
+            end: end ID ("+" means the latest)
+            count: maximum number of messages
+
+        Returns:
+            List of messages
+        """
+        await self._ensure_client()
+        stream_key = self._stream_key(queue)
+
+        result = await self._client.connection.xrange(
+            stream_key,
+            min=start,
+            max=end,
+            count=count,
+        )
+
+        messages = []
+        for msg_id, data in result:
+            try:
+                payload = data.get(b"payload") or data.get("payload")
+                if isinstance(payload, bytes):
+                    payload = payload.decode()
+                msg_dict = json.loads(payload)
+                message = MQMessage.from_dict(msg_dict)
+                message._stream_id = msg_id
+                messages.append(message)
+            except (json.JSONDecodeError, KeyError) as e:
+                logger.warning(f"Skipping corrupted message {msg_id}: {e}")
+
+        return messages
+
+    async def read_blocking(
+        self,
+        queue: str,
+        last_id: str = "$",
+        count: int = 10,
+        block_ms: int = 100,
+    ) -> list[MQMessage]:
+        """Blocking read of new messages in the stream (using XREAD BLOCK).
+
+        Args:
+            queue: queue name
+            last_id: start ID ("$" waits only for new messages, "0" starts from the beginning)
+            count: maximum number of messages to read
+            block_ms: blocking timeout in milliseconds; 0 means non-blocking
+
+        Returns:
+            List of messages
+        """
+        await self._ensure_client()
+        stream_key = self._stream_key(queue)
+
+        result = await self._client.connection.xread(
+            streams={stream_key: last_id},
+            count=count,
+            block=block_ms,
+        )
+
+        if not result:
+            return []
+
+        messages = []
+        for stream, stream_messages in result:
+            for msg_id, data in stream_messages:
+                try:
+                    payload = data.get(b"payload") or data.get("payload")
+                    if isinstance(payload, bytes):
+                        payload = payload.decode()
+                    msg_dict = json.loads(payload)
+                    message = MQMessage.from_dict(msg_dict)
+                    message._stream_id = msg_id
+                    messages.append(message)
+                except (json.JSONDecodeError, KeyError) as e:
+                    logger.warning(f"Skipping corrupted message {msg_id}: {e}")
+
+        return messages
+
+    async def trim(
+        self,
+        queue: str,
+        *,
+        maxlen: int | None = None,
+        minid: str | None = None,
+    ) -> int:
+        """Trim the stream.
+
+        Args:
+            queue: queue name
+            maxlen: maximum length to keep
+            minid: keep only messages after this ID
+
+        Returns:
+            Number of deleted messages
+        """
+        await self._ensure_client()
+        stream_key = self._stream_key(queue)
+
+        if minid:
+            return await self._client.connection.xtrim(
+                stream_key, minid=minid, approximate=False
+            )
+        elif maxlen is not None:
+            # maxlen=0 should also take effect (clears the stream)
+            return await self._client.connection.xtrim(
+                stream_key, maxlen=maxlen, approximate=False
+            )
+        return 0
+
+    async def delete_stream(self, queue: str) -> bool:
+        """Delete the whole stream."""
+        await self._ensure_client()
+        stream_key = self._stream_key(queue)
+        return await self._client.connection.delete(stream_key) > 0
+
+    async def stream_info(self, queue: str) -> dict[str, Any]:
+        """Get stream information."""
+        await self._ensure_client()
+        stream_key = self._stream_key(queue)
+        try:
+            return await self._client.connection.xinfo_stream(stream_key)
+        except Exception:
+            return {}
+
+    async def close(self) -> None:
+        """Close the connection."""
+        self._consuming = False
+        if self._owns_client and self._client:
+            await self._client.close()
+            self._client = None
+        logger.debug("Redis Stream message queue closed")
+
+
+__all__ = ["RedisStreamMQ"]

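A minimal usage sketch of the new backend against a local Redis. RedisStreamMQ and its methods are the ones defined above; the MQMessage constructor arguments are an assumption for illustration, since base.py is not shown in this diff:

import asyncio

from aury.boot.infrastructure.mq.backends.redis_stream import RedisStreamMQ
from aury.boot.infrastructure.mq.base import MQMessage

async def main() -> None:
    mq = RedisStreamMQ(url="redis://localhost:6379/0", consumer_group="workers")

    # Producer: XADD one serialized message onto stream:events.
    # NOTE: the body= keyword is an assumption about MQMessage's constructor.
    await mq.send("events", MQMessage(body={"kind": "user.created", "id": 42}))

    # Consumer: XREADGROUP one message, handle it, then XACK it.
    message = await mq.receive_group("events", timeout=1.0)
    if message is not None:
        await mq.ack(message)

    await mq.close()

asyncio.run(main())
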
aury/boot/infrastructure/mq/manager.py
CHANGED

@@ -12,6 +12,7 @@ from aury.boot.common.logging import logger
 
 from .backends.rabbitmq import RabbitMQ
 from .backends.redis import RedisMQ
+from .backends.redis_stream import RedisStreamMQ
 from .base import IMQ, MQBackend, MQMessage
 
 if TYPE_CHECKING:

@@ -127,6 +128,8 @@ class MQManager:
         # Create the backend instance by type; argument validation is left to the backend itself
         if backend == MQBackend.REDIS:
             self._backend = RedisMQ(url=url, redis_client=redis_client, prefix=prefix)
+        elif backend == MQBackend.REDIS_STREAM:
+            self._backend = RedisStreamMQ(url=url, redis_client=redis_client, prefix=prefix)
         elif backend == MQBackend.RABBITMQ:
             self._backend = RabbitMQ(url=url)
         else:

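With this registration, selecting the stream-backed queue is purely a configuration choice; MQBackend.REDIS_STREAM is referenced above, so the enum member exists. A simplified stand-in for the manager's dispatch:

from aury.boot.infrastructure.mq.backends.redis_stream import RedisStreamMQ
from aury.boot.infrastructure.mq.base import IMQ, MQBackend

def create_backend(backend: MQBackend, url: str) -> IMQ:
    """Simplified stand-in for MQManager's backend selection."""
    if backend == MQBackend.REDIS_STREAM:
        return RedisStreamMQ(url=url, prefix="stream:")
    raise ValueError(f"unsupported backend in this sketch: {backend}")

mq = create_backend(MQBackend.REDIS_STREAM, "redis://localhost:6379/0")
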
{aury_boot-0.0.34.dist-info → aury_boot-0.0.36.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 aury/boot/__init__.py,sha256=pCno-EInnpIBa1OtxNYF-JWf9j95Cd2h6vmu0xqa_-4,1791
-aury/boot/_version.py,sha256=
+aury/boot/_version.py,sha256=0ktbVh2xRI7xnrojV_sDPVhrN753EK8vWN68JQalwWk,706
 aury/boot/application/__init__.py,sha256=I2KqNVdYg2q5nlOXr0TtFGyHmhj4oWdaR6ZB73Mwg7Y,3041
 aury/boot/application/adapter/__init__.py,sha256=e1bcSb1bxUMfofTwiCuHBZJk5-STkMCWPF2EJXHQ7UU,3976
 aury/boot/application/adapter/base.py,sha256=Ar_66fiHPDEmV-1DKnqXKwc53p3pozG31bgTJTEUriY,15763

@@ -8,13 +8,13 @@ aury/boot/application/adapter/decorators.py,sha256=yyGu_16bWWUiO36gxCeQWgG0DN19p
 aury/boot/application/adapter/exceptions.py,sha256=Kzm-ytRxdUnSMIcWCSOHPxo4Jh_A6YbyxlOVIUs-5F4,6183
 aury/boot/application/adapter/http.py,sha256=4TADsSzdSRU63307dmmo-2U_JpVP12mwTFy66B5Ps-w,10759
 aury/boot/application/app/__init__.py,sha256=I8FfCKDuDQsGzAK6BevyfdtAwieMUVYu6qgVQzBazpE,830
-aury/boot/application/app/base.py,sha256=
+aury/boot/application/app/base.py,sha256=kyuNm3wOr8cnrPlKJOLJUBQY2-91q5LZABtoUGkmg7g,21634
 aury/boot/application/app/components.py,sha256=Ub7NlfxSPXSDcxUajQ5ed42kNmsBSol-UttcBfnx64Y,33473
 aury/boot/application/app/middlewares.py,sha256=BXe2H14FHzJUVpQM6DZUm-zfZRXSXIi1QIZ4_3izfHw,3306
 aury/boot/application/app/startup.py,sha256=DHKt3C2G7V5XfFr1SQMl14tNzcuDd9MqUVAxi274HDQ,7873
 aury/boot/application/config/__init__.py,sha256=Dd-myRSBCM18DXXsi863h0cJG5VFrI10xMRtjnvelGo,1894
 aury/boot/application/config/multi_instance.py,sha256=RXSp-xP8-bKMDEhq3SeL7T3lS8-vpRlvBEVBuZVjVK4,6475
-aury/boot/application/config/settings.py,sha256=
+aury/boot/application/config/settings.py,sha256=YxcR75SxdjJznJ1813sK4dhUPJ5ac5ATvGuT6IbNst0,37998
 aury/boot/application/constants/__init__.py,sha256=DCXs13_VVaQWHqO-qpJoZwRd7HIexiirtw_nu8msTXE,340
 aury/boot/application/constants/components.py,sha256=I4SlsF2DpSzMiLsi1wVrEmdHn4yV5J2h3ikMQqufPmM,1120
 aury/boot/application/constants/scheduler.py,sha256=S77FBIvHlyruvlabRWZJ2J1YAs2xWXPQI2yuGdGUDNA,471

@@ -49,13 +49,13 @@ aury/boot/commands/docs.py,sha256=Hz1W-2TW8DzaPxARqEF4UncPhGMI9h97jJ962dlox3U,14
 aury/boot/commands/generate.py,sha256=WZieSXuofxJOC7NBiVGpBigB9NZ4GMcF2F1ReTNun1I,44420
 aury/boot/commands/init.py,sha256=W_eCL3wydWaMSLqTpadREDnzC0w-LGgNnj3IBjuQAfA,32348
 aury/boot/commands/pkg.py,sha256=bw0QPptKscNgQ4I1SfSehTio9Q5KrvxgvkYx4tbZ7Vs,14495
-aury/boot/commands/scheduler.py,sha256=
-aury/boot/commands/worker.py,sha256=
+aury/boot/commands/scheduler.py,sha256=XO3Gq7PqNxXNz5Gw0xNUHa_bEnAKZ9AkzLc062QJ3j8,3669
+aury/boot/commands/worker.py,sha256=OEvfDiiM_pV3Mj73HKhSm1RNqFPuS125iNM0qNCTHFY,4316
 aury/boot/commands/migrate/__init__.py,sha256=W9OhkX8ILdolySofgdP2oYoJGG9loQd5FeSwkniU3qM,455
 aury/boot/commands/migrate/app.py,sha256=phCMKW6cuFYW2wr6PSMSCq0K2uUCiYo3UiFd0_UvA_o,1327
 aury/boot/commands/migrate/commands.py,sha256=892htS_pTtpejLGqRP8bc3xXJPG92WwAejHlY74oI3o,9950
 aury/boot/commands/server/__init__.py,sha256=aP3bPNGn6wT8dHa_OmKw1Dexnxuvf0BhrGA6pEUcsVM,319
-aury/boot/commands/server/app.py,sha256
+aury/boot/commands/server/app.py,sha256=9QQs7PHOw-CBxm9-soccB5v-mElO3G4-WZDWJQebpp0,16010
 aury/boot/commands/templates/generate/api.py.tpl,sha256=xTbk9uzn5IMtJ-SPMadjmOUNHoM3WoE6g-TIEsGHFUA,3153
 aury/boot/commands/templates/generate/model.py.tpl,sha256=knFwMyGZ7wMpzH4_bQD_V1hFTvmCb2H04G8p3s2xvyA,312
 aury/boot/commands/templates/generate/repository.py.tpl,sha256=Uj9jNEI9Zn8W061FGFlRaIfAy9IhdassYH6noEjG0z0,662

@@ -68,7 +68,7 @@ aury/boot/commands/templates/project/alert_rules.example.yaml.tpl,sha256=QZH6SC5
 aury/boot/commands/templates/project/config.py.tpl,sha256=H_B05FypBJxTjb7qIL91zC1C9e37Pk7C9gO0-b3CqNs,1009
 aury/boot/commands/templates/project/conftest.py.tpl,sha256=chbETK81Hy26cWz6YZ2cFgy7HbnABzYCqeyMzgpa3eI,726
 aury/boot/commands/templates/project/gitignore.tpl,sha256=OI0nt9u2E9EC-jAMoh3gpqamsWo18uDgyPybgee_snQ,3053
-aury/boot/commands/templates/project/main.py.tpl,sha256=
+aury/boot/commands/templates/project/main.py.tpl,sha256=6uiXv8KuGl24qZfzgFI2twB6WYCWHXCGMfwirezF8L4,1217
 aury/boot/commands/templates/project/aury_docs/00-overview.md.tpl,sha256=eOjtqMeKqZ8OgijrOwcpfpHhrhUvt_CiHPUtRG0dilA,2251
 aury/boot/commands/templates/project/aury_docs/01-model.md.tpl,sha256=1mQ3hGDxqEZjev4CD5-3dzYRFVonPNcAaStI1UBEUyM,6811
 aury/boot/commands/templates/project/aury_docs/02-repository.md.tpl,sha256=Pn3pT9RoBponTcc4tvepFkVcE8EKxu2F9JaHPJ_mDk8,8345

@@ -108,11 +108,11 @@ aury/boot/common/__init__.py,sha256=MhNP3c_nwx8CyDkDF6p1f4DcTZ1CZZScg66FWdbdaZI,
 aury/boot/common/exceptions/__init__.py,sha256=aS3rIXWc5qNNJbfMs_PNmBlFsyNdKUMErziNMd1yoB8,3176
 aury/boot/common/i18n/__init__.py,sha256=2cy4kteU-1YsAHkuMDTr2c5o4G33fvtYUGKtzEy1Q6c,394
 aury/boot/common/i18n/translator.py,sha256=_vEDL2SjEI1vwMNHbnJb0xErKUPLm7VmhyOuMBeCqRM,8412
-aury/boot/common/logging/__init__.py,sha256=
+aury/boot/common/logging/__init__.py,sha256=SNuqbEKaraqYwB8qM6mQUl55lXJNPb1tLujPexnogi4,1528
 aury/boot/common/logging/context.py,sha256=ndml3rUokEIt5-845E5aW8jI8b4N93ZtukyqsjqzuNE,2566
 aury/boot/common/logging/decorators.py,sha256=UaGMhRJdARNJ2VgCuRwaNX0DD5wIc1gAl6NDj7u8K2c,3354
 aury/boot/common/logging/format.py,sha256=ZEqLagTdyGadywTamybcEh1fAZng3Wfx7DC952TFU30,9782
-aury/boot/common/logging/setup.py,sha256=
+aury/boot/common/logging/setup.py,sha256=gPzappMVB372rlEIZvWR8QMOhyv0S2r70WB7LaonRNY,9619
 aury/boot/contrib/__init__.py,sha256=fyk_St9VufIx64hsobv9EsOYzb_T5FbJHxjqtPds4g8,198
 aury/boot/contrib/admin_console/__init__.py,sha256=HEesLFrtYtBFWTDrh5H3mR-4V4LRg5N4a2a1C4-Whgs,445
 aury/boot/contrib/admin_console/auth.py,sha256=_goyjZ8Clssvmy8g84svenGfBqCe9OC5pIvCjIzt42g,4706

@@ -173,7 +173,7 @@ aury/boot/infrastructure/monitoring/alerting/__init__.py,sha256=UvUsMhSZeGJOjZy0
 aury/boot/infrastructure/monitoring/alerting/aggregator.py,sha256=fiI-lBSqWxXv1eVPfaDNjcigX-81w41fcmhD_vN_XSs,5805
 aury/boot/infrastructure/monitoring/alerting/events.py,sha256=zJvTevQ-9JflIDyYVo1BRzOVyAGhdgEfRlMsD0NcBgM,4056
 aury/boot/infrastructure/monitoring/alerting/manager.py,sha256=vdWox9Pnjl_0IIE6w-Ne9R17IUrqtF9CPhZHwZvke6E,16044
-aury/boot/infrastructure/monitoring/alerting/rules.py,sha256=
+aury/boot/infrastructure/monitoring/alerting/rules.py,sha256=FdyGOolQJF31fN_9mqRGi9i_x2JqtoHEOkNOcPyO07o,6124
 aury/boot/infrastructure/monitoring/alerting/notifiers/__init__.py,sha256=dsfxThPHO_Ofb3Wo_dYlL8HvP_N63pb_S_UXm_qSxF8,321
 aury/boot/infrastructure/monitoring/alerting/notifiers/base.py,sha256=_RXZMzWX-YeTG0Up1U8CwK8ADfX34dd0Sh56ugfqOWM,1462
 aury/boot/infrastructure/monitoring/alerting/notifiers/feishu.py,sha256=JAMJiCNRYoDeJrYn29ew_ZVXDGq8OLgiFApRWd4iPY0,7134

@@ -182,15 +182,16 @@ aury/boot/infrastructure/monitoring/health/__init__.py,sha256=nqwFFXl6J9yTfQa1JL
 aury/boot/infrastructure/monitoring/tracing/__init__.py,sha256=YizkpnhY-bcUUcd8YaDzUsluMflhNOH1dAKdVtkW05U,1287
 aury/boot/infrastructure/monitoring/tracing/context.py,sha256=s_k2MzNl4LDDpei9xUP6TFW5BwZneoQg44RPaw95jac,978
 aury/boot/infrastructure/monitoring/tracing/logging.py,sha256=gzuKa1ZiyY4z06fHNTbjgZasS6mLftSEaZQQ-Z6J_RE,2041
-aury/boot/infrastructure/monitoring/tracing/processor.py,sha256=
+aury/boot/infrastructure/monitoring/tracing/processor.py,sha256=36hoiyQ25sk55k7D4vDAKXt7l9d4wCNkZMTpwlPwg_Y,13224
 aury/boot/infrastructure/monitoring/tracing/provider.py,sha256=AnPHUDHnfrCB48WHjp9vLBhCh9BpyfWb3DHGRh6Din4,11553
 aury/boot/infrastructure/monitoring/tracing/tracing.py,sha256=BeWL-FYtlQ05r05wGJ6qjTSpypgCp-7OzdNnZ3uunB0,6890
 aury/boot/infrastructure/mq/__init__.py,sha256=Q7kBk_GeQnxnqkyp29Bh1yFH3Q8xxxjs8oDYLeDj8C0,498
-aury/boot/infrastructure/mq/base.py,sha256=
-aury/boot/infrastructure/mq/manager.py,sha256=
-aury/boot/infrastructure/mq/backends/__init__.py,sha256=
+aury/boot/infrastructure/mq/base.py,sha256=ld4wtzhO_6y8wJRXL1DagqJiwhd0VQ6MJlJGDQoL6A8,3430
+aury/boot/infrastructure/mq/manager.py,sha256=Bu4E1Tgz0CzFvJuCS9_fBMj9eAqmXcZp8aFIYhvNUl4,7692
+aury/boot/infrastructure/mq/backends/__init__.py,sha256=10nggw2V-AzuZ1vvzq_ksoXR4FI3e4BR36EfY49Pek4,200
 aury/boot/infrastructure/mq/backends/rabbitmq.py,sha256=0NWgPKEwtbmI63EVvKINdfXXDNyOvuOOP9LlBzqH91E,5493
 aury/boot/infrastructure/mq/backends/redis.py,sha256=i8KECToIFEZ6CnHyNCk34_xdff5ioK172_knOy6EeUU,5279
+aury/boot/infrastructure/mq/backends/redis_stream.py,sha256=hbSX03d0BLshE10GryjyvqybtjDBOS4pQUsNHhQFB-Q,14420
 aury/boot/infrastructure/scheduler/__init__.py,sha256=eTRJ5dSPcKvyFvLVtraoQteXTTDDGwIrmw06J2hoNdA,323
 aury/boot/infrastructure/scheduler/exceptions.py,sha256=ROltrhSctVWA-6ulnjuYeHAk3ZF-sykDoesuierYzew,634
 aury/boot/infrastructure/scheduler/manager.py,sha256=OHQOHQlcoN8yFnky4kfuhsEIk39qX6nLZ7xJ51tfg68,23130

@@ -209,7 +210,7 @@ aury/boot/testing/client.py,sha256=KOg1EemuIVsBG68G5y0DjSxZGcIQVdWQ4ASaHE3o1R0,4
 aury/boot/testing/factory.py,sha256=8GvwX9qIDu0L65gzJMlrWB0xbmJ-7zPHuwk3eECULcg,5185
 aury/boot/toolkit/__init__.py,sha256=AcyVb9fDf3CaEmJPNkWC4iGv32qCPyk4BuFKSuNiJRQ,334
 aury/boot/toolkit/http/__init__.py,sha256=zIPmpIZ9Qbqe25VmEr7jixoY2fkRbLm7NkCB9vKpg6I,11039
-aury_boot-0.0.
-aury_boot-0.0.
-aury_boot-0.0.
-aury_boot-0.0.
+aury_boot-0.0.36.dist-info/METADATA,sha256=dyV3mpOq5ZzkAUCbQdLt4Bmxw_vyLSyNcuUaJOtF6B8,8694
+aury_boot-0.0.36.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+aury_boot-0.0.36.dist-info/entry_points.txt,sha256=f9KXEkDIGc0BGkgBvsNx_HMz9VhDjNxu26q00jUpDwQ,49
+aury_boot-0.0.36.dist-info/RECORD,,

{aury_boot-0.0.34.dist-info → aury_boot-0.0.36.dist-info}/WHEEL
File without changes

{aury_boot-0.0.34.dist-info → aury_boot-0.0.36.dist-info}/entry_points.txt
File without changes