algo-backend-framework 0.0.4-py3-none-any.whl → 0.0.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,3 @@
- from dotenv import load_dotenv
+ from .basic_config import ErrorCodeConfig, ServiceConfig, CommonLogConfig
 
- load_dotenv(".env")
-
- from .basic_config import ErrorCodeConfig, ServiceConfig
- from .loguru_config import LoguruConfig
-
- __all__ = ["LoguruConfig", "ServiceConfig", "ErrorCodeConfig"]
+ __all__ = ["ServiceConfig", "ErrorCodeConfig", "CommonLogConfig"]
@@ -1,13 +1,32 @@
  import os
 
- from algo_backend.utils import OsAttrMeta
+ from pydantic_settings import BaseSettings
+
+
+ class BasicConfig(BaseSettings):
+     class Config:
+         env_file = ".env"
+         case_sensitive = False
+         extra = "allow"
 
 
  class ErrorCodeConfig:
      SERVICE_PREFIX: int = os.getenv("ERROR_CODE_SERVICE_PREFIX", "0")
 
 
- class ServiceConfig(metaclass=OsAttrMeta):
+ class ServiceConfig(BasicConfig):
      HTTP_PORT: int = 8100
      TIMEOUT_KEEP_ALIVE: int = 1000
      PROCESS_NUM: int = 1
+
+
+ class CommonLogConfig(BasicConfig):
+     """
+     LOGGER_PATH: log file path
+     LOG_RETENTION_DAY: number of days to retain logs
+     SAVE_LOG: whether to save logs
+     """
+
+     LOGGER_PATH: str = "/logger"
+     LOG_RETENTION_DAY: int = 60
+     SAVE_LOG: bool = True
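
For orientation, a minimal sketch of how the new pydantic-settings-based config classes behave (the env var values below are illustrative, not from the package):

    import os

    from algo_backend.config import CommonLogConfig, ServiceConfig

    # BasicConfig reads the environment and a .env file; case_sensitive=False
    # means http_port and HTTP_PORT both match, extra="allow" tolerates unknown keys.
    os.environ["http_port"] = "9000"

    service_config = ServiceConfig()  # instantiated, unlike the old OsAttrMeta class attributes
    assert service_config.HTTP_PORT == 9000  # parsed to int by pydantic

    log_config = CommonLogConfig()
    assert log_config.LOGGER_PATH == "/logger" and log_config.SAVE_LOG is True
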
@@ -8,21 +8,21 @@ from . import status_code
  from .status_code import BasicApiId, BasicApiInnerErrorCode, BasicStatusCode
 
 
+ def set_error_code_prefix_env(prefix: Optional[int] = None):
+     """
+     Call this function at service startup.
+     The service prefix can also be set via the ERROR_CODE_SERVICE_PREFIX
+     environment variable when the service starts.
+     If the given prefix does not satisfy the rule, it is ignored and the
+     original six-digit code is kept.
+     """
+     if prefix and 1 <= prefix <= 99:
+         ErrorCodeConfig.SERVICE_PREFIX = prefix
+
+
  class ApiErrorCodeManage:
      """
      Builds "5aaabb" error codes, where aaa is the API id and bb is the
      API-internal error code
      """
 
-     @classmethod
-     def set_error_code_prefix_env(cls, prefix: Optional[int] = None):
-         """
-         Call this function at service startup.
-         The service prefix can also be set via the ERROR_CODE_SERVICE_PREFIX
-         environment variable when the service starts.
-         If the given prefix does not satisfy the rule, it is ignored and the
-         original six-digit code is kept.
-         """
-         if prefix and 1 <= prefix <= 99:
-             ErrorCodeConfig.SERVICE_PREFIX = prefix
-
      @classmethod
      def scan_module_and_summary(cls, *list_module) -> list:
          """Scan modules for ApiId and error-code enum classes"""
@@ -1,3 +1,4 @@
  from .simple_handler import timing_and_exception_handler
+ from .sse_handler import sse_timing_and_exception_handler
 
- __all__ = ["timing_and_exception_handler"]
+ __all__ = ["timing_and_exception_handler", "sse_timing_and_exception_handler"]
@@ -1,3 +1,4 @@
+ import inspect
  import logging
  import time
  import traceback
@@ -9,7 +10,8 @@ from algo_backend.exception import (
      DefaultApiErrorCode,
  )
  from algo_backend.metrics import PrometheusTimeCostMetricSetting
- from algo_backend.schema import AbstractRespVo
+ from algo_backend.schema import AbstractRespVo, BaseRespVo
+
  from .exception_to_vo import gen_vo_from_exception
 
  logger = logging.getLogger(__name__)
@@ -20,10 +22,14 @@ def timing_and_exception_handler(
      *,
      api_id: BasicApiId = DefaultApiErrorCode.DEFAULT_ERROR,
      api_name: str = "",
+     vo_class_type: type(AbstractRespVo) = BaseRespVo,
  ):
      """
      Decorator: measures function execution time and captures exceptions.
      The decorated function must take a reqid or request_id parameter.
+     :param api_id: error code
+     :param api_name: API name
+     :param vo_class_type: response type; taken from the decorated function's
+         return type hint first, and if the obtained type is not an
+         AbstractRespVo implementation, taken from vo_class_type instead
+         (default BaseRespVo)
      """
 
      def decorator(
@@ -59,9 +65,10 @@ def timing_and_exception_handler(
                      f"Traceback:\n{traceback.format_exc()}"
                  )
 
-                 vo_cls: type(AbstractRespVo) = get_type_hints(func).get(
-                     "return"
-                 )  # this may fail, since the user's type cannot be enforced
+                 vo_cls = get_type_hints(func).get("return")
+                 if inspect.isclass(vo_cls) and issubclass(vo_cls, AbstractRespVo):
+                     vo_cls = vo_class_type
+
                  return gen_vo_from_exception(
                      vo_cls, e, api_name=_name, request_id=request_id, api_id=api_id
                  )
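
A hedged usage sketch of the extended decorator (MyRespVo, the handler body, and the assumption that the decorated function is async are illustrative; only the decorator parameters come from the diff):

    from algo_backend.handler import timing_and_exception_handler
    from algo_backend.schema import BaseRespVo

    class MyRespVo(BaseRespVo):  # hypothetical response VO
        pass

    @timing_and_exception_handler(api_name="predict", vo_class_type=MyRespVo)
    async def predict(request_id: str) -> MyRespVo:
        # On success the result passes through; on exception the handler logs
        # the traceback, records Prometheus metrics, and builds a response VO
        # via gen_vo_from_exception.
        return MyRespVo()
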
@@ -0,0 +1,84 @@
+ import logging
+ import time
+ import traceback
+ from functools import wraps
+ from typing import AsyncIterable, Callable, TypeVar
+
+ from pydantic import BaseModel
+
+ from algo_backend.exception import (
+     BasicApiId,
+     BasicException,
+     DefaultApiErrorCode,
+     transfer_exception,
+ )
+ from algo_backend.metrics import PrometheusTimeCostMetricSetting
+ from algo_backend.schema import SseVoGenerator
+
+ logger = logging.getLogger(__name__)
+
+
+ D = TypeVar("D", bound=BaseModel)
+
+
+ def sse_timing_and_exception_handler(
+     transfer_obj_cls=type(SseVoGenerator),
+     *,
+     api_id: BasicApiId = DefaultApiErrorCode.DEFAULT_ERROR,
+     api_name: str = "",
+ ):
+     def decorator(
+         func: Callable[..., AsyncIterable[D]],
+     ) -> Callable[..., AsyncIterable[D]]:
+         @wraps(func)
+         async def wrapper(*args, **kwargs):
+             request_id = kwargs.get("request_id", None) or kwargs.get("reqid", None)
+             _name = func.__name__ or api_name
+
+             transfer_obj: SseVoGenerator = transfer_obj_cls(request_id=request_id)
+             # Pass the original arguments through so the generator can enrich its context
+             transfer_obj.extract_info(*args, **kwargs)
+
+             start_time = time.perf_counter()
+             # Run the wrapped function
+             logger.info(f"ReqId: {request_id} | Function: {_name} | Start")
+
+             try:
+                 start_event = transfer_obj.start()
+                 if start_event:
+                     yield start_event
+
+                 async for item in func(*args, **kwargs):
+                     yield transfer_obj.generate(item)
+
+                 elapsed_time = time.perf_counter() - start_time
+
+                 logger.info(
+                     f"ReqId: {request_id} | Function: {_name} | COST:{elapsed_time:.4f}s"
+                 )
+                 PrometheusTimeCostMetricSetting.api_metrics_instance().add(
+                     _name, elapsed_time
+                 )
+
+                 end_event = transfer_obj.done()
+                 if end_event:
+                     yield end_event
+
+             except Exception as e:
+                 # Compute the elapsed time
+                 elapsed_time = time.perf_counter() - start_time
+                 PrometheusTimeCostMetricSetting.api_metrics_instance().add_error(_name)
+                 # Log the exception with the full stack trace
+                 logger.error(
+                     f"ReqId: {request_id} | Function: {_name} | COST:{elapsed_time:.4f}s | Exception: {str(e)}\n"
+                     f"Traceback:\n{traceback.format_exc()}"
+                 )
+
+                 basic_exception: BasicException = transfer_exception(
+                     e, api_name=_name, api_id=api_id, request_id=request_id
+                 )
+                 yield transfer_obj.error(basic_exception)
+
+         return wrapper
+
+     return decorator
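
A hedged sketch of wiring the SSE decorator up (ChatSseVoGenerator is the hypothetical implementation sketched after the algo_backend/schema/sse.py hunk below; TokenChunk is also an assumption):

    import asyncio

    from pydantic import BaseModel

    from algo_backend.handler import sse_timing_and_exception_handler

    class TokenChunk(BaseModel):  # hypothetical streamed payload
        text: str

    @sse_timing_and_exception_handler(ChatSseVoGenerator, api_name="chat")
    async def chat_stream(request_id: str):
        for token in ("hello", "world"):
            yield TokenChunk(text=token)

    async def main():
        # The wrapper injects start/generate/done events around the yielded
        # items and converts exceptions into one error event via transfer_exception.
        async for event in chat_stream(request_id="req-1"):
            print(event)

    asyncio.run(main())
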
@@ -1 +1,4 @@
- from .common import BasicLogStarter
+ from .common import BasicLogStarter
+
+
+ __all__ = ["BasicLogStarter"]
@@ -13,4 +13,16 @@ class BasicLogStarter:
          """
          Performs some log setup
          """
-         pass
+         ...
+
+     def app_generator_hook(self, app_generate):
+         """
+         Hook function; receives an object of type DefaultAlgoAppGenerator
+         """
+         ...
+
+     def service_generator_hook(self, service_generate):
+         """
+         Hook function; receives an object of type DefaultAlgoServiceStarter
+         """
+         ...
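
A hedged sketch of a custom starter built on these hooks (JsonLogStarter and its setup body are hypothetical; insert_event comes from DefaultAlgoServiceStarter further down in this diff):

    import logging

    from algo_backend.log import BasicLogStarter

    class JsonLogStarter(BasicLogStarter):  # hypothetical replacement for the removed LoguruStarter
        def setup(self):
            logging.basicConfig(level=logging.INFO)  # stand-in for real log wiring

        def app_generator_hook(self, app_generate):
            # Receives the DefaultAlgoAppGenerator; per-app log setup goes here.
            self.setup()

        def service_generator_hook(self, service_generate):
            # Receives the DefaultAlgoServiceStarter; register setup to run in the main pid.
            service_generate.insert_event(self.setup)
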
@@ -2,8 +2,6 @@ import logging
  import os
  import shutil
 
- from fastapi import FastAPI
-
  logger = logging.getLogger(__name__)
 
 
@@ -42,14 +40,3 @@ class PrometheusContext:
 
          os.makedirs(prom_dir, exist_ok=True)
          logger.info(f"Created new Prometheus multiprocessing directory: {prom_dir}")
-
-     @classmethod
-     def mount_prometheus_endpoint(cls, app: FastAPI):
-         """
-         Mount the Prometheus metrics endpoint on the given application.
-         """
-         from prometheus_client import CollectorRegistry, make_asgi_app, multiprocess
-
-         registry = CollectorRegistry()
-         multiprocess.MultiProcessCollector(registry=registry)
-         app.mount("/metrics", make_asgi_app(registry=registry))
@@ -2,7 +2,8 @@ import logging
  import os
  import time
  from functools import wraps
- from typing import Awaitable, Callable, List, Optional
+ from typing import List, Optional
+ import inspect
 
  from prometheus_client import Counter, Histogram
 
@@ -120,27 +121,62 @@ class PrometheusTimeCostMetricSetting:
      def client_metrics_instance(cls) -> BasicTimeCostMetrics:
          return cls.get_metrics("client")
 
+     @classmethod
+     def _create_async_gen_wrapper(cls, func, metrics_cls_name: str, key: str):
+         """
+         Create a wrapper for async generators
+         """
+
+         @wraps(func)
+         async def async_gen_wrapper(*args, **kwargs):
+             metrics = cls.get_metrics(metrics_cls_name)
+             start = time.perf_counter()
+             try:
+                 async for item in func(*args, **kwargs):
+                     yield item
+                 cost = time.perf_counter() - start
+                 metrics.add(key, cost)
+             except Exception as e:
+                 metrics.add_error(key)
+                 raise e
+
+         return async_gen_wrapper
+
+     @classmethod
+     def _create_async_func_wrapper(cls, func, metrics_cls_name: str, key: str):
+         """
+         Create a wrapper for plain async functions
+         """
+
+         @wraps(func)
+         async def async_func_wrapper(*args, **kwargs):
+             metrics = cls.get_metrics(metrics_cls_name)
+             start = time.perf_counter()
+             try:
+                 result = await func(*args, **kwargs)
+                 metrics.add(key, time.perf_counter() - start)
+                 return result
+             except Exception as e:
+                 metrics.add_error(key)
+                 raise e
+
+         return async_func_wrapper
+
      @classmethod
      def metrics_handler(cls, key: str, metrics_cls_name: str):
          """
          The decorator is initialized only once, so the metrics class must be
          looked up dynamically; otherwise the metrics class may not have been
          initialized yet when the decorator is created
          """
 
-         def decorator(func: Callable[..., Awaitable]) -> Callable[..., Awaitable]:
-             @wraps(func)
-             async def wrapper(*args, **kwargs):
-                 metrics = cls.get_metrics(metrics_cls_name)
-                 try:
-                     start = time.perf_counter()
-                     result = await func(*args, **kwargs)
-                     cost = time.perf_counter() - start
-                     metrics.add(key, cost)
-                     return result
-                 except Exception as e:
-                     metrics.add_error(key)
-                     raise e
-
-             return wrapper
+         def decorator(func):
+             if inspect.isasyncgenfunction(func):
+                 return cls._create_async_gen_wrapper(func, metrics_cls_name, key)
+             elif inspect.iscoroutinefunction(func):
+                 return cls._create_async_func_wrapper(func, metrics_cls_name, key)
+             else:
+                 raise ValueError(
+                     f"{func.__name__} is not a async function or async generator"
+                 )
 
          return decorator
 
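A usage sketch of the split wrappers (function bodies and metric keys are illustrative; the metrics classes are assumed to have been set up via PrometheusTimeCostMetricSetting.initialize, per the docstring above):

    from algo_backend.metrics import PrometheusTimeCostMetricSetting

    @PrometheusTimeCostMetricSetting.metrics_handler(key="fetch_user", metrics_cls_name="client")
    async def fetch_user(uid: str) -> dict:
        return {"uid": uid}  # plain coroutine: duration recorded after the await completes

    @PrometheusTimeCostMetricSetting.metrics_handler(key="stream_tokens", metrics_cls_name="api")
    async def stream_tokens(prompt: str):
        yield prompt  # async generator: cost recorded only once iteration finishes

    # Decorating a synchronous function raises ValueError at decoration time.
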
@@ -1,3 +1,4 @@
  from .vo import AbstractRespVo, BaseRespVo
+ from .sse import SseVoGenerator
 
- __all__ = ["BaseRespVo", "AbstractRespVo"]
+ __all__ = ["BaseRespVo", "AbstractRespVo", "SseVoGenerator"]
@@ -0,0 +1,39 @@
+ from abc import ABC, abstractmethod
+ from typing import TypeVar, Optional
+
+ from pydantic import BaseModel
+
+ from algo_backend.exception import BasicException
+
+
+ T = TypeVar("T", bound=BaseModel)
+
+
+ class SseVoGenerator(ABC):
+     def __init__(self, request_id: str = None):
+         self.request_id = request_id
+
+     @abstractmethod
+     def extract_info(self, *args, **kwargs):
+         """Extract additional information"""
+         ...
+
+     @abstractmethod
+     def start(self) -> Optional[T]:
+         """
+         When this returns None, sse_timing_and_exception_handler skips this step
+         """
+         ...
+
+     @abstractmethod
+     def generate(self, content: BaseModel) -> T: ...
+
+     @abstractmethod
+     def done(self) -> T:
+         """
+         When this returns None, sse_timing_and_exception_handler skips this step
+         """
+         ...
+
+     @abstractmethod
+     def error(self, exception: BasicException) -> T: ...
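
A minimal concrete implementation, as a hedged sketch (ChatEvent and its fields are an assumed event model of our own, not part of the package):

    from typing import Optional

    from pydantic import BaseModel

    from algo_backend.exception import BasicException
    from algo_backend.schema import SseVoGenerator

    class ChatEvent(BaseModel):
        event: str
        data: str = ""
        request_id: Optional[str] = None

    class ChatSseVoGenerator(SseVoGenerator):
        def extract_info(self, *args, **kwargs):
            # Enrich context from the handler's call arguments.
            self.user = kwargs.get("user")

        def start(self) -> Optional[ChatEvent]:
            return ChatEvent(event="start", request_id=self.request_id)

        def generate(self, content: BaseModel) -> ChatEvent:
            return ChatEvent(event="delta", data=content.model_dump_json(),
                             request_id=self.request_id)

        def done(self) -> ChatEvent:
            return ChatEvent(event="done", request_id=self.request_id)

        def error(self, exception: BasicException) -> ChatEvent:
            return ChatEvent(event="error", data=str(exception),
                             request_id=self.request_id)
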
@@ -1,4 +1,5 @@
  from .default_app_generator import DefaultAlgoAppGenerator
  from .default_service_starter import DefaultAlgoServiceStarter
+ from .app_mounter import AbstractAppMounter
 
- __all__ = ["DefaultAlgoAppGenerator", "DefaultAlgoServiceStarter"]
+ __all__ = ["DefaultAlgoAppGenerator", "DefaultAlgoServiceStarter", "AbstractAppMounter"]
@@ -0,0 +1,31 @@
+ from abc import abstractmethod, ABC
+
+ from fastapi import FastAPI
+
+
+ class AbstractAppMounter(ABC):
+     """
+     Mount point for app endpoints
+     """
+
+     def __init__(self, service_name: str):
+         self.service_name = service_name
+
+     @abstractmethod
+     def mount_app(self, app: FastAPI): ...
+
+
+ class PrometheusAppMounter(AbstractAppMounter):
+     """
+     Mount point for the Prometheus metrics endpoint
+     """
+
+     def mount_app(self, app: FastAPI):
+         """
+         Mount the Prometheus metrics endpoint on the given application.
+         """
+         from prometheus_client import CollectorRegistry, make_asgi_app, multiprocess
+
+         registry = CollectorRegistry()
+         multiprocess.MultiProcessCollector(registry=registry)
+         app.mount("/metrics", make_asgi_app(registry=registry))
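
A hedged sketch of a custom mounter (HealthAppMounter and its route are hypothetical; append_mounter and add_mounter appear in default_app_generator below):

    from fastapi import FastAPI

    from algo_backend.starter import AbstractAppMounter

    class HealthAppMounter(AbstractAppMounter):  # hypothetical example mounter
        def mount_app(self, app: FastAPI):
            @app.get("/health")
            async def health():
                return {"service": self.service_name, "status": "ok"}

    # Registered mounter classes are instantiated with the service name and
    # applied by add_mounter() during generate().
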
@@ -6,20 +6,18 @@ from fastapi import FastAPI
  from fastapi.exceptions import RequestValidationError, StarletteHTTPException
  from starlette.middleware import Middleware
 
- from algo_backend.exception import ApiErrorCodeManage
+ from algo_backend.exception.error_code_manage import set_error_code_prefix_env
  from algo_backend.intercept import (
      BasicExceptionInterceptor,
      HTTPExceptionInterceptor,
      ValidateExceptionInterceptor,
  )
  from algo_backend.log import BasicLogStarter
- from algo_backend.metrics import (
-     PrometheusContext,
-     PrometheusTimeCostMetricSetting,
- )
+ from algo_backend.metrics import PrometheusTimeCostMetricSetting
  from algo_backend.metrics.collector import MetricsScheduleMonitor
  from algo_backend.middleware import default_cors_middleware
 
+ from .app_mounter import AbstractAppMounter, PrometheusAppMounter
  from .event_list import EventList
 
  logger = logging.getLogger(__name__)
@@ -37,7 +35,7 @@ class DefaultAlgoAppGenerator:
          app_error_code_prefix: Optional[int] = None,
          custom_start_event: List[Callable[[], Coroutine[Any, Any, None]]] = None,
          custom_end_event: List[Callable[[], Coroutine[Any, Any, None]]] = None,
-         log_starter: Optional[BasicLogStarter] = None,
+         log_starter: Optional[type(BasicLogStarter)] = BasicLogStarter,
          intercept_dict: Optional[Dict[Exception, BasicExceptionInterceptor]] = None,
          middlewares: List[Middleware] = (default_cors_middleware,),
          api_time_cost_buckets: Optional[List[int]] = None,
@@ -55,7 +53,7 @@ class DefaultAlgoAppGenerator:
          :param client_time_cost_buckets: buckets for client time-cost stats; None means use the defaults, an empty list disables this metric
          """
          # Register the 8-digit error-code prefix
-         ApiErrorCodeManage.set_error_code_prefix_env(app_error_code_prefix)
+         set_error_code_prefix_env(app_error_code_prefix)
          self.service_name = service_name
 
          self.__app: Optional[FastAPI] = FastAPI()
@@ -63,7 +61,7 @@ class DefaultAlgoAppGenerator:
          self.start_event_list = EventList()
          self.end_event_list = EventList()
 
-         self.log_starter = log_starter or BasicLogStarter(service_name=service_name)
+         self.log_starter: BasicLogStarter = log_starter(service_name=service_name)
 
          self.intercept_dict = {
              RequestValidationError: ValidateExceptionInterceptor(),
@@ -73,6 +71,10 @@ class DefaultAlgoAppGenerator:
 
          self.middleware_list = list(middlewares)
 
+         self.app_mounter_list: List[type(AbstractAppMounter)] = [
+             PrometheusAppMounter,
+         ]
+
          self.time_cost_metrics_initializer = (
              lambda: PrometheusTimeCostMetricSetting.initialize(
                  api_metrics_buckets=api_time_cost_buckets,
@@ -117,26 +119,20 @@ class DefaultAlgoAppGenerator:
              self.middleware_list.append(middleware)
          return self
 
-     def set_log_stater(self, log_starter: BasicLogStarter) -> "DefaultAlgoAppGenerator":
-         self.log_starter = log_starter
-         return self
-
-     def use_log_loguru(self):
-         """
-         Use the loguru logging framework
-         """
-         from algo_backend.log.loguru import LoguruStarter
+     def append_mounter(self, mounter_cls: type(AbstractAppMounter)):
+         self.app_mounter_list.append(mounter_cls)
 
-         return self.set_log_stater(LoguruStarter(service_name=self.service_name))
+     def add_mounter(self) -> "DefaultAlgoAppGenerator":
+         for mounter_cls in self.app_mounter_list:
+             mounter_cls(self.service_name).mount_app(self.__app)
+         return self
 
-     def use_log_nblog(self):
+     def generate(self):
          """
-         Use the nblog logging framework, to be implemented
+         Main method
+         :return:
          """
-         ...
-
-     def generate(self):
-         self.init_app().add_prometheus_endpoint().add_interceptor()
+         self.init_app().add_interceptor().add_mounter()
          return self.app
 
      @property
@@ -149,10 +145,6 @@ class DefaultAlgoAppGenerator:
          )
          return self
 
-     def add_prometheus_endpoint(self):
-         PrometheusContext.mount_prometheus_endpoint(self.__app)
-         return self
-
      def add_interceptor(self):
          for exc, interceptor in self.intercept_dict.items():
              self.__app.add_exception_handler(exc, interceptor.intercept)
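
Putting the generator pieces together, a hedged sketch (constructor arguments other than service_name, and the HealthAppMounter from the earlier sketch, are assumptions):

    from algo_backend.starter import DefaultAlgoAppGenerator

    generator = DefaultAlgoAppGenerator(service_name="demo-svc")
    generator.append_mounter(HealthAppMounter)  # PrometheusAppMounter is pre-registered
    app = generator.generate()  # init_app -> add_interceptor -> add_mounter, returns the FastAPI app
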
@@ -4,6 +4,7 @@ import uvicorn
 
  from algo_backend.config import ServiceConfig
  from algo_backend.metrics import PrometheusContext
+ from algo_backend.log import BasicLogStarter
 
 
  class DefaultAlgoServiceStarter:
@@ -32,16 +33,9 @@ class DefaultAlgoServiceStarter:
          self.__main_pid_event.insert(index, func)
          return self
 
-     def use_loguru(self):
-         """
-         Use the loguru log setup; applies to the main process's logs
-         and registers loguru's scheduled cleaner
-         """
-         from algo_backend.log.loguru import LoguruStarter
-
-         loguru_stater = LoguruStarter(service_name=self.service_name)
-         self.insert_event(loguru_stater.setup_log)
-         self.append_event(loguru_stater.run_log_cleaner)
+     def use_log_stater(self, log_starter_class: type(BasicLogStarter)):
+         log_starter: BasicLogStarter = log_starter_class(service_name=self.service_name)
+         log_starter.service_generator_hook(self)
          return self
 
      def main_pid_setup(self) -> "DefaultAlgoServiceStarter":
@@ -59,12 +53,14 @@ class DefaultAlgoServiceStarter:
          host: str = "0.0.0.0",
          **kwargs,
      ):
+         service_config = ServiceConfig()
          # Start the service
          uvicorn.run(
              app_str,
              host=host,
-             port=ServiceConfig.HTTP_PORT,
-             timeout_keep_alive=ServiceConfig.TIMEOUT_KEEP_ALIVE,
-             workers=ServiceConfig.PROCESS_NUM,
+             port=service_config.HTTP_PORT,
+             timeout_keep_alive=service_config.TIMEOUT_KEEP_ALIVE,
+             workers=service_config.PROCESS_NUM,
+             log_config=None,
              **kwargs,
          )
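
A hedged sketch of starting a service with the new API (the constructor argument and app path are assumptions; use_log_stater takes the class itself, as the diff above shows, and JsonLogStarter is from the earlier sketch):

    from algo_backend.starter import DefaultAlgoServiceStarter

    starter = DefaultAlgoServiceStarter(service_name="demo-svc")
    starter.use_log_stater(JsonLogStarter)        # pass the class, not an instance
    starter.main_pid_setup()
    starter.run("demo.main:app", host="0.0.0.0")  # port/workers/keep-alive come from ServiceConfig env
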
@@ -1,15 +1,14 @@
  Metadata-Version: 2.4
  Name: algo-backend-framework
- Version: 0.0.4
+ Version: 0.0.6
  Summary: Ctcdn algorithm backend framework
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  Requires-Dist: fastapi>=0.128.0
- Requires-Dist: loguru>=0.7.3
  Requires-Dist: prometheus-client>=0.24.1
  Requires-Dist: psutil>=7.2.1
  Requires-Dist: pydantic>=2.12.5
- Requires-Dist: python-dotenv>=1.2.1
+ Requires-Dist: pydantic-settings>=2.12.0
  Requires-Dist: uvicorn>=0.40.0
 
  # algo-backend-framework-python
@@ -1,30 +1,24 @@
  algo_backend/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- algo_backend/config/__init__.py,sha256=MLAvcvodxXiaVsSJaVvQcNjFWIFusBQ0i9OrWJv3zv0,222
- algo_backend/config/basic_config.py,sha256=RSRf8lgc8KfA_FHNjchaghSLNnJy7ejn2ux57SBSakg,292
- algo_backend/config/loguru_config.py,sha256=WovghKcMqeysPuf8vlKFhbNug8Z7zPqxkjlwVYKjZWw,513
+ algo_backend/config/__init__.py,sha256=2aa302si2ksBSRouTNQ2_ud8v2R7XZQfuFTombBa-Uc,144
+ algo_backend/config/basic_config.py,sha256=f8lcmvnIVgVci5qsAjAUiM9Tr7FH5tYQEQCxfC7nPBg,694
  algo_backend/exception/__init__.py,sha256=tHC3p8DyCrFHW4WvLEOuPKP5t4yVqhc1flcWQRtz5AY,653
- algo_backend/exception/error_code_manage.py,sha256=d29aMgelf93RUqlvLTjQDxBiSH9ZVFYbW66rzTWDKM4,4391
+ algo_backend/exception/error_code_manage.py,sha256=7aBGvwepAmZe1VgW87-2_JbBD6eJrmZD6tmsgdczk1k,4338
  algo_backend/exception/exception.py,sha256=Gz5LDsqRct8LON1Aq43TdqQQjyC4_Uy9yeUlO7XtPnM,1962
  algo_backend/exception/status_code.py,sha256=s5lXXR-oSALk3gtI4KcPKHstvnRFArQL1SEgQFIeSxU,2707
- algo_backend/handler/__init__.py,sha256=QUWn5MmDUzqbq_gcoSpMZtPMgfzrkLnlwDbLksXwQVE,104
+ algo_backend/handler/__init__.py,sha256=DBUeFrDVm8qhFPahiva93HeYuViPIFL2ZCHg26iupFU,199
  algo_backend/handler/exception_to_vo.py,sha256=uX3JZaPP5Zt1RgJunFfumxpSfWHwcWtKLZ21dIXDraQ,687
- algo_backend/handler/simple_handler.py,sha256=7ffsgjj1EZ_d3x_jx9krju_uwCmwfjXHJ_9KZ8SrpOw,2654
+ algo_backend/handler/simple_handler.py,sha256=1Pqpgt-ZKY2ytSAlAXOEqI55C5yein-XSePPD6F8zH4,3010
+ algo_backend/handler/sse_handler.py,sha256=9862lq6nerDthYmM8LX_PRVMIp-36nkZar5p5unDox0,2845
  algo_backend/intercept/__init__.py,sha256=FoNHCzUc3ceLo85ECN3L7HzW6KmLqcG5sgMh_qULLdw,265
  algo_backend/intercept/common.py,sha256=T50_IAeY0HQ8TbupjYvMHMObg3j9I4lQnqZMiWNzuQw,1445
  algo_backend/intercept/http.py,sha256=C_N2nyErFhdOZ1LPQ6iU_JCy4fFYEucDwhWJb-qBnbc,1381
  algo_backend/intercept/validate.py,sha256=FBaLc2CRxtIjfjudOH41nwAX7jKXlWJHgUFooC19deI,2949
- algo_backend/log/__init__.py,sha256=0bRk_Y5olw95oIR7DabfT-1gJv_WdXhe7ToufAr5VKo,36
- algo_backend/log/common.py,sha256=1h9zuCuoas3qUr9yG2fvm_3WNYVQW2eH5DJzS0IMvFQ,355
- algo_backend/log/loguru/__init__.py,sha256=zOfdTmDUf6bFeZeZYbXEg1EMMm3ju90k5dtdpN7Sn0s,177
- algo_backend/log/loguru/log_clean.py,sha256=_BVF9dLooTGIgwctjuqba6hEUUvHww8JuUzFHO88GEM,4590
- algo_backend/log/loguru/log_setup.py,sha256=Xnw6JCB8gBfm6z-KxTSdenoxfsxwiZGWOs75DpE0N3w,3010
- algo_backend/log/loguru/log_starter.py,sha256=2YsLrdNPlF0Mgu4hduEYJGidLtnkFgtoMlzEzOJyuUc,1841
- algo_backend/log/loguru/patch_logging.py,sha256=GS8x0MmIjqaY5nmiPgjU01yNG1AvbOrTU8xKzwb2Vdc,2796
- algo_backend/log/nblog/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ algo_backend/log/__init__.py,sha256=OdDLh1EbsC9dI9xIsDVI7ACVYRcpkTL_FecpuJoYV30,72
+ algo_backend/log/common.py,sha256=FTIqowHmsdhr8t4ZsA-lOKyzC7M2_BcC2kP1v7UNp4I,676
  algo_backend/metrics/__init__.py,sha256=20CPEFgi0yJ2Fy-LTcFQ3aqlXHhLPtgPFboqOMBhl34,640
  algo_backend/metrics/http_metrics.py,sha256=3GYNhKJoS6oJ8L7FXnVkPZ_wa-SKj_444KLdBeGkbs4,1944
- algo_backend/metrics/prometheus_context.py,sha256=GqemLccNbTx_5DBijOyzySzWydbpWYfnhWh17rujjwE,1910
- algo_backend/metrics/time_cost_metrics.py,sha256=wXSv3yN6_Eq55KduGy9tIX7sZWsdHI9YBnWoSsT9sZE,4895
+ algo_backend/metrics/prometheus_context.py,sha256=Eop1PqHr9k5JpNPQyt0rtgZwmjl7wM8jTckh8FoTlI8,1449
+ algo_backend/metrics/time_cost_metrics.py,sha256=FyqNOJD6ICvcCXA5Fm60yd4nsRYuK7480W38ODWr6J8,6051
  algo_backend/metrics/collector/__init__.py,sha256=t5Csl-Z9qpik-isEUVYDYBWYgf7xdM49548YfJr4X64,413
  algo_backend/metrics/collector/common.py,sha256=yeiZp9ufbYSW7N-R6ytk74UF0dCqJ8r338lL64TmA4g,475
  algo_backend/metrics/collector/gc_metrics.py,sha256=s2ariKimji9LC71eAQFlrFkpcF3hkSy5IBC6D8LFePc,2510
@@ -33,16 +27,18 @@ algo_backend/metrics/collector/system_metrics.py,sha256=SxOswHKGKLDB5wMsKjd7rkQt
  algo_backend/middleware/__init__.py,sha256=KfSRT3TZ89BzY__Rrbu-9HkjO1LToD5sXyl_J4Po7Q4,173
  algo_backend/middleware/cors.py,sha256=kKdPeZMS38IVvOueJz34lQX1t1hH5OOz068IdIAR-Kc,271
  algo_backend/middleware/metrics.py,sha256=XwZGipQwKp20naZglx41Wc79rXud81Y4AIfswUJj3jM,385
- algo_backend/schema/__init__.py,sha256=STf9SdNuP5dzMDehdGjJGHQ7Pq-qO4jw-5CWsayvx5g,90
+ algo_backend/schema/__init__.py,sha256=WhbHgUnPG3B69qJlo-lAmtRhu_LfSwjkBbByLASNcFY,141
+ algo_backend/schema/sse.py,sha256=m6akZ1dHfXPs0hs6ijnbDtj8xChP1WKJAxVGSEQ5O6g,933
  algo_backend/schema/vo.py,sha256=CSYNHpJx2YNCZ5-4GjjSs6BNKiJEyl31gnplnxGvXfY,2211
- algo_backend/starter/__init__.py,sha256=2AfidtR7PmlhVPz65tt_t3QdIYqpp0mVg8M-lbJE2F4,194
- algo_backend/starter/default_app_generator.py,sha256=fdNPiCyhEQiGCO7NQf9s4urvUXr-iqBcJp1deX9Xu2E,6209
- algo_backend/starter/default_service_starter.py,sha256=QHVDAS4css7tYCH7Fd_is2uWX8fxV1GD7LAugx904B8,2017
+ algo_backend/starter/__init__.py,sha256=eudOlb2XZeZRog9wQK8LVZzxpVQa5Ee9vAWjlH2xATU,261
+ algo_backend/starter/app_mounter.py,sha256=YTX1hp5fQ1VafNdCQbFCBYZ6kr1gC9M8RBiBCEMnp98,812
+ algo_backend/starter/default_app_generator.py,sha256=w-E7Jzc25t5jstsslLGvFOX68kao1hu6oEUYc6C5f-4,6114
+ algo_backend/starter/default_service_starter.py,sha256=dxeTK5PvoYg9b2Ey4EdR2k2wdHw1-Piutw9T0Uwdhqo,1952
  algo_backend/starter/event_list.py,sha256=vQHzQIpW8LZmQ93YyET-1gX6pQGVE5A6I_pLoYTFOH0,824
  algo_backend/utils/__init__.py,sha256=oX6OyL-28jzc94u4fyH1TtntCzQySkfZ8jibMk1KPU8,168
  algo_backend/utils/meta_class.py,sha256=hcZPGF7EIHvJOXXR82_7Gah_AWbqkcSxUc473I_6maY,1850
  algo_backend/utils/utils.py,sha256=q3bxBrivndLRggWsLryloSpu-_Ecbj4mhZL8oYdiDTo,481
- algo_backend_framework-0.0.4.dist-info/METADATA,sha256=dX_eOI1tqlVzXmJjDu9S6J7h8EtEtWyOeFAysoSp5CI,2196
- algo_backend_framework-0.0.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- algo_backend_framework-0.0.4.dist-info/top_level.txt,sha256=zLsbLTRV1tO2hQfazqiBLO73VnjSAhJSUpMMBmQaLfw,13
- algo_backend_framework-0.0.4.dist-info/RECORD,,
+ algo_backend_framework-0.0.6.dist-info/METADATA,sha256=Kcqz4gE5Bq9tb2bWiUSpVWmIjc0HZ-hOJbvHTuWhf5Y,2171
+ algo_backend_framework-0.0.6.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ algo_backend_framework-0.0.6.dist-info/top_level.txt,sha256=zLsbLTRV1tO2hQfazqiBLO73VnjSAhJSUpMMBmQaLfw,13
+ algo_backend_framework-0.0.6.dist-info/RECORD,,
@@ -1,19 +0,0 @@
- from typing import List
-
- from algo_backend.utils import OsAttrMeta
-
-
- class LoguruConfig(metaclass=OsAttrMeta):
-     LOGGER_PATH: str = "/logger"
-     LOG_RETENTION_DAY: int = 60
-     DISABLE_LOG_PKG: str = ""
-     LOG_ADD_CONTAINED_ID: bool = False
-     SAVE_INFO_LEVEL: bool = False
-     SAVE_DEBUG_LOG: bool = True
-
-     @classmethod
-     def get_disable_log_pkg(cls) -> List[str]:
-         if cls.DISABLE_LOG_PKG:
-             return cls.DISABLE_LOG_PKG.split(",")
-         else:
-             return []
@@ -1,5 +0,0 @@
- from .log_clean import LoguruCleaner
- from .log_setup import LoguruSetup
- from .log_starter import LoguruStarter
-
- __all__ = ["LoguruCleaner", "LoguruSetup", "LoguruStarter"]
@@ -1,140 +0,0 @@
- import os
- import threading
- import time
- from datetime import datetime, timedelta
- from typing import List, Optional
-
- from loguru import logger
- from pydantic import BaseModel, Field
-
-
- class LogInfo(BaseModel):
-     """
-     Log file information
-     """
-
-     file_path: str = Field(..., description="log file path")
-     file_size: Optional[int] = None  # log file size
-     create_time: Optional[datetime] = None  # log file creation time
-     modify_time: Optional[datetime] = None  # log file modification time
-     access_time: Optional[datetime] = None  # log file access time
-
-     @classmethod
-     def gen(cls, file_path: str):
-         if not os.path.exists(file_path):
-             return cls(file_path=file_path)
-         return cls(
-             file_path=file_path,
-             file_size=os.path.getsize(file_path),
-             create_time=datetime.fromtimestamp(os.path.getctime(file_path)),
-             modify_time=datetime.fromtimestamp(os.path.getmtime(file_path)),
-             access_time=datetime.fromtimestamp(os.path.getatime(file_path)),
-         )
-
-
- class LoguruCleaner:
-     """
-     Cleans up long-lived logs; keeps 60 days of logs by default
-     """
-
-     def __init__(self, log_dir: str, retention_day: int = 60):
-         """
-         :param log_dir: log directory
-         :param retention_day: number of days to retain
-         """
-         self.retention_day = retention_day
-         self.log_dir = log_dir
-         self.current_time: datetime = datetime.now()
-         self.threshold_time: datetime = self.current_time - timedelta(
-             days=self.retention_day
-         )
-
-     def scan_log(self) -> List[LogInfo]:
-         """
-         Scan the log directory and return all log files
-         """
-         log_files = []
-         if os.path.exists(self.log_dir):
-             log_files = [
-                 LogInfo.gen(os.path.join(self.log_dir, file))
-                 for file in os.listdir(self.log_dir)
-                 if file.endswith(".log")
-             ]
-         return log_files
-
-     def judge_is_old_log(self, log_file: LogInfo) -> bool:
-         """
-         Decide whether a log file has expired
-         Expiry criterion:
-         """
-         if log_file.modify_time and log_file.modify_time < self.threshold_time:
-             return True
-         return False
-
-     def extract_old_log(self) -> List[LogInfo]:
-         """
-         Extract expired log files
-         """
-         log_files = self.scan_log()
-
-         if not log_files:
-             logger.debug("No log files found")
-             return []
-
-         logger.debug(
-             f"[ExtractOldLogFiles]: scan [{len(log_files)}] logs from [{self.log_dir}]"
-         )
-
-         old_log_files = [o for o in log_files if self.judge_is_old_log(o)]
-
-         logger.debug(
-             f"[ExtractOldLogFiles]: extract [{len(old_log_files)}] old logs, threshold day [{self.threshold_time}]"
-         )
-
-         return old_log_files
-
-     def delete_log(self, log_file: List[LogInfo]):
-         """
-         Delete log files
-         """
-         if log_file:
-             logger.debug(f"[DeleteLogFiles]: Start to delete [{len(log_file)}] logs")
-             cnt = 0
-             for log in log_file:
-                 try:
-                     os.remove(log.file_path)
-                     logger.debug(f"[DeleteLogFiles]: Delete log [{log.file_path}]")
-                     cnt += 1
-                 except FileNotFoundError as e:
-                     logger.warning(
-                         f"[DeleteLogFiles]: Failed to delete log [{log.file_path}], error [{e}]"
-                     )
-
-             logger.debug(f"[DeleteLogFiles]: Delete [{cnt}]/[{len(log_file)}] logs")
-
-     def delete_lod_log(self):
-         """
-         Delete outdated log files
-         """
-         old_logs = self.extract_old_log()
-         self.delete_log(old_logs)
-
-     @classmethod
-     def schedule_run(cls, log_dir: str, retention_day: int = 60):
-         """
-         Start a thread that cleans logs on a schedule
-         """
-         # Create the thread
-         interval = 60 * 60 * 24
-
-         def worker():
-             while True:
-                 log_cleaner = LoguruCleaner(log_dir, retention_day)
-                 logger.debug(
-                     f"pid={os.getpid()} | [ScheduleRun]: Start to run log cleaner, log dir [{log_dir}], retention day [{retention_day}]"
-                 )
-                 log_cleaner.delete_lod_log()
-                 time.sleep(interval)  # pause for one day
-
-         thread = threading.Thread(target=worker, daemon=True)
-         thread.start()
@@ -1,89 +0,0 @@
- import os
- import sys
- from typing import Optional
-
- from loguru import logger
-
- from .patch_logging import patch_logging_to_loguru
-
-
- class LoguruSetup:
-     FORMAT = (
-         "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
-         "<level>{level: <8}</level> | "
-         "<cyan>p-{process}</cyan> | "
-         "<cyan>t-{thread}</cyan> | "
-         "<cyan>{thread.name}</cyan> | "
-         "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - "
-         "<level>{message}</level>"
-     )
-
-     __IS_SET_ROTATE = False
-
-     @classmethod
-     def rotate_daily(
-         cls,
-         *,
-         log_dir: str,
-         service_name: str,
-         add_pid_suffix: bool = True,
-         save_info: bool = True,
-         save_debug: bool = True,
-         stderr_colorize: bool = True,
-         run_id_suffix: Optional[str] = None,
-     ):
-         """
-         Log output to the terminal and to disk
-         :param log_dir: log directory
-         :param service_name: service name
-         :param add_pid_suffix: whether to append a process-id suffix
-         :param save_info: whether to save INFO-level logs
-         :param save_debug: whether to save DEBUG-level logs
-         :param stderr_colorize: whether to colorize terminal output
-         :param run_id_suffix: run-id suffix, used to avoid clashes between replicas
-         """
-         if cls.__IS_SET_ROTATE:
-             return
-
-         logger.remove()  # clear existing sinks to avoid duplicates
-
-         os.makedirs(log_dir, exist_ok=True)
-         pid_suffix = f"_{os.getpid()}" if add_pid_suffix else ""
-         run_id_suffix = f"_r{run_id_suffix}" if run_id_suffix else ""
-
-         # Add the terminal handler (console output)
-         logger.add(
-             sink=sys.stderr,  # write to standard error
-             level="DEBUG",  # show more detailed DEBUG logs on the terminal
-             format=cls.FORMAT,
-             colorize=stderr_colorize,  # enable colored output
-             backtrace=True,  # show stack traces on the terminal
-         )
-
-         if save_info:
-             # Configure INFO-level-and-above logs
-             logger.add(
-                 os.path.join(
-                     log_dir, f"{service_name}_info{pid_suffix}{run_id_suffix}.log"
-                 ),
-                 rotation="1 day",  # daily rotation
-                 filter=lambda record: record["level"].no >= 20,
-                 format=cls.FORMAT,
-                 enqueue=True,
-             )
-
-         if save_debug:
-             # Configure DEBUG-level logs
-             logger.add(
-                 os.path.join(
-                     log_dir, f"{service_name}_debug{pid_suffix}{run_id_suffix}.log"
-                 ),
-                 rotation="1 day",  # daily rotation
-                 level="DEBUG",
-                 filter=lambda record: record["level"].no >= 10,
-                 format=cls.FORMAT,
-                 enqueue=True,
-             )
-         patch_logging_to_loguru()
-         logger.info("Log setup complete")
-         cls.__IS_SET_ROTATE = True
@@ -1,65 +0,0 @@
- import os
- import socket
-
- from loguru import logger
-
- from algo_backend.config import LoguruConfig as LogConfig
-
- from ..common import BasicLogStarter
- from .log_clean import LoguruCleaner
- from .log_setup import LoguruSetup
-
-
- class LoguruStarter(BasicLogStarter):
-     """
-     The default log directory inside the container is /logger/<service name>
-     """
-
-     def __init__(self, service_name: str):
-         super().__init__(service_name)
-
-     @property
-     def service_log_dir(self):
-         return os.path.join(LogConfig.LOGGER_PATH, self.service_name)
-
-     def setup_log(self):
-         """
-         Log setup
-         """
-
-         LoguruSetup.rotate_daily(
-             log_dir=self.service_log_dir,
-             service_name=self.add_container_id(service_name=self.service_name),
-             add_pid_suffix=True,
-             save_info=LogConfig.SAVE_INFO_LEVEL,
-             save_debug=LogConfig.SAVE_DEBUG_LOG,
-         )
-
-         for pkg in LogConfig.get_disable_log_pkg():
-             # Ignore logs from certain packages
-             logger.debug(f"ignore log: {pkg}")
-             logger.disable(pkg)
-
-     def run_log_cleaner(self):
-         """
-         Start the scheduled log-cleaning task
-         """
-         LoguruCleaner.schedule_run(
-             log_dir=self.service_log_dir,
-             retention_day=LogConfig.LOG_RETENTION_DAY,
-         )
-
-     @classmethod
-     def add_container_id(cls, service_name: str):
-         if not LogConfig.LOG_ADD_CONTAINED_ID:
-             logger.info("containerId not appended to log file name")
-             return service_name
-
-         try:
-             socket_hostname = f"-{socket.gethostname()}"
-         except:
-             socket_hostname = ""
-         if service_name in socket_hostname:
-             return service_name
-         else:
-             return f"{service_name}{socket_hostname}"
@@ -1,83 +0,0 @@
- import logging
-
- from loguru import logger
-
- from algo_backend.config import LoguruConfig
-
- disable_log_pkg = LoguruConfig.get_disable_log_pkg()
-
-
- def patch_logging_to_loguru():
-     """
-     Redirect all logs from Python's native logging system to loguru
-     """
-
-     class LoguruHandler(logging.Handler):
-         def emit(self, record):
-             # Filter out logs from specific modules
-             if any(excluded in record.name for excluded in disable_log_pkg):
-                 return
-
-             try:
-                 level = logger.level(record.levelname).name
-             except ValueError:
-                 level = record.levelno
-
-             # Starting from the current frame, find the real caller of logging
-             frame = logging.currentframe()
-             depth = 0  # start from 0 and determine the depth dynamically
-
-             # Walk the call stack, skipping internal logging-module calls
-             while frame:
-                 filename = frame.f_code.co_filename
-                 func_name = frame.f_code.co_name
-
-                 # Check whether this is an internal logging-module call
-                 is_logging_internal = (
-                     # standard-library logging module path
-                     "logging" in filename
-                     and (
-                         filename.endswith("logging/__init__.py")
-                         or "/logging/" in filename
-                         or "\\logging\\" in filename
-                     )
-                 ) or (
-                     # internal logging function names
-                     func_name
-                     in (
-                         "callHandlers",
-                         "handle",
-                         "emit",
-                         "handleError",
-                         "_log",
-                         "makeRecord",
-                         "getLogger",
-                         "debug",
-                         "info",
-                         "warning",
-                         "error",
-                         "exception",
-                         "critical",
-                     )
-                 )
-
-                 if is_logging_internal:
-                     frame = frame.f_back
-                     depth += 1
-                 else:
-                     # Found the real caller; break out of the loop
-                     break
-
-             # If no suitable frame is found, fall back to the default behavior
-             if not frame:
-                 depth = 2
-
-             logger.opt(depth=depth, exception=record.exc_info).log(
-                 level, record.getMessage()
-             )
-
-     # Set the root logger to the lowest level so every record is handled
-     logging.root.setLevel(logging.DEBUG)
-
-     logging.root.handlers = []
-     logging.root.addHandler(LoguruHandler())