algo-backend-framework 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. algo_backend/__init__.py +0 -0
  2. algo_backend/config/__init__.py +8 -0
  3. algo_backend/config/basic_config.py +13 -0
  4. algo_backend/config/loguru_config.py +19 -0
  5. algo_backend/exception/__init__.py +22 -0
  6. algo_backend/exception/error_code_manage.py +126 -0
  7. algo_backend/exception/exception.py +42 -0
  8. algo_backend/exception/status_code.py +103 -0
  9. algo_backend/handler/__init__.py +3 -0
  10. algo_backend/handler/exception_to_vo.py +37 -0
  11. algo_backend/handler/operation_handler.py +71 -0
  12. algo_backend/intercept/__init__.py +9 -0
  13. algo_backend/intercept/common.py +45 -0
  14. algo_backend/intercept/http.py +40 -0
  15. algo_backend/intercept/validate.py +78 -0
  16. algo_backend/log/__init__.py +1 -0
  17. algo_backend/log/common.py +16 -0
  18. algo_backend/log/loguru/__init__.py +5 -0
  19. algo_backend/log/loguru/log_clean.py +140 -0
  20. algo_backend/log/loguru/log_setup.py +89 -0
  21. algo_backend/log/loguru/log_starter.py +65 -0
  22. algo_backend/log/loguru/patch_logging.py +83 -0
  23. algo_backend/log/nblog/__init__.py +0 -0
  24. algo_backend/metrics/__init__.py +22 -0
  25. algo_backend/metrics/collector/__init__.py +12 -0
  26. algo_backend/metrics/collector/common.py +17 -0
  27. algo_backend/metrics/collector/gc_metrics.py +74 -0
  28. algo_backend/metrics/collector/schedule_monitor.py +50 -0
  29. algo_backend/metrics/collector/system_metrics.py +169 -0
  30. algo_backend/metrics/http_metrics.py +56 -0
  31. algo_backend/metrics/prometheus_context.py +55 -0
  32. algo_backend/metrics/time_cost_metrics.py +146 -0
  33. algo_backend/middleware/__init__.py +4 -0
  34. algo_backend/middleware/cors.py +10 -0
  35. algo_backend/middleware/metrics.py +12 -0
  36. algo_backend/schema/__init__.py +3 -0
  37. algo_backend/schema/vo.py +83 -0
  38. algo_backend/starter/__init__.py +4 -0
  39. algo_backend/starter/default_app_generator.py +169 -0
  40. algo_backend/starter/default_service_starter.py +70 -0
  41. algo_backend/starter/event_list.py +32 -0
  42. algo_backend/utils/__init__.py +8 -0
  43. algo_backend/utils/meta_class.py +50 -0
  44. algo_backend/utils/utils.py +22 -0
  45. algo_backend_framework-0.0.1.dist-info/METADATA +60 -0
  46. algo_backend_framework-0.0.1.dist-info/RECORD +48 -0
  47. algo_backend_framework-0.0.1.dist-info/WHEEL +5 -0
  48. algo_backend_framework-0.0.1.dist-info/top_level.txt +1 -0

algo_backend/metrics/collector/system_metrics.py
@@ -0,0 +1,169 @@
+ import asyncio
+ import logging
+ import os
+ import threading
+ from abc import ABC
+ from typing import List, Type
+
+ import psutil
+ from prometheus_client import Gauge
+
+ from .common import AbstractMetricCollector
+
+ logger = logging.getLogger(__name__)
+
+
+ class AbstractSystemMetricCollector(AbstractMetricCollector, ABC):
+     """Abstract base class for system metric collectors."""
+
+     def __init__(self, process: psutil.Process, pid: int, **kwargs):
+         super().__init__()
+         self.process = process
+         self.pid = pid
+
+
+ class CPUMetricCollector(AbstractSystemMetricCollector):
+     """CPU usage metric collector."""
+
+     def __init__(self, process: psutil.Process, pid: int):
+         super().__init__(process, pid)
+         self.gauge = Gauge(
+             "cpu_usage_percent",
+             "CPU usage in percent",
+             ["pid"],
+             multiprocess_mode="all",
+         )
+
+     async def collect(self):
+         cpu_usage = self.process.cpu_percent()
+         self.gauge.labels(pid=self.pid).set(cpu_usage)
+
+
+ class MemoryMetricCollector(AbstractSystemMetricCollector):
+     """Memory usage metric collector."""
+
+     def __init__(self, process: psutil.Process, pid: int):
+         super().__init__(process, pid)
+         self.gauge_rss = Gauge(
+             "resident_memory_usage_mb",
+             "Resident Memory usage in MB",
+             ["pid"],
+             multiprocess_mode="all",
+         )
+         self.gauge_vms = Gauge(
+             "virtual_memory_usage_mb",
+             "Virtual Memory usage in MB",
+             ["pid"],
+             multiprocess_mode="all",
+         )
+
+     async def collect(self):
+         memory_info = self.process.memory_info()
+         rss = memory_info.rss / (1024 * 1024)  # resident set size
+         vms = memory_info.vms / (1024 * 1024)  # virtual memory size
+         self.gauge_rss.labels(pid=self.pid).set(rss)
+         self.gauge_vms.labels(pid=self.pid).set(vms)
+
+
+ class ThreadCountMetricCollector(AbstractSystemMetricCollector):
+     """Thread count metric collector."""
+
+     def __init__(self, process: psutil.Process, pid: int):
+         super().__init__(process, pid)
+         self.gauge_active_thread_count = Gauge(
+             "active_thread_count",
+             "Number of active threads",
+             ["pid"],
+             multiprocess_mode="all",
+         )
+         self.gauge_pid_thread_count = Gauge(
+             "thread_count", "Number of Pid threads", ["pid"], multiprocess_mode="all"
+         )
+
+     async def collect(self):
+         pid_thread_count = self.process.num_threads()
+         self.gauge_pid_thread_count.labels(pid=self.pid).set(pid_thread_count)
+         self.gauge_active_thread_count.labels(pid=self.pid).set(
+             threading.active_count()
+         )
+
+
+ class FileMetricCollector(AbstractSystemMetricCollector):
+     """Open file descriptor count metric collector."""
+
+     def __init__(self, process: psutil.Process, pid: int):
+         super().__init__(process, pid)
+         self.gauge = Gauge(
+             "open_fds",
+             "Number of open files",
+             ["pid"],
+             multiprocess_mode="all",
+         )
+
+     async def collect(self):
+         file_descriptor_count = len(self.process.open_files())
+         self.gauge.labels(pid=self.pid).set(file_descriptor_count)
+
+
+ class SystemMetricsMonitor(AbstractMetricCollector):
+     """System metrics monitor - assembled from individual collectors (composition pattern)."""
+
+     def __init__(self, interval_sec: int = 30):
+         super().__init__(interval_sec=interval_sec)
+         self.pid = os.getpid()
+         self.process = psutil.Process(self.pid)
+         self.collectors: List[AbstractSystemMetricCollector] = []
+
+     def add(
+         self, collector_classes: Type[AbstractSystemMetricCollector], **kwargs
+     ) -> "SystemMetricsMonitor":
+         self.collectors.append(collector_classes(self.process, self.pid, **kwargs))
+         return self
+
+     def cpu(self):
+         return self.add(CPUMetricCollector)
+
+     def memory(self):
+         return self.add(MemoryMetricCollector)
+
+     def thread(self):
+         return self.add(ThreadCountMetricCollector)
+
+     def file(self):
+         return self.add(FileMetricCollector)
+
+     def register_default_collectors(self) -> "SystemMetricsMonitor":
+         """
+         Register the default set of metric collectors.
+         """
+         self.add(CPUMetricCollector)
+         self.add(MemoryMetricCollector)
+         self.add(ThreadCountMetricCollector)
+         self.add(FileMetricCollector)
+         return self
+
+     async def collect(self):
+         for collector in self.collectors:
+             await collector.collect()
+
+     async def run_monitor(self, interval_sec: int = 10):
+         """Run the periodic monitoring task."""
+
+         self.set_interval(interval_sec)
+
+         async def work():
+             while True:
+                 # Run the collection task of every registered collector.
+                 for collector in self.collectors:
+                     await collector.collect()
+
+                 await asyncio.sleep(interval_sec)
+
+         if len(self.collectors) == 0:
+             self.register_default_collectors()
+
+         logger.info(
+             f"Start system metrics monitor, interval: {interval_sec}s, collector num = {len(self.collectors)}"
+         )
+
+         asyncio.create_task(work())
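
Usage note (not part of the package diff): a minimal sketch of how the monitor above could be wired into a FastAPI startup hook. The fluent cpu()/memory()/thread()/file() calls and run_monitor() come from the module above; the app object, handler name, and interval are illustrative assumptions.

    from fastapi import FastAPI
    from algo_backend.metrics.collector.system_metrics import SystemMetricsMonitor

    app = FastAPI()

    @app.on_event("startup")
    async def start_system_metrics():
        # Register the CPU, memory, thread and file-descriptor collectors,
        # then start the background task that samples them every 30 seconds.
        monitor = SystemMetricsMonitor().cpu().memory().thread().file()
        await monitor.run_monitor(interval_sec=30)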

algo_backend/metrics/http_metrics.py
@@ -0,0 +1,56 @@
+ import os
+ from typing import List, Optional
+
+ from prometheus_client import Counter, Histogram
+ from starlette.middleware.base import BaseHTTPMiddleware
+ from starlette.requests import Request
+
+
+ class RequestTimeCostMiddleware(BaseHTTPMiddleware):
+     def __init__(
+         self,
+         app,
+         buckets: Optional[List[float]] = None,
+         ignore_paths: Optional[List[str]] = None,
+     ):
+         """
+         :param buckets: defaults to [0.1, 0.5, 1, 3, 5]
+         :param ignore_paths: nothing is ignored by default, but users are advised to ignore /metrics
+         """
+         super().__init__(app)
+
+         default_buckets = [0.1, 0.5, 1, 3, 5]
+         self.buckets = buckets if buckets is not None else default_buckets
+         self.ignore_paths = ignore_paths if ignore_paths is not None else []
+
+         self.request_duration = Histogram(
+             "http_request_duration_seconds",
+             "Request duration in seconds",
+             ["uri", "pid"],
+             buckets=self.buckets,
+         )
+
+         self.error_requests = Counter(
+             "http_error_requests", "error HTTP requests", ["uri", "status"]
+         )
+
+     async def dispatch(self, request: Request, call_next):
+         # Normalize the path: strip the trailing slash (except for the root path).
+         path = request.url.path
+         normalized_path = path.rstrip("/") if path != "/" else path
+
+         if normalized_path in self.ignore_paths:
+             return await call_next(request)
+         else:
+             with self.request_duration.labels(
+                 uri=normalized_path, pid=os.getpid()
+             ).time():
+                 response = await call_next(request)
+
+             # Record error responses.
+             if response.status_code >= 400:
+                 self.error_requests.labels(
+                     uri=normalized_path, status=response.status_code
+                 ).inc()
+
+             return response
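
Usage note (not part of the diff): a sketch of attaching the middleware to a FastAPI app while ignoring the /metrics path, as the docstring above recommends. The import path assumes RequestTimeCostMiddleware is re-exported from algo_backend.metrics, which the middleware/metrics.py hunk further down relies on; the app object and bucket values are illustrative.

    from fastapi import FastAPI
    from algo_backend.metrics import RequestTimeCostMiddleware

    app = FastAPI()
    # Record a per-URI request duration histogram, but skip the metrics endpoint itself.
    app.add_middleware(
        RequestTimeCostMiddleware,
        buckets=[0.1, 0.5, 1, 3, 5],
        ignore_paths=["/metrics"],
    )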

algo_backend/metrics/prometheus_context.py
@@ -0,0 +1,55 @@
+ import logging
+ import os
+ import shutil
+
+ from fastapi import FastAPI
+
+ logger = logging.getLogger(__name__)
+
+
+ class PrometheusContext:
+     PROM_MULTI_PROC_OS_VAR = "PROMETHEUS_MULTIPROC_DIR"
+     DEFAULT_PROMETHEUS_MULTIPROC_DIR = "/temp/prometheus_multiproc_dir"
+
+     @classmethod
+     def get_prometheus_multiproc_dir(cls):
+         return os.environ.get(
+             cls.PROM_MULTI_PROC_OS_VAR, cls.DEFAULT_PROMETHEUS_MULTIPROC_DIR
+         )
+
+     @classmethod
+     def init(cls):
+         """
+         Initialize the Prometheus multiprocessing directory.
+         """
+         prom_dir = os.environ.get(cls.PROM_MULTI_PROC_OS_VAR, None)
+         if not prom_dir:
+             logger.warning(
+                 f"Prometheus multiprocessing directory is not set. Using default directory {cls.DEFAULT_PROMETHEUS_MULTIPROC_DIR}."
+             )
+             prom_dir = cls.DEFAULT_PROMETHEUS_MULTIPROC_DIR
+             os.environ["prometheus_multiproc_dir"] = prom_dir
+         else:
+             logger.info(
+                 f"Using Prometheus multiprocessing directory from environment variable: {prom_dir}"
+             )
+
+         if os.path.exists(prom_dir):
+             shutil.rmtree(prom_dir)
+             logger.info(
+                 f"Removed existing Prometheus multiprocessing directory: {prom_dir}"
+             )
+
+         os.makedirs(prom_dir, exist_ok=True)
+         logger.info(f"Created new Prometheus multiprocessing directory: {prom_dir}")
+
+     @classmethod
+     def mount_prometheus_endpoint(cls, app: FastAPI):
+         """
+         Mount the Prometheus metrics endpoint on the given application.
+         """
+         from prometheus_client import CollectorRegistry, make_asgi_app, multiprocess
+
+         registry = CollectorRegistry()
+         multiprocess.MultiProcessCollector(registry=registry)
+         app.mount("/metrics", make_asgi_app(registry=registry))
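
Usage note (not part of the diff): the call order implied by the class above — prepare the multiprocess directory before any metrics are created, then mount the scrape endpoint on the application. The app variable is illustrative.

    from fastapi import FastAPI
    from algo_backend.metrics.prometheus_context import PrometheusContext

    # Ensure the multiprocess directory exists (and is wiped) before collectors are registered.
    PrometheusContext.init()

    app = FastAPI()
    # Serve aggregated multiprocess metrics at /metrics.
    PrometheusContext.mount_prometheus_endpoint(app)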

algo_backend/metrics/time_cost_metrics.py
@@ -0,0 +1,146 @@
+ import logging
+ import os
+ import time
+ from functools import wraps
+ from typing import Awaitable, Callable, List, Optional
+
+ from prometheus_client import Counter, Histogram
+
+ logger = logging.getLogger(__name__)
+
+
+ class BasicTimeCostMetrics:
+     """
+     Use this class if Prometheus is enabled.
+     """
+
+     def __init__(self, buckets: Optional[List[float]] = None):
+         default_buckets = [2, 5, 10, 30, 60, 120, 180, 240, 300]
+         self.buckets = buckets if buckets is not None else default_buckets
+         self.pid = os.getpid()
+
+     def add(self, key, cost): ...
+
+     def add_error(self, key): ...
+
+
+ class ApiTimeCostMetrics(BasicTimeCostMetrics):
+     """
+     Time-cost statistics for API operations.
+     Metric names:
+     operation_duration_seconds_count
+     operation_duration_seconds_bucket
+     operation_errors_total
+     """
+
+     def __init__(self, buckets: Optional[List[float]] = None):
+         super().__init__(buckets=buckets)
+
+         self.ops_duration = Histogram(
+             "operation_duration_seconds",
+             "Api operation duration in seconds",
+             ["operation", "pid"],
+             buckets=self.buckets,
+         )
+         self.ops_error_cnt = Counter(
+             "operation_errors_total",
+             "The total number of errors in operations.",
+             ["operation", "pid"],
+         )
+
+     def add(self, key, cost):
+         self.ops_duration.labels(operation=key, pid=self.pid).observe(cost)
+
+     def add_error(self, key):
+         self.ops_error_cnt.labels(operation=key, pid=self.pid).inc(1)
+
+
+ class ClientTimeCostMetrics(BasicTimeCostMetrics):
+     """
+     Time-cost statistics for client requests.
+     """
+
+     def __init__(self, buckets: Optional[List[float]] = None):
+         super().__init__(buckets=buckets)
+
+         self.duration = Histogram(
+             "client_duration_seconds",
+             "Client response duration in seconds",
+             ["client", "pid"],
+             buckets=self.buckets,
+         )
+         self.error_cnt = Counter(
+             "client_errors_total",
+             "The total number of errors in client response.",
+             ["client", "pid"],
+         )
+
+     def add(self, key, cost):
+         self.duration.labels(client=key, pid=self.pid).observe(cost)
+
+     def add_error(self, key):
+         self.error_cnt.labels(client=key, pid=self.pid).inc(1)
+
+
+ class PrometheusTimeCostMetricSetting:
+     __default_metrics = BasicTimeCostMetrics()
+     __api_metrics = None
+     __client_metrics = None
+
+     @classmethod
+     def initialize(
+         cls,
+         *,
+         api_metrics_buckets: Optional[List[int]] = None,
+         client_metrics_buckets: Optional[List[int]] = None,
+     ):
+         if api_metrics_buckets is None:
+             cls.__api_metrics = ApiTimeCostMetrics()
+         elif len(api_metrics_buckets) == 0:
+             cls.__api_metrics = BasicTimeCostMetrics()
+         else:
+             cls.__api_metrics = ApiTimeCostMetrics(buckets=list(api_metrics_buckets))
+
+         if client_metrics_buckets is None:
+             cls.__client_metrics = ClientTimeCostMetrics()
+         elif len(client_metrics_buckets) == 0:
+             cls.__client_metrics = BasicTimeCostMetrics()
+         else:
+             cls.__client_metrics = ClientTimeCostMetrics(
+                 buckets=list(client_metrics_buckets)
+             )
+
+     @classmethod
+     def api_metrics_instance(cls) -> BasicTimeCostMetrics:
+         return cls.__api_metrics or cls.__default_metrics
+
+     @classmethod
+     def client_metrics_instance(cls) -> BasicTimeCostMetrics:
+         return cls.__client_metrics or cls.__default_metrics
+
+     @classmethod
+     def metrics_handler(cls, key: str, metrics: BasicTimeCostMetrics):
+         def decorator(func: Callable[..., Awaitable]) -> Callable[..., Awaitable]:
+             @wraps(func)
+             async def wrapper(*args, **kwargs):
+                 try:
+                     start = time.perf_counter()
+                     result = await func(*args, **kwargs)
+                     cost = time.perf_counter() - start
+                     metrics.add(key, cost)
+                     return result
+                 except Exception as e:
+                     metrics.add_error(key)
+                     raise e
+
+             return wrapper
+
+         return decorator
+
+     @classmethod
+     def client_metrics_handler(cls, client_name: str):
+         return cls.metrics_handler(client_name, cls.client_metrics_instance())
+
+     @classmethod
+     def api_metrics_handler(cls, api_name: str):
+         return cls.metrics_handler(api_name, cls.api_metrics_instance())
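
Usage note (not part of the diff): a sketch of initialising the setting once at startup and timing an async call with the client-side handler. The bucket values and the client name "redis" are illustrative; note that initialize() must run before the decorator is applied, since the handler resolves its metrics instance at decoration time.

    from algo_backend.metrics.time_cost_metrics import PrometheusTimeCostMetricSetting

    # Choose histogram buckets once at startup; empty lists fall back to the no-op metrics.
    PrometheusTimeCostMetricSetting.initialize(
        api_metrics_buckets=[1, 5, 10, 30],
        client_metrics_buckets=[1, 5, 10],
    )

    @PrometheusTimeCostMetricSetting.client_metrics_handler("redis")
    async def fetch_from_cache(key: str):
        # Duration is observed in client_duration_seconds;
        # exceptions increment client_errors_total.
        ...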

algo_backend/middleware/__init__.py
@@ -0,0 +1,4 @@
+ from .cors import default_cors_middleware
+ from .metrics import http_request_time_cost_middleware
+
+ __all__ = ["default_cors_middleware", "http_request_time_cost_middleware"]

algo_backend/middleware/cors.py
@@ -0,0 +1,10 @@
+ from starlette.middleware import Middleware
+ from starlette.middleware.cors import CORSMiddleware
+
+ default_cors_middleware = Middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )

algo_backend/middleware/metrics.py
@@ -0,0 +1,12 @@
+ from typing import Optional, List
+
+ from algo_backend.metrics import RequestTimeCostMiddleware
+ from starlette.middleware import Middleware
+
+
+ def http_request_time_cost_middleware(
+     buckets: Optional[List[float]] = None, ignore_paths: Optional[List[str]] = None
+ ):
+     return Middleware(
+         RequestTimeCostMiddleware, buckets=buckets, ignore_paths=ignore_paths
+     )
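
Usage note (not part of the diff): both exports above are starlette Middleware entries (one a constant, one a factory), so a sketch of passing them to FastAPI's middleware list; the app construction and ignore path are illustrative.

    from fastapi import FastAPI
    from algo_backend.middleware import default_cors_middleware, http_request_time_cost_middleware

    app = FastAPI(
        middleware=[
            default_cors_middleware,                                        # permissive CORS defaults
            http_request_time_cost_middleware(ignore_paths=["/metrics"]),  # per-request timing histogram
        ]
    )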

algo_backend/schema/__init__.py
@@ -0,0 +1,3 @@
+ from .vo import AbstractRespVo, BaseRespVo
+
+ __all__ = ["BaseRespVo", "AbstractRespVo"]

algo_backend/schema/vo.py
@@ -0,0 +1,83 @@
+ from abc import ABC, abstractmethod
+ from typing import Generic, Optional, TypeVar
+
+ from pydantic import BaseModel, Field
+
+ from algo_backend.exception import BasicException, BasicStatusCode, CommonStatusCode
+
+ T = TypeVar("T")
+
+
+ class AbstractRespVo(BaseModel, ABC):
+     @classmethod
+     @abstractmethod
+     def success(cls, result: Optional[T]) -> "AbstractRespVo": ...
+
+     @classmethod
+     @abstractmethod
+     def from_exception(
+         cls,
+         e: BasicException,
+         *,
+         api_name: Optional[str] = None,
+         request_id: Optional[str] = None,
+     ) -> "AbstractRespVo":
+         """
+         Convert an exception into a response VO.
+         """
+         ...
+
+     @abstractmethod
+     def set_request_id(self, request_id: Optional[str]) -> "AbstractRespVo":
+         """
+         Attach a requestId to help trace issues.
+         """
+         ...
+
+
+ class BaseRespVo(AbstractRespVo, Generic[T]):
+     """
+     Default VO implementation.
+     """
+
+     code: int
+     message: str
+     result: Optional[T] = Field(None, description="result")
+     requestId: Optional[str] = None
+
+     @classmethod
+     def success(cls, result: Optional[T]) -> "BaseRespVo":
+         return BaseRespVo(
+             code=CommonStatusCode.SUCCESS.code,
+             result=result,
+             message=CommonStatusCode.SUCCESS.msg,
+         )
+
+     @classmethod
+     def from_status_code(cls, status_code: BasicStatusCode, **kwargs) -> "BaseRespVo":
+         return BaseRespVo(
+             code=status_code.code,
+             message=status_code.msg.format(**kwargs),
+             result=None,
+         )
+
+     @classmethod
+     def from_exception(
+         cls,
+         e: BasicException,
+         *,
+         api_name: Optional[str] = None,
+         request_id: Optional[str] = None,
+     ) -> "BaseRespVo":
+         vo = BaseRespVo(
+             code=e.code,
+             message=e.msg,
+             result=None,
+         )
+         vo.set_request_id(request_id)
+         vo.message = f"API {api_name} error: {vo.message}" if api_name else vo.message
+         return vo
+
+     def set_request_id(self, request_id: Optional[str]) -> "BaseRespVo":
+         self.requestId = request_id
+         return self
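
Usage note (not part of the diff): a sketch of returning the default VO from a handler and converting a framework exception. BasicException is imported from algo_backend.exception as in the module above; its construction details are not shown in this hunk, so the payload, handler name, and request id here are illustrative.

    from algo_backend.exception import BasicException
    from algo_backend.schema import BaseRespVo

    def get_user(user_id: int):
        try:
            user = {"id": user_id, "name": "demo"}  # illustrative payload
            return BaseRespVo.success(user).set_request_id("req-123")
        except BasicException as e:
            # Message becomes "API get_user error: <msg>" and carries the request id.
            return BaseRespVo.from_exception(e, api_name="get_user", request_id="req-123")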

algo_backend/starter/__init__.py
@@ -0,0 +1,4 @@
+ from .default_app_generator import DefaultAlgoAppGenerator
+ from .default_service_starter import DefaultAlgoServiceStarter
+
+ __all__ = ["DefaultAlgoAppGenerator", "DefaultAlgoServiceStarter"]