fast-telemetry 0.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- __init__.py +0 -0
- fast_telemetry/__init__.py +10 -0
- fast_telemetry/core.py +232 -0
- fast_telemetry/integrations/__init__.py +0 -0
- fast_telemetry/integrations/fastapi.py +30 -0
- fast_telemetry/integrations/faststream.py +102 -0
- fast_telemetry/integrations/worker.py +124 -0
- fast_telemetry-0.0.0.dist-info/METADATA +11 -0
- fast_telemetry-0.0.0.dist-info/RECORD +11 -0
- fast_telemetry-0.0.0.dist-info/WHEEL +5 -0
- fast_telemetry-0.0.0.dist-info/top_level.txt +2 -0
__init__.py
ADDED
File without changes
fast_telemetry/core.py
ADDED
@@ -0,0 +1,232 @@
import os
import functools
import inspect
from abc import abstractmethod, ABC
from typing import (
    Callable,
    Any,
    ContextManager,
    TypeVar,
    ParamSpec,
    overload,
    Type,
)
from prometheus_client import CollectorRegistry, Counter, Histogram, Gauge, generate_latest

P = ParamSpec("P")
R = TypeVar("R")


class MetricsExporter(ABC):
    """Interface for exporting metrics (e.g. via a /metrics endpoint)."""

    @abstractmethod
    def get_registry(self) -> CollectorRegistry:
        pass

    @abstractmethod
    def generate_latest_metrics(self) -> bytes:
        pass


class MetricsCollector(ABC):
    """Interface for collecting metrics in application code."""

    @property
    @abstractmethod
    def service_name(self) -> str:
        pass

    @abstractmethod
    def inc_error(self, error_type: str | BaseException = "generic") -> None:
        """Increment the error counter."""
        pass

    @abstractmethod
    def timer(self, task_type: str, long_task: bool = False) -> ContextManager[Any]:
        """
        Return a context manager for timing a block of code.
        :param task_type: Task type label.
        :param long_task: If True, record into the long-task histogram.
        """
        pass

    @overload
    def measure_task(self, func: Callable[P, R]) -> Callable[P, R]: ...

    @overload
    def measure_task(
        self, name: str | None = None, *, long_task: bool = False
    ) -> Callable[[Callable[P, R]], Callable[P, R]]: ...

    def measure_task(
        self, arg: str | Callable[P, R] | None = None, long_task: bool = False
    ) -> Callable[P, R] | Callable[[Callable[P, R]], Callable[P, R]]:
        """
        Universal decorator for measuring function execution time.
        Supports both sync and async functions.
        """
        # Case 1: @metrics.measure_task (bare decorator)
        if callable(arg):
            func = arg
            return self._create_timer_wrapper(func, func.__name__, long_task=False)

        # Case 2: @metrics.measure_task("name") or @metrics.measure_task(long_task=True)
        task_name = arg

        def decorator(f: Callable[P, R]) -> Callable[P, R]:
            name = task_name or f.__name__
            return self._create_timer_wrapper(f, name, long_task=long_task)

        return decorator

    def _create_timer_wrapper(self, func: Callable[P, R], task_name: str, long_task: bool) -> Callable[P, R]:
        if inspect.iscoroutinefunction(func):

            @functools.wraps(func)
            async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
                with self.timer(task_name, long_task=long_task):
                    return await func(*args, **kwargs)  # type: ignore

            return async_wrapper  # type: ignore
        else:

            @functools.wraps(func)
            def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
                with self.timer(task_name, long_task=long_task):
                    return func(*args, **kwargs)

            return sync_wrapper

    @abstractmethod
    def track_exception(
        self, *, exclude: list[Type[BaseException]] | None = None
    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
        pass


class PrometheusMetrics(MetricsCollector, MetricsExporter):
    """
    Metrics implementation based on prometheus-client.
    Splits tasks into fast (API, lightweight logic) and long (background tasks, crons).
    """

    BUCKETS_FAST: tuple[float, ...] = (0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0)
    BUCKETS_LONG: tuple[float, ...] = (1.0, 5.0, 10.0, 30.0, 60.0, 300.0, 600.0, 1800.0, 3600.0, 7200.0, 14400.0)

    def __init__(
        self,
        service_name: str,
        version: str | None = None,
        env: str | None = None,
        prefix: str = "fasttelemetry",
    ) -> None:
        self._service_name = service_name
        self._env = env or os.getenv("APP_ENV", "dev")
        self._version = version or os.getenv("APP_VERSION", "unknown")
        self._prefix = f"{prefix.rstrip('_')}_"

        self._registry = CollectorRegistry(auto_describe=True)

        self._app_info: Gauge = self._create_app_info()
        self._business_errors: Counter = self._create_business_errors()
        self._task_duration_fast: Histogram = self._create_fast_histogram()
        self._task_duration_long: Histogram = self._create_long_histogram()

        self._set_initial_values()

    def _create_app_info(self) -> Gauge:
        return Gauge(
            name=f"{self._prefix}app_info",
            documentation="Application information",
            labelnames=["service", "env", "version"],
            registry=self._registry,
        )

    def _create_business_errors(self) -> Counter:
        return Counter(
            name=f"{self._prefix}business_errors_total",
            documentation="Total count of business logic errors",
            labelnames=["error_type"],
            registry=self._registry,
        )

    def _create_fast_histogram(self) -> Histogram:
        return Histogram(
            name=f"{self._prefix}task_processing_seconds",
            documentation="Time spent processing internal tasks (fast)",
            labelnames=["task_type"],
            registry=self._registry,
            buckets=self.BUCKETS_FAST,
        )

    def _create_long_histogram(self) -> Histogram:
        return Histogram(
            name=f"{self._prefix}long_task_processing_seconds",
            documentation="Time spent processing long-running tasks",
            labelnames=["task_type"],
            registry=self._registry,
            buckets=self.BUCKETS_LONG,
        )

    def _set_initial_values(self) -> None:
        """Set static values at startup."""
        self._app_info.labels(service=self._service_name, env=self._env, version=self._version).set(1)

    @property
    def service_name(self) -> str:
        return self._service_name

    def inc_error(self, error_type: str | BaseException = "generic") -> None:
        if isinstance(error_type, BaseException):
            error_type = type(error_type).__name__
        self._business_errors.labels(error_type=error_type).inc()

    def track_exception(
        self, arg: Callable[P, R] | None = None, *, exclude: list[Type[BaseException]] | None = None
    ) -> Any:
        """
        Decorator for counting exceptions.
        Uses the exception class name as the error_type label.
        """

        def decorator(func: Callable[P, R]) -> Callable[P, R]:
            if inspect.iscoroutinefunction(func):

                @functools.wraps(func)
                async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
                    try:
                        return await func(*args, **kwargs)  # type: ignore
                    except Exception as e:
                        if not exclude or not isinstance(e, tuple(exclude)):
                            self.inc_error(e)
                        raise

                return async_wrapper  # type: ignore
            else:

                @functools.wraps(func)
                def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
                    try:
                        return func(*args, **kwargs)
                    except Exception as e:
                        if not exclude or not isinstance(e, tuple(exclude)):
                            self.inc_error(e)
                        raise

                return sync_wrapper

        if callable(arg):
            return decorator(arg)
        return decorator

    def timer(self, task_type: str, long_task: bool = False) -> ContextManager[Any]:
        if long_task:
            return self._task_duration_long.labels(task_type=task_type).time()
        return self._task_duration_fast.labels(task_type=task_type).time()

    def get_registry(self) -> CollectorRegistry:
        return self._registry

    def generate_latest_metrics(self) -> bytes:
        return generate_latest(self._registry)
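The docstrings above describe measure_task, track_exception, and timer as the main collection entry points. A minimal usage sketch (not part of the package): the service name, labels, and the direct fast_telemetry.core import path are illustrative assumptions; the package's own fast_telemetry/__init__.py re-exports are not shown in this diff.

from fast_telemetry.core import PrometheusMetrics

metrics = PrometheusMetrics(service_name="orders", version="1.2.3", env="prod")  # hypothetical labels

@metrics.measure_task                        # bare form: label = function name, fast buckets
def parse_payload(raw: bytes) -> dict:
    return {"size": len(raw)}

@metrics.measure_task("nightly_rebuild", long_task=True)  # named form, long-task buckets
@metrics.track_exception(exclude=[ValueError])            # count exceptions, but skip ValueError
async def rebuild_cache() -> None:
    ...

with metrics.timer("db_query"):              # time an arbitrary block explicitly
    pass                                     # run the query here

metrics.inc_error("validation_failed")       # manual error counter increment
print(metrics.generate_latest_metrics().decode())  # Prometheus text exposition format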
fast_telemetry/integrations/__init__.py
ADDED
File without changes

fast_telemetry/integrations/fastapi.py
ADDED
@@ -0,0 +1,30 @@
from typing import Iterable

from fastapi import FastAPI
from prometheus_fastapi_instrumentator import Instrumentator

from ..core import PrometheusMetrics


def setup_fastapi_metrics(
    app: FastAPI, metrics: PrometheusMetrics, excluded_routes: Iterable[str] | None = None
) -> None:
    """
    Sets up automatic HTTP request monitoring
    and adds a /metrics endpoint.
    """
    if excluded_routes is None:
        excluded_routes = set()
    else:
        excluded_routes = set(excluded_routes)
    excluded_routes.update({"/metrics", app.docs_url, app.redoc_url, app.openapi_url})
    instrumentator = Instrumentator(
        should_group_status_codes=False,
        should_ignore_untemplated=True,
        excluded_handlers=list(excluded_routes),
        inprogress_name="http_requests_inprogress",
        inprogress_labels=True,
        registry=metrics.get_registry(),
    )

    instrumentator.instrument(app)
    instrumentator.expose(app, include_in_schema=False)
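A sketch of wiring this integration into an application; the app, service name, route, and the /healthz exclusion below are illustrative assumptions rather than part of the package.

from fastapi import FastAPI

from fast_telemetry.core import PrometheusMetrics
from fast_telemetry.integrations.fastapi import setup_fastapi_metrics

app = FastAPI()
metrics = PrometheusMetrics(service_name="orders-api")  # hypothetical service name

# Instruments HTTP handlers and exposes GET /metrics; the docs routes and
# /metrics itself are always added to the exclusion set by the helper.
setup_fastapi_metrics(app, metrics, excluded_routes=["/healthz"])

@app.get("/orders/{order_id}")
@metrics.measure_task("load_order")   # records into fasttelemetry_task_processing_seconds
async def get_order(order_id: int) -> dict:
    return {"id": order_id}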
fast_telemetry/integrations/faststream.py
ADDED
@@ -0,0 +1,102 @@
from typing import Type, Protocol

from faststream._internal.broker import BrokerUsecase
from faststream.asgi import AsgiFastStream
from faststream.prometheus import PrometheusMiddleware
from prometheus_client import make_asgi_app

from ..core import PrometheusMetrics


class MiddlewareFactory(Protocol):
    def __call__(self, metrics: PrometheusMetrics) -> PrometheusMiddleware: ...


_BROKER_MIDDLEWARE_MAP: dict[Type[BrokerUsecase], MiddlewareFactory] = {}

# --- RabbitMQ ---
try:
    from faststream.rabbit import RabbitBroker
    from faststream.rabbit.prometheus import RabbitPrometheusMiddleware

    class CustomRabbitMiddleware(RabbitPrometheusMiddleware):
        def __init__(self, metrics: PrometheusMetrics):
            super().__init__(registry=metrics.get_registry(), custom_labels={"service": metrics.service_name})

    _BROKER_MIDDLEWARE_MAP[RabbitBroker] = CustomRabbitMiddleware
except ImportError:
    pass

# --- Kafka ---
try:
    from faststream.kafka import KafkaBroker
    from faststream.kafka.prometheus import KafkaPrometheusMiddleware

    class CustomKafkaMiddleware(KafkaPrometheusMiddleware):
        def __init__(self, metrics: PrometheusMetrics):
            super().__init__(registry=metrics.get_registry(), custom_labels={"service": metrics.service_name})

    _BROKER_MIDDLEWARE_MAP[KafkaBroker] = CustomKafkaMiddleware
except ImportError:
    pass

# --- Redis ---
try:
    from faststream.redis import RedisBroker
    from faststream.redis.prometheus import RedisPrometheusMiddleware

    class CustomRedisMiddleware(RedisPrometheusMiddleware):
        def __init__(self, metrics: PrometheusMetrics):
            super().__init__(registry=metrics.get_registry(), custom_labels={"service": metrics.service_name})

    _BROKER_MIDDLEWARE_MAP[RedisBroker] = CustomRedisMiddleware
except ImportError:
    pass

# --- Confluent Kafka ---
try:
    from faststream.confluent import KafkaBroker as ConfluentBroker
    from faststream.confluent.prometheus import KafkaPrometheusMiddleware as ConfluentPrometheusMiddleware

    class CustomConfluentPrometheusMiddleware(ConfluentPrometheusMiddleware):
        def __init__(self, metrics: PrometheusMetrics):
            super().__init__(registry=metrics.get_registry(), custom_labels={"service": metrics.service_name})

    _BROKER_MIDDLEWARE_MAP[ConfluentBroker] = CustomConfluentPrometheusMiddleware
except ImportError:
    pass

# --- NATS ---
try:
    from faststream.nats import NatsBroker
    from faststream.nats.prometheus import NatsPrometheusMiddleware

    class CustomNatsPrometheusMiddleware(NatsPrometheusMiddleware):
        def __init__(self, metrics: PrometheusMetrics):
            super().__init__(registry=metrics.get_registry(), custom_labels={"service": metrics.service_name})

    _BROKER_MIDDLEWARE_MAP[NatsBroker] = CustomNatsPrometheusMiddleware

except ImportError:
    pass


def setup_faststream_metrics(app: AsgiFastStream, metrics: PrometheusMetrics) -> None:
    broker = app.broker
    broker_type = type(broker)

    middleware_cls = _BROKER_MIDDLEWARE_MAP.get(broker_type)

    if not middleware_cls:
        for b_cls, m_cls in _BROKER_MIDDLEWARE_MAP.items():
            if isinstance(broker, b_cls):
                middleware_cls = m_cls
                break

    if not middleware_cls:
        available = [k.__name__ for k in _BROKER_MIDDLEWARE_MAP.keys()]
        raise ValueError(
            f"Unsupported broker type: {broker_type.__name__}. "
            f"Installed drivers support: {', '.join(available)}"
        )
    app.routes.append(("/metrics", make_asgi_app(metrics.get_registry())))

    app.broker.add_middleware(middleware_cls(metrics=metrics))
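A sketch of attaching the middleware to a FastStream ASGI app, assuming the RabbitMQ driver is installed; the broker URL, service name, and queue name are placeholders.

from faststream.asgi import AsgiFastStream
from faststream.rabbit import RabbitBroker

from fast_telemetry.core import PrometheusMetrics
from fast_telemetry.integrations.faststream import setup_faststream_metrics

broker = RabbitBroker("amqp://guest:guest@localhost:5672/")   # placeholder URL
app = AsgiFastStream(broker)
metrics = PrometheusMetrics(service_name="orders-consumer")   # hypothetical service name

# Registers the broker-specific Prometheus middleware and mounts /metrics
# on the ASGI application.
setup_faststream_metrics(app, metrics)

@broker.subscriber("orders.created")   # placeholder queue name
@metrics.track_exception()
async def handle_order_created(msg: dict) -> None:
    ...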
fast_telemetry/integrations/worker.py
ADDED
@@ -0,0 +1,124 @@
import asyncio
import functools
import inspect
import logging
import os
import socket
from abc import abstractmethod
from types import TracebackType
from typing import Type, Callable

from prometheus_client import start_http_server, push_to_gateway

from ..core import PrometheusMetrics, MetricsCollector, P, R

logger = logging.getLogger(__name__)


class IJobMetricsCollector(MetricsCollector):
    @abstractmethod
    def track_job(
        self, gateway_url: str, timeout: float = 5.0, grouping_key: dict[str, str] | None = None
    ) -> "_UnifiedJobPusher":
        """
        Create a tracker for a job.
        Can be used as a context manager or as a decorator.
        """
        pass

    @abstractmethod
    def start_server(self, port: int = 8000, addr: str = "0.0.0.0") -> None:
        pass


class _UnifiedJobPusher:
    """
    Hybrid object: context manager + decorator.
    Pushes metrics to the Pushgateway when the block or function completes.
    """

    def __init__(self, metrics: "WorkerMetrics", gateway_url: str, timeout: float, grouping_key: dict[str, str] | None):
        self.metrics = metrics
        self.gateway_url = gateway_url
        self.timeout = timeout
        # Group by instance so workers do not overwrite each other's metrics
        self.grouping_key = grouping_key or {"instance": self.metrics.instance_id}

    def _push(self) -> None:
        """Push metrics safely (must not crash the script)."""
        try:
            push_to_gateway(
                self.gateway_url,
                job=self.metrics.service_name,
                registry=self.metrics.get_registry(),
                grouping_key=self.grouping_key,
                timeout=self.timeout,
            )
        except Exception:
            logger.exception(f"Failed to push metrics to {self.gateway_url}")

    def __enter__(self) -> "_UnifiedJobPusher":
        return self

    def __exit__(
        self, exc_type: Type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None:
        self._push()

    async def __aenter__(self) -> "_UnifiedJobPusher":
        return self

    async def __aexit__(
        self, exc_type: Type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None:
        await asyncio.to_thread(self._push)

    def __call__(self, func: Callable[P, R]) -> Callable[P, R]:
        """Allows the object to be used as a decorator: @metrics.track_job(...)"""
        if inspect.iscoroutinefunction(func):

            @functools.wraps(func)
            async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
                async with self:
                    return await func(*args, **kwargs)  # type: ignore

            return async_wrapper  # type: ignore
        else:

            @functools.wraps(func)
            def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
                with self:
                    return func(*args, **kwargs)

            return sync_wrapper


class WorkerMetrics(PrometheusMetrics, IJobMetricsCollector):
    def __init__(self, service_name: str, version: str | None = None, env: str | None = None) -> None:
        super().__init__(service_name=service_name, version=version, env=env)
        self._is_server_started = False
        # Unique ID for each script/pod run
        self._instance_id = f"{socket.gethostname()}-{os.getpid()}"

    @property
    def instance_id(self) -> str:
        return self._instance_id

    def start_server(self, port: int = 8000, addr: str = "0.0.0.0") -> None:
        """
        Start an HTTP server (daemon thread) for the pull model, if needed.
        """
        if self._is_server_started:
            logger.warning("Metrics server is already running")
            return

        start_http_server(port, addr=addr, registry=self._registry)
        self._is_server_started = True

    def track_job(
        self, gateway_url: str, timeout: float = 5.0, grouping_key: dict[str, str] | None = None
    ) -> _UnifiedJobPusher:
        """
        Return an object that works both as a context manager and as a decorator.
        """
        return _UnifiedJobPusher(metrics=self, gateway_url=gateway_url, timeout=timeout, grouping_key=grouping_key)
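A sketch of the two worker modes implemented above: pushing to a Pushgateway for short-lived jobs, and a pull-model HTTP endpoint for long-running workers. The Pushgateway address and job names are placeholders.

import asyncio

from fast_telemetry.integrations.worker import WorkerMetrics

metrics = WorkerMetrics(service_name="nightly-cleanup")    # hypothetical job name
PUSHGATEWAY = "http://pushgateway:9091"                    # placeholder address

# Push model, decorator form: metrics are pushed when the function returns
# or raises; the default grouping_key is host+pid, so parallel workers do
# not overwrite each other in the Pushgateway.
@metrics.track_job(PUSHGATEWAY)
@metrics.measure_task("cleanup", long_task=True)
async def run_cleanup() -> None:
    ...

# Push model, context-manager form (sync or async):
def run_batch() -> None:
    with metrics.track_job(PUSHGATEWAY):
        metrics.inc_error("skipped_row")   # anything collected is included in the push

# Pull model instead: expose metrics over HTTP for Prometheus to scrape.
# metrics.start_server(port=8000)

if __name__ == "__main__":
    asyncio.run(run_cleanup())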
fast_telemetry-0.0.0.dist-info/METADATA
ADDED
@@ -0,0 +1,11 @@
Metadata-Version: 2.4
Name: fast-telemetry
Version: 0.0.0
Summary: Internal standardized metrics library
Requires-Python: >=3.11
Requires-Dist: prometheus-client~=0.21.0
Provides-Extra: web
Requires-Dist: fastapi~=0.115.0; extra == "web"
Requires-Dist: prometheus-fastapi-instrumentator~=7.1.0; extra == "web"
Provides-Extra: stream
Requires-Dist: faststream~=0.6.0; extra == "stream"
fast_telemetry-0.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
fast_telemetry/__init__.py,sha256=maomIZLdB92E2j9kW1caPUa-_B799ZTGEi24NY-ERZI,191
fast_telemetry/core.py,sha256=T39rEQuM2W95S9_9ww05a6RSIRFgvo7XFagqKBXGqCk,8431
fast_telemetry/integrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
fast_telemetry/integrations/fastapi.py,sha256=-IMpWpViu27iBXWvBTyXs5Uxeq2JFB0Jt5Hz5Y5KEck,1052
fast_telemetry/integrations/faststream.py,sha256=12WvzC4l8o2Vec8GSI2-58cx6aP63o7-cohxZNgyZqA,3650
fast_telemetry/integrations/worker.py,sha256=BQCLdjxTB3PluD7Bxv-rPzOjWTkBde6ssg4KOxQENeQ,4713
fast_telemetry-0.0.0.dist-info/METADATA,sha256=eASeq92li4u940ns7EExrhXRTLMxYXehFwpWUpdW92w,385
fast_telemetry-0.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
fast_telemetry-0.0.0.dist-info/top_level.txt,sha256=OvY6pUtFsIZyuDZiiIF6RFF-mfjMuvXDpUbz4XXXop0,24
fast_telemetry-0.0.0.dist-info/RECORD,,