qyro 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qyro/__init__.py +17 -0
- qyro/adapters/__init__.py +4 -0
- qyro/adapters/language_adapters/__init__.py +4 -0
- qyro/adapters/language_adapters/c/__init__.py +4 -0
- qyro/adapters/language_adapters/python/__init__.py +4 -0
- qyro/adapters/language_adapters/python/python_adapter.py +584 -0
- qyro/cli/__init__.py +8 -0
- qyro/cli/__main__.py +5 -0
- qyro/cli/cli.py +392 -0
- qyro/cli/interactive.py +297 -0
- qyro/common/__init__.py +37 -0
- qyro/common/animation.py +82 -0
- qyro/common/builder.py +434 -0
- qyro/common/compiler.py +895 -0
- qyro/common/config.py +93 -0
- qyro/common/constants.py +99 -0
- qyro/common/errors.py +176 -0
- qyro/common/frontend.py +74 -0
- qyro/common/health.py +358 -0
- qyro/common/kafka_manager.py +192 -0
- qyro/common/logging.py +149 -0
- qyro/common/memory.py +147 -0
- qyro/common/metrics.py +301 -0
- qyro/common/monitoring.py +468 -0
- qyro/common/parser.py +91 -0
- qyro/common/platform.py +609 -0
- qyro/common/redis_memory.py +1108 -0
- qyro/common/rpc.py +287 -0
- qyro/common/sandbox.py +191 -0
- qyro/common/schema_loader.py +33 -0
- qyro/common/secure_sandbox.py +490 -0
- qyro/common/toolchain_validator.py +617 -0
- qyro/common/type_generator.py +176 -0
- qyro/common/validation.py +401 -0
- qyro/common/validator.py +204 -0
- qyro/gateway/__init__.py +8 -0
- qyro/gateway/gateway.py +303 -0
- qyro/orchestrator/__init__.py +8 -0
- qyro/orchestrator/orchestrator.py +1223 -0
- qyro-2.0.0.dist-info/METADATA +244 -0
- qyro-2.0.0.dist-info/RECORD +45 -0
- qyro-2.0.0.dist-info/WHEEL +5 -0
- qyro-2.0.0.dist-info/entry_points.txt +2 -0
- qyro-2.0.0.dist-info/licenses/LICENSE +21 -0
- qyro-2.0.0.dist-info/top_level.txt +1 -0
qyro/common/memory.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Qyro Memory Compatibility Layer
|
|
3
|
+
Provides backward compatibility with older QyroMemory API.
|
|
4
|
+
Supports Redis, distributed Redis, and standalone in-memory implementations.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
import socket
|
|
9
|
+
from typing import Optional, Dict, Any, Callable, List
|
|
10
|
+
|
|
11
|
+
from .redis_memory import RedisQyroMemory, RedisConnectionError as MemoryConnectionError
|
|
12
|
+
# from .distributed_memory import create_distributed_memory # Commented out as file was removed
|
|
13
|
+
# from .standalone_memory import StandaloneNexusMemory # Commented out as file was removed
|
|
14
|
+
|
|
15
|
+
class NexusMemory:
    """
    Backward-compatible wrapper supporting Redis, distributed, and standalone implementations.

    This class provides the old QyroMemory API interface while
    automatically selecting the appropriate backend based on availability.
    In this version only the Redis backend actually exists; standalone and
    distributed modes are accepted for compatibility but fall back to Redis.
    """

    def __init__(self, create: bool = True, redis_host: str = 'localhost', redis_port: int = 6379,
                 use_distributed: bool = False, cluster_hosts: Optional[list] = None,
                 cluster_ports: Optional[list] = None, force_standalone: Optional[bool] = None):
        """
        Initialize QyroMemory with a Redis backend.

        Args:
            create: Unused (kept for backward compatibility)
            redis_host: Redis server hostname (for single Redis mode)
            redis_port: Redis server port (for single Redis mode)
            use_distributed: Whether to use distributed memory system
                (implementation removed in this version; falls back to Redis)
            cluster_hosts: List of Redis hosts for distributed mode (unused)
            cluster_ports: List of Redis ports for distributed mode (unused)
            force_standalone: Force standalone mode (True), Redis mode (False),
                or auto-detect from the QYRO_STANDALONE_MODE env var (None).
                Standalone implementation was removed; falls back to Redis.

        Raises:
            RuntimeError: If Redis is not reachable or the connection fails.
        """
        # Check if standalone mode is forced via parameter or environment.
        if force_standalone is None:
            force_standalone = os.environ.get('QYRO_STANDALONE_MODE', '').lower() == 'true'

        if force_standalone:
            # Standalone implementation was removed in this version.
            print("[QYRO] Standalone mode requested but not available, using Redis")
            force_standalone = False
        elif use_distributed:
            # Distributed implementation was removed in this version.
            print("[QYRO] Distributed memory requested but not available, using Redis")
            use_distributed = False

        # BUG FIX: the Redis connection must run on every path. Previously the
        # standalone/distributed branches "fell through" without ever creating
        # a backend, leaving self._memory unset and breaking every later call.
        # The availability check is done outside the try so the RuntimeError it
        # raises is not re-caught and double-wrapped by the except below.
        if not self._is_redis_available(redis_host, redis_port):
            print(f"[QYRO] Redis not available at {redis_host}:{redis_port}, exiting")
            raise RuntimeError(f"Redis not available at {redis_host}:{redis_port} and standalone mode not available")

        try:
            self._memory = RedisQyroMemory(host=redis_host, port=redis_port)
            self._backend_type = 'redis'
            print(f"[QYRO] Connected to Redis at {redis_host}:{redis_port}")
        except Exception as e:
            print(f"[QYRO] Redis connection failed: {e}, exiting")
            raise RuntimeError(f"Redis connection failed: {e} and standalone mode not available") from e

        self._redis_host = redis_host
        self._redis_port = redis_port
        self._use_distributed = use_distributed

    def _is_redis_available(self, host: str, port: int) -> bool:
        """Check if Redis is reachable via a plain TCP connect (2s timeout)."""
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(2)  # 2 second timeout
            result = sock.connect_ex((host, port))
            sock.close()
            return result == 0
        except OSError:
            # Narrowed from a bare except: DNS/socket failures are OSError
            # subclasses (socket.gaierror, socket.timeout, ...).
            return False

    def read(self) -> dict:
        """
        Read the current state from Redis.

        Returns:
            Dictionary containing the current state.
        """
        if self._use_distributed:
            # Distributed memory addresses state by key. Dead path in this
            # version: __init__ always forces use_distributed to False.
            return self._memory.read('state')
        else:
            return self._memory.read()

    def write(self, data: dict) -> None:
        """
        Write state to Redis.

        Args:
            data: Dictionary containing state to write
        """
        if self._use_distributed:
            self._memory.write('state', data)  # Distributed memory uses keys
        else:
            self._memory.write(data)

    def subscribe_to_changes(self, callback) -> None:
        """
        Subscribe to state change notifications.

        Args:
            callback: Function to call when state changes
        """
        if self._use_distributed:
            self._memory.subscribe_to_topic('qyro:state:changed', callback)
        else:
            self._memory.subscribe_to_changes(callback)

    def close(self) -> None:
        """Close Redis connections."""
        self._memory.close()

    def clear(self) -> None:
        """Clear all state."""
        if self._use_distributed:
            # In distributed mode we would only clear the 'state' key in the
            # appropriate shard (dead path in this version).
            self._memory.write('state', {})
        else:
            self._memory.clear_state()

    def __enter__(self):
        """Context manager entry."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit: close connections, never suppress exceptions."""
        self.close()
        return False
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
# Export aliases for backward compatibility.
# Older callers caught NexusConnectionError; it is the same class as the
# Redis-backed connection error imported at the top of this module.
NexusConnectionError = MemoryConnectionError

# Qyro compatibility alias
class QyroMemory(NexusMemory):
    """Qyro memory class for backward compatibility; identical to NexusMemory."""
    pass
|
qyro/common/metrics.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Nexus Metrics Collection
|
|
3
|
+
Prometheus-compatible metrics for observability.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import time
|
|
7
|
+
import threading
|
|
8
|
+
from typing import Dict, Any, Optional, List
|
|
9
|
+
from dataclasses import dataclass, field
|
|
10
|
+
from contextlib import contextmanager
|
|
11
|
+
|
|
12
|
+
from .logging import get_logger
|
|
13
|
+
|
|
14
|
+
logger = get_logger("nexus.metrics")
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class MetricValue:
    """A single metric value with labels."""
    # Observed numeric value of the metric sample.
    value: float
    # Label set identifying this sample; empty dict for an unlabeled sample.
    labels: Dict[str, str] = field(default_factory=dict)
    # Wall-clock time (seconds since epoch) when the sample object was created.
    timestamp: float = field(default_factory=time.time)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class Counter:
    """A counter metric that only increases.

    Samples are tracked per unique label combination; label sets are
    normalized to sorted tuples, so keyword order does not matter.
    """

    def __init__(self, name: str, description: str = ""):
        self.name = name
        self.description = description
        self._values: Dict[tuple, float] = {}
        self._lock = threading.Lock()

    @staticmethod
    def _key(labels: Dict[str, str]) -> tuple:
        """Normalize a label dict into a hashable, order-independent key."""
        return tuple(sorted(labels.items()))

    def inc(self, amount: float = 1.0, **labels):
        """Increment the counter for the given label set."""
        key = self._key(labels)
        with self._lock:
            self._values[key] = self._values.get(key, 0) + amount

    def get(self, **labels) -> float:
        """Return the current value for the given label set (0 if unseen)."""
        return self._values.get(self._key(labels), 0)

    def values(self) -> List[MetricValue]:
        """Return every recorded sample as a MetricValue."""
        return [
            MetricValue(value=count, labels=dict(key))
            for key, count in self._values.items()
        ]
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class Gauge:
    """A gauge metric that can go up and down.

    Samples are tracked per unique label combination; label sets are
    normalized to sorted tuples, so keyword order does not matter.
    """

    def __init__(self, name: str, description: str = ""):
        self.name = name
        self.description = description
        self._values: Dict[tuple, float] = {}
        self._lock = threading.Lock()

    @staticmethod
    def _key(labels: Dict[str, str]) -> tuple:
        """Normalize a label dict into a hashable, order-independent key."""
        return tuple(sorted(labels.items()))

    def set(self, value: float, **labels):
        """Set the gauge to an absolute value."""
        key = self._key(labels)
        with self._lock:
            self._values[key] = value

    def inc(self, amount: float = 1.0, **labels):
        """Increase the gauge by `amount`."""
        key = self._key(labels)
        with self._lock:
            self._values[key] = self._values.get(key, 0) + amount

    def dec(self, amount: float = 1.0, **labels):
        """Decrease the gauge by `amount` (delegates to inc with negation)."""
        self.inc(-amount, **labels)

    def get(self, **labels) -> float:
        """Return the current value for the given label set (0 if unseen)."""
        return self._values.get(self._key(labels), 0)

    def values(self) -> List[MetricValue]:
        """Return every recorded sample as a MetricValue."""
        return [
            MetricValue(value=level, labels=dict(key))
            for key, level in self._values.items()
        ]
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
class Histogram:
    """A histogram metric for measuring distributions.

    Bucket counts are cumulative (Prometheus convention): an observation
    increments every bucket whose upper bound is >= the observed value,
    plus the implicit +Inf bucket.
    """

    DEFAULT_BUCKETS = (0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0)

    def __init__(self, name: str, description: str = "", buckets: Optional[tuple] = None):
        self.name = name
        self.description = description
        self.buckets = buckets or self.DEFAULT_BUCKETS
        # label key (sorted tuple) -> {bucket upper bound -> cumulative count}
        self._counts: Dict[tuple, Dict[float, int]] = {}
        # label key -> sum of all observed values
        self._sums: Dict[tuple, float] = {}
        # label key -> total number of observations
        self._totals: Dict[tuple, int] = {}
        self._lock = threading.Lock()

    def observe(self, value: float, **labels):
        """Record an observation under the given label set."""
        key = tuple(sorted(labels.items()))

        with self._lock:
            if key not in self._counts:
                # Lazily create the bucket map (plus the implicit +Inf bucket)
                # the first time this label combination is seen.
                self._counts[key] = {b: 0 for b in self.buckets}
                self._counts[key][float('inf')] = 0
                self._sums[key] = 0
                self._totals[key] = 0

            for bucket in self.buckets:
                if value <= bucket:
                    self._counts[key][bucket] += 1
            self._counts[key][float('inf')] += 1

            self._sums[key] += value
            self._totals[key] += 1

    @contextmanager
    def time(self, **labels):
        """Context manager that observes the elapsed wall-clock duration.

        The duration is recorded even when the managed body raises
        (fix: the original skipped the observation on exception).
        """
        start = time.time()
        try:
            yield
        finally:
            self.observe(time.time() - start, **labels)

    def get_percentile(self, percentile: float, **labels) -> float:
        """Estimate a percentile from the histogram.

        Returns the upper bound of the first bucket whose cumulative count
        reaches the target rank, or 0.0 when no observations exist for the
        label set. The estimate is bucket-granular, not interpolated.
        """
        key = tuple(sorted(labels.items()))
        if key not in self._totals or self._totals[key] == 0:
            return 0.0

        target = self._totals[key] * percentile / 100

        # Bucket counts are cumulative, so the first bucket whose count
        # reaches the target rank bounds the requested percentile.
        for bucket in sorted(self.buckets):
            if self._counts[key].get(bucket, 0) >= target:
                return bucket

        return self.buckets[-1]
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class MetricsRegistry:
    """Registry of all metrics.

    Lazily creates and caches Counter/Gauge/Histogram instances keyed by
    their prefixed name, and exports the whole set in Prometheus text
    exposition format or as a JSON-serializable dict.
    """

    def __init__(self, prefix: str = "nexus"):
        self.prefix = prefix
        self._counters: Dict[str, "Counter"] = {}
        self._gauges: Dict[str, "Gauge"] = {}
        self._histograms: Dict[str, "Histogram"] = {}

    def counter(self, name: str, description: str = "") -> "Counter":
        """Get or create a counter named `{prefix}_{name}`."""
        full_name = f"{self.prefix}_{name}"
        if full_name not in self._counters:
            self._counters[full_name] = Counter(full_name, description)
        return self._counters[full_name]

    def gauge(self, name: str, description: str = "") -> "Gauge":
        """Get or create a gauge named `{prefix}_{name}`."""
        full_name = f"{self.prefix}_{name}"
        if full_name not in self._gauges:
            self._gauges[full_name] = Gauge(full_name, description)
        return self._gauges[full_name]

    def histogram(self, name: str, description: str = "", buckets: tuple = None) -> "Histogram":
        """Get or create a histogram named `{prefix}_{name}`."""
        full_name = f"{self.prefix}_{name}"
        if full_name not in self._histograms:
            self._histograms[full_name] = Histogram(full_name, description, buckets)
        return self._histograms[full_name]

    @staticmethod
    def _format_labels(labels: Dict[str, str]) -> str:
        """Render a label dict as a comma-joined `k="v"` Prometheus label list."""
        return ','.join(f'{k}="{v}"' for k, v in labels.items())

    def export_prometheus(self) -> str:
        """Export all metrics in Prometheus text exposition format."""
        lines = []

        for name, counter in self._counters.items():
            if counter.description:
                lines.append(f"# HELP {name} {counter.description}")
            lines.append(f"# TYPE {name} counter")
            for mv in counter.values():
                labels = self._format_labels(mv.labels)
                label_str = f"{{{labels}}}" if labels else ""
                lines.append(f"{name}{label_str} {mv.value}")

        for name, gauge in self._gauges.items():
            if gauge.description:
                lines.append(f"# HELP {name} {gauge.description}")
            lines.append(f"# TYPE {name} gauge")
            for mv in gauge.values():
                labels = self._format_labels(mv.labels)
                label_str = f"{{{labels}}}" if labels else ""
                lines.append(f"{name}{label_str} {mv.value}")

        for name, hist in self._histograms.items():
            if hist.description:
                lines.append(f"# HELP {name} {hist.description}")
            lines.append(f"# TYPE {name} histogram")
            for key, counts in hist._counts.items():
                base = self._format_labels(dict(key))
                for bucket, count in counts.items():
                    bucket_str = "+Inf" if bucket == float('inf') else str(bucket)
                    le = f'le="{bucket_str}"'
                    labels = f"{base},{le}" if base else le
                    lines.append(f"{name}_bucket{{{labels}}} {count}")
                # FIX: the Prometheus histogram exposition format also
                # requires `_sum` and `_count` series per label set; the
                # previous "simplified" export omitted them.
                suffix = f"{{{base}}}" if base else ""
                lines.append(f"{name}_sum{suffix} {hist._sums.get(key, 0)}")
                lines.append(f"{name}_count{suffix} {hist._totals.get(key, 0)}")

        return '\n'.join(lines)

    def export_json(self) -> Dict[str, Any]:
        """Export all metrics as a JSON-serializable dict."""
        return {
            "counters": {
                name: [{"value": mv.value, "labels": mv.labels} for mv in c.values()]
                for name, c in self._counters.items()
            },
            "gauges": {
                name: [{"value": mv.value, "labels": mv.labels} for mv in g.values()]
                for name, g in self._gauges.items()
            },
            "histograms": {
                name: {
                    "buckets": h.buckets,
                    "data": [
                        {"labels": dict(key), "sum": h._sums.get(key, 0), "count": h._totals.get(key, 0)}
                        for key in h._counts.keys()
                    ]
                }
                for name, h in self._histograms.items()
            }
        }
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
# Global metrics registry shared by the whole process; other modules import
# this singleton rather than constructing their own registry.
metrics = MetricsRegistry()

# Pre-defined metrics, created once at import time so all modules share the
# same Counter/Gauge/Histogram instances for these well-known names.
memory_reads = metrics.counter("memory_reads_total", "Total memory read operations")
memory_writes = metrics.counter("memory_writes_total", "Total memory write operations")
memory_errors = metrics.counter("memory_errors_total", "Total memory errors")
memory_size = metrics.gauge("memory_size_bytes", "Current memory size")
memory_utilization = metrics.gauge("memory_utilization_ratio", "Memory utilization (0-1)")
active_processes = metrics.gauge("active_processes", "Number of active supervised processes")
rpc_calls = metrics.counter("rpc_calls_total", "Total RPC calls")
rpc_latency = metrics.histogram("rpc_call_duration_seconds", "RPC call latency")
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
# HTTP endpoint handler
def create_metrics_routes(app, registry: Optional[MetricsRegistry] = None):
    """Add metrics routes to a FastAPI or Flask app.

    Tries FastAPI first: mounts an APIRouter exposing GET /metrics
    (Prometheus text format) and GET /metrics/json, then returns the router.
    If FastAPI is not importable, falls back to registering the same two
    routes directly on a Flask app (returning None in that case). If neither
    framework is importable, logs a warning and registers nothing.

    NOTE(review): if FastAPI is installed but `app` is a Flask app,
    `app.include_router` would raise AttributeError rather than falling
    through to the Flask branch — confirm callers always pass a matching app.

    Args:
        app: A FastAPI or Flask application instance.
        registry: Registry to export; defaults to the module-global `metrics`.
    """
    registry = registry or metrics

    try:
        from fastapi import APIRouter, Response
        router = APIRouter()

        @router.get("/metrics")
        def prometheus_metrics():
            # Prometheus scrapers expect the plain-text exposition format.
            return Response(
                content=registry.export_prometheus(),
                media_type="text/plain"
            )

        @router.get("/metrics/json")
        def json_metrics():
            return registry.export_json()

        app.include_router(router)
        return router

    except ImportError:
        # FastAPI not installed — try Flask below.
        pass

    try:
        from flask import Response, jsonify

        # Inner function names double as Flask endpoint names (via __name__),
        # so they must not be renamed.
        @app.route("/metrics")
        def prometheus_metrics():
            return Response(registry.export_prometheus(), mimetype="text/plain")

        @app.route("/metrics/json")
        def json_metrics():
            return jsonify(registry.export_json())

    except ImportError:
        logger.warning("no_web_framework")