altcodepro-polydb-python 2.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- altcodepro_polydb_python-2.1.0.dist-info/METADATA +378 -0
- altcodepro_polydb_python-2.1.0.dist-info/RECORD +51 -0
- altcodepro_polydb_python-2.1.0.dist-info/WHEEL +5 -0
- altcodepro_polydb_python-2.1.0.dist-info/licenses/LICENSE +21 -0
- altcodepro_polydb_python-2.1.0.dist-info/top_level.txt +1 -0
- polydb/__init__.py +64 -0
- polydb/adapters/AzureBlobStorageAdapter.py +77 -0
- polydb/adapters/AzureFileStorageAdapter.py +79 -0
- polydb/adapters/AzureQueueAdapter.py +61 -0
- polydb/adapters/AzureTableStorageAdapter.py +182 -0
- polydb/adapters/DynamoDBAdapter.py +216 -0
- polydb/adapters/EFSAdapter.py +50 -0
- polydb/adapters/FirestoreAdapter.py +193 -0
- polydb/adapters/GCPStorageAdapter.py +81 -0
- polydb/adapters/MongoDBAdapter.py +136 -0
- polydb/adapters/PostgreSQLAdapter.py +453 -0
- polydb/adapters/PubSubAdapter.py +83 -0
- polydb/adapters/S3Adapter.py +86 -0
- polydb/adapters/S3CompatibleAdapter.py +90 -0
- polydb/adapters/SQSAdapter.py +84 -0
- polydb/adapters/VercelKVAdapter.py +327 -0
- polydb/adapters/__init__.py +0 -0
- polydb/advanced_query.py +147 -0
- polydb/audit/AuditStorage.py +136 -0
- polydb/audit/__init__.py +7 -0
- polydb/audit/context.py +53 -0
- polydb/audit/manager.py +47 -0
- polydb/audit/models.py +86 -0
- polydb/base/NoSQLKVAdapter.py +301 -0
- polydb/base/ObjectStorageAdapter.py +42 -0
- polydb/base/QueueAdapter.py +27 -0
- polydb/base/SharedFilesAdapter.py +32 -0
- polydb/base/__init__.py +0 -0
- polydb/batch.py +163 -0
- polydb/cache.py +204 -0
- polydb/databaseFactory.py +748 -0
- polydb/decorators.py +21 -0
- polydb/errors.py +82 -0
- polydb/factory.py +107 -0
- polydb/models.py +39 -0
- polydb/monitoring.py +313 -0
- polydb/multitenancy.py +197 -0
- polydb/py.typed +0 -0
- polydb/query.py +150 -0
- polydb/registry.py +71 -0
- polydb/retry.py +76 -0
- polydb/schema.py +205 -0
- polydb/security.py +458 -0
- polydb/types.py +127 -0
- polydb/utils.py +61 -0
- polydb/validation.py +131 -0
polydb/decorators.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# src/polydb/decorators.py
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
from typing import Type, TypeVar
|
|
5
|
+
|
|
6
|
+
from .registry import ModelRegistry
|
|
7
|
+
|
|
8
|
+
T = TypeVar("T", bound=type)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def polydb_model(cls: T) -> T:
    """
    Class decorator that adds *cls* to the global ModelRegistry as a side
    effect of importing the module that defines it.

    The class is returned unchanged, so the decorator is transparent.

    Usage:
        @polydb_model
        class UserEntity:
            __polydb__ = {"storage": "nosql"}
    """
    ModelRegistry.register(cls)
    return cls
|
polydb/errors.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
# src/polydb/errors.py
|
|
2
|
+
"""
|
|
3
|
+
Structured exceptions for cloud database operations
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class CloudDBError(Exception):
    """Root of the cloud-database exception hierarchy."""
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DatabaseError(CloudDBError):
    """Raised when a SQL database operation fails."""
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class NoSQLError(CloudDBError):
    """Raised when a NoSQL operation fails."""
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class StorageError(CloudDBError):
    """Raised when an object storage operation fails."""
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class QueueError(CloudDBError):
    """Raised when a queue operation fails."""
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ConnectionError(CloudDBError):
    """Raised when a connection to a backing service fails.

    NOTE(review): this shadows the builtin ``ConnectionError`` within this
    package; callers importing it by name should be careful which one they
    catch. Renaming would break the public interface, so it is kept as-is.
    """
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class ValidationError(CloudDBError):
    """Raised when input validation fails."""
|
|
48
|
+
|
|
49
|
+
class PolyDBError(Exception):
    """Base class for polydb registry/model errors.

    NOTE(review): this is a second, separate hierarchy alongside
    CloudDBError; merging them would change the types callers catch.
    """
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class ModelNotRegisteredError(PolyDBError):
    """Raised when a model has not been registered in the registry."""
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class InvalidModelMetadataError(PolyDBError):
    """Raised when a model carries invalid ``__polydb__`` metadata."""
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class UnsupportedStorageTypeError(PolyDBError):
    """Raised when a model declares an unknown/unsupported storage type."""
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
class AdapterConfigurationError(PolyDBError):
    """Raised when the required adapter is missing or misconfigured."""
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class OperationNotSupportedError(PolyDBError):
    """Raised when an adapter cannot perform a requested operation."""
|
polydb/factory.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
# src/polydb/factory.py
|
|
2
|
+
import os
|
|
3
|
+
import threading
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
from .models import CloudProvider, PartitionConfig
|
|
7
|
+
from .utils import setup_logger
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class CloudDatabaseFactory:
    """Cloud-independent adapter factory with provider auto-detection.

    Resolves the active :class:`CloudProvider` (explicit argument, the
    ``CLOUD_PROVIDER`` environment variable, or sniffing well-known
    provider environment variables) and hands out lazily-imported adapters
    for NoSQL key-value, object storage, queues, shared files and SQL.
    Adapter modules are imported inside the getters so only the selected
    provider's SDK needs to be installed.
    """

    def __init__(self, provider: Optional[CloudProvider] = None):
        """Create a factory; auto-detect the provider when none is given.

        Args:
            provider: Explicit provider override; ``None`` triggers
                environment-based detection.
        """
        self.logger = setup_logger(__name__)
        self.provider = provider or self._detect_provider()
        self.connections = {}          # reserved for adapter/connection reuse
        self._lock = threading.Lock()  # guards shared factory state

        self.logger.info(f"CloudDatabaseFactory provider: {self.provider.value}")

    def _detect_provider(self) -> CloudProvider:
        """Detect the provider from the environment.

        Order of precedence: explicit ``CLOUD_PROVIDER`` override first,
        then the first matching provider-specific environment variable
        (list order encodes priority), finally a PostgreSQL fallback.
        """
        explicit = os.getenv('CLOUD_PROVIDER')
        if explicit:
            try:
                return CloudProvider(explicit.lower())
            except ValueError:
                # Unknown value: warn and fall through to sniffing.
                self.logger.warning(f"Invalid CLOUD_PROVIDER: {explicit}")

        rules = [
            ('AZURE_STORAGE_CONNECTION_STRING', CloudProvider.AZURE),
            ('AWS_ACCESS_KEY_ID', CloudProvider.AWS),
            ('GOOGLE_CLOUD_PROJECT', CloudProvider.GCP),
            ('VERCEL_ENV', CloudProvider.VERCEL),
            ('MONGODB_URI', CloudProvider.MONGODB),
            ('POSTGRES_URL', CloudProvider.POSTGRESQL),
            ('POSTGRES_CONNECTION_STRING', CloudProvider.POSTGRESQL),
        ]
        for env_var, provider in rules:
            if os.getenv(env_var):
                return provider

        self.logger.warning("No provider detected, defaulting to PostgreSQL")
        return CloudProvider.POSTGRESQL

    def get_nosql_kv(self, partition_config: Optional[PartitionConfig] = None):
        """Return the NoSQL key-value adapter for the active provider.

        MongoDB is the catch-all fallback for providers without a native
        NoSQL table service.
        """
        if self.provider == CloudProvider.AZURE:
            from .adapters.AzureTableStorageAdapter import AzureTableStorageAdapter
            return AzureTableStorageAdapter(partition_config)
        elif self.provider == CloudProvider.AWS:
            from .adapters.DynamoDBAdapter import DynamoDBAdapter
            return DynamoDBAdapter(partition_config)
        elif self.provider == CloudProvider.GCP:
            from .adapters.FirestoreAdapter import FirestoreAdapter
            return FirestoreAdapter(partition_config)
        elif self.provider == CloudProvider.VERCEL:
            from .adapters.VercelKVAdapter import VercelKVAdapter
            return VercelKVAdapter(partition_config)
        else:
            from .adapters.MongoDBAdapter import MongoDBAdapter
            return MongoDBAdapter(partition_config)

    def get_object_storage(self):
        """Return the object-storage adapter for the active provider.

        The S3-compatible adapter is the catch-all fallback.
        """
        if self.provider == CloudProvider.AZURE:
            from .adapters.AzureBlobStorageAdapter import AzureBlobStorageAdapter
            return AzureBlobStorageAdapter()
        elif self.provider == CloudProvider.AWS:
            from .adapters.S3Adapter import S3Adapter
            return S3Adapter()
        elif self.provider == CloudProvider.GCP:
            from .adapters.GCPStorageAdapter import GCPStorageAdapter
            return GCPStorageAdapter()
        elif self.provider == CloudProvider.VERCEL:
            from .adapters.VercelKVAdapter import VercelBlobAdapter
            return VercelBlobAdapter()
        else:
            from .adapters.S3CompatibleAdapter import S3CompatibleAdapter
            return S3CompatibleAdapter()

    def get_queue(self):
        """Return the queue adapter for the active provider.

        Returns:
            A queue adapter, or ``None`` when the provider has no queue
            service (MongoDB / PostgreSQL / S3-compatible). Previously this
            fall-through was silent; it now logs a warning.
        """
        if self.provider == CloudProvider.AZURE:
            from .adapters.AzureQueueAdapter import AzureQueueAdapter
            return AzureQueueAdapter()
        elif self.provider == CloudProvider.AWS:
            from .adapters.SQSAdapter import SQSAdapter
            return SQSAdapter()
        elif self.provider == CloudProvider.GCP:
            from .adapters.PubSubAdapter import PubSubAdapter
            return PubSubAdapter()
        elif self.provider == CloudProvider.VERCEL:
            from .adapters.VercelKVAdapter import VercelQueueAdapter
            return VercelQueueAdapter()

        self.logger.warning(
            "No queue adapter for provider %s; returning None",
            self.provider.value,
        )
        return None

    def get_shared_files(self):
        """Return the shared-files adapter for the active provider.

        Returns:
            A shared-files adapter, or ``None`` when the provider has no
            shared file service. Previously this fall-through was silent;
            it now logs a warning.
        """
        if self.provider == CloudProvider.AZURE:
            from .adapters.AzureFileStorageAdapter import AzureFileStorageAdapter
            return AzureFileStorageAdapter()
        elif self.provider == CloudProvider.AWS:
            from .adapters.EFSAdapter import EFSAdapter
            return EFSAdapter()
        elif self.provider == CloudProvider.GCP:
            from .adapters.GCPStorageAdapter import GCPStorageAdapter
            return GCPStorageAdapter()

        self.logger.warning(
            "No shared-files adapter for provider %s; returning None",
            self.provider.value,
        )
        return None

    def get_sql(self):
        """Return the SQL adapter (always PostgreSQL, regardless of provider)."""
        from .adapters.PostgreSQLAdapter import PostgreSQLAdapter
        return PostgreSQLAdapter()
|
polydb/models.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# src/polydb/models.py
|
|
2
|
+
"""
|
|
3
|
+
Data models and configurations
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Optional, List, Callable, Any
|
|
8
|
+
from enum import Enum
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class CloudProvider(Enum):
    """Supported cloud providers"""
    # Values are the lowercase identifiers accepted via the CLOUD_PROVIDER
    # environment variable (parsed with CloudProvider(value.lower()) in the
    # factory's provider detection).
    AZURE = "azure"
    AWS = "aws"
    GCP = "gcp"
    VERCEL = "vercel"
    MONGODB = "mongodb"
    S3_COMPATIBLE = "s3_compatible"
    POSTGRESQL = "postgresql"
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class PartitionConfig:
    """Configuration for partition and row keys"""
    # Template for building the partition key; "{id}"-style placeholders are
    # presumably filled from entity fields by the adapters — TODO confirm
    # against the adapter implementations.
    partition_key_template: str = "default_{id}"
    # Optional template for the row key; None means no separate row key.
    row_key_template: Optional[str] = None
    # Field names combined into a composite key, if any.
    composite_keys: Optional[List[str]] = None
    # When True, keys are generated automatically when not supplied.
    auto_generate: bool = True
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
class QueryOptions:
    """LINQ-style query options"""
    # Predicate applied to each item; None means no filtering.
    filter_func: Optional[Callable] = None
    # Field name to sort by; None preserves backend ordering.
    order_by: Optional[str] = None
    # Number of leading results to skip (pagination offset).
    skip: int = 0
    # Maximum number of results to return; None means unlimited.
    take: Optional[int] = None
    # Projection: restrict results to these fields; None returns all fields.
    select_fields: Optional[List[str]] = None
    # When True, return only the result count instead of the items.
    count_only: bool = False
|
polydb/monitoring.py
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
1
|
+
# src/polydb/monitoring.py
|
|
2
|
+
"""
|
|
3
|
+
Comprehensive monitoring, metrics, and observability
|
|
4
|
+
"""
|
|
5
|
+
from typing import Dict, Any, Optional, List, Callable
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from datetime import datetime, timedelta
|
|
8
|
+
from collections import defaultdict
|
|
9
|
+
import threading
|
|
10
|
+
import time
|
|
11
|
+
import logging
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class QueryMetrics:
    """Metrics for a single query"""
    # Name of the operation performed (string supplied by the caller).
    operation: str
    # Name of the model the operation ran against.
    model: str
    # Wall-clock duration in milliseconds.
    duration_ms: float
    # True when the operation completed without raising.
    success: bool
    # str(exception) when the operation failed, else None.
    error: Optional[str] = None
    # NOTE(review): datetime.utcnow produces a *naive* timestamp and is
    # deprecated since Python 3.12; callers compare against naive datetimes,
    # so switching to timezone-aware values would be a breaking change.
    timestamp: datetime = field(default_factory=datetime.utcnow)
    # Optional multi-tenancy / audit attribution.
    tenant_id: Optional[str] = None
    actor_id: Optional[str] = None
    # Row count reported by the operation, when available.
    rows_affected: Optional[int] = None
    # True when the result was served from cache.
    cache_hit: bool = False
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class AggregatedMetrics:
    """Aggregated metrics over time window"""
    total_queries: int = 0
    successful_queries: int = 0
    failed_queries: int = 0
    # Sum of all query durations, in milliseconds.
    total_duration_ms: float = 0.0
    avg_duration_ms: float = 0.0
    # NOTE: stays +inf when no queries were aggregated (the empty-window
    # default returned by MetricsCollector.aggregate).
    min_duration_ms: float = float('inf')
    max_duration_ms: float = 0.0
    # Fraction of queries served from cache, in [0, 1].
    cache_hit_rate: float = 0.0
    # Histogram-style counts keyed by operation / model name.
    queries_by_operation: Dict[str, int] = field(default_factory=dict)
    queries_by_model: Dict[str, int] = field(default_factory=dict)
    # Full metric records for queries exceeding the slow-query threshold.
    slow_queries: List[QueryMetrics] = field(default_factory=list)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class MetricsCollector:
    """Collects per-query metrics and produces filtered / aggregated views.

    The in-memory metric list is guarded by a lock; slow-query logging and
    hook dispatch run outside the lock so a hook may safely call back into
    the collector. Storage grows unboundedly unless ``clear_old_metrics``
    is called periodically.
    """

    def __init__(self, slow_query_threshold_ms: float = 1000.0):
        # Queries slower than this (milliseconds) are logged and surfaced
        # in AggregatedMetrics.slow_queries.
        self.slow_query_threshold = slow_query_threshold_ms
        self._metrics: List["QueryMetrics"] = []
        self._lock = threading.Lock()
        self._hooks: List[Callable] = []
        self.logger = logging.getLogger(__name__)

    def record(self, metric: "QueryMetrics"):
        """Record a query metric, warn if it was slow, and notify hooks."""
        with self._lock:
            self._metrics.append(metric)

        # Log slow queries. Lazy %-args avoid building the message unless
        # the warning is actually emitted.
        if metric.duration_ms > self.slow_query_threshold:
            self.logger.warning(
                "Slow query detected: %s on %s took %.2fms",
                metric.operation, metric.model, metric.duration_ms,
            )

        # Trigger hooks; a failing hook must never break metric recording.
        for hook in self._hooks:
            try:
                hook(metric)
            except Exception as e:
                self.logger.error("Metrics hook failed: %s", e)

    def register_hook(self, hook: Callable[["QueryMetrics"], None]):
        """Register a callable invoked with every recorded metric."""
        self._hooks.append(hook)

    def get_metrics(
        self,
        since: Optional[datetime] = None,
        model: Optional[str] = None,
        operation: Optional[str] = None
    ) -> List["QueryMetrics"]:
        """Return a snapshot of recorded metrics, optionally filtered.

        Args:
            since: keep metrics with ``timestamp >= since``.
            model: keep metrics for this model name only.
            operation: keep metrics for this operation only.
        """
        # Copy under the lock, then filter outside it.
        with self._lock:
            metrics = self._metrics.copy()

        if since:
            metrics = [m for m in metrics if m.timestamp >= since]
        if model:
            metrics = [m for m in metrics if m.model == model]
        if operation:
            metrics = [m for m in metrics if m.operation == operation]
        return metrics

    def aggregate(
        self,
        since: Optional[datetime] = None,
        model: Optional[str] = None
    ) -> "AggregatedMetrics":
        """Generate aggregated metrics over the (optionally filtered) set.

        Returns a default AggregatedMetrics (min_duration_ms = +inf) when
        no metrics match.
        """
        metrics = self.get_metrics(since=since, model=model)
        if not metrics:
            return AggregatedMetrics()

        agg = AggregatedMetrics()
        agg.total_queries = len(metrics)

        durations = []
        cache_hits = 0
        for m in metrics:
            if m.success:
                agg.successful_queries += 1
            else:
                agg.failed_queries += 1

            durations.append(m.duration_ms)
            agg.total_duration_ms += m.duration_ms

            if m.cache_hit:
                cache_hits += 1

            agg.queries_by_operation[m.operation] = \
                agg.queries_by_operation.get(m.operation, 0) + 1
            agg.queries_by_model[m.model] = \
                agg.queries_by_model.get(m.model, 0) + 1

            if m.duration_ms > self.slow_query_threshold:
                agg.slow_queries.append(m)

        agg.avg_duration_ms = agg.total_duration_ms / agg.total_queries
        agg.min_duration_ms = min(durations)
        agg.max_duration_ms = max(durations)
        agg.cache_hit_rate = cache_hits / agg.total_queries
        return agg

    def clear_old_metrics(self, older_than: timedelta):
        """Drop metrics older than ``now - older_than`` (naive UTC)."""
        cutoff = datetime.utcnow() - older_than
        with self._lock:
            self._metrics = [m for m in self._metrics if m.timestamp >= cutoff]

    def export_prometheus(self) -> str:
        """Export aggregate counters in the Prometheus text exposition format."""
        agg = self.aggregate()

        lines = [
            "# HELP polydb_queries_total Total number of queries",
            "# TYPE polydb_queries_total counter",
            f"polydb_queries_total {agg.total_queries}",
            "",
            "# HELP polydb_queries_successful Successful queries",
            "# TYPE polydb_queries_successful counter",
            f"polydb_queries_successful {agg.successful_queries}",
            "",
            "# HELP polydb_queries_failed Failed queries",
            "# TYPE polydb_queries_failed counter",
            f"polydb_queries_failed {agg.failed_queries}",
            "",
            "# HELP polydb_query_duration_ms Query duration",
            "# TYPE polydb_query_duration_ms summary",
            f"polydb_query_duration_ms_sum {agg.total_duration_ms}",
            f"polydb_query_duration_ms_count {agg.total_queries}",
            "",
            "# HELP polydb_cache_hit_rate Cache hit rate",
            "# TYPE polydb_cache_hit_rate gauge",
            f"polydb_cache_hit_rate {agg.cache_hit_rate}",
        ]

        return "\n".join(lines)
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
class PerformanceMonitor:
    """Context manager that times a block and records one QueryMetrics entry.

    Callers may set ``rows_affected`` / ``cache_hit`` on the instance inside
    the ``with`` block; both are folded into the recorded metric on exit.
    """

    def __init__(
        self,
        collector: MetricsCollector,
        operation: str,
        model: str,
        tenant_id: Optional[str] = None,
        actor_id: Optional[str] = None
    ):
        self.collector = collector
        self.operation = operation
        self.model = model
        self.tenant_id = tenant_id
        self.actor_id = actor_id
        self.start_time = None
        self.success = False
        self.error = None
        self.rows_affected = None
        self.cache_hit = False

    def __enter__(self):
        # Monotonic high-resolution clock: wall-clock jumps cannot skew timing.
        self.start_time = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed_ms = (time.perf_counter() - self.start_time) * 1000  # type: ignore

        if exc_type is None:
            self.success = True
        else:
            self.error = str(exc_val)

        self.collector.record(QueryMetrics(
            operation=self.operation,
            model=self.model,
            duration_ms=elapsed_ms,
            success=self.success,
            error=self.error,
            tenant_id=self.tenant_id,
            actor_id=self.actor_id,
            rows_affected=self.rows_affected,
            cache_hit=self.cache_hit
        ))

        # Never suppress the caller's exception.
        return False
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
class HealthCheck:
    """System health monitoring over a factory's backends.

    The ``factory`` argument is duck-typed: it is expected to expose
    ``_sql``, ``_cache`` and ``_provider_name`` attributes — TODO confirm
    against the owning factory implementation.
    """

    def __init__(self, factory):
        self.factory = factory
        self.logger = logging.getLogger(__name__)

    def check_sql_health(self) -> Dict[str, Any]:
        """Check SQL database health via a trivial ``SELECT 1`` round trip.

        Returns a dict with ``status`` ('healthy'/'unhealthy'), plus
        ``latency_ms`` on success or ``error`` on failure.
        """
        try:
            start = time.perf_counter()
            self.factory._sql.execute("SELECT 1", fetch_one=True)
            duration_ms = (time.perf_counter() - start) * 1000

            return {
                'status': 'healthy',
                'latency_ms': duration_ms,
                'provider': self.factory._provider_name
            }
        except Exception as e:
            return {
                'status': 'unhealthy',
                'error': str(e),
                'provider': self.factory._provider_name
            }

    def check_nosql_health(self) -> Dict[str, Any]:
        """Check NoSQL database health.

        NOTE(review): no real NoSQL operation is issued yet (a dedicated
        test model would be required), so ``latency_ms`` only measures timer
        overhead and this check can effectively only report 'healthy'.
        """
        try:
            start = time.perf_counter()
            # TODO: issue a cheap read against a dedicated health-check model.
            duration_ms = (time.perf_counter() - start) * 1000

            return {
                'status': 'healthy',
                'latency_ms': duration_ms,
                'provider': self.factory._provider_name
            }
        except Exception as e:
            return {
                'status': 'unhealthy',
                'error': str(e),
                'provider': self.factory._provider_name
            }

    def check_cache_health(self) -> Dict[str, Any]:
        """Check cache health with a set/get round trip.

        Returns ``{'status': 'disabled'}`` when no cache is configured.
        """
        if not self.factory._cache:
            return {'status': 'disabled'}

        try:
            test_key = "_health_check"
            test_value = {"test": True}

            start = time.perf_counter()
            self.factory._cache.set(test_key, {}, test_value, 10)
            # Read back purely for the round trip; the returned value is not
            # validated (previously bound to an unused local).
            self.factory._cache.get(test_key, {})
            duration_ms = (time.perf_counter() - start) * 1000

            return {
                'status': 'healthy',
                'latency_ms': duration_ms
            }
        except Exception as e:
            return {
                'status': 'unhealthy',
                'error': str(e)
            }

    def full_health_check(self) -> Dict[str, Any]:
        """Run every check and return one timestamped combined report."""
        return {
            'timestamp': datetime.utcnow().isoformat(),
            'sql': self.check_sql_health(),
            'nosql': self.check_nosql_health(),
            'cache': self.check_cache_health()
        }
|