mcp-hangar 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information is provided for informational purposes only and reflects the package versions exactly as they appear in their respective public registries.
- mcp_hangar/__init__.py +139 -0
- mcp_hangar/application/__init__.py +1 -0
- mcp_hangar/application/commands/__init__.py +67 -0
- mcp_hangar/application/commands/auth_commands.py +118 -0
- mcp_hangar/application/commands/auth_handlers.py +296 -0
- mcp_hangar/application/commands/commands.py +59 -0
- mcp_hangar/application/commands/handlers.py +189 -0
- mcp_hangar/application/discovery/__init__.py +21 -0
- mcp_hangar/application/discovery/discovery_metrics.py +283 -0
- mcp_hangar/application/discovery/discovery_orchestrator.py +497 -0
- mcp_hangar/application/discovery/lifecycle_manager.py +315 -0
- mcp_hangar/application/discovery/security_validator.py +414 -0
- mcp_hangar/application/event_handlers/__init__.py +50 -0
- mcp_hangar/application/event_handlers/alert_handler.py +191 -0
- mcp_hangar/application/event_handlers/audit_handler.py +203 -0
- mcp_hangar/application/event_handlers/knowledge_base_handler.py +120 -0
- mcp_hangar/application/event_handlers/logging_handler.py +69 -0
- mcp_hangar/application/event_handlers/metrics_handler.py +152 -0
- mcp_hangar/application/event_handlers/persistent_audit_store.py +217 -0
- mcp_hangar/application/event_handlers/security_handler.py +604 -0
- mcp_hangar/application/mcp/tooling.py +158 -0
- mcp_hangar/application/ports/__init__.py +9 -0
- mcp_hangar/application/ports/observability.py +237 -0
- mcp_hangar/application/queries/__init__.py +52 -0
- mcp_hangar/application/queries/auth_handlers.py +237 -0
- mcp_hangar/application/queries/auth_queries.py +118 -0
- mcp_hangar/application/queries/handlers.py +227 -0
- mcp_hangar/application/read_models/__init__.py +11 -0
- mcp_hangar/application/read_models/provider_views.py +139 -0
- mcp_hangar/application/sagas/__init__.py +11 -0
- mcp_hangar/application/sagas/group_rebalance_saga.py +137 -0
- mcp_hangar/application/sagas/provider_failover_saga.py +266 -0
- mcp_hangar/application/sagas/provider_recovery_saga.py +172 -0
- mcp_hangar/application/services/__init__.py +9 -0
- mcp_hangar/application/services/provider_service.py +208 -0
- mcp_hangar/application/services/traced_provider_service.py +211 -0
- mcp_hangar/bootstrap/runtime.py +328 -0
- mcp_hangar/context.py +178 -0
- mcp_hangar/domain/__init__.py +117 -0
- mcp_hangar/domain/contracts/__init__.py +57 -0
- mcp_hangar/domain/contracts/authentication.py +225 -0
- mcp_hangar/domain/contracts/authorization.py +229 -0
- mcp_hangar/domain/contracts/event_store.py +178 -0
- mcp_hangar/domain/contracts/metrics_publisher.py +59 -0
- mcp_hangar/domain/contracts/persistence.py +383 -0
- mcp_hangar/domain/contracts/provider_runtime.py +146 -0
- mcp_hangar/domain/discovery/__init__.py +20 -0
- mcp_hangar/domain/discovery/conflict_resolver.py +267 -0
- mcp_hangar/domain/discovery/discovered_provider.py +185 -0
- mcp_hangar/domain/discovery/discovery_service.py +412 -0
- mcp_hangar/domain/discovery/discovery_source.py +192 -0
- mcp_hangar/domain/events.py +433 -0
- mcp_hangar/domain/exceptions.py +525 -0
- mcp_hangar/domain/model/__init__.py +70 -0
- mcp_hangar/domain/model/aggregate.py +58 -0
- mcp_hangar/domain/model/circuit_breaker.py +152 -0
- mcp_hangar/domain/model/event_sourced_api_key.py +413 -0
- mcp_hangar/domain/model/event_sourced_provider.py +423 -0
- mcp_hangar/domain/model/event_sourced_role_assignment.py +268 -0
- mcp_hangar/domain/model/health_tracker.py +183 -0
- mcp_hangar/domain/model/load_balancer.py +185 -0
- mcp_hangar/domain/model/provider.py +810 -0
- mcp_hangar/domain/model/provider_group.py +656 -0
- mcp_hangar/domain/model/tool_catalog.py +105 -0
- mcp_hangar/domain/policies/__init__.py +19 -0
- mcp_hangar/domain/policies/provider_health.py +187 -0
- mcp_hangar/domain/repository.py +249 -0
- mcp_hangar/domain/security/__init__.py +85 -0
- mcp_hangar/domain/security/input_validator.py +710 -0
- mcp_hangar/domain/security/rate_limiter.py +387 -0
- mcp_hangar/domain/security/roles.py +237 -0
- mcp_hangar/domain/security/sanitizer.py +387 -0
- mcp_hangar/domain/security/secrets.py +501 -0
- mcp_hangar/domain/services/__init__.py +20 -0
- mcp_hangar/domain/services/audit_service.py +376 -0
- mcp_hangar/domain/services/image_builder.py +328 -0
- mcp_hangar/domain/services/provider_launcher.py +1046 -0
- mcp_hangar/domain/value_objects.py +1138 -0
- mcp_hangar/errors.py +818 -0
- mcp_hangar/fastmcp_server.py +1105 -0
- mcp_hangar/gc.py +134 -0
- mcp_hangar/infrastructure/__init__.py +79 -0
- mcp_hangar/infrastructure/async_executor.py +133 -0
- mcp_hangar/infrastructure/auth/__init__.py +37 -0
- mcp_hangar/infrastructure/auth/api_key_authenticator.py +388 -0
- mcp_hangar/infrastructure/auth/event_sourced_store.py +567 -0
- mcp_hangar/infrastructure/auth/jwt_authenticator.py +360 -0
- mcp_hangar/infrastructure/auth/middleware.py +340 -0
- mcp_hangar/infrastructure/auth/opa_authorizer.py +243 -0
- mcp_hangar/infrastructure/auth/postgres_store.py +659 -0
- mcp_hangar/infrastructure/auth/projections.py +366 -0
- mcp_hangar/infrastructure/auth/rate_limiter.py +311 -0
- mcp_hangar/infrastructure/auth/rbac_authorizer.py +323 -0
- mcp_hangar/infrastructure/auth/sqlite_store.py +624 -0
- mcp_hangar/infrastructure/command_bus.py +112 -0
- mcp_hangar/infrastructure/discovery/__init__.py +110 -0
- mcp_hangar/infrastructure/discovery/docker_source.py +289 -0
- mcp_hangar/infrastructure/discovery/entrypoint_source.py +249 -0
- mcp_hangar/infrastructure/discovery/filesystem_source.py +383 -0
- mcp_hangar/infrastructure/discovery/kubernetes_source.py +247 -0
- mcp_hangar/infrastructure/event_bus.py +260 -0
- mcp_hangar/infrastructure/event_sourced_repository.py +443 -0
- mcp_hangar/infrastructure/event_store.py +396 -0
- mcp_hangar/infrastructure/knowledge_base/__init__.py +259 -0
- mcp_hangar/infrastructure/knowledge_base/contracts.py +202 -0
- mcp_hangar/infrastructure/knowledge_base/memory.py +177 -0
- mcp_hangar/infrastructure/knowledge_base/postgres.py +545 -0
- mcp_hangar/infrastructure/knowledge_base/sqlite.py +513 -0
- mcp_hangar/infrastructure/metrics_publisher.py +36 -0
- mcp_hangar/infrastructure/observability/__init__.py +10 -0
- mcp_hangar/infrastructure/observability/langfuse_adapter.py +534 -0
- mcp_hangar/infrastructure/persistence/__init__.py +33 -0
- mcp_hangar/infrastructure/persistence/audit_repository.py +371 -0
- mcp_hangar/infrastructure/persistence/config_repository.py +398 -0
- mcp_hangar/infrastructure/persistence/database.py +333 -0
- mcp_hangar/infrastructure/persistence/database_common.py +330 -0
- mcp_hangar/infrastructure/persistence/event_serializer.py +280 -0
- mcp_hangar/infrastructure/persistence/event_upcaster.py +166 -0
- mcp_hangar/infrastructure/persistence/in_memory_event_store.py +150 -0
- mcp_hangar/infrastructure/persistence/recovery_service.py +312 -0
- mcp_hangar/infrastructure/persistence/sqlite_event_store.py +386 -0
- mcp_hangar/infrastructure/persistence/unit_of_work.py +409 -0
- mcp_hangar/infrastructure/persistence/upcasters/README.md +13 -0
- mcp_hangar/infrastructure/persistence/upcasters/__init__.py +7 -0
- mcp_hangar/infrastructure/query_bus.py +153 -0
- mcp_hangar/infrastructure/saga_manager.py +401 -0
- mcp_hangar/logging_config.py +209 -0
- mcp_hangar/metrics.py +1007 -0
- mcp_hangar/models.py +31 -0
- mcp_hangar/observability/__init__.py +54 -0
- mcp_hangar/observability/health.py +487 -0
- mcp_hangar/observability/metrics.py +319 -0
- mcp_hangar/observability/tracing.py +433 -0
- mcp_hangar/progress.py +542 -0
- mcp_hangar/retry.py +613 -0
- mcp_hangar/server/__init__.py +120 -0
- mcp_hangar/server/__main__.py +6 -0
- mcp_hangar/server/auth_bootstrap.py +340 -0
- mcp_hangar/server/auth_cli.py +335 -0
- mcp_hangar/server/auth_config.py +305 -0
- mcp_hangar/server/bootstrap.py +735 -0
- mcp_hangar/server/cli.py +161 -0
- mcp_hangar/server/config.py +224 -0
- mcp_hangar/server/context.py +215 -0
- mcp_hangar/server/http_auth_middleware.py +165 -0
- mcp_hangar/server/lifecycle.py +467 -0
- mcp_hangar/server/state.py +117 -0
- mcp_hangar/server/tools/__init__.py +16 -0
- mcp_hangar/server/tools/discovery.py +186 -0
- mcp_hangar/server/tools/groups.py +75 -0
- mcp_hangar/server/tools/health.py +301 -0
- mcp_hangar/server/tools/provider.py +939 -0
- mcp_hangar/server/tools/registry.py +320 -0
- mcp_hangar/server/validation.py +113 -0
- mcp_hangar/stdio_client.py +229 -0
- mcp_hangar-0.2.0.dist-info/METADATA +347 -0
- mcp_hangar-0.2.0.dist-info/RECORD +160 -0
- mcp_hangar-0.2.0.dist-info/WHEEL +4 -0
- mcp_hangar-0.2.0.dist-info/entry_points.txt +2 -0
- mcp_hangar-0.2.0.dist-info/licenses/LICENSE +21 -0
mcp_hangar/infrastructure/knowledge_base/postgres.py
@@ -0,0 +1,545 @@
"""PostgreSQL implementation of IKnowledgeBase."""

from datetime import datetime, timedelta, timezone
import hashlib
import json
from typing import Any, Optional

from ...logging_config import get_logger
from .contracts import AuditEntry, IKnowledgeBase, KnowledgeBaseConfig, MetricEntry, ProviderStateEntry

logger = get_logger(__name__)

# SQL Migrations for PostgreSQL
MIGRATIONS = [
    {
        "version": 1,
        "name": "initial_schema",
        "sql": """
            CREATE TABLE IF NOT EXISTS schema_migrations (
                version INTEGER PRIMARY KEY,
                name TEXT NOT NULL,
                applied_at TIMESTAMPTZ DEFAULT NOW()
            );

            CREATE TABLE IF NOT EXISTS tool_cache (
                id SERIAL PRIMARY KEY,
                provider TEXT NOT NULL,
                tool TEXT NOT NULL,
                arguments_hash TEXT NOT NULL,
                result JSONB NOT NULL,
                created_at TIMESTAMPTZ DEFAULT NOW(),
                expires_at TIMESTAMPTZ NOT NULL,
                UNIQUE(provider, tool, arguments_hash)
            );

            CREATE INDEX IF NOT EXISTS idx_tool_cache_lookup
                ON tool_cache(provider, tool, arguments_hash);
            CREATE INDEX IF NOT EXISTS idx_tool_cache_expires
                ON tool_cache(expires_at);

            CREATE TABLE IF NOT EXISTS audit_log (
                id SERIAL PRIMARY KEY,
                timestamp TIMESTAMPTZ DEFAULT NOW(),
                event_type TEXT NOT NULL,
                provider TEXT,
                tool TEXT,
                arguments JSONB,
                result_summary TEXT,
                duration_ms INTEGER,
                success BOOLEAN NOT NULL,
                error_message TEXT,
                correlation_id TEXT
            );

            CREATE INDEX IF NOT EXISTS idx_audit_log_timestamp
                ON audit_log(timestamp DESC);
            CREATE INDEX IF NOT EXISTS idx_audit_log_provider
                ON audit_log(provider, timestamp DESC);

            CREATE TABLE IF NOT EXISTS provider_state_history (
                id SERIAL PRIMARY KEY,
                provider_id TEXT NOT NULL,
                old_state TEXT,
                new_state TEXT NOT NULL,
                timestamp TIMESTAMPTZ DEFAULT NOW(),
                reason TEXT
            );

            CREATE INDEX IF NOT EXISTS idx_provider_state_provider
                ON provider_state_history(provider_id, timestamp DESC);

            CREATE TABLE IF NOT EXISTS provider_metrics (
                id SERIAL PRIMARY KEY,
                provider_id TEXT NOT NULL,
                timestamp TIMESTAMPTZ DEFAULT NOW(),
                metric_name TEXT NOT NULL,
                metric_value DOUBLE PRECISION NOT NULL,
                labels JSONB DEFAULT '{}'
            );

            CREATE INDEX IF NOT EXISTS idx_provider_metrics_lookup
                ON provider_metrics(provider_id, metric_name, timestamp DESC);
        """,
    },
    {
        "version": 2,
        "name": "cleanup_function",
        "sql": """
            CREATE OR REPLACE FUNCTION cleanup_expired_cache() RETURNS INTEGER AS $$
            DECLARE
                deleted_count INTEGER;
            BEGIN
                DELETE FROM tool_cache WHERE expires_at < NOW();
                GET DIAGNOSTICS deleted_count = ROW_COUNT;
                RETURN deleted_count;
            END;
            $$ LANGUAGE plpgsql;
        """,
    },
]

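# Migrations are forward-only and keyed by "version": _run_migrations below
# applies every entry whose version exceeds MAX(version) in schema_migrations,
# then records it there. A hypothetical future entry (not in this package)
# would follow the same shape:
#
#     {
#         "version": 3,
#         "name": "add_correlation_index",
#         "sql": "CREATE INDEX IF NOT EXISTS idx_audit_log_correlation ON audit_log(correlation_id);",
#     }
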

class PostgresKnowledgeBase(IKnowledgeBase):
    """PostgreSQL implementation of knowledge base."""

    def __init__(self, config: KnowledgeBaseConfig):
        self._config = config
        self._pool = None
        self._initialized = False

    async def initialize(self) -> bool:
        """Initialize connection pool and run migrations."""
        try:
            import asyncpg

            self._pool = await asyncpg.create_pool(
                self._config.dsn,
                min_size=1,
                max_size=self._config.pool_size,
                command_timeout=10,
            )

            # Run migrations
            await self._run_migrations()

            self._initialized = True
            logger.info(
                "postgres_kb_initialized",
                pool_size=self._config.pool_size,
            )
            return True

        except ImportError:
            logger.error("asyncpg_not_installed", hint="pip install asyncpg")
            return False
        except Exception as e:
            logger.error("postgres_kb_init_failed", error=str(e))
            return False
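
    # initialize() reports failure by returning False rather than raising, so
    # a caller can degrade gracefully (every public method below already
    # no-ops when self._pool is None). Note one caveat in the first-run probe
    # in _run_migrations: asyncpg reports a missing table as
    # UndefinedTableError (a PostgresError, not an OSError), so on a
    # completely fresh database the probe's exception propagates to
    # initialize(), which returns False instead of treating the schema as
    # version 0.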

    async def _run_migrations(self) -> None:
        """Run pending migrations."""
        async with self._pool.acquire() as conn:
            # Get current version
            try:
                version = await conn.fetchval("SELECT MAX(version) FROM schema_migrations")
            except OSError:
                # Table doesn't exist yet or connection failed - this is first run
                version = 0

            current_version = version or 0

            for migration in MIGRATIONS:
                if migration["version"] <= current_version:
                    continue

                logger.info(
                    "applying_migration",
                    version=migration["version"],
                    name=migration["name"],
                )

                await conn.execute(migration["sql"])
                await conn.execute(
                    "INSERT INTO schema_migrations (version, name) VALUES ($1, $2)",
                    migration["version"],
                    migration["name"],
                )

            final_version = await conn.fetchval("SELECT MAX(version) FROM schema_migrations")
            logger.info("postgres_kb_schema_ready", version=final_version)

    async def close(self) -> None:
        """Close connection pool."""
        if self._pool:
            await self._pool.close()
            self._pool = None
            self._initialized = False
            logger.info("postgres_kb_closed")

    async def is_healthy(self) -> bool:
        """Check if database is reachable."""
        if not self._pool:
            return False
        try:
            async with self._pool.acquire() as conn:
                await conn.fetchval("SELECT 1")
            return True
        except (OSError, ConnectionError, TimeoutError) as e:
            logger.debug("postgres_health_check_failed", error=str(e))
            return False

    def _hash_arguments(self, arguments: dict) -> str:
        """Create hash of arguments for cache key."""
        serialized = json.dumps(arguments, sort_keys=True, default=str)
        return hashlib.sha256(serialized.encode()).hexdigest()[:32]
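
    # Cache keys are (provider, tool, sha256-of-canonical-JSON arguments,
    # truncated to 32 hex chars). Sorting keys makes the hash stable across
    # dict ordering, so {"a": 1, "b": 2} and {"b": 2, "a": 1} share one cache
    # row; default=str keeps non-JSON values such as datetimes serializable.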

    # === Cache Operations ===

    async def cache_get(self, provider: str, tool: str, arguments: dict) -> Optional[dict]:
        if not self._pool:
            return None

        args_hash = self._hash_arguments(arguments)

        try:
            async with self._pool.acquire() as conn:
                row = await conn.fetchrow(
                    """
                    SELECT result FROM tool_cache
                    WHERE provider = $1 AND tool = $2 AND arguments_hash = $3
                      AND expires_at > NOW()
                    """,
                    provider,
                    tool,
                    args_hash,
                )
                if row:
                    logger.debug("cache_hit", provider=provider, tool=tool)
                    return json.loads(row["result"])
                return None
        except Exception as e:
            logger.warning("cache_get_failed", error=str(e))
            return None

    async def cache_set(
        self,
        provider: str,
        tool: str,
        arguments: dict,
        result: Any,
        ttl_s: Optional[int] = None,
    ) -> bool:
        if not self._pool:
            return False

        args_hash = self._hash_arguments(arguments)
        ttl = ttl_s or self._config.cache_ttl_s
        expires_at = datetime.now(timezone.utc) + timedelta(seconds=ttl)

        try:
            async with self._pool.acquire() as conn:
                await conn.execute(
                    """
                    INSERT INTO tool_cache (provider, tool, arguments_hash, result, expires_at)
                    VALUES ($1, $2, $3, $4, $5)
                    ON CONFLICT (provider, tool, arguments_hash)
                    DO UPDATE SET result = $4, expires_at = $5, created_at = NOW()
                    """,
                    provider,
                    tool,
                    args_hash,
                    json.dumps(result, default=str),
                    expires_at,
                )
            return True
        except Exception as e:
            logger.warning("cache_set_failed", error=str(e))
            return False

    async def cache_invalidate(self, provider: Optional[str] = None, tool: Optional[str] = None) -> int:
        if not self._pool:
            return 0

        try:
            async with self._pool.acquire() as conn:
                if provider and tool:
                    result = await conn.execute(
                        "DELETE FROM tool_cache WHERE provider = $1 AND tool = $2",
                        provider,
                        tool,
                    )
                elif provider:
                    result = await conn.execute("DELETE FROM tool_cache WHERE provider = $1", provider)
                else:
                    result = await conn.execute("DELETE FROM tool_cache")
                # Parse "DELETE N" to get count
                return int(result.split()[-1]) if result else 0
        except Exception as e:
            logger.warning("cache_invalidate_failed", error=str(e))
            return 0

    async def cache_cleanup(self) -> int:
        if not self._pool:
            return 0

        try:
            async with self._pool.acquire() as conn:
                result = await conn.fetchval("SELECT cleanup_expired_cache()")
                logger.info("cache_cleanup", deleted=result)
                return result or 0
        except Exception as e:
            logger.warning("cache_cleanup_failed", error=str(e))
            return 0
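
    # A read-side sketch for the audit API defined below, with kb an
    # initialized PostgresKnowledgeBase (provider name illustrative, not from
    # this package):
    #
    #     failures = await kb.audit_query(
    #         provider="github",
    #         success=False,
    #         since=datetime.now(timezone.utc) - timedelta(hours=1),
    #         limit=50,
    #     )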

    # === Audit Operations ===

    async def audit_log(self, entry: AuditEntry) -> bool:
        if not self._pool:
            return False

        try:
            async with self._pool.acquire() as conn:
                await conn.execute(
                    """
                    INSERT INTO audit_log
                        (event_type, provider, tool, arguments, result_summary,
                         duration_ms, success, error_message, correlation_id)
                    VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
                    """,
                    entry.event_type,
                    entry.provider,
                    entry.tool,
                    json.dumps(entry.arguments, default=str) if entry.arguments else None,
                    entry.result_summary,
                    entry.duration_ms,
                    entry.success,
                    entry.error_message,
                    entry.correlation_id,
                )
            return True
        except Exception as e:
            logger.warning("audit_log_failed", error=str(e))
            return False

    async def audit_query(
        self,
        provider: Optional[str] = None,
        tool: Optional[str] = None,
        success: Optional[bool] = None,
        since: Optional[datetime] = None,
        limit: int = 100,
    ) -> list[AuditEntry]:
        if not self._pool:
            return []

        try:
            conditions = []
            params = []
            param_idx = 1

            if provider:
                conditions.append(f"provider = ${param_idx}")
                params.append(provider)
                param_idx += 1
            if tool:
                conditions.append(f"tool = ${param_idx}")
                params.append(tool)
                param_idx += 1
            if success is not None:
                conditions.append(f"success = ${param_idx}")
                params.append(success)
                param_idx += 1
            if since:
                conditions.append(f"timestamp >= ${param_idx}")
                params.append(since)
                param_idx += 1

            where_clause = " AND ".join(conditions) if conditions else "1=1"
            params.append(limit)

            async with self._pool.acquire() as conn:
                rows = await conn.fetch(
                    f"""
                    SELECT event_type, provider, tool, arguments, result_summary,
                           duration_ms, success, error_message, correlation_id, timestamp
                    FROM audit_log
                    WHERE {where_clause}
                    ORDER BY timestamp DESC
                    LIMIT ${param_idx}
                    """,
                    *params,
                )

            return [
                AuditEntry(
                    event_type=row["event_type"],
                    provider=row["provider"],
                    tool=row["tool"],
                    arguments=json.loads(row["arguments"]) if row["arguments"] else None,
                    result_summary=row["result_summary"],
                    duration_ms=row["duration_ms"],
                    success=row["success"],
                    error_message=row["error_message"],
                    correlation_id=row["correlation_id"],
                    timestamp=row["timestamp"],
                )
                for row in rows
            ]
        except Exception as e:
            logger.warning("audit_query_failed", error=str(e))
            return []

    async def audit_stats(self, hours: int = 24) -> dict:
        if not self._pool:
            return {}

        try:
            async with self._pool.acquire() as conn:
                row = await conn.fetchrow(
                    f"""
                    SELECT
                        COUNT(*) as total,
                        COUNT(*) FILTER (WHERE success) as success_count,
                        COUNT(*) FILTER (WHERE NOT success) as error_count,
                        COUNT(DISTINCT provider) as providers,
                        COUNT(DISTINCT tool) as tools,
                        AVG(duration_ms) FILTER (WHERE duration_ms IS NOT NULL) as avg_duration_ms
                    FROM audit_log
                    WHERE timestamp > NOW() - INTERVAL '{hours} hours'
                    """
                )
                return dict(row) if row else {}
        except Exception as e:
            logger.warning("audit_stats_failed", error=str(e))
            return {}
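
    # Unlike every other query in this file, audit_stats above interpolates
    # its parameter into the SQL (INTERVAL '{hours} hours') instead of binding
    # a $n placeholder; hours is typed as int, which keeps this from being an
    # injection vector.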

    # === Provider State Operations ===

    async def record_state_change(self, entry: ProviderStateEntry) -> bool:
        if not self._pool:
            return False

        try:
            async with self._pool.acquire() as conn:
                await conn.execute(
                    """
                    INSERT INTO provider_state_history
                        (provider_id, old_state, new_state, reason)
                    VALUES ($1, $2, $3, $4)
                    """,
                    entry.provider_id,
                    entry.old_state,
                    entry.new_state,
                    entry.reason,
                )
            return True
        except Exception as e:
            logger.warning("record_state_failed", error=str(e))
            return False

    async def get_state_history(self, provider_id: str, limit: int = 100) -> list[ProviderStateEntry]:
        if not self._pool:
            return []

        try:
            async with self._pool.acquire() as conn:
                rows = await conn.fetch(
                    """
                    SELECT provider_id, old_state, new_state, reason, timestamp
                    FROM provider_state_history
                    WHERE provider_id = $1
                    ORDER BY timestamp DESC
                    LIMIT $2
                    """,
                    provider_id,
                    limit,
                )
            return [
                ProviderStateEntry(
                    provider_id=row["provider_id"],
                    old_state=row["old_state"],
                    new_state=row["new_state"],
                    reason=row["reason"],
                    timestamp=row["timestamp"],
                )
                for row in rows
            ]
        except Exception as e:
            logger.warning("get_state_history_failed", error=str(e))
            return []
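
    # Metric rows are append-only samples queried newest-first. A recording
    # sketch for the API below (names illustrative; MetricEntry's exact
    # constructor lives in contracts.py, which is not shown in this diff):
    #
    #     await kb.record_metric(MetricEntry(
    #         provider_id="math",
    #         metric_name="tool_latency_ms",
    #         metric_value=12.5,
    #         labels={"tool": "add"},
    #     ))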

    # === Metrics Operations ===

    async def record_metric(self, entry: MetricEntry) -> bool:
        if not self._pool:
            return False

        try:
            async with self._pool.acquire() as conn:
                await conn.execute(
                    """
                    INSERT INTO provider_metrics
                        (provider_id, metric_name, metric_value, labels)
                    VALUES ($1, $2, $3, $4)
                    """,
                    entry.provider_id,
                    entry.metric_name,
                    entry.metric_value,
                    json.dumps(entry.labels or {}),
                )
            return True
        except Exception as e:
            logger.warning("record_metric_failed", error=str(e))
            return False

    async def get_metrics(
        self,
        provider_id: str,
        metric_name: Optional[str] = None,
        since: Optional[datetime] = None,
        limit: int = 100,
    ) -> list[MetricEntry]:
        if not self._pool:
            return []

        try:
            conditions = ["provider_id = $1"]
            params = [provider_id]
            param_idx = 2

            if metric_name:
                conditions.append(f"metric_name = ${param_idx}")
                params.append(metric_name)
                param_idx += 1
            if since:
                conditions.append(f"timestamp >= ${param_idx}")
                params.append(since)
                param_idx += 1

            params.append(limit)

            async with self._pool.acquire() as conn:
                rows = await conn.fetch(
                    f"""
                    SELECT provider_id, metric_name, metric_value, labels, timestamp
                    FROM provider_metrics
                    WHERE {" AND ".join(conditions)}
                    ORDER BY timestamp DESC
                    LIMIT ${param_idx}
                    """,
                    *params,
                )
            return [
                MetricEntry(
                    provider_id=row["provider_id"],
                    metric_name=row["metric_name"],
                    metric_value=row["metric_value"],
                    labels=json.loads(row["labels"]) if row["labels"] else None,
                    timestamp=row["timestamp"],
                )
                for row in rows
            ]
        except Exception as e:
            logger.warning("get_metrics_failed", error=str(e))
            return []
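
Taken together, a minimal end-to-end usage sketch of this file, not part of the package itself. The KnowledgeBaseConfig field names below (dsn, pool_size, cache_ttl_s) follow the attributes the class reads above; the exact constructor lives in contracts.py, which is not shown in this diff, and may accept more options.

import asyncio

from mcp_hangar.infrastructure.knowledge_base.contracts import KnowledgeBaseConfig
from mcp_hangar.infrastructure.knowledge_base.postgres import PostgresKnowledgeBase


async def main() -> None:
    # DSN is illustrative; point it at a reachable PostgreSQL instance.
    config = KnowledgeBaseConfig(
        dsn="postgresql://hangar:secret@localhost:5432/hangar",
        pool_size=5,
        cache_ttl_s=300,
    )
    kb = PostgresKnowledgeBase(config)
    if not await kb.initialize():
        return  # degraded mode: no pool, every call above becomes a no-op

    try:
        await kb.cache_set("math", "add", {"a": 1, "b": 2}, {"sum": 3})
        # Key order does not matter: _hash_arguments sorts keys first.
        print(await kb.cache_get("math", "add", {"b": 2, "a": 1}))  # {'sum': 3}
        print(await kb.audit_stats(hours=1))
    finally:
        await kb.close()


asyncio.run(main())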