kailash 0.4.2__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff shows changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- kailash/__init__.py +1 -1
- kailash/client/__init__.py +12 -0
- kailash/client/enhanced_client.py +306 -0
- kailash/core/actors/__init__.py +16 -0
- kailash/core/actors/connection_actor.py +566 -0
- kailash/core/actors/supervisor.py +364 -0
- kailash/edge/__init__.py +16 -0
- kailash/edge/compliance.py +834 -0
- kailash/edge/discovery.py +659 -0
- kailash/edge/location.py +582 -0
- kailash/gateway/__init__.py +33 -0
- kailash/gateway/api.py +289 -0
- kailash/gateway/enhanced_gateway.py +357 -0
- kailash/gateway/resource_resolver.py +217 -0
- kailash/gateway/security.py +227 -0
- kailash/middleware/auth/models.py +2 -2
- kailash/middleware/database/base_models.py +1 -7
- kailash/middleware/database/repositories.py +3 -1
- kailash/middleware/gateway/__init__.py +22 -0
- kailash/middleware/gateway/checkpoint_manager.py +398 -0
- kailash/middleware/gateway/deduplicator.py +382 -0
- kailash/middleware/gateway/durable_gateway.py +417 -0
- kailash/middleware/gateway/durable_request.py +498 -0
- kailash/middleware/gateway/event_store.py +459 -0
- kailash/nodes/admin/audit_log.py +364 -6
- kailash/nodes/admin/permission_check.py +817 -33
- kailash/nodes/admin/role_management.py +1242 -108
- kailash/nodes/admin/schema_manager.py +438 -0
- kailash/nodes/admin/user_management.py +1209 -681
- kailash/nodes/api/http.py +95 -71
- kailash/nodes/base.py +281 -164
- kailash/nodes/base_async.py +30 -31
- kailash/nodes/code/__init__.py +8 -1
- kailash/nodes/code/async_python.py +1035 -0
- kailash/nodes/code/python.py +1 -0
- kailash/nodes/data/async_sql.py +12 -25
- kailash/nodes/data/sql.py +20 -11
- kailash/nodes/data/workflow_connection_pool.py +643 -0
- kailash/nodes/rag/__init__.py +1 -4
- kailash/resources/__init__.py +40 -0
- kailash/resources/factory.py +533 -0
- kailash/resources/health.py +319 -0
- kailash/resources/reference.py +288 -0
- kailash/resources/registry.py +392 -0
- kailash/runtime/async_local.py +711 -302
- kailash/testing/__init__.py +34 -0
- kailash/testing/async_test_case.py +353 -0
- kailash/testing/async_utils.py +345 -0
- kailash/testing/fixtures.py +458 -0
- kailash/testing/mock_registry.py +495 -0
- kailash/utils/resource_manager.py +420 -0
- kailash/workflow/__init__.py +8 -0
- kailash/workflow/async_builder.py +621 -0
- kailash/workflow/async_patterns.py +766 -0
- kailash/workflow/builder.py +93 -10
- kailash/workflow/cyclic_runner.py +111 -41
- kailash/workflow/graph.py +7 -2
- kailash/workflow/resilience.py +11 -1
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/METADATA +12 -7
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/RECORD +64 -28
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/WHEEL +0 -0
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/entry_points.txt +0 -0
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/top_level.txt +0 -0
kailash/nodes/admin/schema_manager.py (new file)
@@ -0,0 +1,438 @@
"""
Admin Node Schema Manager

Production-ready database schema management for Kailash Admin Nodes.
Handles schema creation, migration, and validation with comprehensive error handling.
"""

import json
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

from kailash.nodes.data import SQLDatabaseNode
from kailash.sdk_exceptions import NodeExecutionError, NodeValidationError


class AdminSchemaManager:
    """Manages admin node database schema creation and migration."""

    def __init__(self, database_config: Dict[str, Any]):
        """Initialize schema manager with database configuration."""
        self.database_config = database_config
        self.db_node = SQLDatabaseNode(name="admin_schema_manager", **database_config)
        self.logger = logging.getLogger(__name__)

        # Schema version for migration tracking
        self.current_schema_version = "1.0.0"

    def create_full_schema(self, drop_existing: bool = False) -> Dict[str, Any]:
        """
        Create the complete admin node schema.

        Args:
            drop_existing: If True, drop existing tables first

        Returns:
            Dict with creation results and metadata
        """
        try:
            results = {
                "schema_version": self.current_schema_version,
                "tables_created": [],
                "indexes_created": [],
                "triggers_created": [],
                "functions_created": [],
                "success": True,
                "errors": [],
            }

            # Drop existing tables if requested
            if drop_existing:
                self._drop_existing_schema()

            # Load and execute schema
            schema_path = Path(__file__).parent / "schema.sql"
            with open(schema_path, "r") as f:
                schema_sql = f.read()

            # Execute schema creation
            self.db_node.run(query=schema_sql)

            # Verify schema creation
            tables = self._get_existing_tables()
            results["tables_created"] = tables

            # Create schema version tracking
            self._create_schema_version_table()
            self._record_schema_version()

            self.logger.info(f"Admin schema created successfully: {len(tables)} tables")
            return results

        except Exception as e:
            self.logger.error(f"Schema creation failed: {e}")
            raise NodeExecutionError(f"Failed to create admin schema: {str(e)}")

    def validate_schema(self) -> Dict[str, Any]:
        """
        Validate that the admin schema is complete and correct.

        Returns:
            Dict with validation results
        """
        try:
            validation = {
                "is_valid": True,
                "schema_version": None,
                "missing_tables": [],
                "missing_indexes": [],
                "table_issues": [],
                "recommendations": [],
            }

            # Check required tables
            required_tables = [
                "users",
                "roles",
                "user_role_assignments",
                "permissions",
                "permission_cache",
                "user_attributes",
                "resource_attributes",
                "user_sessions",
                "admin_audit_log",
            ]

            existing_tables = self._get_existing_tables()

            for table in required_tables:
                if table not in existing_tables:
                    validation["missing_tables"].append(table)
                    validation["is_valid"] = False

            # Check schema version
            try:
                version_result = self.db_node.run(
                    query="SELECT version FROM admin_schema_version ORDER BY created_at DESC LIMIT 1",
                    result_format="dict",
                )
                if version_result.get("data"):
                    validation["schema_version"] = version_result["data"][0]["version"]
                else:
                    validation["recommendations"].append(
                        "Schema version tracking not found"
                    )
            except Exception:
                validation["recommendations"].append("Unable to check schema version")

            # Check critical indexes
            critical_indexes = [
                "idx_users_tenant_status",
                "idx_roles_tenant_active",
                "idx_user_roles_user",
                "idx_permission_cache_user",
            ]

            existing_indexes = self._get_existing_indexes()
            for index in critical_indexes:
                if index not in existing_indexes:
                    validation["missing_indexes"].append(index)
                    validation["recommendations"].append(
                        f"Consider creating index: {index}"
                    )

            # Validate table structures
            validation["table_issues"] = self._validate_table_structures()

            return validation

        except Exception as e:
            self.logger.error(f"Schema validation failed: {e}")
            raise NodeExecutionError(f"Failed to validate schema: {str(e)}")

    def migrate_schema(self, target_version: str = None) -> Dict[str, Any]:
        """
        Migrate schema to target version.

        Args:
            target_version: Target schema version (default: latest)

        Returns:
            Dict with migration results
        """
        target_version = target_version or self.current_schema_version

        try:
            current_version = self._get_current_schema_version()

            if current_version == target_version:
                return {
                    "migration_needed": False,
                    "current_version": current_version,
                    "target_version": target_version,
                    "message": "Schema is already at target version",
                }

            # For now, we only support creating from scratch
            # Future versions would implement incremental migrations
            if current_version is None:
                return self.create_full_schema(drop_existing=False)
            else:
                return {
                    "migration_needed": True,
                    "current_version": current_version,
                    "target_version": target_version,
                    "error": "Incremental migrations not yet implemented",
                    "recommendation": "Use create_full_schema() with drop_existing=True",
                }

        except Exception as e:
            self.logger.error(f"Schema migration failed: {e}")
            raise NodeExecutionError(f"Failed to migrate schema: {str(e)}")

    def get_schema_info(self) -> Dict[str, Any]:
        """Get comprehensive schema information."""
        try:
            info = {
                "schema_version": self._get_current_schema_version(),
                "tables": self._get_table_info(),
                "indexes": self._get_existing_indexes(),
                "row_counts": self._get_table_row_counts(),
                "database_info": self._get_database_info(),
            }

            return info

        except Exception as e:
            self.logger.error(f"Failed to get schema info: {e}")
            raise NodeExecutionError(f"Failed to get schema info: {str(e)}")

    def _drop_existing_schema(self):
        """Drop existing admin schema tables."""
        tables_to_drop = [
            "admin_audit_log",
            "user_sessions",
            "resource_attributes",
            "user_attributes",
            "permission_cache",
            "permissions",
            "user_role_assignments",
            "roles",
            "users",
            "admin_schema_version",
        ]

        for table in tables_to_drop:
            try:
                self.db_node.run(query=f"DROP TABLE IF EXISTS {table} CASCADE")
            except Exception as e:
                self.logger.warning(f"Could not drop table {table}: {e}")

    def _get_existing_tables(self) -> List[str]:
        """Get list of existing tables in the database."""
        try:
            result = self.db_node.run(
                query="""
                SELECT table_name
                FROM information_schema.tables
                WHERE table_schema = 'public'
                AND table_type = 'BASE TABLE'
                ORDER BY table_name
                """,
                result_format="dict",
            )

            return [row["table_name"] for row in result.get("data", [])]

        except Exception as e:
            self.logger.warning(f"Could not get existing tables: {e}")
            return []

    def _get_existing_indexes(self) -> List[str]:
        """Get list of existing indexes."""
        try:
            result = self.db_node.run(
                query="""
                SELECT indexname
                FROM pg_indexes
                WHERE schemaname = 'public'
                ORDER BY indexname
                """,
                result_format="dict",
            )

            return [row["indexname"] for row in result.get("data", [])]

        except Exception as e:
            self.logger.warning(f"Could not get existing indexes: {e}")
            return []

    def _create_schema_version_table(self):
        """Create table for tracking schema versions."""
        version_table_sql = """
        CREATE TABLE IF NOT EXISTS admin_schema_version (
            id SERIAL PRIMARY KEY,
            version VARCHAR(50) NOT NULL,
            applied_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
            created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
            migration_notes TEXT
        )
        """

        self.db_node.run(query=version_table_sql)

    def _record_schema_version(self):
        """Record the current schema version."""
        self.db_node.run(
            query="""
            INSERT INTO admin_schema_version (version, migration_notes)
            VALUES ($1, $2)
            """,
            parameters=[
                self.current_schema_version,
                f"Full schema creation for admin nodes v{self.current_schema_version}",
            ],
        )

    def _get_current_schema_version(self) -> Optional[str]:
        """Get the current schema version."""
        try:
            result = self.db_node.run(
                query="SELECT version FROM admin_schema_version ORDER BY created_at DESC LIMIT 1",
                result_format="dict",
            )

            if result.get("data"):
                return result["data"][0]["version"]
            return None

        except Exception:
            return None

    def _validate_table_structures(self) -> List[Dict[str, Any]]:
        """Validate table structures against expected schema."""
        issues = []

        # Check users table structure
        try:
            users_columns = self._get_table_columns("users")
            required_users_columns = [
                "user_id",
                "email",
                "status",
                "tenant_id",
                "roles",
                "attributes",
            ]

            for col in required_users_columns:
                if col not in users_columns:
                    issues.append(
                        {
                            "table": "users",
                            "issue": f"Missing column: {col}",
                            "severity": "error",
                        }
                    )

        except Exception as e:
            issues.append(
                {
                    "table": "users",
                    "issue": f"Could not validate structure: {e}",
                    "severity": "warning",
                }
            )

        return issues

    def _get_table_columns(self, table_name: str) -> List[str]:
        """Get column names for a table."""
        try:
            result = self.db_node.run(
                query="""
                SELECT column_name
                FROM information_schema.columns
                WHERE table_name = $1
                ORDER BY ordinal_position
                """,
                parameters=[table_name],
                result_format="dict",
            )

            return [row["column_name"] for row in result.get("data", [])]

        except Exception as e:
            self.logger.warning(f"Could not get columns for {table_name}: {e}")
            return []

    def _get_table_info(self) -> Dict[str, Any]:
        """Get detailed table information."""
        try:
            result = self.db_node.run(
                query="""
                SELECT
                    t.table_name,
                    t.table_type,
                    pg_size_pretty(pg_total_relation_size(c.oid)) as size
                FROM information_schema.tables t
                LEFT JOIN pg_class c ON c.relname = t.table_name
                WHERE t.table_schema = 'public'
                AND t.table_type = 'BASE TABLE'
                ORDER BY t.table_name
                """,
                result_format="dict",
            )

            return {row["table_name"]: row for row in result.get("data", [])}

        except Exception as e:
            self.logger.warning(f"Could not get table info: {e}")
            return {}

    def _get_table_row_counts(self) -> Dict[str, int]:
        """Get row counts for all admin tables."""
        tables = ["users", "roles", "user_role_assignments", "permission_cache"]
        counts = {}

        for table in tables:
            try:
                result = self.db_node.run(
                    query=f"SELECT COUNT(*) as count FROM {table}",
                    result_format="dict",
                )
                counts[table] = result["data"][0]["count"] if result.get("data") else 0
            except Exception:
                counts[table] = -1  # Error indicator

        return counts

    def _get_database_info(self) -> Dict[str, Any]:
        """Get general database information."""
        try:
            version_result = self.db_node.run(
                query="SELECT version()", result_format="dict"
            )

            size_result = self.db_node.run(
                query="SELECT pg_size_pretty(pg_database_size(current_database())) as size",
                result_format="dict",
            )

            return {
                "version": (
                    version_result["data"][0]["version"]
                    if version_result.get("data")
                    else "Unknown"
                ),
                "size": (
                    size_result["data"][0]["size"]
                    if size_result.get("data")
                    else "Unknown"
                ),
            }

        except Exception as e:
            self.logger.warning(f"Could not get database info: {e}")
            return {"version": "Unknown", "size": "Unknown"}
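For orientation, here is a minimal usage sketch of the new AdminSchemaManager. It is illustrative only: the shape of database_config (the connection_string key in this sketch) is an assumption, since the dict is forwarded unchanged to SQLDatabaseNode and must match whatever configuration that node accepts in your deployment.

# Hypothetical usage sketch -- not part of the package diff above.
# The database_config keys are assumed; AdminSchemaManager passes them
# straight through to SQLDatabaseNode.
from kailash.nodes.admin.schema_manager import AdminSchemaManager

database_config = {
    "connection_string": "postgresql://admin:secret@localhost:5432/kailash",  # assumed key
}

manager = AdminSchemaManager(database_config)

# Create all admin tables from the bundled schema.sql, keeping any existing ones.
result = manager.create_full_schema(drop_existing=False)
print(result["schema_version"], result["tables_created"])

# Validate the installed schema and surface anything missing.
validation = manager.validate_schema()
if not validation["is_valid"]:
    print("Missing tables:", validation["missing_tables"])
for note in validation["recommendations"]:
    print("Recommendation:", note)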