cinchdb 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cinchdb/__init__.py +15 -2
- cinchdb/cli/commands/__init__.py +2 -1
- cinchdb/cli/commands/data.py +350 -0
- cinchdb/cli/commands/index.py +2 -2
- cinchdb/cli/commands/tenant.py +47 -0
- cinchdb/cli/main.py +3 -6
- cinchdb/core/connection.py +12 -17
- cinchdb/core/database.py +207 -70
- cinchdb/core/path_utils.py +1 -1
- cinchdb/infrastructure/metadata_connection_pool.py +0 -1
- cinchdb/infrastructure/metadata_db.py +15 -1
- cinchdb/managers/branch.py +1 -1
- cinchdb/managers/data.py +189 -13
- cinchdb/managers/index.py +1 -2
- cinchdb/managers/query.py +0 -1
- cinchdb/managers/table.py +30 -5
- cinchdb/managers/tenant.py +89 -149
- cinchdb/plugins/__init__.py +17 -0
- cinchdb/plugins/base.py +99 -0
- cinchdb/plugins/decorators.py +45 -0
- cinchdb/plugins/manager.py +178 -0
- {cinchdb-0.1.13.dist-info → cinchdb-0.1.15.dist-info}/METADATA +15 -24
- {cinchdb-0.1.13.dist-info → cinchdb-0.1.15.dist-info}/RECORD +26 -23
- cinchdb/security/__init__.py +0 -1
- cinchdb/security/encryption.py +0 -108
- {cinchdb-0.1.13.dist-info → cinchdb-0.1.15.dist-info}/WHEEL +0 -0
- {cinchdb-0.1.13.dist-info → cinchdb-0.1.15.dist-info}/entry_points.txt +0 -0
- {cinchdb-0.1.13.dist-info → cinchdb-0.1.15.dist-info}/licenses/LICENSE +0 -0
cinchdb/managers/table.py
CHANGED
@@ -19,6 +19,9 @@ class TableManager:
 
     # Protected column names that users cannot use
     PROTECTED_COLUMNS = {"id", "created_at", "updated_at"}
+
+    # Protected table name prefixes that users cannot use
+    PROTECTED_TABLE_PREFIXES = ("__", "sqlite_")
 
     def __init__(
         self, project_root: Path, database: str, branch: str, tenant: str = "main"
@@ -48,18 +51,24 @@ class TableManager:
         tables = []
 
         with DatabaseConnection(self.db_path) as conn:
-            # Get all
+            # Get all tables first, then filter in Python (more reliable than SQL LIKE)
             cursor = conn.execute(
                 """
                 SELECT name FROM sqlite_master
                 WHERE type='table'
-                AND name NOT LIKE 'sqlite_%'
                 ORDER BY name
                 """
             )
-
-
-
+
+            # Filter out system tables and protected tables using Python
+            all_table_names = [row["name"] for row in cursor.fetchall()]
+            user_table_names = [
+                name for name in all_table_names
+                if not name.startswith('sqlite_') and not name.startswith('__')
+            ]
+
+            for table_name in user_table_names:
+                table = self.get_table(table_name)
                 tables.append(table)
 
         return tables
@@ -82,6 +91,14 @@ class TableManager:
         """
         # Check maintenance mode
         check_maintenance_mode(self.project_root, self.database, self.branch)
+
+        # Validate table name doesn't use protected prefixes
+        for prefix in self.PROTECTED_TABLE_PREFIXES:
+            if table_name.startswith(prefix):
+                raise ValueError(
+                    f"Table name '{table_name}' is not allowed. "
+                    f"Table names cannot start with '{prefix}' as these are reserved for system use."
+                )
 
         # Validate table doesn't exist
         if self._table_exists(table_name):
@@ -325,6 +342,14 @@ class TableManager:
         """
         # Check maintenance mode
        check_maintenance_mode(self.project_root, self.database, self.branch)
+
+        # Validate target table name doesn't use protected prefixes
+        for prefix in self.PROTECTED_TABLE_PREFIXES:
+            if target_table.startswith(prefix):
+                raise ValueError(
+                    f"Table name '{target_table}' is not allowed. "
+                    f"Table names cannot start with '{prefix}' as these are reserved for system use."
+                )
 
        if not self._table_exists(source_table):
            raise ValueError(f"Source table '{source_table}' does not exist")
cinchdb/managers/tenant.py
CHANGED
@@ -12,7 +12,6 @@ from cinchdb.models import Tenant
 from cinchdb.core.path_utils import (
     get_branch_path,
     get_tenant_db_path,
-    get_database_path,
     list_tenants,
 )
 from cinchdb.core.connection import DatabaseConnection
@@ -279,18 +278,18 @@ class TenantManager:
         with DatabaseConnection(empty_db_path):
             pass  # Just initialize with PRAGMAs
 
-        #
+        # Set reasonable default page size for template
         # We need to rebuild the database with new page size
         temp_path = empty_db_path.with_suffix('.tmp')
 
-        # Create new database with
+        # Create new database with 4KB pages (SQLite default, good balance for general use)
         vacuum_conn = sqlite3.connect(str(empty_db_path))
         vacuum_conn.isolation_level = None
-        vacuum_conn.execute("PRAGMA page_size =
+        vacuum_conn.execute("PRAGMA page_size = 4096")
         vacuum_conn.execute(f"VACUUM INTO '{temp_path}'")
         vacuum_conn.close()
 
-        # Replace original with optimized version
+        # Replace original with default optimized version
         shutil.move(str(temp_path), str(empty_db_path))
 
         # Mark as materialized now that the file exists
@@ -351,150 +350,6 @@ class TenantManager:
         if shm_path.exists():
             shm_path.unlink()
 
-    def optimize_all_tenants(self, force: bool = False) -> dict:
-        """Optimize storage for all materialized tenants in the branch.
-
-        This is designed to be called periodically (e.g., every minute) to:
-        - Reclaim unused space with VACUUM
-        - Adjust page sizes as databases grow
-        - Keep small databases compact
-
-        Args:
-            force: If True, optimize all tenants regardless of size
-
-        Returns:
-            Dictionary with optimization results:
-            - optimized: List of tenant names that were optimized
-            - skipped: List of tenant names that were skipped
-            - errors: List of tuples (tenant_name, error_message)
-        """
-        results = {
-            "optimized": [],
-            "skipped": [],
-            "errors": []
-        }
-
-        # Ensure initialization
-        self._ensure_initialized()
-
-        if not self.branch_id:
-            return results
-
-        # Get all materialized tenants for this branch
-        tenants = self.metadata_db.list_tenants(self.branch_id, materialized_only=True)
-
-        for tenant in tenants:
-            tenant_name = tenant['name']
-
-            # Skip system tenants unless forced
-            if not force and tenant_name in ["main", self._empty_tenant_name]:
-                results["skipped"].append(tenant_name)
-                continue
-
-            try:
-                optimized = self.optimize_tenant_storage(tenant_name, force=force)
-                if optimized:
-                    results["optimized"].append(tenant_name)
-                else:
-                    results["skipped"].append(tenant_name)
-            except Exception as e:
-                results["errors"].append((tenant_name, str(e)))
-
-        return results
-
-    def optimize_tenant_storage(self, tenant_name: str, force: bool = False) -> bool:
-        """Optimize tenant database storage with VACUUM and optional page size adjustment.
-
-        This performs:
-        1. Always: VACUUM to reclaim unused space and defragment
-        2. If needed: Rebuild with optimal page size based on database size
-
-        Args:
-            tenant_name: Name of tenant to optimize
-            force: If True, always perform VACUUM even if page size is optimal
-
-        Returns:
-            True if optimization was performed, False if tenant doesn't exist
-        """
-        # Ensure initialization
-        self._ensure_initialized()
-
-        if not self.branch_id:
-            return False
-
-        # Skip system tenants
-        if tenant_name in ["main", self._empty_tenant_name]:
-            return False
-
-        # Get tenant info
-        tenant_info = self.metadata_db.get_tenant(self.branch_id, tenant_name)
-        if not tenant_info or not tenant_info['materialized']:
-            return False
-
-        db_path = get_tenant_db_path(
-            self.project_root, self.database, self.branch, tenant_name
-        )
-
-        if not db_path.exists():
-            return False
-
-        # Check current page size
-        conn = sqlite3.connect(str(db_path))
-        current_page_size = conn.execute("PRAGMA page_size").fetchone()[0]
-        conn.close()
-
-        # Determine optimal page size
-        optimal_page_size = self._get_optimal_page_size(db_path)
-
-        # Decide if we need to rebuild with new page size
-        needs_page_size_change = (current_page_size != optimal_page_size and
-                                  db_path.stat().st_size > 1024 * 1024)  # Only if > 1MB
-
-        if needs_page_size_change:
-            # Rebuild with new page size using VACUUM INTO
-            temp_path = db_path.with_suffix('.tmp')
-            conn = sqlite3.connect(str(db_path))
-            conn.isolation_level = None
-            conn.execute(f"PRAGMA page_size = {optimal_page_size}")
-            conn.execute(f"VACUUM INTO '{temp_path}'")
-            conn.close()
-
-            # Replace original with optimized version
-            shutil.move(str(temp_path), str(db_path))
-            return True
-        elif force or current_page_size == 512:
-            # Just run regular VACUUM to defragment and reclaim space
-            # Always vacuum 512-byte page databases to keep them compact
-            conn = sqlite3.connect(str(db_path))
-            conn.isolation_level = None
-            conn.execute("VACUUM")
-            conn.close()
-            return True
-
-        return False
-
-    def _get_optimal_page_size(self, db_path: Path) -> int:
-        """Determine optimal page size based on database file size.
-
-        Args:
-            db_path: Path to database file
-
-        Returns:
-            Optimal page size in bytes
-        """
-        if not db_path.exists():
-            return 512  # Default for new/empty databases
-
-        size_mb = db_path.stat().st_size / (1024 * 1024)
-
-        if size_mb < 0.1:  # < 100KB
-            return 512
-        elif size_mb < 10:  # < 10MB
-            return 4096  # 4KB - good balance for small-medium DBs
-        elif size_mb < 100:  # < 100MB
-            return 8192  # 8KB - better for larger rows
-        else:  # >= 100MB
-            return 16384  # 16KB - optimal for bulk operations
 
     def materialize_tenant(self, tenant_name: str) -> None:
         """Materialize a lazy tenant into an actual database file.
@@ -899,3 +754,88 @@ class TenantManager:
         """
         db_path = self.get_tenant_db_path_for_operation(tenant_name, is_write)
         return DatabaseConnection(db_path)
+
+    def vacuum_tenant(self, tenant_name: str) -> dict:
+        """Run VACUUM operation on a specific tenant to reclaim space and optimize performance.
+
+        This performs SQLite's VACUUM command which:
+        - Reclaims space from deleted records
+        - Defragments the database file
+        - Can improve query performance
+        - Rebuilds database statistics
+
+        Args:
+            tenant_name: Name of the tenant to vacuum
+
+        Returns:
+            Dictionary with vacuum results:
+            - success: Whether vacuum completed successfully
+            - tenant: Name of the tenant
+            - size_before: Size in bytes before vacuum
+            - size_after: Size in bytes after vacuum
+            - space_reclaimed: Bytes reclaimed by vacuum
+            - duration_seconds: Time taken for vacuum operation
+
+        Raises:
+            ValueError: If tenant doesn't exist or is not materialized
+        """
+        import time
+
+        # Ensure initialization
+        self._ensure_initialized()
+
+        # Check if tenant exists
+        if tenant_name != self._empty_tenant_name:
+            if not self.branch_id:
+                raise ValueError(f"Branch '{self.branch}' not found")
+
+            tenant_info = self.metadata_db.get_tenant(self.branch_id, tenant_name)
+            if not tenant_info:
+                raise ValueError(f"Tenant '{tenant_name}' does not exist")
+
+        # Check if tenant is materialized
+        if self.is_tenant_lazy(tenant_name):
+            raise ValueError(f"Cannot vacuum lazy tenant '{tenant_name}'. Tenant must be materialized first.")
+
+        # Get database path
+        db_path = self._get_sharded_tenant_db_path(tenant_name)
+
+        if not db_path.exists():
+            raise ValueError(f"Database file for tenant '{tenant_name}' does not exist")
+
+        # Get size before vacuum
+        size_before = db_path.stat().st_size
+
+        # Perform vacuum operation
+        start_time = time.time()
+        success = False
+        error_message = None
+
+        try:
+            with DatabaseConnection(db_path) as conn:
+                # Run VACUUM command
+                conn.execute("VACUUM")
+                success = True
+        except Exception as e:
+            error_message = str(e)
+
+        duration = time.time() - start_time
+
+        # Get size after vacuum
+        size_after = db_path.stat().st_size if db_path.exists() else 0
+        space_reclaimed = max(0, size_before - size_after)
+
+        result = {
+            "success": success,
+            "tenant": tenant_name,
+            "size_before": size_before,
+            "size_after": size_after,
+            "space_reclaimed": space_reclaimed,
+            "space_reclaimed_mb": round(space_reclaimed / (1024 * 1024), 2),
+            "duration_seconds": round(duration, 2)
+        }
+
+        if not success:
+            result["error"] = error_message
+
+        return result
cinchdb/plugins/__init__.py
ADDED
@@ -0,0 +1,17 @@
+"""
+CinchDB Plugin System
+
+Extensible plugin architecture for CinchDB.
+"""
+
+from .base import BasePlugin, PluginHook
+from .manager import PluginManager
+from .decorators import hook, plugin_method
+
+__all__ = [
+    "BasePlugin",
+    "PluginHook",
+    "PluginManager",
+    "hook",
+    "plugin_method",
+]
cinchdb/plugins/base.py
ADDED
@@ -0,0 +1,99 @@
+"""
+Base classes for CinchDB plugins.
+"""
+
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Callable
+from enum import Enum
+
+
+class PluginHook(Enum):
+    """Available plugin hooks."""
+    # Database lifecycle hooks
+    DATABASE_INIT = "database_init"
+    DATABASE_CONNECT = "database_connect"
+    DATABASE_DISCONNECT = "database_disconnect"
+
+    # Query hooks
+    QUERY_BEFORE = "query_before"
+    QUERY_AFTER = "query_after"
+    QUERY_ERROR = "query_error"
+
+    # Table hooks
+    TABLE_CREATE = "table_create"
+    TABLE_DROP = "table_drop"
+    TABLE_ALTER = "table_alter"
+
+    # Tenant hooks
+    TENANT_CREATE = "tenant_create"
+    TENANT_DROP = "tenant_drop"
+
+    # Branch hooks
+    BRANCH_CREATE = "branch_create"
+    BRANCH_SWITCH = "branch_switch"
+    BRANCH_MERGE = "branch_merge"
+
+    # CLI hooks
+    CLI_COMMAND_BEFORE = "cli_command_before"
+    CLI_COMMAND_AFTER = "cli_command_after"
+
+
+class BasePlugin(ABC):
+    """Base class for all CinchDB plugins."""
+
+    def __init__(self):
+        self.name = self.__class__.__name__
+        self.version = "1.0.0"
+        self.description = ""
+        self._hooks: Dict[PluginHook, List[Callable]] = {}
+        self._methods: Dict[str, Callable] = {}
+
+    @abstractmethod
+    def initialize(self, cinchdb_instance) -> None:
+        """Initialize the plugin with a CinchDB instance."""
+        pass
+
+    def register_hook(self, hook: PluginHook, callback: Callable) -> None:
+        """Register a callback for a specific hook."""
+        if hook not in self._hooks:
+            self._hooks[hook] = []
+        self._hooks[hook].append(callback)
+
+    def register_method(self, method_name: str, method: Callable) -> None:
+        """Register a new method to be added to CinchDB instances."""
+        self._methods[method_name] = method
+
+    def get_hooks(self) -> Dict[PluginHook, List[Callable]]:
+        """Get all registered hooks."""
+        return self._hooks.copy()
+
+    def get_methods(self) -> Dict[str, Callable]:
+        """Get all registered methods."""
+        return self._methods.copy()
+
+    def call_hook(self, hook: PluginHook, *args, **kwargs) -> Any:
+        """Call all callbacks for a specific hook."""
+        results = []
+        for callback in self._hooks.get(hook, []):
+            try:
+                result = callback(*args, **kwargs)
+                results.append(result)
+            except Exception as e:
+                # Log error but don't break other plugins
+                print(f"Plugin {self.name} hook {hook} failed: {e}")
+        return results
+
+    def cleanup(self) -> None:
+        """Cleanup when plugin is unloaded."""
+        pass
+
+    @property
+    def metadata(self) -> Dict[str, Any]:
+        """Get plugin metadata."""
+        return {
+            "name": self.name,
+            "version": self.version,
+            "description": self.description,
+            "hooks": list(self._hooks.keys()),
+            "methods": list(self._methods.keys()),
+        }
cinchdb/plugins/decorators.py
ADDED
@@ -0,0 +1,45 @@
+"""
+Decorators for plugin development.
+"""
+
+from typing import Callable
+
+from .base import PluginHook
+
+
+def hook(hook_type: PluginHook):
+    """Decorator to register a method as a hook callback."""
+    def decorator(func: Callable) -> Callable:
+        func._plugin_hook = hook_type
+        return func
+    return decorator
+
+
+def plugin_method(method_name: str):
+    """Decorator to register a method to be added to CinchDB instances."""
+    def decorator(func: Callable) -> Callable:
+        func._plugin_method_name = method_name
+        return func
+    return decorator
+
+
+class PluginDecorators:
+    """Helper class to collect decorated methods from a plugin class."""
+
+    @staticmethod
+    def collect_hooks(plugin_instance) -> None:
+        """Collect and register hook methods from a plugin instance."""
+        for attr_name in dir(plugin_instance):
+            attr = getattr(plugin_instance, attr_name)
+            if callable(attr) and hasattr(attr, '_plugin_hook'):
+                hook_type = attr._plugin_hook
+                plugin_instance.register_hook(hook_type, attr)
+
+    @staticmethod
+    def collect_methods(plugin_instance) -> None:
+        """Collect and register plugin methods from a plugin instance."""
+        for attr_name in dir(plugin_instance):
+            attr = getattr(plugin_instance, attr_name)
+            if callable(attr) and hasattr(attr, '_plugin_method_name'):
+                method_name = attr._plugin_method_name
+                plugin_instance.register_method(method_name, attr)
cinchdb/plugins/manager.py
ADDED
@@ -0,0 +1,178 @@
+"""
+Plugin manager for CinchDB.
+"""
+
+import importlib
+import importlib.util
+import logging
+from pathlib import Path
+from typing import Dict, List, Optional, Any
+try:
+    from importlib.metadata import entry_points
+except ImportError:
+    # Fallback for Python < 3.8
+    from importlib_metadata import entry_points
+
+from .base import BasePlugin, PluginHook
+
+logger = logging.getLogger(__name__)
+
+
+class PluginManager:
+    """Manages plugin lifecycle and hooks for CinchDB."""
+
+    def __init__(self):
+        self.plugins: Dict[str, BasePlugin] = {}
+        self._cinchdb_instance = None
+
+    def set_cinchdb_instance(self, instance):
+        """Set the CinchDB instance for plugins."""
+        self._cinchdb_instance = instance
+
+        # Initialize any already loaded plugins
+        for plugin in self.plugins.values():
+            try:
+                plugin.initialize(instance)
+            except Exception as e:
+                logger.error(f"Failed to initialize plugin {plugin.name}: {e}")
+
+    def register_plugin(self, plugin: BasePlugin) -> None:
+        """Register a plugin instance."""
+        plugin_name = plugin.name
+
+        if plugin_name in self.plugins:
+            logger.warning(f"Plugin {plugin_name} already registered, replacing")
+
+        self.plugins[plugin_name] = plugin
+
+        # Initialize with CinchDB instance if available
+        if self._cinchdb_instance:
+            try:
+                plugin.initialize(self._cinchdb_instance)
+                self._apply_plugin_methods(plugin)
+            except Exception as e:
+                logger.error(f"Failed to initialize plugin {plugin_name}: {e}")
+
+        logger.info(f"Plugin {plugin_name} registered successfully")
+
+    def unregister_plugin(self, plugin_name: str) -> None:
+        """Unregister a plugin."""
+        if plugin_name in self.plugins:
+            plugin = self.plugins[plugin_name]
+            try:
+                plugin.cleanup()
+            except Exception as e:
+                logger.error(f"Error cleaning up plugin {plugin_name}: {e}")
+
+            del self.plugins[plugin_name]
+            logger.info(f"Plugin {plugin_name} unregistered")
+
+    def load_plugin_from_module(self, module_name: str) -> None:
+        """Load a plugin from a module name."""
+        try:
+            module = importlib.import_module(module_name)
+
+            # Look for plugin classes
+            for attr_name in dir(module):
+                attr = getattr(module, attr_name)
+                if (isinstance(attr, type) and
+                        issubclass(attr, BasePlugin) and
+                        attr != BasePlugin):
+
+                    plugin_instance = attr()
+                    self.register_plugin(plugin_instance)
+                    return
+
+            logger.warning(f"No plugin class found in module {module_name}")
+
+        except ImportError as e:
+            logger.error(f"Failed to import plugin module {module_name}: {e}")
+        except Exception as e:
+            logger.error(f"Failed to load plugin from {module_name}: {e}")
+
+    def load_plugin_from_file(self, file_path: Path) -> None:
+        """Load a plugin from a Python file."""
+        try:
+            spec = importlib.util.spec_from_file_location("plugin_module", file_path)
+            if spec and spec.loader:
+                module = importlib.util.module_from_spec(spec)
+                spec.loader.exec_module(module)
+
+                # Look for plugin classes
+                for attr_name in dir(module):
+                    attr = getattr(module, attr_name)
+                    if (isinstance(attr, type) and
+                            issubclass(attr, BasePlugin) and
+                            attr != BasePlugin):
+
+                        plugin_instance = attr()
+                        self.register_plugin(plugin_instance)
+                        return
+
+            logger.warning(f"No plugin class found in file {file_path}")
+
+        except Exception as e:
+            logger.error(f"Failed to load plugin from file {file_path}: {e}")
+
+    def discover_plugins(self) -> None:
+        """Discover plugins using entry points."""
+        try:
+            eps = entry_points()
+            # Handle both old and new entry_points API
+            if hasattr(eps, 'select'):
+                # New API (Python 3.10+)
+                plugin_eps = eps.select(group='cinchdb.plugins')
+            else:
+                # Old API
+                plugin_eps = eps.get('cinchdb.plugins', [])
+
+            for entry_point in plugin_eps:
+                try:
+                    plugin_class = entry_point.load()
+                    if issubclass(plugin_class, BasePlugin):
+                        plugin_instance = plugin_class()
+                        self.register_plugin(plugin_instance)
+                except Exception as e:
+                    logger.error(f"Failed to load plugin {entry_point.name}: {e}")
+        except Exception as e:
+            logger.error(f"Failed to discover plugins: {e}")
+
+    def _apply_plugin_methods(self, plugin: BasePlugin) -> None:
+        """Apply plugin methods to the CinchDB instance."""
+        if not self._cinchdb_instance:
+            return
+
+        for method_name, method in plugin.get_methods().items():
+            # Bind method to the instance
+            bound_method = method.__get__(self._cinchdb_instance, type(self._cinchdb_instance))
+            setattr(self._cinchdb_instance, method_name, bound_method)
+
+    def call_hook(self, hook: PluginHook, *args, **kwargs) -> List[Any]:
+        """Call all plugin hooks for a specific event."""
+        results = []
+
+        for plugin in self.plugins.values():
+            try:
+                plugin_results = plugin.call_hook(hook, *args, **kwargs)
+                results.extend(plugin_results)
+            except Exception as e:
+                logger.error(f"Plugin {plugin.name} hook {hook} failed: {e}")
+
+        return results
+
+    def get_plugin(self, name: str) -> Optional[BasePlugin]:
+        """Get a plugin by name."""
+        return self.plugins.get(name)
+
+    def list_plugins(self) -> List[Dict[str, Any]]:
+        """List all registered plugins with their metadata."""
+        return [plugin.metadata for plugin in self.plugins.values()]
+
+    def plugin_exists(self, name: str) -> bool:
+        """Check if a plugin is registered."""
+        return name in self.plugins
+
+    def cleanup_all(self) -> None:
+        """Cleanup all plugins."""
+        for plugin_name in list(self.plugins.keys()):
+            self.unregister_plugin(plugin_name)