altcodepro-polydb-python 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. altcodepro_polydb_python-2.1.0.dist-info/METADATA +378 -0
  2. altcodepro_polydb_python-2.1.0.dist-info/RECORD +51 -0
  3. altcodepro_polydb_python-2.1.0.dist-info/WHEEL +5 -0
  4. altcodepro_polydb_python-2.1.0.dist-info/licenses/LICENSE +21 -0
  5. altcodepro_polydb_python-2.1.0.dist-info/top_level.txt +1 -0
  6. polydb/__init__.py +64 -0
  7. polydb/adapters/AzureBlobStorageAdapter.py +77 -0
  8. polydb/adapters/AzureFileStorageAdapter.py +79 -0
  9. polydb/adapters/AzureQueueAdapter.py +61 -0
  10. polydb/adapters/AzureTableStorageAdapter.py +182 -0
  11. polydb/adapters/DynamoDBAdapter.py +216 -0
  12. polydb/adapters/EFSAdapter.py +50 -0
  13. polydb/adapters/FirestoreAdapter.py +193 -0
  14. polydb/adapters/GCPStorageAdapter.py +81 -0
  15. polydb/adapters/MongoDBAdapter.py +136 -0
  16. polydb/adapters/PostgreSQLAdapter.py +453 -0
  17. polydb/adapters/PubSubAdapter.py +83 -0
  18. polydb/adapters/S3Adapter.py +86 -0
  19. polydb/adapters/S3CompatibleAdapter.py +90 -0
  20. polydb/adapters/SQSAdapter.py +84 -0
  21. polydb/adapters/VercelKVAdapter.py +327 -0
  22. polydb/adapters/__init__.py +0 -0
  23. polydb/advanced_query.py +147 -0
  24. polydb/audit/AuditStorage.py +136 -0
  25. polydb/audit/__init__.py +7 -0
  26. polydb/audit/context.py +53 -0
  27. polydb/audit/manager.py +47 -0
  28. polydb/audit/models.py +86 -0
  29. polydb/base/NoSQLKVAdapter.py +301 -0
  30. polydb/base/ObjectStorageAdapter.py +42 -0
  31. polydb/base/QueueAdapter.py +27 -0
  32. polydb/base/SharedFilesAdapter.py +32 -0
  33. polydb/base/__init__.py +0 -0
  34. polydb/batch.py +163 -0
  35. polydb/cache.py +204 -0
  36. polydb/databaseFactory.py +748 -0
  37. polydb/decorators.py +21 -0
  38. polydb/errors.py +82 -0
  39. polydb/factory.py +107 -0
  40. polydb/models.py +39 -0
  41. polydb/monitoring.py +313 -0
  42. polydb/multitenancy.py +197 -0
  43. polydb/py.typed +0 -0
  44. polydb/query.py +150 -0
  45. polydb/registry.py +71 -0
  46. polydb/retry.py +76 -0
  47. polydb/schema.py +205 -0
  48. polydb/security.py +458 -0
  49. polydb/types.py +127 -0
  50. polydb/utils.py +61 -0
  51. polydb/validation.py +131 -0
polydb/multitenancy.py ADDED
@@ -0,0 +1,197 @@
1
+ # src/polydb/multitenancy.py
2
+ """
3
+ Multi-tenancy enforcement and isolation
4
+ """
5
from contextvars import ContextVar
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Any, List, Optional, Callable
9
+
10
+
11
class IsolationLevel(Enum):
    """How strongly one tenant's data is separated from another's."""

    # Everyone shares one set of tables; rows carry a tenant_id column.
    SHARED_SCHEMA = "shared"
    # One dedicated schema (namespace) per tenant inside a shared database.
    SEPARATE_SCHEMA = "schema"
    # One fully dedicated database per tenant.
    SEPARATE_DATABASE = "database"
16
+
17
+
18
@dataclass
class TenantConfig:
    """Per-tenant configuration record.

    Holds the isolation strategy plus the physical location names
    (schema/database) and resource limits for a single tenant.
    """
    tenant_id: str
    isolation_level: IsolationLevel
    schema_name: Optional[str] = None      # used when SEPARATE_SCHEMA
    database_name: Optional[str] = None    # used when SEPARATE_DATABASE
    max_connections: int = 10
    storage_quota_gb: Optional[float] = None  # None means unlimited
    # BUG FIX: a bare `[]` default is a mutable default -- @dataclass rejects
    # it with "ValueError: mutable default <class 'list'> for field features",
    # so the module failed at import time. A default_factory gives each
    # instance its own fresh list.
    features: List[str] = field(default_factory=list)
28
+
29
+
30
class TenantRegistry:
    """In-memory lookup table mapping tenant ids to their configuration."""

    def __init__(self):
        # tenant_id -> TenantConfig
        self._tenants: Dict[str, TenantConfig] = {}

    def register(self, config: TenantConfig):
        """Add (or replace) the configuration keyed by config.tenant_id."""
        self._tenants[config.tenant_id] = config

    def get(self, tenant_id: str) -> Optional[TenantConfig]:
        """Return the tenant's configuration, or None when unknown."""
        return self._tenants.get(tenant_id)

    def list_all(self) -> List[TenantConfig]:
        """Return every registered configuration."""
        return [config for config in self._tenants.values()]
47
+
48
+
49
class TenantContext:
    """Holder for the tenant active in the current thread / asyncio task.

    Backed by a ContextVar, so each task sees its own current tenant.
    """

    current_tenant: ContextVar[Optional[TenantConfig]] = \
        ContextVar("current_tenant", default=None)

    @classmethod
    def set_tenant(cls, tenant_id: str, registry: TenantRegistry):
        """Look up tenant_id in registry and make it the active tenant.

        Raises:
            ValueError: when the tenant is not registered.
        """
        config = registry.get(tenant_id)
        if config is None:
            raise ValueError(f"Tenant not found: {tenant_id}")
        cls.current_tenant.set(config)

    @classmethod
    def get_tenant(cls) -> Optional[TenantConfig]:
        """Return the active tenant's configuration, or None if unset."""
        return cls.current_tenant.get()

    @classmethod
    def clear(cls):
        """Reset the context so no tenant is active."""
        cls.current_tenant.set(None)
73
+
74
+
75
class TenantIsolationEnforcer:
    """Injects tenant scoping into reads/writes and table-name resolution."""

    def __init__(self, registry: TenantRegistry):
        self.registry = registry

    @staticmethod
    def _require_tenant():
        """Return the active tenant, failing loudly when context is missing."""
        tenant = TenantContext.get_tenant()
        if tenant is None:
            raise ValueError("No tenant context set")
        return tenant

    def enforce_read(
        self,
        model: str,
        query: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Scope a read filter to the active tenant.

        In shared-schema mode a tenant_id condition is merged into a copy of
        the query; other isolation levels pass the query through untouched.
        """
        tenant = self._require_tenant()
        if tenant.isolation_level is not IsolationLevel.SHARED_SCHEMA:
            return query
        return {**query, 'tenant_id': tenant.tenant_id}

    def enforce_write(
        self,
        model: str,
        data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Stamp write data with the active tenant.

        In shared-schema mode tenant_id is merged into a copy of the data;
        other isolation levels pass the data through untouched.
        """
        tenant = self._require_tenant()
        if tenant.isolation_level is not IsolationLevel.SHARED_SCHEMA:
            return data
        return {**data, 'tenant_id': tenant.tenant_id}

    def get_table_name(self, base_table: str) -> str:
        """Map a logical table name to the tenant's physical location."""
        tenant = self._require_tenant()
        level = tenant.isolation_level
        if level is IsolationLevel.SEPARATE_SCHEMA:
            return f"{tenant.schema_name}.{base_table}"
        if level is IsolationLevel.SEPARATE_DATABASE:
            return f"{tenant.database_name}.public.{base_table}"
        return base_table
130
+
131
+
132
class TenantQuotaManager:
    """Tracks per-tenant resource consumption against configured quotas."""

    def __init__(self, registry: TenantRegistry):
        self.registry = registry
        # tenant_id -> {metric name -> accumulated value}
        self._usage: Dict[str, Dict[str, float]] = {}

    def check_storage_quota(self, tenant_id: str, size_gb: float) -> bool:
        """Return True when adding size_gb stays within the storage quota.

        Unknown tenants and tenants without a quota are never limited.
        """
        config = self.registry.get(tenant_id)
        if config is None or not config.storage_quota_gb:
            return True

        used = self._usage.get(tenant_id, {}).get('storage_gb', 0.0)
        return used + size_gb <= config.storage_quota_gb

    def record_storage_usage(self, tenant_id: str, size_gb: float):
        """Accumulate size_gb onto the tenant's recorded storage usage."""
        bucket = self._usage.setdefault(tenant_id, {})
        bucket['storage_gb'] = bucket.get('storage_gb', 0.0) + size_gb

    def get_usage(self, tenant_id: str) -> Dict[str, float]:
        """Return the raw usage counters recorded for tenant_id."""
        return self._usage.get(tenant_id, {})
159
+
160
+
161
class TenantMigrationManager:
    """Provisions and tears down per-tenant schemas / databases.

    `factory` is expected to expose a SQL adapter at `factory._sql` with an
    `execute(sql)` method -- assumed from usage here; confirm against
    databaseFactory.
    """

    def __init__(self, factory, registry: TenantRegistry):
        self.factory = factory
        self.registry = registry

    @staticmethod
    def _safe_identifier(name: Optional[str], kind: str) -> str:
        """Validate a schema/database name before splicing it into DDL.

        DDL cannot take bound parameters, so the name is interpolated into
        the SQL text. Restricting it to a plain Python identifier blocks
        SQL injection via hostile tenant configuration (quotes, semicolons,
        whitespace) and also catches a missing (None) name, which previously
        produced statements like "CREATE SCHEMA IF NOT EXISTS None".

        Raises:
            ValueError: when the name is missing or not a safe identifier.
        """
        if not name or not name.isidentifier():
            raise ValueError(f"Invalid {kind} name for tenant DDL: {name!r}")
        return name

    def provision_tenant(self, config: TenantConfig):
        """Register the tenant and create its schema/database if required."""
        self.registry.register(config)

        if config.isolation_level == IsolationLevel.SEPARATE_SCHEMA:
            schema = self._safe_identifier(config.schema_name, "schema")
            self.factory._sql.execute(f"CREATE SCHEMA IF NOT EXISTS {schema};")

        elif config.isolation_level == IsolationLevel.SEPARATE_DATABASE:
            # Creating a database typically requires superuser privileges.
            database = self._safe_identifier(config.database_name, "database")
            self.factory._sql.execute(f"CREATE DATABASE {database};")

    def deprovision_tenant(self, tenant_id: str):
        """Drop the tenant's schema/database; no-op for unknown tenants."""
        config = self.registry.get(tenant_id)
        if not config:
            return

        if config.isolation_level == IsolationLevel.SEPARATE_SCHEMA:
            schema = self._safe_identifier(config.schema_name, "schema")
            self.factory._sql.execute(
                f"DROP SCHEMA IF EXISTS {schema} CASCADE;"
            )

        elif config.isolation_level == IsolationLevel.SEPARATE_DATABASE:
            database = self._safe_identifier(config.database_name, "database")
            self.factory._sql.execute(f"DROP DATABASE IF EXISTS {database};")
polydb/py.typed ADDED
File without changes
polydb/query.py ADDED
@@ -0,0 +1,150 @@
1
+ # src/polydb/query.py
2
+ from __future__ import annotations
3
+
4
+ from dataclasses import dataclass, field
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+ from enum import Enum
7
+
8
+
9
class Operator(Enum):
    """Comparison operators a QueryFilter can apply to a field."""

    EQ = "=="                    # equal
    NE = "!="                    # not equal
    GT = ">"                     # greater than
    GTE = ">="                   # greater than or equal
    LT = "<"                     # less than
    LTE = "<="                   # less than or equal
    IN = "in"                    # value is in a collection
    NOT_IN = "not_in"            # value is not in a collection
    CONTAINS = "contains"        # substring match
    STARTS_WITH = "starts_with"  # prefix match
    ENDS_WITH = "ends_with"      # suffix match
21
+
22
+
23
@dataclass
class QueryFilter:
    """A single predicate of the form `field <operator> value`."""
    field: str           # column / attribute name the predicate applies to
    operator: Operator   # comparison to perform (see Operator)
    value: Any           # right-hand side; a collection for IN / NOT_IN
28
+
29
+
30
@dataclass
class QueryBuilder:
    """LINQ-style query builder supporting all SQL clauses.

    Mutating methods return self so calls can be chained, e.g.
    ``QueryBuilder().where("age", ">", 18).order_by("name").take(10)``.
    The accumulated state can then be rendered for SQL backends via
    `to_sql_where` or for NoSQL backends via `to_nosql_filter`.
    """

    filters: List[QueryFilter] = field(default_factory=list)
    order_by_fields: List[tuple[str, bool]] = field(default_factory=list)  # (field, desc)
    skip_count: int = 0
    take_count: Optional[int] = None
    select_fields: Optional[List[str]] = None
    group_by_fields: Optional[List[str]] = None
    distinct: bool = False
    count_only: bool = False

    def where(self, field: str, operator: Union[Operator, str], value: Any) -> QueryBuilder:
        """Add a filter predicate; string operators are coerced via Operator()."""
        op = Operator(operator) if isinstance(operator, str) else operator
        self.filters.append(QueryFilter(field, op, value))
        return self

    def order_by(self, field: str, descending: bool = False) -> QueryBuilder:
        """Add an ordering key (applied in the order the calls were made)."""
        self.order_by_fields.append((field, descending))
        return self

    def skip(self, count: int) -> QueryBuilder:
        """Skip the first `count` records (OFFSET)."""
        self.skip_count = count
        return self

    def take(self, count: int) -> QueryBuilder:
        """Limit the result to `count` records (LIMIT)."""
        self.take_count = count
        return self

    def select(self, *fields: str) -> QueryBuilder:
        """Project only the named fields."""
        self.select_fields = list(fields)
        return self

    def group_by(self, *fields: str) -> QueryBuilder:
        """Group results by the named fields."""
        self.group_by_fields = list(fields)
        return self

    def distinct_on(self) -> QueryBuilder:
        """Return distinct results."""
        self.distinct = True
        return self

    def count(self) -> QueryBuilder:
        """Return only the row count."""
        self.count_only = True
        return self

    def to_sql_where(self) -> tuple[str, List[Any]]:
        """Render the filters as an AND-joined WHERE fragment plus parameters.

        Values always travel as %s placeholders (psycopg paramstyle), never
        interpolated, so user data cannot inject SQL.  NOTE(review): field
        NAMES are interpolated verbatim -- callers must not pass untrusted
        field names.
        """
        if not self.filters:
            return "", []

        # The six plain binary comparisons share one rendering path.
        comparison_sql = {
            Operator.EQ: "=",
            Operator.NE: "!=",
            Operator.GT: ">",
            Operator.GTE: ">=",
            Operator.LT: "<",
            Operator.LTE: "<=",
        }

        clauses: List[str] = []
        params: List[Any] = []

        for f in self.filters:
            if f.operator in comparison_sql:
                clauses.append(f"{f.field} {comparison_sql[f.operator]} %s")
                params.append(f.value)
            elif f.operator in (Operator.IN, Operator.NOT_IN):
                values = list(f.value)
                if not values:
                    # BUG FIX: "x IN ()" is invalid SQL.  An empty IN can
                    # match nothing; an empty NOT IN excludes nothing.
                    clauses.append("1=0" if f.operator == Operator.IN else "1=1")
                    continue
                placeholders = ','.join(['%s'] * len(values))
                keyword = "IN" if f.operator == Operator.IN else "NOT IN"
                clauses.append(f"{f.field} {keyword} ({placeholders})")
                params.extend(values)
            elif f.operator == Operator.CONTAINS:
                clauses.append(f"{f.field} LIKE %s")
                params.append(f"%{f.value}%")
            elif f.operator == Operator.STARTS_WITH:
                clauses.append(f"{f.field} LIKE %s")
                params.append(f"{f.value}%")
            elif f.operator == Operator.ENDS_WITH:
                clauses.append(f"{f.field} LIKE %s")
                params.append(f"%{f.value}")

        return " AND ".join(clauses), params

    def to_nosql_filter(self) -> Dict[str, Any]:
        """Render the filters as a Django-style suffixed dict for NoSQL adapters.

        NOTE(review): NE, NOT_IN, STARTS_WITH and ENDS_WITH have no mapping
        here and are silently dropped -- confirm the NoSQL adapters'
        expected key syntax before adding them.
        """
        suffixes = {
            Operator.IN: "__in",
            Operator.GT: "__gt",
            Operator.GTE: "__gte",
            Operator.LT: "__lt",
            Operator.LTE: "__lte",
            Operator.CONTAINS: "__contains",
        }

        result: Dict[str, Any] = {}
        for f in self.filters:
            if f.operator == Operator.EQ:
                result[f.field] = f.value
            elif f.operator in suffixes:
                result[f"{f.field}{suffixes[f.operator]}"] = f.value
        return result
polydb/registry.py ADDED
@@ -0,0 +1,71 @@
1
+ # src/polydb/registry.py
2
+ from typing import Dict, Type
3
+
4
+
5
class ModelRegistry:
    """
    Lightweight model registry.

    Supports:
    - register(model): record a model class and its __polydb__ metadata
    - resolve(model_or_name): map a class or class-name string to the class
    - get(model): return the model's metadata as a ModelMeta
    """

    # Process-wide store shared by all callers: model class -> raw metadata.
    _models: Dict[Type, Dict] = {}

    @classmethod
    def register(cls, model):
        """Record `model`, which must carry a __polydb__ metadata dict."""
        meta = getattr(model, "__polydb__", None)
        if not meta:
            raise ValueError(f"{model} missing __polydb__ config")
        cls._models[model] = meta

    @classmethod
    def resolve(cls, model):
        """Map a class, or a class-name string, to the registered class.

        Raises ValueError for unregistered models and TypeError for any
        other kind of reference.
        """
        if isinstance(model, type):
            if model not in cls._models:
                raise ValueError(f"Model not registered: {model.__name__}")
            return model

        if isinstance(model, str):
            match = next(
                (m for m in cls._models if m.__name__ == model), None
            )
            if match is not None:
                return match
            raise ValueError(f"Model not registered: '{model}'")

        raise TypeError(f"Invalid model reference {model!r}. Must be class or class name.")

    @classmethod
    def get(cls, model):
        """Return the model's raw metadata converted into a ModelMeta."""
        from .types import ModelMeta

        model_cls = cls.resolve(model)
        raw = cls._models[model_cls]

        return ModelMeta(
            storage=raw.get("storage", "sql"),
            table=raw.get("table"),
            collection=raw.get("collection"),
            pk_field=raw.get("pk_field"),
            rk_field=raw.get("rk_field"),
            provider=raw.get("provider"),
            cache=raw.get("cache", False),
            cache_ttl=raw.get("cache_ttl"),
        )
polydb/retry.py ADDED
@@ -0,0 +1,76 @@
1
+ # src/polydb/retry.py
2
+ """
3
+ Retry logic with exponential backoff and metrics hooks
4
+ """
5
+
6
+ import time
7
+ import logging
8
+ from functools import wraps
9
+ from typing import Callable, Optional, Tuple, Type
10
+
11
+
12
+ # Metrics hooks for enterprise monitoring
13
class MetricsHooks:
    """No-op metrics hooks; override the static methods to wire in monitoring."""

    @staticmethod
    def on_query_start(operation: str, **kwargs):
        """Invoked just before an operation attempt begins."""

    @staticmethod
    def on_query_end(operation: str, duration: float, success: bool, **kwargs):
        """Invoked after an attempt finishes, with its duration and outcome."""

    @staticmethod
    def on_error(operation: str, error: Exception, **kwargs):
        """Invoked when an attempt raises."""
30
+
31
+
32
def retry(max_attempts: int = 3, delay: float = 1.0, backoff: float = 2.0,
          exceptions: Tuple[Type[Exception], ...] = (Exception,)):
    """
    Retry decorator with exponential backoff and metrics hooks.

    Args:
        max_attempts: Maximum number of attempts; must be >= 1
        delay: Initial delay between retries (seconds)
        backoff: Multiplier applied to the delay after each failure
        exceptions: Tuple of exception types that trigger a retry

    Raises:
        ValueError: at decoration time when max_attempts < 1.
    """
    if max_attempts < 1:
        # BUG FIX: with max_attempts <= 0 the loop below never ran, so the
        # wrapped function was never called and the wrapper silently
        # returned None. Fail loudly at decoration time instead.
        raise ValueError("max_attempts must be >= 1")

    def decorator(func: Callable) -> Callable:
        # Hoisted out of the wrapper: one logger per decorated function,
        # not one per call.
        logger = logging.getLogger(__name__)

        @wraps(func)
        def wrapper(*args, **kwargs):
            attempt = 0
            current_delay = delay

            while attempt < max_attempts:
                # monotonic clock: durations cannot go negative if the
                # system wall clock is adjusted mid-call.
                start_time = time.monotonic()
                try:
                    MetricsHooks.on_query_start(func.__name__, args=args, kwargs=kwargs)
                    result = func(*args, **kwargs)
                    MetricsHooks.on_query_end(
                        func.__name__, time.monotonic() - start_time, True
                    )
                    return result
                except exceptions as e:
                    attempt += 1
                    MetricsHooks.on_query_end(
                        func.__name__, time.monotonic() - start_time, False
                    )
                    MetricsHooks.on_error(func.__name__, e)

                    if attempt >= max_attempts:
                        raise

                    # Lazy %-style args: the message is only formatted when
                    # the warning is actually emitted.
                    logger.warning(
                        "Attempt %d/%d failed for %s: %s. Retrying in %ss...",
                        attempt, max_attempts, func.__name__, e, current_delay,
                    )
                    time.sleep(current_delay)
                    current_delay *= backoff

        return wrapper
    return decorator
polydb/schema.py ADDED
@@ -0,0 +1,205 @@
1
+ # src/polydb/schema.py
2
+ """
3
+ Schema management and migrations
4
+ """
5
+ from typing import Dict, List, Optional, Any
6
+ from dataclasses import dataclass
7
+ from enum import Enum
8
+ import json
9
+ from datetime import datetime
10
+
11
+
12
class ColumnType(Enum):
    """SQL column types understood by the schema builder."""

    INTEGER = "INTEGER"
    BIGINT = "BIGINT"
    VARCHAR = "VARCHAR"      # combined with Column.max_length -> VARCHAR(n)
    TEXT = "TEXT"
    BOOLEAN = "BOOLEAN"
    TIMESTAMP = "TIMESTAMP"
    DATE = "DATE"
    JSONB = "JSONB"
    UUID = "UUID"
    FLOAT = "FLOAT"
    DECIMAL = "DECIMAL"
24
+
25
+
26
@dataclass
class Column:
    """Declarative description of one table column."""
    name: str                         # column identifier (interpolated into DDL)
    type: ColumnType                  # SQL type; VARCHAR may use max_length
    nullable: bool = True             # emits NOT NULL when False
    default: Optional[Any] = None     # literal DEFAULT value; strings are quoted
    primary_key: bool = False         # collected into a table-level PRIMARY KEY
    unique: bool = False              # emits a UNIQUE constraint
    max_length: Optional[int] = None  # length for VARCHAR columns
35
+
36
+
37
@dataclass
class Index:
    """Declarative description of one table index."""
    name: str             # index identifier
    columns: List[str]    # indexed columns, in order
    unique: bool = False  # emits CREATE UNIQUE INDEX when True
42
+
43
+
44
class SchemaBuilder:
    """Accumulates Column/Index definitions and renders DDL statements.

    NOTE(review): table, column and index names are interpolated verbatim
    into the SQL -- they must come from trusted code, never from user input.
    """

    def __init__(self):
        self.columns: List[Column] = []
        self.indexes: List[Index] = []
        self.primary_keys: List[str] = []

    def add_column(self, column: Column) -> 'SchemaBuilder':
        """Add a column; primary-key columns also feed the PRIMARY KEY clause."""
        self.columns.append(column)
        if column.primary_key:
            self.primary_keys.append(column.name)
        return self

    def add_index(self, index: Index) -> 'SchemaBuilder':
        """Add an index definition (rendered by to_create_indexes)."""
        self.indexes.append(index)
        return self

    def _column_sql(self, col: Column) -> str:
        """Render one column definition fragment."""
        parts = [col.name]

        # Type (VARCHAR gets its length when one is configured)
        if col.type == ColumnType.VARCHAR and col.max_length:
            parts.append(f"VARCHAR({col.max_length})")
        else:
            parts.append(col.type.value)

        if not col.nullable:
            parts.append("NOT NULL")

        if col.default is not None:
            if isinstance(col.default, str):
                # BUG FIX: escape embedded single quotes (SQL doubles them)
                # so a default such as "it's" yields valid SQL instead of a
                # syntax error / injection point.
                escaped = col.default.replace("'", "''")
                parts.append(f"DEFAULT '{escaped}'")
            else:
                parts.append(f"DEFAULT {col.default}")

        if col.unique:
            parts.append("UNIQUE")

        return " ".join(parts)

    def to_create_table(self, table_name: str) -> str:
        """Generate a CREATE TABLE IF NOT EXISTS statement."""
        col_defs = [self._column_sql(col) for col in self.columns]

        # Table-level primary key (supports composite keys)
        if self.primary_keys:
            col_defs.append(f"PRIMARY KEY ({', '.join(self.primary_keys)})")

        sql = f"CREATE TABLE IF NOT EXISTS {table_name} (\n"
        sql += ",\n".join(f"  {col}" for col in col_defs)
        sql += "\n);"

        return sql

    def to_create_indexes(self, table_name: str) -> List[str]:
        """Generate CREATE INDEX IF NOT EXISTS statements for every index."""
        statements = []

        for idx in self.indexes:
            unique = "UNIQUE " if idx.unique else ""
            cols = ", ".join(idx.columns)
            statements.append(
                f"CREATE {unique}INDEX IF NOT EXISTS {idx.name} ON {table_name}({cols});"
            )

        return statements
113
+
114
+
115
class MigrationManager:
    """Database migration management on top of a SQL adapter.

    The adapter is assumed to provide
    ``execute(sql, params=None, fetch=..., fetch_one=...)`` and
    ``insert(table, row)`` -- inferred from usage here; confirm against
    the project's PostgreSQLAdapter.
    """

    def __init__(self, sql_adapter):
        self.sql = sql_adapter
        # Make sure the bookkeeping table exists before any migration runs.
        self._ensure_migrations_table()

    def _ensure_migrations_table(self):
        """Create the migrations tracking table when missing."""
        schema = """
        CREATE TABLE IF NOT EXISTS polydb_migrations (
            id SERIAL PRIMARY KEY,
            version VARCHAR(255) UNIQUE NOT NULL,
            name VARCHAR(255) NOT NULL,
            applied_at TIMESTAMP DEFAULT NOW(),
            rollback_sql TEXT,
            checksum VARCHAR(64)
        );
        """
        self.sql.execute(schema)

    def apply_migration(
        self,
        version: str,
        name: str,
        up_sql: str,
        down_sql: Optional[str] = None
    ) -> bool:
        """Apply a migration.

        Returns True when the migration ran, False when `version` was
        already applied (idempotent). Raises Exception, chained to the
        underlying error, when the migration SQL fails.
        """
        import hashlib

        # Idempotency: skip when this version was already recorded.
        existing = self.sql.execute(
            "SELECT version FROM polydb_migrations WHERE version = %s",
            [version],
            fetch_one=True
        )
        if existing:
            return False

        # Checksum lets later audits detect edited migration files.
        checksum = hashlib.sha256(up_sql.encode()).hexdigest()

        try:
            self.sql.execute(up_sql)

            self.sql.insert('polydb_migrations', {
                'version': version,
                'name': name,
                'rollback_sql': down_sql,
                'checksum': checksum
            })

            return True
        except Exception as e:
            # BUG FIX: chain the original exception (`from e`) so the real
            # cause and its traceback are preserved for the caller.
            raise Exception(f"Migration {version} failed: {str(e)}") from e

    def rollback_migration(self, version: str) -> bool:
        """Rollback a migration using its stored rollback SQL.

        Raises Exception when no rollback SQL was recorded, or (chained)
        when the rollback SQL itself fails.
        """
        migration = self.sql.execute(
            "SELECT rollback_sql FROM polydb_migrations WHERE version = %s",
            [version],
            fetch_one=True
        )

        if not migration or not migration.get('rollback_sql'):
            raise Exception(f"No rollback available for {version}")

        try:
            self.sql.execute(migration['rollback_sql'])

            # Forget the migration so it can be re-applied later.
            self.sql.execute(
                "DELETE FROM polydb_migrations WHERE version = %s",
                [version]
            )

            return True
        except Exception as e:
            # Preserve the causal chain, as in apply_migration.
            raise Exception(f"Rollback {version} failed: {str(e)}") from e

    def get_applied_migrations(self) -> List[Dict[str, Any]]:
        """Return applied migrations in application order (adapter result)."""
        return self.sql.execute(
            "SELECT * FROM polydb_migrations ORDER BY applied_at",
            fetch=True
        )