fraiseql-confiture 0.3.7__cp311-cp311-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. confiture/__init__.py +48 -0
  2. confiture/_core.cpython-311-darwin.so +0 -0
  3. confiture/cli/__init__.py +0 -0
  4. confiture/cli/dry_run.py +116 -0
  5. confiture/cli/lint_formatter.py +193 -0
  6. confiture/cli/main.py +1893 -0
  7. confiture/config/__init__.py +0 -0
  8. confiture/config/environment.py +263 -0
  9. confiture/core/__init__.py +51 -0
  10. confiture/core/anonymization/__init__.py +0 -0
  11. confiture/core/anonymization/audit.py +485 -0
  12. confiture/core/anonymization/benchmarking.py +372 -0
  13. confiture/core/anonymization/breach_notification.py +652 -0
  14. confiture/core/anonymization/compliance.py +617 -0
  15. confiture/core/anonymization/composer.py +298 -0
  16. confiture/core/anonymization/data_subject_rights.py +669 -0
  17. confiture/core/anonymization/factory.py +319 -0
  18. confiture/core/anonymization/governance.py +737 -0
  19. confiture/core/anonymization/performance.py +1092 -0
  20. confiture/core/anonymization/profile.py +284 -0
  21. confiture/core/anonymization/registry.py +195 -0
  22. confiture/core/anonymization/security/kms_manager.py +547 -0
  23. confiture/core/anonymization/security/lineage.py +888 -0
  24. confiture/core/anonymization/security/token_store.py +686 -0
  25. confiture/core/anonymization/strategies/__init__.py +41 -0
  26. confiture/core/anonymization/strategies/address.py +359 -0
  27. confiture/core/anonymization/strategies/credit_card.py +374 -0
  28. confiture/core/anonymization/strategies/custom.py +161 -0
  29. confiture/core/anonymization/strategies/date.py +218 -0
  30. confiture/core/anonymization/strategies/differential_privacy.py +398 -0
  31. confiture/core/anonymization/strategies/email.py +141 -0
  32. confiture/core/anonymization/strategies/format_preserving_encryption.py +310 -0
  33. confiture/core/anonymization/strategies/hash.py +150 -0
  34. confiture/core/anonymization/strategies/ip_address.py +235 -0
  35. confiture/core/anonymization/strategies/masking_retention.py +252 -0
  36. confiture/core/anonymization/strategies/name.py +298 -0
  37. confiture/core/anonymization/strategies/phone.py +119 -0
  38. confiture/core/anonymization/strategies/preserve.py +85 -0
  39. confiture/core/anonymization/strategies/redact.py +101 -0
  40. confiture/core/anonymization/strategies/salted_hashing.py +322 -0
  41. confiture/core/anonymization/strategies/text_redaction.py +183 -0
  42. confiture/core/anonymization/strategies/tokenization.py +334 -0
  43. confiture/core/anonymization/strategy.py +241 -0
  44. confiture/core/anonymization/syncer_audit.py +357 -0
  45. confiture/core/blue_green.py +683 -0
  46. confiture/core/builder.py +500 -0
  47. confiture/core/checksum.py +358 -0
  48. confiture/core/connection.py +184 -0
  49. confiture/core/differ.py +522 -0
  50. confiture/core/drift.py +564 -0
  51. confiture/core/dry_run.py +182 -0
  52. confiture/core/health.py +313 -0
  53. confiture/core/hooks/__init__.py +87 -0
  54. confiture/core/hooks/base.py +232 -0
  55. confiture/core/hooks/context.py +146 -0
  56. confiture/core/hooks/execution_strategies.py +57 -0
  57. confiture/core/hooks/observability.py +220 -0
  58. confiture/core/hooks/phases.py +53 -0
  59. confiture/core/hooks/registry.py +295 -0
  60. confiture/core/large_tables.py +775 -0
  61. confiture/core/linting/__init__.py +70 -0
  62. confiture/core/linting/composer.py +192 -0
  63. confiture/core/linting/libraries/__init__.py +17 -0
  64. confiture/core/linting/libraries/gdpr.py +168 -0
  65. confiture/core/linting/libraries/general.py +184 -0
  66. confiture/core/linting/libraries/hipaa.py +144 -0
  67. confiture/core/linting/libraries/pci_dss.py +104 -0
  68. confiture/core/linting/libraries/sox.py +120 -0
  69. confiture/core/linting/schema_linter.py +491 -0
  70. confiture/core/linting/versioning.py +151 -0
  71. confiture/core/locking.py +389 -0
  72. confiture/core/migration_generator.py +298 -0
  73. confiture/core/migrator.py +882 -0
  74. confiture/core/observability/__init__.py +44 -0
  75. confiture/core/observability/audit.py +323 -0
  76. confiture/core/observability/logging.py +187 -0
  77. confiture/core/observability/metrics.py +174 -0
  78. confiture/core/observability/tracing.py +192 -0
  79. confiture/core/pg_version.py +418 -0
  80. confiture/core/pool.py +406 -0
  81. confiture/core/risk/__init__.py +39 -0
  82. confiture/core/risk/predictor.py +188 -0
  83. confiture/core/risk/scoring.py +248 -0
  84. confiture/core/rollback_generator.py +388 -0
  85. confiture/core/schema_analyzer.py +769 -0
  86. confiture/core/schema_to_schema.py +590 -0
  87. confiture/core/security/__init__.py +32 -0
  88. confiture/core/security/logging.py +201 -0
  89. confiture/core/security/validation.py +416 -0
  90. confiture/core/signals.py +371 -0
  91. confiture/core/syncer.py +540 -0
  92. confiture/exceptions.py +192 -0
  93. confiture/integrations/__init__.py +0 -0
  94. confiture/models/__init__.py +24 -0
  95. confiture/models/lint.py +193 -0
  96. confiture/models/migration.py +265 -0
  97. confiture/models/schema.py +203 -0
  98. confiture/models/sql_file_migration.py +225 -0
  99. confiture/scenarios/__init__.py +36 -0
  100. confiture/scenarios/compliance.py +586 -0
  101. confiture/scenarios/ecommerce.py +199 -0
  102. confiture/scenarios/financial.py +253 -0
  103. confiture/scenarios/healthcare.py +315 -0
  104. confiture/scenarios/multi_tenant.py +340 -0
  105. confiture/scenarios/saas.py +295 -0
  106. confiture/testing/FRAMEWORK_API.md +722 -0
  107. confiture/testing/__init__.py +100 -0
  108. confiture/testing/fixtures/__init__.py +11 -0
  109. confiture/testing/fixtures/data_validator.py +229 -0
  110. confiture/testing/fixtures/migration_runner.py +167 -0
  111. confiture/testing/fixtures/schema_snapshotter.py +352 -0
  112. confiture/testing/frameworks/__init__.py +10 -0
  113. confiture/testing/frameworks/mutation.py +587 -0
  114. confiture/testing/frameworks/performance.py +479 -0
  115. confiture/testing/loader.py +225 -0
  116. confiture/testing/pytest/__init__.py +38 -0
  117. confiture/testing/pytest_plugin.py +190 -0
  118. confiture/testing/sandbox.py +304 -0
  119. confiture/testing/utils/__init__.py +0 -0
  120. fraiseql_confiture-0.3.7.dist-info/METADATA +438 -0
  121. fraiseql_confiture-0.3.7.dist-info/RECORD +124 -0
  122. fraiseql_confiture-0.3.7.dist-info/WHEEL +4 -0
  123. fraiseql_confiture-0.3.7.dist-info/entry_points.txt +4 -0
  124. fraiseql_confiture-0.3.7.dist-info/licenses/LICENSE +21 -0
confiture/core/locking.py
@@ -0,0 +1,389 @@
+"""Distributed locking for migration coordination.
+
+Uses PostgreSQL advisory locks to ensure only one migration
+process runs at a time across all application instances.
+
+This is critical for Kubernetes/multi-pod deployments where
+multiple pods may start simultaneously and attempt to run migrations.
+
+PostgreSQL advisory locks are:
+- Session-scoped (auto-release on disconnect)
+- Reentrant (same session can acquire multiple times)
+- Database-scoped (different databases = different locks)
+"""
+
+import contextlib
+import hashlib
+import logging
+from collections.abc import Generator
+from contextlib import contextmanager
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    import psycopg
+
+logger = logging.getLogger(__name__)
+
+
+class LockMode(Enum):
+    """Lock acquisition modes."""
+
+    BLOCKING = "blocking"  # Wait until lock available
+    NON_BLOCKING = "non_blocking"  # Return immediately if locked
+
+
+@dataclass
+class LockConfig:
+    """Configuration for migration locking.
+
+    Attributes:
+        enabled: Whether locking is enabled (default: True)
+        timeout_ms: Lock acquisition timeout in milliseconds (default: 30000)
+        lock_id: Custom lock ID (auto-generated from database name if None)
+        mode: Lock acquisition mode (blocking or non-blocking)
+
+    Example:
+        >>> config = LockConfig(timeout_ms=60000)  # 1 minute timeout
+        >>> config = LockConfig(enabled=False)  # Disable locking
+        >>> config = LockConfig(mode=LockMode.NON_BLOCKING)  # Fail fast
+    """
+
+    enabled: bool = True
+    timeout_ms: int = 30000  # 30 seconds default
+    lock_id: int | None = None  # Custom lock ID (auto-generated if None)
+    mode: LockMode = field(default=LockMode.BLOCKING)
+
+
+class LockAcquisitionError(Exception):
+    """Raised when lock cannot be acquired.
+
+    Attributes:
+        timeout: True if the error was due to timeout, False otherwise
+    """
+
+    def __init__(self, message: str, timeout: bool = False):
+        super().__init__(message)
+        self.timeout = timeout
+
+
+class MigrationLock:
+    """Manages distributed locks for migration execution.
+
+    Uses PostgreSQL advisory locks which are:
+    - Session-scoped (auto-release on disconnect)
+    - Reentrant (same session can acquire multiple times)
+    - Database-scoped (different databases = different locks)
+
+    Advisory locks use two 32-bit integers: (classid, objid).
+    We use a fixed namespace (classid) and a database-specific objid.
+
+    Example:
+        >>> import psycopg
+        >>> conn = psycopg.connect('postgresql://localhost/mydb')
+        >>> lock = MigrationLock(conn)
+        >>> with lock.acquire():
+        ...     # Run migrations here - guaranteed exclusive access
+        ...     migrator.migrate_up()
+
+        >>> # Non-blocking mode
+        >>> lock = MigrationLock(conn, LockConfig(mode=LockMode.NON_BLOCKING))
+        >>> try:
+        ...     with lock.acquire():
+        ...         migrator.migrate_up()
+        ... except LockAcquisitionError:
+        ...     print("Another migration is running, skipping")
+    """
+
+    # Default lock namespace (first 32 bits of SHA256("confiture_migrations"))
+    DEFAULT_LOCK_NAMESPACE = 1751936052
+
+    def __init__(
+        self,
+        connection: "psycopg.Connection",
+        config: LockConfig | None = None,
+    ):
+        """Initialize migration lock.
+
+        Args:
+            connection: psycopg3 database connection
+            config: Lock configuration (uses defaults if None)
+        """
+        self.connection = connection
+        self.config = config or LockConfig()
+        self._lock_held = False
+        self._lock_id: int | None = None
+
+    def _get_lock_id(self) -> int:
+        """Get or generate the lock ID.
+
+        Returns:
+            Lock ID integer (32-bit positive)
+        """
+        if self._lock_id is not None:
+            return self._lock_id
+
+        if self.config.lock_id is not None:
+            self._lock_id = self.config.lock_id
+        else:
+            self._lock_id = self._generate_lock_id()
+
+        return self._lock_id
+
+    def _generate_lock_id(self) -> int:
+        """Generate deterministic lock ID from database name.
+
+        The lock ID is derived from the database name to ensure
+        each database has its own lock scope.
+
+        Returns:
+            32-bit positive integer lock ID
+        """
+        # Get database name from connection
+        with self.connection.cursor() as cur:
+            cur.execute("SELECT current_database()")
+            result = cur.fetchone()
+            db_name = result[0] if result else "unknown"
+
+        # Hash to 32-bit positive integer
+        hash_bytes = hashlib.sha256(db_name.encode()).digest()
+        return int.from_bytes(hash_bytes[:4], "big") & 0x7FFFFFFF
+
+    @contextmanager
+    def acquire(self) -> Generator[None, None, None]:
+        """Context manager for lock acquisition.
+
+        Acquires the lock on entry and releases it on exit (even if an
+        exception occurs). The lock is also automatically released if
+        the database connection drops.
+
+        Yields:
+            None - lock is held while in context
+
+        Raises:
+            LockAcquisitionError: If lock cannot be acquired
+
+        Example:
+            >>> with lock.acquire():
+            ...     # Exclusive access guaranteed here
+            ...     run_migrations()
+            # Lock automatically released here
+        """
+        if not self.config.enabled:
+            logger.debug("Locking disabled, skipping lock acquisition")
+            yield
+            return
+
+        try:
+            self._acquire_lock()
+            yield
+        finally:
+            self._release_lock()
+
+    def _acquire_lock(self) -> None:
+        """Acquire the advisory lock.
+
+        Raises:
+            LockAcquisitionError: If lock cannot be acquired
+        """
+        lock_id = self._get_lock_id()
+
+        if self.config.mode == LockMode.NON_BLOCKING:
+            self._acquire_non_blocking(lock_id)
+        else:
+            self._acquire_blocking(lock_id)
+
+        self._lock_held = True
+        logger.info(
+            f"Acquired migration lock (namespace={self.DEFAULT_LOCK_NAMESPACE}, id={lock_id})"
+        )
+
+    def _acquire_blocking(self, lock_id: int) -> None:
+        """Acquire lock with timeout.
+
+        Uses SET LOCAL statement_timeout to implement lock timeout.
+        This setting only affects the current transaction.
+
+        Args:
+            lock_id: Lock object ID
+
+        Raises:
+            LockAcquisitionError: If timeout expires
+        """
+        import psycopg
+
+        timeout_sec = self.config.timeout_ms / 1000
+
+        with self.connection.cursor() as cur:
+            # Set statement timeout for lock acquisition
+            # Using string formatting for timeout is safe (integer value)
+            cur.execute(f"SET LOCAL statement_timeout = '{self.config.timeout_ms}ms'")
+
+            try:
+                cur.execute(
+                    "SELECT pg_advisory_lock(%s, %s)",
+                    (self.DEFAULT_LOCK_NAMESPACE, lock_id),
+                )
+                # Reset statement timeout on success
+                cur.execute("SET LOCAL statement_timeout = '0'")
+            except psycopg.errors.QueryCanceled as e:
+                # Rollback the failed transaction to clear the error state
+                with contextlib.suppress(Exception):
+                    self.connection.rollback()
+                raise LockAcquisitionError(
+                    f"Could not acquire migration lock within {timeout_sec}s. "
+                    "Another migration may be running. "
+                    "Use --no-lock to bypass (dangerous in multi-pod environments).",
+                    timeout=True,
+                ) from e
+
+    def _acquire_non_blocking(self, lock_id: int) -> None:
+        """Try to acquire lock without waiting.
+
+        Uses pg_try_advisory_lock which returns immediately with
+        true (acquired) or false (locked by another session).
+
+        Args:
+            lock_id: Lock object ID
+
+        Raises:
+            LockAcquisitionError: If lock is held by another process
+        """
+        with self.connection.cursor() as cur:
+            cur.execute(
+                "SELECT pg_try_advisory_lock(%s, %s)",
+                (self.DEFAULT_LOCK_NAMESPACE, lock_id),
+            )
+            result = cur.fetchone()
+            acquired = result[0] if result else False
+
+        if not acquired:
+            # Get information about who holds the lock
+            holder = self.get_lock_holder()
+            holder_info = ""
+            if holder:
+                holder_info = (
+                    f" Held by PID {holder['pid']}"
+                    f" ({holder['application'] or 'unknown app'})"
+                    f" since {holder['started_at']}"
+                )
+
+            raise LockAcquisitionError(
+                f"Migration lock is held by another process.{holder_info} "
+                "Try again later or use blocking mode with --lock-timeout.",
+                timeout=False,
+            )
+
+    def _release_lock(self) -> None:
+        """Release the advisory lock.
+
+        Safe to call even if lock was not acquired (no-op in that case).
+        Logs a warning if release fails but does not raise an exception
+        since the lock will be released when the connection closes anyway.
+        """
+        if not self._lock_held:
+            return
+
+        lock_id = self._get_lock_id()
+
+        try:
+            with self.connection.cursor() as cur:
+                cur.execute(
+                    "SELECT pg_advisory_unlock(%s, %s)",
+                    (self.DEFAULT_LOCK_NAMESPACE, lock_id),
+                )
+                result = cur.fetchone()
+                unlocked = result[0] if result else False
+
+                if unlocked:
+                    logger.info(f"Released migration lock (id={lock_id})")
+                else:
+                    logger.warning(
+                        f"Lock release returned false (id={lock_id}) - lock may not have been held"
+                    )
+
+        except Exception as e:
+            # Don't raise - lock will be released when connection closes
+            logger.warning(f"Error releasing lock (id={lock_id}): {e}")
+        finally:
+            self._lock_held = False
+
+    def is_locked(self) -> bool:
+        """Check if migration lock is currently held (by any process).
+
+        This can be used to check if another migration is running
+        before attempting to acquire the lock.
+
+        Returns:
+            True if lock is held, False otherwise
+        """
+        lock_id = self._get_lock_id()
+
+        with self.connection.cursor() as cur:
+            cur.execute(
+                """
+                SELECT EXISTS (
+                    SELECT 1 FROM pg_locks
+                    WHERE locktype = 'advisory'
+                    AND classid = %s
+                    AND objid = %s
+                )
+                """,
+                (self.DEFAULT_LOCK_NAMESPACE, lock_id),
+            )
+            result = cur.fetchone()
+            return result[0] if result else False
+
+    def get_lock_holder(self) -> dict | None:
+        """Get information about the current lock holder.
+
+        Useful for diagnostics when a lock cannot be acquired.
+
+        Returns:
+            Dictionary with lock holder info, or None if lock not held:
+            - pid: Process ID holding the lock
+            - user: Database username
+            - application: Application name (from connection)
+            - client_addr: Client IP address
+            - started_at: When the session started
+        """
+        lock_id = self._get_lock_id()
+
+        with self.connection.cursor() as cur:
+            cur.execute(
+                """
+                SELECT
+                    l.pid,
+                    a.usename,
+                    a.application_name,
+                    a.client_addr,
+                    a.backend_start
+                FROM pg_locks l
+                JOIN pg_stat_activity a ON l.pid = a.pid
+                WHERE l.locktype = 'advisory'
+                AND l.classid = %s
+                AND l.objid = %s
+                """,
+                (self.DEFAULT_LOCK_NAMESPACE, lock_id),
+            )
+            result = cur.fetchone()
+
+        if result:
+            return {
+                "pid": result[0],
+                "user": result[1],
+                "application": result[2],
+                "client_addr": str(result[3]) if result[3] else None,
+                "started_at": result[4],
+            }
+        return None
+
+    @property
+    def lock_held(self) -> bool:
+        """Check if this instance currently holds the lock.
+
+        Returns:
+            True if this instance holds the lock, False otherwise
+        """
+        return self._lock_held
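
A minimal usage sketch of the locking API in confiture/core/locking.py above, as it might be wired into an application entrypoint. The DSN and the migrate() callable are placeholders, and non-blocking mode is chosen so that extra pods skip the run instead of queuing.

import psycopg

from confiture.core.locking import (
    LockAcquisitionError,
    LockConfig,
    LockMode,
    MigrationLock,
)


def run_migrations_once(dsn: str, migrate) -> bool:
    """Run migrations under the advisory lock; return True if this process ran them."""
    with psycopg.connect(dsn) as conn:
        lock = MigrationLock(conn, LockConfig(mode=LockMode.NON_BLOCKING))
        try:
            with lock.acquire():
                migrate()  # exclusive across all sessions on this database
                return True
        except LockAcquisitionError as exc:
            # Another session holds the (classid, objid) pair; holder details
            # (PID, application name, session start) are embedded in the message.
            print(f"Skipping migration run: {exc}")
            return False

Because the lock is session-scoped, a crashed pod releases it automatically when its connection drops, so no manual cleanup step is needed.
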
confiture/core/migration_generator.py
@@ -0,0 +1,298 @@
+"""Migration file generator from schema diffs.
+
+This module generates Python migration files from SchemaDiff objects.
+Each migration file contains up() and down() methods with the necessary SQL.
+"""
+
+from datetime import datetime
+from pathlib import Path
+
+from confiture.models.schema import SchemaChange, SchemaDiff
+
+
+class MigrationGenerator:
+    """Generates Python migration files from schema diffs.
+
+    Example:
+        >>> generator = MigrationGenerator(migrations_dir=Path("db/migrations"))
+        >>> diff = SchemaDiff(changes=[...])
+        >>> migration_file = generator.generate(diff, name="add_users_table")
+    """
+
+    def __init__(self, migrations_dir: Path):
+        """Initialize migration generator.
+
+        Args:
+            migrations_dir: Directory where migration files will be created
+        """
+        self.migrations_dir = migrations_dir
+
+    def generate(self, diff: SchemaDiff, name: str) -> Path:
+        """Generate migration file from schema diff.
+
+        Args:
+            diff: Schema diff containing changes
+            name: Name for the migration (snake_case)
+
+        Returns:
+            Path to generated migration file
+
+        Raises:
+            ValueError: If diff has no changes
+        """
+        if not diff.has_changes():
+            raise ValueError("No changes to generate migration from")
+
+        # Get next version number
+        version = self._get_next_version()
+
+        # Generate file path
+        filename = f"{version}_{name}.py"
+        filepath = self.migrations_dir / filename
+
+        # Generate migration code
+        code = self._generate_migration_code(diff, version, name)
+
+        # Write file
+        filepath.write_text(code)
+
+        return filepath
+
+    def _get_next_version(self) -> str:
+        """Get next sequential migration version number.
+
+        Returns:
+            Version string (e.g., "001", "002", etc.)
+        """
+        if not self.migrations_dir.exists():
+            return "001"
+
+        # Find existing migration files
+        migration_files = sorted(self.migrations_dir.glob("*.py"))
+
+        if not migration_files:
+            return "001"
+
+        # Extract version from last file (e.g., "003_name.py" -> 3)
+        last_file = migration_files[-1]
+        last_version_str = last_file.name.split("_")[0]
+
+        try:
+            last_version = int(last_version_str)
+            next_version = last_version + 1
+            return f"{next_version:03d}"
+        except ValueError:
+            # If we can't parse version, start over
+            return "001"
+
+    def _generate_migration_code(self, diff: SchemaDiff, version: str, name: str) -> str:
+        """Generate Python migration code.
+
+        Args:
+            diff: Schema diff containing changes
+            version: Version number
+            name: Migration name
+
+        Returns:
+            Python code as string
+        """
+        class_name = self._to_class_name(name)
+        timestamp = datetime.now().isoformat()
+
+        # Generate up and down statements
+        up_statements = self._generate_up_statements(diff.changes)
+        down_statements = self._generate_down_statements(diff.changes)
+
+        template = '''"""Migration: {name}
+
+Version: {version}
+Generated: {timestamp}
+"""
+
+from confiture.models.migration import Migration
+
+
+class {class_name}(Migration):
+    """Migration: {name}."""
+
+    version = "{version}"
+    name = "{name}"
+
+    def up(self) -> None:
+        """Apply migration."""
+{up_statements}
+
+    def down(self) -> None:
+        """Rollback migration."""
+{down_statements}
+'''
+
+        return template.format(
+            name=name,
+            version=version,
+            class_name=class_name,
+            up_statements=up_statements,
+            down_statements=down_statements,
+            timestamp=timestamp,
+        )
+
+    def _to_class_name(self, snake_case: str) -> str:
+        """Convert snake_case to PascalCase.
+
+        Args:
+            snake_case: String in snake_case format
+
+        Returns:
+            String in PascalCase format
+
+        Example:
+            >>> gen._to_class_name("add_users_table")
+            'AddUsersTable'
+        """
+        words = snake_case.split("_")
+        return "".join(word.capitalize() for word in words)
+
+    def _generate_up_statements(self, changes: list[SchemaChange]) -> str:
+        """Generate SQL statements for up migration.
+
+        Args:
+            changes: List of schema changes
+
+        Returns:
+            Python code with execute() calls
+        """
+        statements = []
+
+        for change in changes:
+            sql = self._change_to_up_sql(change)
+            if sql:
+                statements.append(f'        self.execute("{sql}")')
+
+        return "\n".join(statements) if statements else "        pass  # No operations"
+
+    def _generate_down_statements(self, changes: list[SchemaChange]) -> str:
+        """Generate SQL statements for down migration.
+
+        Args:
+            changes: List of schema changes
+
+        Returns:
+            Python code with execute() calls
+        """
+        statements = []
+
+        # Process changes in reverse order for rollback
+        for change in reversed(changes):
+            sql = self._change_to_down_sql(change)
+            if sql:
+                statements.append(f'        self.execute("{sql}")')
+
+        return "\n".join(statements) if statements else "        pass  # No operations"
+
+    def _change_to_up_sql(self, change: SchemaChange) -> str | None:
+        """Convert schema change to SQL for up migration.
+
+        Args:
+            change: Schema change
+
+        Returns:
+            SQL string or None if not applicable
+        """
+        if change.type == "ADD_TABLE":
+            # We don't have full schema info, so create a placeholder
+            return f"# TODO: ADD_TABLE {change.table}"
+
+        elif change.type == "DROP_TABLE":
+            return f"DROP TABLE {change.table}"
+
+        elif change.type == "RENAME_TABLE":
+            return f"ALTER TABLE {change.old_value} RENAME TO {change.new_value}"
+
+        elif change.type == "ADD_COLUMN":
+            # For ADD_COLUMN, we might have type info in new_value
+            col_def = change.new_value if change.new_value else "TEXT"
+            return f"ALTER TABLE {change.table} ADD COLUMN {change.column} {col_def}"
+
+        elif change.type == "DROP_COLUMN":
+            return f"ALTER TABLE {change.table} DROP COLUMN {change.column}"
+
+        elif change.type == "RENAME_COLUMN":
+            return (
+                f"ALTER TABLE {change.table} RENAME COLUMN {change.old_value} TO {change.new_value}"
+            )
+
+        elif change.type == "CHANGE_COLUMN_TYPE":
+            return (
+                f"ALTER TABLE {change.table} ALTER COLUMN {change.column} TYPE {change.new_value}"
+            )
+
+        elif change.type == "CHANGE_COLUMN_NULLABLE":
+            if change.new_value == "false":
+                return f"ALTER TABLE {change.table} ALTER COLUMN {change.column} SET NOT NULL"
+            else:
+                return f"ALTER TABLE {change.table} ALTER COLUMN {change.column} DROP NOT NULL"
+
+        elif change.type == "CHANGE_COLUMN_DEFAULT":
+            if change.new_value:
+                return f"ALTER TABLE {change.table} ALTER COLUMN {change.column} SET DEFAULT {change.new_value}"
+            else:
+                return f"ALTER TABLE {change.table} ALTER COLUMN {change.column} DROP DEFAULT"
+
+        return None
+
+    def _change_to_down_sql(self, change: SchemaChange) -> str | None:
+        """Convert schema change to SQL for down migration (reverse).
+
+        Args:
+            change: Schema change
+
+        Returns:
+            SQL string or None if not applicable
+        """
+        if change.type == "ADD_TABLE":
+            # Reverse of ADD is DROP
+            return f"DROP TABLE {change.table}"
+
+        elif change.type == "DROP_TABLE":
+            # Can't recreate without schema info
+            return f"# WARNING: Cannot auto-generate down migration for DROP_TABLE {change.table}"
+
+        elif change.type == "RENAME_TABLE":
+            # Reverse the rename
+            return f"ALTER TABLE {change.new_value} RENAME TO {change.old_value}"
+
+        elif change.type == "ADD_COLUMN":
+            # Reverse of ADD is DROP
+            return f"ALTER TABLE {change.table} DROP COLUMN {change.column}"
+
+        elif change.type == "DROP_COLUMN":
+            # Can't recreate without schema info
+            return f"# WARNING: Cannot auto-generate down migration for DROP_COLUMN {change.table}.{change.column}"
+
+        elif change.type == "RENAME_COLUMN":
+            # Reverse the rename
+            return (
+                f"ALTER TABLE {change.table} RENAME COLUMN {change.new_value} TO {change.old_value}"
+            )
+
+        elif change.type == "CHANGE_COLUMN_TYPE":
+            # Reverse the type change
+            return (
+                f"ALTER TABLE {change.table} ALTER COLUMN {change.column} TYPE {change.old_value}"
+            )
+
+        elif change.type == "CHANGE_COLUMN_NULLABLE":
+            # Reverse the nullable change
+            if change.old_value == "false":
+                return f"ALTER TABLE {change.table} ALTER COLUMN {change.column} SET NOT NULL"
+            else:
+                return f"ALTER TABLE {change.table} ALTER COLUMN {change.column} DROP NOT NULL"
+
+        elif change.type == "CHANGE_COLUMN_DEFAULT":
+            # Reverse the default change
+            if change.old_value:
+                return f"ALTER TABLE {change.table} ALTER COLUMN {change.column} SET DEFAULT {change.old_value}"
+            else:
+                return f"ALTER TABLE {change.table} ALTER COLUMN {change.column} DROP DEFAULT"
+
+        return None
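
A short sketch of driving the generator in confiture/core/migration_generator.py directly. The SchemaChange keyword arguments are assumed from the attributes this module reads (type, table, column, old_value, new_value) rather than from a documented constructor, and the paths and names are illustrative.

from pathlib import Path

from confiture.core.migration_generator import MigrationGenerator
from confiture.models.schema import SchemaChange, SchemaDiff

# One additive change: ADD_COLUMN maps to ALTER TABLE ... ADD COLUMN in up()
# and the matching DROP COLUMN in down().
change = SchemaChange(
    type="ADD_COLUMN",
    table="users",
    column="email",
    old_value=None,
    new_value="TEXT",
)
diff = SchemaDiff(changes=[change])

generator = MigrationGenerator(migrations_dir=Path("db/migrations"))
migration_file = generator.generate(diff, name="add_email_to_users")
print(migration_file)  # e.g. db/migrations/001_add_email_to_users.py

Destructive changes (DROP_TABLE, DROP_COLUMN) produce commented warnings in down() rather than runnable SQL, since the original schema definition is not available to the generator.
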