pg-partsmith 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43):
  1. pg_partsmith/__init__.py +61 -0
  2. pg_partsmith/__version__.py +1 -0
  3. pg_partsmith/aio/__init__.py +24 -0
  4. pg_partsmith/aio/hooks.py +168 -0
  5. pg_partsmith/aio/lock/__init__.py +9 -0
  6. pg_partsmith/aio/lock/postgres.py +211 -0
  7. pg_partsmith/aio/lock/redis.py +266 -0
  8. pg_partsmith/aio/maintainer.py +187 -0
  9. pg_partsmith/aio/metadata.py +428 -0
  10. pg_partsmith/aio/protocols.py +255 -0
  11. pg_partsmith/aio/repositories/__init__.py +5 -0
  12. pg_partsmith/aio/repositories/creator.py +116 -0
  13. pg_partsmith/aio/repositories/fk_manager.py +57 -0
  14. pg_partsmith/aio/repositories/remover.py +256 -0
  15. pg_partsmith/aio/repositories/repository.py +119 -0
  16. pg_partsmith/aio/repositories/resolver.py +93 -0
  17. pg_partsmith/aio/service.py +209 -0
  18. pg_partsmith/aio/services/__init__.py +3 -0
  19. pg_partsmith/aio/services/base.py +67 -0
  20. pg_partsmith/aio/services/creation.py +305 -0
  21. pg_partsmith/aio/services/deletion.py +76 -0
  22. pg_partsmith/aio/services/detachment.py +74 -0
  23. pg_partsmith/aio/services/pruning.py +136 -0
  24. pg_partsmith/aio/services/validation.py +54 -0
  25. pg_partsmith/constants.py +18 -0
  26. pg_partsmith/entities.py +440 -0
  27. pg_partsmith/exceptions.py +87 -0
  28. pg_partsmith/protocols.py +86 -0
  29. pg_partsmith/py.typed +0 -0
  30. pg_partsmith/settings.py +114 -0
  31. pg_partsmith/strategies/__init__.py +15 -0
  32. pg_partsmith/strategies/base.py +87 -0
  33. pg_partsmith/strategies/day.py +52 -0
  34. pg_partsmith/strategies/month.py +50 -0
  35. pg_partsmith/strategies/selector.py +34 -0
  36. pg_partsmith/strategies/week.py +50 -0
  37. pg_partsmith/strategies/year.py +43 -0
  38. pg_partsmith/types.py +9 -0
  39. pg_partsmith/utils.py +287 -0
  40. pg_partsmith-0.1.0.dist-info/METADATA +529 -0
  41. pg_partsmith-0.1.0.dist-info/RECORD +43 -0
  42. pg_partsmith-0.1.0.dist-info/WHEEL +4 -0
  43. pg_partsmith-0.1.0.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,61 @@
1
"""PostgreSQL partition lifecycle management with extensible hooks."""

from .__version__ import __version__
from .entities import (
    MaintenanceIssueStep,
    MaintenanceResult,
    PartitionGranularity,
    PartitionInfo,
    PartitionStrategy,
    PartitionType,
    Period,
    TablePartitionConfig,
)
from .exceptions import (
    DropRetryExhaustedError,
    InvalidPartitionConfigError,
    LockAcquisitionError,
    PartitionAlreadyExistsError,
    PartitionAttachedError,
    PartitionDetachInProgressError,
    PartitionError,
    PartitionNotFoundError,
    UnmanagedPartitionDropError,
)
from .protocols import PeriodCalculator
from .strategies import (
    BasePeriodCalculator,
    DayPeriodCalculator,
    MonthPeriodCalculator,
    WeekPeriodCalculator,
    YearPeriodCalculator,
)
from .strategies.selector import get_period_calculator

# Public API of the package: entities, exceptions, period-calculator strategies
# and the selector helper. Kept sorted in ASCII order.
__all__ = [
    "BasePeriodCalculator",
    "DayPeriodCalculator",
    "DropRetryExhaustedError",
    "InvalidPartitionConfigError",
    "LockAcquisitionError",
    "MaintenanceIssueStep",
    "MaintenanceResult",
    "MonthPeriodCalculator",
    "PartitionAlreadyExistsError",
    "PartitionAttachedError",
    "PartitionDetachInProgressError",
    "PartitionError",
    "PartitionGranularity",
    "PartitionInfo",
    "PartitionNotFoundError",
    "PartitionStrategy",
    "PartitionType",
    "Period",
    "PeriodCalculator",
    "TablePartitionConfig",
    "UnmanagedPartitionDropError",
    "WeekPeriodCalculator",
    "YearPeriodCalculator",
    "__version__",
    "get_period_calculator",
]
@@ -0,0 +1 @@
1
# Single source of the package version; re-exported by pg_partsmith/__init__.py.
__version__ = "0.1.0"
@@ -0,0 +1,24 @@
1
"""Async implementations for partition management."""

from .hooks import BasePartitionLifecycleHooks, PartitionLifecycleHooks
from .lock import PostgresAdvisoryLockManager, RedisDistributedLockManager
from .maintainer import PartitionMaintainer, maintain_partitions
from .metadata import PostgresMetadataProvider
from .protocols import LockManager, PartitionMetadataProvider, PartitionRepository
from .repositories import PostgresPartitionRepository
from .service import PartitionLifecycleService

# Public async API of this subpackage. Kept sorted in ASCII order.
__all__ = [
    "BasePartitionLifecycleHooks",
    "LockManager",
    "PartitionLifecycleHooks",
    "PartitionLifecycleService",
    "PartitionMaintainer",
    "PartitionMetadataProvider",
    "PartitionRepository",
    "PostgresAdvisoryLockManager",
    "PostgresMetadataProvider",
    "PostgresPartitionRepository",
    "RedisDistributedLockManager",
    "maintain_partitions",
]
@@ -0,0 +1,168 @@
1
+ """Lifecycle hooks (middleware) and protocol for partition operations.
2
+
3
+ Hooks let you inject custom logic at each step of the partition lifecycle -
4
+ for example, exporting data before a partition is dropped, publishing events
5
+ to Kafka after a partition is created, or archiving rows before detachment.
6
+
7
+ Usage example::
8
+
9
+ class KafkaExportHooks(BasePartitionLifecycleHooks):
10
+ def __init__(self, producer: KafkaProducer) -> None:
11
+ self._producer = producer
12
+
13
+ async def before_drop(self, table_name: str, partition_name: str) -> None:
14
+ await self._producer.send("partition.before_drop", {
15
+ "table": table_name,
16
+ "partition": partition_name,
17
+ })
18
+
19
+ service = PartitionLifecycleService(
20
+ repo=repo,
21
+ metadata=metadata,
22
+ locks=locks,
23
+ period_calculator=calculator,
24
+ hooks=[KafkaExportHooks(producer)],
25
+ )
26
+ """
27
+
28
+ from typing import Protocol, runtime_checkable
29
+
30
+ from pg_partsmith.entities import PartitionInfo, TablePartitionConfig
31
+
32
+
33
@runtime_checkable
class PartitionLifecycleHooks(Protocol):
    """Protocol for partition lifecycle hooks.

    Implement this protocol to inject custom logic at each step of the partition
    lifecycle without inheriting from a specific base class.

    The hooks come in before/after pairs for the three lifecycle steps:
    create, detach and drop. All hooks are async and return nothing.
    """

    # Called before a partition is created; receives the table config plus the
    # name and boundary values the new partition will be given.
    async def before_create(
        self,
        config: TablePartitionConfig,
        partition_name: str,
        from_value: str,
        to_value: str,
    ) -> None: ...

    # Called after a partition has been created.
    async def after_create(
        self,
        config: TablePartitionConfig,
        partition: PartitionInfo,
    ) -> None: ...

    # Called before a partition is detached from its parent table.
    async def before_detach(
        self,
        table_name: str,
        partition: PartitionInfo,
    ) -> None: ...

    # Called after a partition has been detached.
    async def after_detach(
        self,
        table_name: str,
        partition_name: str,
    ) -> None: ...

    # Called before a partition table is dropped — the last chance to read or
    # export its data.
    async def before_drop(
        self,
        table_name: str,
        partition_name: str,
    ) -> None: ...

    # Called after a partition table has been dropped.
    async def after_drop(
        self,
        table_name: str,
        partition_name: str,
    ) -> None: ...
78
+
79
+
80
class BasePartitionLifecycleHooks:
    """Do-nothing default implementation of partition lifecycle hooks.

    Every hook is a no-op, so subclasses only need to override the steps
    they actually care about instead of implementing the whole protocol.
    """

    async def before_create(
        self, config: TablePartitionConfig, partition_name: str, from_value: str, to_value: str
    ) -> None:
        """Run custom logic just before a partition is created.

        Args:
            config: Table partition configuration.
            partition_name: Name the new partition will be given.
            from_value: Start boundary value.
            to_value: End boundary value.
        """

    async def after_create(self, config: TablePartitionConfig, partition: PartitionInfo) -> None:
        """Run custom logic once a partition has been created (and optionally attached).

        Args:
            config: Table partition configuration.
            partition: Info about the newly created partition.
        """

    async def before_detach(self, table_name: str, partition: PartitionInfo) -> None:
        """Run custom logic just before a partition is detached from its parent table.

        A natural place to export or archive rows while the partition is still
        reachable through the parent table's indexes and constraints.

        Args:
            table_name: Parent table name.
            partition: Info about the partition being detached.
        """

    async def after_detach(self, table_name: str, partition_name: str) -> None:
        """Run custom logic once a partition has been detached.

        Args:
            table_name: Parent table name.
            partition_name: Name of the detached partition.
        """

    async def before_drop(self, table_name: str, partition_name: str) -> None:
        """Run custom logic just before a partition table is dropped.

        The last opportunity to read or export data from the partition before
        it is permanently destroyed.

        Args:
            table_name: Parent table name.
            partition_name: Name of the partition about to be dropped.
        """

    async def after_drop(self, table_name: str, partition_name: str) -> None:
        """Run custom logic once a partition table has been dropped.

        Args:
            table_name: Parent table name.
            partition_name: Name of the dropped partition.
        """
@@ -0,0 +1,9 @@
1
"""Lock manager implementations."""

from .postgres import PostgresAdvisoryLockManager
from .redis import RedisDistributedLockManager

# Re-exported lock manager backends.
__all__ = [
    "PostgresAdvisoryLockManager",
    "RedisDistributedLockManager",
]
@@ -0,0 +1,211 @@
1
+ """PostgreSQL advisory lock manager."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import contextlib
7
+ import logging
8
+ import time
9
+ from contextlib import asynccontextmanager
10
+ from typing import TYPE_CHECKING
11
+
12
+ from sqlalchemy import text
13
+ from sqlalchemy.exc import SQLAlchemyError
14
+
15
+ from pg_partsmith.exceptions import LockAcquisitionError
16
+ from pg_partsmith.utils import calculate_lock_id
17
+
18
+ if TYPE_CHECKING:
19
+ from collections.abc import AsyncIterator
20
+ from contextlib import AbstractAsyncContextManager
21
+
22
+ from sqlalchemy.ext.asyncio import AsyncConnection, AsyncEngine
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
class PostgresAdvisoryLockManager:
    """Lock manager using PostgreSQL advisory locks.

    Holds the advisory lock on a dedicated AUTOCOMMIT connection from the
    engine pool. This guarantees the lock survives any number of commits or
    rollbacks on the caller's session, which is required when the caller
    needs to commit DDL (e.g. ATTACH PARTITION) before running
    DETACH PARTITION CONCURRENTLY.

    Override `_compute_lock_id` to customise the lock ID derivation.
    """

    def __init__(
        self,
        engine: AsyncEngine,
        prefix: str = "partitioner",
        acquire_min_interval_seconds: float = 0.0,
    ) -> None:
        """Initialize lock manager.

        Args:
            engine: SQLAlchemy async engine used to open a dedicated connection
                for the advisory lock.
            prefix: Prefix for lock key generation.
            acquire_min_interval_seconds: Minimum seconds between acquire attempts
                per table (rate limiting). 0 disables.
        """
        self._engine = engine
        self._prefix = prefix
        self._acquire_min_interval = max(0.0, acquire_min_interval_seconds)
        # Per-table monotonic timestamp of the most recently reserved acquire slot.
        self._last_acquire_time: dict[str, float] = {}
        # Protects _last_acquire_time only. Deliberately NOT held while sleeping
        # (see _respect_rate_limit) so one table's wait never blocks another's.
        self._rate_limit_lock = asyncio.Lock()

    def _compute_lock_id(self, table_name: str) -> int:
        """Compute the advisory lock ID for a table name.

        Override this method to customise the ID derivation strategy.

        Args:
            table_name: Table name to lock.

        Returns:
            Advisory lock ID.
        """
        return calculate_lock_id(table_name, prefix=self._prefix)

    def acquire_lock(self, table_name: str) -> AbstractAsyncContextManager[None]:
        """Acquire advisory lock for a table.

        Opens a dedicated AUTOCOMMIT connection from the engine pool and
        acquires a session-level advisory lock on it. The lock is released
        when the context manager exits, with cancellation-safe cleanup.

        Args:
            table_name: Table name to lock.

        Returns:
            Async context manager for the lock.

        Raises:
            LockAcquisitionError: If the lock cannot be acquired.
        """
        return self._lock_scope(table_name)

    @asynccontextmanager
    async def _lock_scope(self, table_name: str) -> AsyncIterator[None]:
        """Internal acquire/release flow for a single advisory lock."""
        await self._respect_rate_limit(table_name)
        lock_id = self._compute_lock_id(table_name)

        async with self._engine.connect() as base_conn:
            # AUTOCOMMIT keeps the session-level advisory lock independent of any
            # transaction begin/commit/rollback performed by the caller.
            conn = await base_conn.execution_options(isolation_level="AUTOCOMMIT")
            await self._try_acquire(conn, lock_id, table_name)

            body_exc: BaseException | None = None
            try:
                yield
            except BaseException as exc:
                # Remember the body's exception so release failures cannot mask it.
                body_exc = exc
                raise
            finally:
                await self._release_safely(conn, lock_id, table_name, body_exc)

    async def _respect_rate_limit(self, table_name: str) -> None:
        """Sleep enough to enforce the configured min-interval between acquires.

        The acquire slot is reserved while holding ``_rate_limit_lock`` and the
        sleep happens *after* the lock is released. The previous implementation
        slept while holding the lock, which serialized acquires across all
        tables and defeated the documented per-table rate limiting.
        """
        if self._acquire_min_interval <= 0:
            return
        delay = 0.0
        async with self._rate_limit_lock:
            now = time.monotonic()
            last = self._last_acquire_time.get(table_name)
            # A table's first acquire proceeds immediately; later acquires are
            # pushed to at least min-interval after the previously reserved slot.
            slot = now if last is None else max(now, last + self._acquire_min_interval)
            self._last_acquire_time[table_name] = slot
            delay = slot - now
        if delay > 0:
            await asyncio.sleep(delay)

    async def _try_acquire(self, conn: AsyncConnection, lock_id: int, table_name: str) -> None:
        """Run ``pg_try_advisory_lock`` and raise if not granted."""
        result = await conn.execute(text("SELECT pg_try_advisory_lock(:lock_id)"), {"lock_id": lock_id})
        if not result.scalar():
            raise LockAcquisitionError(table_name, "advisory lock unavailable")

    async def _release_safely(
        self,
        conn: AsyncConnection,
        lock_id: int,
        table_name: str,
        body_exc: BaseException | None,
    ) -> None:
        """Release the lock with shielding so cancellation cannot leak a held lock.

        Args:
            conn: AUTOCOMMIT connection holding the advisory lock.
            lock_id: Advisory lock ID to release.
            table_name: Table name (for diagnostics).
            body_exc: Exception raised by the protected body, if any; a plain
                release failure is only propagated when the body succeeded.
        """
        try:
            await asyncio.shield(self._unlock(conn, lock_id, table_name))
        except (asyncio.CancelledError, KeyboardInterrupt, SystemExit):
            # Defensively invalidate so the connection is not returned to the pool with a dangling lock.
            with contextlib.suppress(Exception):
                await asyncio.shield(conn.invalidate())
            raise
        except Exception:
            # Body exception takes precedence; otherwise propagate the unlock failure.
            if body_exc is None:
                raise
            logger.warning(
                "Failed to release advisory lock",
                extra={"table_name": table_name, "lock_id": lock_id},
            )

    async def _unlock(self, conn: AsyncConnection, lock_id: int, table_name: str) -> None:
        """Run ``pg_advisory_unlock``; invalidate the connection on any failure."""
        try:
            await conn.execute(text("SELECT pg_advisory_unlock(:lock_id)"), {"lock_id": lock_id})
        except (asyncio.CancelledError, KeyboardInterrupt, SystemExit):
            raise
        except (SQLAlchemyError, OSError) as e:
            logger.warning(
                "Failed to release advisory lock (recoverable)",
                extra={"table_name": table_name, "lock_id": lock_id, "error": str(e)},
            )
            with contextlib.suppress(Exception):
                await conn.invalidate()
            raise
        except Exception:
            logger.exception(
                "Unexpected error while releasing advisory lock",
                extra={"table_name": table_name, "lock_id": lock_id},
            )
            with contextlib.suppress(Exception):
                await conn.invalidate()
            raise

    async def is_locked(self, table_name: str) -> bool:
        """Check if lock is held by any session.

        Args:
            table_name: Table name.

        Returns:
            True if the advisory lock for the given table is currently held.
        """
        lock_id = self._compute_lock_id(table_name)
        # Split 64-bit lock_id into classid and objid as stored in pg_locks for int8 advisory locks (objsubid=1).
        # The halves are re-interpreted as signed 32-bit so they fit int4 binds
        # while matching the stored bit pattern.
        class_id = (lock_id >> 32) & 0xFFFFFFFF
        if class_id > 0x7FFFFFFF:
            class_id -= 0x100000000
        obj_id = lock_id & 0xFFFFFFFF
        if obj_id > 0x7FFFFFFF:
            obj_id -= 0x100000000

        async with self._engine.connect() as base_conn:
            conn = await base_conn.execution_options(isolation_level="AUTOCOMMIT")
            result = await conn.execute(
                text(
                    """
                    SELECT count(*)
                    FROM pg_locks
                    WHERE locktype = 'advisory'
                      AND granted = true
                      AND database = (SELECT oid FROM pg_database WHERE datname = current_database())
                      AND classid = CAST(:class_id AS int4)
                      AND objid = CAST(:obj_id AS int4)
                      AND objsubid = 1
                    """
                ),
                {"class_id": class_id, "obj_id": obj_id},
            )
            count = result.scalar()
            return bool(count is not None and count > 0)