lumera 0.4.20__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lumera/locks.py ADDED
@@ -0,0 +1,216 @@
1
+ """
2
+ Lock management for preventing concurrent operations.
3
+
4
+ Provides two types of locks:
5
+ 1. Record-level locks: Lock specific records (uses platform lm_locks table)
6
+ 2. Operation-level locks: Lock entire operations globally (requires custom collection)
7
+
8
+ Available functions:
9
+ claim_record_locks() - Lock specific records for processing
10
+ release_record_locks() - Release previously claimed record locks
11
+ acquire_operation_lock() - Lock an entire operation (NOT YET IMPLEMENTED)
12
+ release_operation_lock() - Release operation lock (NOT YET IMPLEMENTED)
13
+ operation_lock() - Context manager for operation locks (NOT YET IMPLEMENTED)
14
+
15
+ Example:
16
+ >>> from lumera import locks
17
+ >>> result = locks.claim_record_locks("export", "deposits", ["dep_1", "dep_2"])
18
+ >>> for record_id in result["claimed"]:
19
+ ... process(record_id)
20
+ >>> locks.release_record_locks("export", record_ids=result["claimed"])
21
+ """
22
+
23
+ __all__ = [
24
+ "claim_record_locks",
25
+ "release_record_locks",
26
+ "acquire_operation_lock",
27
+ "release_operation_lock",
28
+ "operation_lock",
29
+ ]
30
+
31
+ from contextlib import contextmanager
32
+ from typing import Any, Iterator
33
+
34
+ # Import platform lock primitives from the main SDK module
35
+ from .sdk import claim_locks as _claim_locks
36
+ from .sdk import release_locks as _release_locks
37
+
38
+
39
def claim_record_locks(
    job_type: str,
    collection: str,
    record_ids: list[str],
    *,
    ttl_seconds: int = 900,
    job_id: str | None = None,
) -> dict[str, Any]:
    """Claim record-level locks backed by the platform lm_locks table.

    Ensures that no two workers process the same record at the same time.
    Records already held by another process are reported back as skipped
    rather than raising.

    Args:
        job_type: Workflow name (e.g., "deposit_processing")
        collection: Collection name
        record_ids: Record IDs to lock
        ttl_seconds: How long the locks are held, in seconds (default 900 = 15 minutes)
        job_id: Optional job identifier used to group related locks

    Returns:
        {
            "claimed": ["id1", "id2"],  # Successfully locked
            "skipped": ["id3"],         # Already locked elsewhere
            "ttl_seconds": 900
        }

    Example:
        >>> result = claim_record_locks(
        ...     job_type="export",
        ...     collection="deposits",
        ...     record_ids=["dep_1", "dep_2", "dep_3"]
        ... )
        >>> for dep_id in result["claimed"]:
        ...     process(dep_id)
        >>> release_record_locks("export", record_ids=result["claimed"])
    """
    # Pure delegation to the SDK primitive; collect the arguments first so
    # the call site stays a single expression.
    claim_args: dict[str, Any] = {
        "job_type": job_type,
        "collection": collection,
        "record_ids": record_ids,
        "ttl_seconds": ttl_seconds,
        "job_id": job_id,
    }
    return _claim_locks(**claim_args)
84
+
85
+
86
def release_record_locks(
    job_type: str,
    *,
    collection: str | None = None,
    record_ids: list[str] | None = None,
    job_id: str | None = None,
) -> int:
    """Release record-level locks claimed earlier via claim_record_locks().

    All keyword filters are optional; omitting them releases every lock
    held under the given job_type.

    Args:
        job_type: Workflow name (required)
        collection: Optional collection filter
        record_ids: Optional specific records to release
        job_id: Optional job identifier filter

    Returns:
        Number of locks released

    Example:
        >>> released = release_record_locks(
        ...     job_type="export",
        ...     record_ids=["dep_1", "dep_2"]
        ... )
        >>> print(f"Released {released} locks")
    """
    # Straight pass-through to the SDK primitive.
    released_count = _release_locks(
        job_type=job_type,
        collection=collection,
        record_ids=record_ids,
        job_id=job_id,
    )
    return released_count
114
+
115
+
116
+ # Operation-level locks (simple key-value locks)
117
+ # Note: These would need a custom collection like "export_locks" to be implemented
118
+ # For now, providing the interface that should be implemented
119
+
120
+
121
def acquire_operation_lock(
    lock_name: str, *, ttl_seconds: int = 600, wait: bool = False, wait_timeout: int = 30
) -> bool:
    """Acquire a global operation-level lock (NOT YET IMPLEMENTED).

    Guards whole operations (such as an export run) rather than individual
    records. Backed by a simple key-value lock, so it needs a dedicated
    locks collection — see the Charter Impact export_locks collection for
    a working example.

    Args:
        lock_name: Unique lock identifier (e.g., "csv_export")
        ttl_seconds: Lock duration in seconds (default 600 = 10 minutes)
        wait: If True, block until the lock frees up
        wait_timeout: Maximum seconds to wait when wait=True

    Returns:
        True when the lock is acquired; False when it is already held
        (only when wait=False)

    Raises:
        TimeoutError: If wait=True and the wait_timeout is exceeded
        NotImplementedError: Always, until a locks collection exists

    Example:
        >>> if acquire_operation_lock("csv_export"):
        ...     try:
        ...         perform_export()
        ...     finally:
        ...         release_operation_lock("csv_export")
        ... else:
        ...     print("Export already in progress")
    """
    # Intentionally unimplemented: the platform offers no built-in table
    # for operation-level locks, so one must be created per deployment.
    message = (
        "Operation-level locks require a custom locks collection. "
        "Create a collection like 'operation_locks' with fields: "
        "lock_name (text, unique), held_by (text), acquired_at (date), expires_at (date). "
        "Then implement acquire/release using pb.search/create/delete."
    )
    raise NotImplementedError(message)
160
+
161
+
162
def release_operation_lock(lock_name: str) -> bool:
    """Release a global operation-level lock (NOT YET IMPLEMENTED).

    Args:
        lock_name: Lock identifier previously passed to acquire_operation_lock()

    Returns:
        True when the lock was released; False when it was not held

    Raises:
        NotImplementedError: Always, until a locks collection exists

    Example:
        >>> release_operation_lock("csv_export")
    """
    # Same unimplemented backing store as acquire_operation_lock().
    message = (
        "Operation-level locks require a custom locks collection. "
        "See acquire_operation_lock() for details."
    )
    raise NotImplementedError(message)
181
+
182
+
183
@contextmanager
def operation_lock(
    lock_name: str, *, ttl_seconds: int = 600, wait: bool = False, wait_timeout: int = 30
) -> Iterator[None]:
    """Context manager wrapping acquire/release of an operation lock.

    Acquires the lock on entry and always releases it on exit, even when
    the managed block raises.

    Args:
        lock_name: Lock identifier
        ttl_seconds: Lock duration in seconds
        wait: Block until the lock frees up, if currently held
        wait_timeout: Maximum seconds to wait when wait=True

    Raises:
        NotImplementedError: If the operation locks collection doesn't exist
        TimeoutError: If wait=True and the timeout is exceeded
        RuntimeError: If the lock could not be acquired

    Example:
        >>> with operation_lock("csv_export"):
        ...     perform_export()
        ...     # Lock automatically released on exit
    """
    # Guard clause: refuse to enter the block without holding the lock.
    if not acquire_operation_lock(
        lock_name, ttl_seconds=ttl_seconds, wait=wait, wait_timeout=wait_timeout
    ):
        raise RuntimeError(f"Failed to acquire lock: {lock_name}")

    try:
        yield
    finally:
        # Release unconditionally, whether the block succeeded or raised.
        release_operation_lock(lock_name)
lumera/pb.py ADDED
@@ -0,0 +1,316 @@
1
+ """
2
+ Record operations for Lumera collections.
3
+
4
+ This module provides a clean interface for working with Lumera collections,
5
+ using the `pb.` namespace convention familiar from automation contexts.
6
+
7
+ Available functions:
8
+ search() - Query records with filters, pagination, sorting
9
+ get() - Get single record by ID
10
+ get_by_external_id() - Get record by external_id field
11
+ create() - Create new record
12
+ update() - Update existing record
13
+ upsert() - Create or update by external_id
14
+ delete() - Delete record
15
+ iter_all() - Iterate all matching records (auto-pagination)
16
+
17
+ Filter Syntax:
18
+ Filters can be passed as dict or JSON string to search() and iter_all().
19
+
20
+ Simple equality:
21
+ {"status": "pending"}
22
+ {"external_id": "dep-001"}
23
+
24
+ Comparison operators (eq, gt, gte, lt, lte):
25
+ {"amount": {"gt": 1000}}
26
+ {"amount": {"gte": 100, "lte": 500}}
27
+
28
+ OR logic:
29
+ {"or": [{"status": "pending"}, {"status": "review"}]}
30
+
31
+ AND logic (implicit - multiple fields at same level):
32
+ {"status": "pending", "amount": {"gt": 1000}}
33
+
34
+ Combined:
35
+ {"status": "active", "or": [{"priority": "high"}, {"amount": {"gt": 5000}}]}
36
+
37
+ Example:
38
+ >>> from lumera import pb
39
+ >>> results = pb.search("deposits", filter={"status": "pending"})
40
+ >>> deposit = pb.get("deposits", "rec_abc123")
41
+ """
42
+
43
+ from typing import Any, Iterator, Mapping
44
+
45
+ __all__ = [
46
+ "search",
47
+ "get",
48
+ "get_by_external_id",
49
+ "create",
50
+ "update",
51
+ "upsert",
52
+ "delete",
53
+ "iter_all",
54
+ ]
55
+
56
+ # Import underlying SDK functions (prefixed with _ to indicate internal use)
57
+ from .sdk import (
58
+ create_record as _create_record,
59
+ )
60
+ from .sdk import (
61
+ delete_record as _delete_record,
62
+ )
63
+ from .sdk import (
64
+ get_record as _get_record,
65
+ )
66
+ from .sdk import (
67
+ get_record_by_external_id as _get_record_by_external_id,
68
+ )
69
+ from .sdk import (
70
+ list_records as _list_records,
71
+ )
72
+ from .sdk import (
73
+ update_record as _update_record,
74
+ )
75
+ from .sdk import (
76
+ upsert_record as _upsert_record,
77
+ )
78
+
79
+
80
def search(
    collection: str,
    *,
    filter: Mapping[str, Any] | str | None = None,
    per_page: int = 50,
    page: int = 1,
    sort: str | None = None,
    expand: str | None = None,
) -> dict[str, Any]:
    """Query records in a collection with filtering, sorting, and paging.

    Args:
        collection: Collection name or ID
        filter: Filter as dict or JSON string. See module docstring for syntax.
            Examples:
            - {"status": "pending"} - equality
            - {"amount": {"gt": 1000}} - comparison
            - {"or": [{"status": "a"}, {"status": "b"}]} - OR logic
        per_page: Results per page (max 500, default 50)
        page: Page number, 1-indexed (default 1)
        sort: Sort expression (e.g., "-created,name" for created DESC, name ASC)
        expand: Comma-separated relation fields to expand (e.g., "user_id,company_id")

    Returns:
        Paginated results with structure:
        {
            "items": [...],  # List of records
            "page": 1,
            "perPage": 50,
            "totalItems": 100,
            "totalPages": 2
        }

    Example:
        >>> results = pb.search("deposits",
        ...     filter={"status": "pending", "amount": {"gt": 1000}},
        ...     per_page=100,
        ...     sort="-created"
        ... )
        >>> for deposit in results["items"]:
        ...     print(deposit["id"], deposit["amount"])
    """
    # _list_records accepts the filter as either a dict or a JSON string,
    # so it is forwarded as-is.
    query: dict[str, Any] = {
        "filter": filter,
        "per_page": per_page,
        "page": page,
        "sort": sort,
        "expand": expand,
    }
    return _list_records(collection, **query)
131
+
132
+
133
def get(collection: str, record_id: str) -> dict[str, Any]:
    """Fetch one record by its platform-assigned ID.

    Args:
        collection: Collection name or ID
        record_id: Record ID (15-character alphanumeric ID)

    Returns:
        Record data with all fields

    Raises:
        LumeraAPIError: If record doesn't exist (404)

    Example:
        >>> deposit = pb.get("deposits", "dep_abc123")
        >>> print(deposit["amount"])
    """
    record = _get_record(collection, record_id)
    return record
151
+
152
+
153
def get_by_external_id(collection: str, external_id: str) -> dict[str, Any]:
    """Fetch one record by its external_id (the caller's business key).

    Args:
        collection: Collection name or ID
        external_id: Value of the external_id field (your business identifier)

    Returns:
        Record data

    Raises:
        LumeraAPIError: If no record with that external_id (404)

    Example:
        >>> deposit = pb.get_by_external_id("deposits", "dep-2024-001")
    """
    record = _get_record_by_external_id(collection, external_id)
    return record
170
+
171
+
172
def create(collection: str, data: dict[str, Any]) -> dict[str, Any]:
    """Insert a new record into a collection.

    Args:
        collection: Collection name or ID
        data: Record data as dict mapping field names to values

    Returns:
        Created record with id, created, and updated timestamps

    Raises:
        LumeraAPIError: If validation fails or unique constraint violated

    Example:
        >>> deposit = pb.create("deposits", {
        ...     "external_id": "dep-001",
        ...     "amount": 1000,
        ...     "status": "pending"
        ... })
        >>> print(deposit["id"])
    """
    created = _create_record(collection, data)
    return created
194
+
195
+
196
def update(collection: str, record_id: str, data: dict[str, Any]) -> dict[str, Any]:
    """Partially update an existing record.

    Only the fields present in ``data`` are changed; others are untouched.

    Args:
        collection: Collection name or ID
        record_id: Record ID to update
        data: Fields to update (only include fields you want to change)

    Returns:
        Updated record

    Raises:
        LumeraAPIError: If record doesn't exist or validation fails

    Example:
        >>> deposit = pb.update("deposits", "dep_abc123", {
        ...     "status": "processed",
        ...     "processed_at": datetime.utcnow().isoformat()
        ... })
    """
    updated = _update_record(collection, record_id, data)
    return updated
217
+
218
+
219
def upsert(collection: str, data: dict[str, Any]) -> dict[str, Any]:
    """Create or update a record by external_id.

    If a record with the given external_id exists, updates it.
    Otherwise, creates a new record. This is useful for idempotent imports.

    Args:
        collection: Collection name or ID
        data: Record data (MUST include "external_id" field)

    Returns:
        Created or updated record

    Raises:
        ValueError: If data doesn't contain "external_id"
        LumeraAPIError: If validation fails

    Example:
        >>> deposit = pb.upsert("deposits", {
        ...     "external_id": "dep-001",
        ...     "amount": 1000,
        ...     "status": "pending"
        ... })
        >>> # Second call updates the existing record
        >>> deposit = pb.upsert("deposits", {
        ...     "external_id": "dep-001",
        ...     "amount": 2000
        ... })
    """
    # The docstring promises ValueError for a missing external_id; enforce
    # that here so callers fail fast instead of relying on a backend error.
    if "external_id" not in data:
        raise ValueError('upsert() requires an "external_id" field in data')
    return _upsert_record(collection, data)
249
+
250
+
251
def delete(collection: str, record_id: str) -> None:
    """Remove a record from a collection.

    Args:
        collection: Collection name or ID
        record_id: Record ID to delete

    Raises:
        LumeraAPIError: If record doesn't exist

    Example:
        >>> pb.delete("deposits", "dep_abc123")
    """
    # No return value: the SDK call either succeeds or raises.
    _delete_record(collection, record_id)
265
+
266
+
267
def iter_all(
    collection: str,
    *,
    filter: Mapping[str, Any] | str | None = None,
    sort: str | None = None,
    expand: str | None = None,
    batch_size: int = 500,
) -> Iterator[dict[str, Any]]:
    """Yield every matching record, fetching pages behind the scenes.

    Convenience wrapper around search() for large result sets — callers
    never deal with page numbers themselves.

    Args:
        collection: Collection name or ID
        filter: Optional filter as dict or JSON string (e.g., {"status": "pending"})
        sort: Optional sort expression (e.g., "-created" for newest first)
        expand: Optional comma-separated relation fields to expand
        batch_size: Records per API request (max 500, default 500)

    Yields:
        Individual records

    Example:
        >>> for deposit in pb.iter_all("deposits", filter={"status": "pending"}):
        ...     process(deposit)
    """
    current_page = 1
    known_total_pages: int | None = None  # unknown until the first response

    # Keep fetching while the server reports more pages (or we haven't asked yet).
    while known_total_pages is None or current_page <= known_total_pages:
        batch = search(
            collection,
            filter=filter,
            per_page=batch_size,
            page=current_page,
            sort=sort,
            expand=expand,
        )

        records = batch.get("items", [])
        if not records:
            # Empty page: nothing left to yield regardless of totals.
            return

        yield from records

        known_total_pages = batch.get("totalPages", 0)
        current_page += 1