lumera 0.4.6__py3-none-any.whl → 0.9.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
- lumera/__init__.py +99 -4
- lumera/_utils.py +782 -0
- lumera/automations.py +904 -0
- lumera/exceptions.py +72 -0
- lumera/files.py +97 -0
- lumera/google.py +47 -270
- lumera/integrations/__init__.py +34 -0
- lumera/integrations/google.py +338 -0
- lumera/llm.py +481 -0
- lumera/locks.py +216 -0
- lumera/pb.py +679 -0
- lumera/sdk.py +927 -380
- lumera/storage.py +270 -0
- lumera/webhooks.py +304 -0
- lumera-0.9.6.dist-info/METADATA +37 -0
- lumera-0.9.6.dist-info/RECORD +18 -0
- {lumera-0.4.6.dist-info → lumera-0.9.6.dist-info}/WHEEL +1 -1
- lumera-0.4.6.dist-info/METADATA +0 -11
- lumera-0.4.6.dist-info/RECORD +0 -7
- {lumera-0.4.6.dist-info → lumera-0.9.6.dist-info}/top_level.txt +0 -0
lumera/sdk.py
CHANGED
@@ -1,451 +1,998 @@
-
-
-import mimetypes
-import os
-import pathlib
-import time as _time
-from functools import wraps as _wraps
-from typing import IO, Iterable, TypedDict
-
-import requests
-from dotenv import load_dotenv
-
-# ---------------------------------------------------------------------------
-# Environment variables inside the kernel VM
-# ---------------------------------------------------------------------------
-
-TOKEN_ENV = "LUMERA_TOKEN"
-BASE_URL_ENV = "LUMERA_BASE_URL"
-ENV_PATH = "/root/.env"
-
-# Load variables from /root/.env if it exists (and also current dir .env)
-load_dotenv(override=False)  # Local .env (no-op in prod)
-load_dotenv(ENV_PATH, override=False)
+"""
+Low-level SDK implementation - prefer high-level modules instead.
 
+For most use cases, import from these modules instead of sdk.py:
 
-#
+    from lumera import pb       # Record operations (pb.search, pb.create, pb.update, etc.)
+    from lumera import storage  # File uploads (storage.upload, storage.upload_file)
+    from lumera import llm      # LLM completions (llm.complete, llm.chat, llm.embed)
+    from lumera import locks    # Locking (locks.claim_record_locks, locks.release_record_locks)
 
-
-
-
-
-MOUNT_ROOT = os.getenv(MOUNT_ROOT_ENV, DEFAULT_MOUNT_ROOT)
+Example:
+    # Instead of:
+    from lumera.sdk import list_records, create_record
+    result = list_records("deposits", filter={"status": "pending"})
 
+    # Use:
+    from lumera import pb
+    result = pb.search("deposits", filter={"status": "pending"})
 
-
-
-
-
-
-def _ensure_token() -> str:
-    """Return the personal Lumera token, loading /root/.env if necessary."""
+The functions in this module are used internally by the high-level modules.
+Direct usage is discouraged unless you need low-level control.
+"""
 
-
-
-
+import json
+import os
+import warnings
+from typing import Any, Iterable, Mapping, MutableMapping, Sequence, TypedDict
+
+import requests as _requests
+
+from ._utils import (
+    API_BASE as _API_BASE,
+)
+from ._utils import (
+    LEGACY_MOUNT_ROOT as _LEGACY_MOUNT_ROOT,
+)
+from ._utils import (
+    MOUNT_ROOT as _MOUNT_ROOT,
+)
+from ._utils import (
+    TOKEN_ENV as _TOKEN_ENV,
+)
+from ._utils import (
+    LumeraAPIError as _LumeraAPIError,
+)
+from ._utils import (
+    RecordNotUniqueError as _RecordNotUniqueError,
+)
+from ._utils import (
+    _api_request,
+    _api_url,
+    _default_provenance,
+    _ensure_mapping,
+    _is_sequence,
+    _prepare_automation_inputs,
+    _record_mutation,
+    _upload_automation_files,
+    _upload_automation_run_file,
+    _upload_document,
+    _upload_lumera_file,
+    _upload_session_file,
+)
+from ._utils import (
+    get_access_token as _get_access_token,
+)
+from ._utils import (
+    get_google_access_token as _get_google_access_token,
+)
+from ._utils import (
+    get_lumera_token as _get_lumera_token,
+)
+from ._utils import (
+    log_timed as _log_timed,
+)
+from ._utils import (
+    open_file as _open_file,
+)
+from ._utils import (
+    resolve_path as _resolve_path,
+)
+from ._utils import (
+    to_filerefs as _to_filerefs,
+)
+
+# Expose shared symbols for backwards-compatible imports.
+requests = _requests
+API_BASE = _API_BASE
+MOUNT_ROOT = _MOUNT_ROOT
+LEGACY_MOUNT_ROOT = _LEGACY_MOUNT_ROOT
+TOKEN_ENV = _TOKEN_ENV
+LumeraAPIError = _LumeraAPIError
+RecordNotUniqueError = _RecordNotUniqueError
+get_access_token = _get_access_token
+get_google_access_token = _get_google_access_token
+get_lumera_token = _get_lumera_token
+log_timed = _log_timed
+open_file = _open_file
+resolve_path = _resolve_path
+to_filerefs = _to_filerefs
+
+
+def upload_lumera_file(
+    collection_id_or_name: str,
+    field_name: str,
+    file_path: str | os.PathLike[str],
+    *,
+    record_id: str | None = None,
+) -> dict[str, Any]:
+    """Upload a file to a ``lumera_file`` field and return its descriptor.
+
+    The returned descriptor can be assigned directly in ``create_record`` or
+    ``update_record`` payloads (single file or appended to a list for
+    multi-select fields).
+
+    Example::
+
+        descriptor = upload_lumera_file("projects", "attachments", "~/report.pdf")
+        create_record("projects", {"attachments": [descriptor]})
+    """
 
-
-
+    return _upload_lumera_file(
+        collection_id_or_name,
+        field_name,
+        file_path,
+        record_id=record_id,
+        api_request=_api_request,
     )
 
 
 # ---------------------------------------------------------------------------
-#
+# Unified FileRef helpers
 # ---------------------------------------------------------------------------
 
 
-
-
-
-
-
-
-
-
-
-
-# Accept multiple formats returned by the API (epoch seconds or ISO-8601), or
-# ``None`` when the token never expires.
+class FileRef(TypedDict, total=False):
+    scope: str
+    id: str
+    name: str
+    path: str
+    run_path: str
+    object_name: str
+    mime: str
+    size: int
 
 
-
-
+class CollectionField(TypedDict, total=False):
+    id: str
+    name: str
+    type: str
+    system: bool
+    required: bool
+    presentable: bool
+    hidden: bool
+    options: dict[str, Any]
+
+
+class HookReplayResult(TypedDict, total=False):
+    hook_id: str
+    hook_name: str
+    status: str
+    error: str
+    event_log_id: str
+    replay_id: str
+
+
+_UNSET = object()
+
+
+def list_collections() -> dict[str, Any]:
+    """Return all PocketBase collections visible to the current tenant."""
+
+    return _api_request("GET", "collections")
+
+
+def get_collection(collection_id_or_name: str) -> dict[str, Any]:
+    """Retrieve a single PocketBase collection by name or id."""
+
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    return _api_request("GET", f"collections/{collection_id_or_name}")
+
+
+def ensure_collection(
+    name: str,
+    *,
+    collection_type: str = "base",
+    schema: Iterable[CollectionField] | object = _UNSET,
+    id: str | None = None,
+    indexes: Iterable[str] | object = _UNSET,
+) -> dict[str, Any]:
+    """Ensure a collection exists with the given schema and indexes.
+
+    This is an idempotent operation - it creates the collection if it doesn't exist,
+    or updates it if it does. Safe to call multiple times with the same arguments.
+
+    The `schema` field should contain ONLY user-defined fields. System fields
+    (id, created, updated, created_by, updated_by, external_id, lm_provenance)
+    are automatically managed by Lumera and should not be included.
+
+    The `indexes` field should contain ONLY user-defined indexes. System indexes
+    (external_id unique index, updated index) are automatically managed.
+
+    Args:
+        name: Collection name (display name, can be renamed later).
+        collection_type: Collection type, defaults to "base".
+        schema: List of field definitions. If provided, replaces all user fields.
+            If omitted, existing fields are preserved.
+        id: Optional stable identifier. If provided on creation, this ID will be
+            used instead of an auto-generated one. The ID remains stable even if
+            the collection is renamed. Must be alphanumeric with underscores only.
+            Cannot be changed after creation.
+        indexes: List of index DDL statements. If provided, replaces all user indexes.
+            If omitted, existing indexes are preserved.
+
+    Returns:
+        The collection data including:
+        - id: The collection's stable identifier
+        - name: The collection's display name
+        - schema: User-defined fields only
+        - indexes: User-defined indexes only
+        - systemInfo: Object with system-managed fields and indexes (read-only)
+
+    Example:
+        # Create with stable ID for automations
+        coll = ensure_collection(
+            "Customer Orders Q1",
+            schema=[
+                {"name": "amount", "type": "number", "required": True},
+            ],
+            id="orders",  # stable reference
+        )
 
-
+        # Later, rename collection but ID stays "orders"
+        # Automations using search("orders", ...) still work!
     """
+    if not name or not name.strip():
+        raise ValueError("name is required")
 
-
-
-
-    if isinstance(expires_at, (int, float)):
-        return float(expires_at)
-
-    # Assume RFC 3339 / ISO 8601 string.
-    if isinstance(expires_at, str):
-        if expires_at.endswith("Z"):
-            expires_at = expires_at[:-1] + "+00:00"
-        return _dt.datetime.fromisoformat(expires_at).timestamp()
-
-    raise TypeError(f"Unsupported expires_at format: {type(expires_at)!r}")
-
-
-def _fetch_access_token(provider: str) -> tuple[str, float]:
-    """Call the Lumera API to obtain a valid access token for *provider*."""
+    name = name.strip()
+    payload: dict[str, Any] = {}
 
-
-
-        raise ValueError("provider is required")
+    if collection_type:
+        payload["type"] = collection_type
 
-
+    if id is not None and id.strip():
+        payload["id"] = id.strip()
 
-
-
+    if schema is not _UNSET:
+        if schema is None:
+            raise ValueError("schema cannot be None; provide an iterable of fields or omit")
+        payload["schema"] = [dict(field) for field in schema]
 
-
-
+    if indexes is not _UNSET:
+        payload["indexes"] = list(indexes) if indexes is not None else []
 
-
-    access_token = data.get("access_token")
-    expires_at = data.get("expires_at")
+    return _api_request("PUT", f"collections/{name}", json_body=payload)
 
-    if not access_token:
-        raise RuntimeError(f"Malformed response from Lumera when fetching {provider} access token")
 
-
-
+# Backwards compatibility aliases
+def create_collection(
+    name: str,
+    *,
+    collection_type: str = "base",
+    schema: Iterable[CollectionField] | None = None,
+    indexes: Iterable[str] | None = None,
+) -> dict[str, Any]:
+    """Create a new PocketBase collection.
 
-
-
-    """Return a cached access token for *provider* valid
-    *min_valid_seconds*.
-
-    Automatically refreshes tokens via the Lumera API when they are missing or
-    close to expiry. For tokens without an expiry (API keys) the first value
-    is cached indefinitely.
+    .. deprecated::
+        Use :func:`ensure_collection` instead, which handles both create and update.
     """
+    warnings.warn(
+        "create_collection() is deprecated, use ensure_collection() instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return ensure_collection(
+        name,
+        collection_type=collection_type,
+        schema=schema if schema is not None else [],
+        indexes=indexes if indexes is not None else [],
+    )
 
-    global _token_cache
 
-
-
-
+def update_collection(
+    collection_id_or_name: str,
+    *,
+    name: str | None | object = _UNSET,
+    collection_type: str | None | object = _UNSET,
+    schema: Iterable[CollectionField] | object = _UNSET,
+    indexes: Iterable[str] | object = _UNSET,
+) -> dict[str, Any]:
+    """Update a PocketBase collection.
 
-
+    .. deprecated::
+        Use :func:`ensure_collection` instead, which handles both create and update.
+        Note: The 'name' parameter for renaming is no longer supported.
+    """
+    warnings.warn(
+        "update_collection() is deprecated, use ensure_collection() instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    if name is not _UNSET and name != collection_id_or_name:
+        raise ValueError("Renaming collections via 'name' parameter is no longer supported")
+
+    return ensure_collection(
+        collection_id_or_name,
+        collection_type=collection_type if collection_type is not _UNSET else "base",
+        schema=schema,
+        indexes=indexes,
+    )
 
-    cached = _token_cache.get(provider)
-    if cached is not None:
-        access_token, expiry_ts = cached
-        if (expiry_ts - now) >= min_valid_seconds:
-            return access_token
 
-
-
-
-
+def delete_collection(collection_id_or_name: str) -> None:
+    """Delete a PocketBase collection by name or id."""
+
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    _api_request("DELETE", f"collections/{collection_id_or_name}")
+
+
+def list_records(
+    collection_id_or_name: str,
+    *,
+    page: int | None = None,
+    per_page: int | None = None,
+    limit: int | None = None,
+    offset: int | None = None,
+    sort: str | None = None,
+    filter: Mapping[str, Any] | Sequence[Any] | None = None,
+    expand: str | None = None,
+) -> dict[str, Any]:
+    """List records for the given collection.
+
+    Args:
+        collection_id_or_name: Collection name or ID. Required.
+        page: 1-based page index for paginated queries (mutually exclusive
+            with ``offset``/``limit``).
+        per_page: Page size (max 200). Only used when ``page`` is provided.
+        limit: Alternative to ``per_page`` for cursor-style queries.
+        offset: Starting offset for cursor-style queries.
+        sort: Optional sort expression (e.g. ``"-created"``).
+        filter: Filter as dict (JSON object). String filters are NOT supported.
+            Use dict syntax with optional comparison operators:
+
+            * Simple equality: ``{"status": "pending"}``
+            * Comparison: ``{"amount": {"gt": 1000}}`` (gt, gte, lt, lte, eq)
+            * OR logic: ``{"or": [{"status": "a"}, {"status": "b"}]}``
+            * AND (implicit): ``{"status": "active", "amount": {"gt": 100}}``
+
+            The SDK JSON-encodes the filter for the API.
+        expand: Optional comma-separated list of relation fields to expand.
+            Expanded relations are included inline in the record response.
+            Example: ``"user_id,company_id"`` or ``"line_items_via_deposit_id"``
+
+    Returns:
+        The raw response from ``GET /collections/{id}/records`` including
+        ``items``, ``page``/``perPage`` metadata, etc.
+    """
 
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+
+    params: dict[str, Any] = {}
+    if page is not None:
+        params["page"] = page
+    if per_page is not None:
+        params["perPage"] = per_page
+    if limit is not None:
+        params["limit"] = limit
+    if offset is not None:
+        params["offset"] = offset
+    if sort is not None:
+        params["sort"] = sort
+    if filter is not None:
+        params["filter"] = json.dumps(filter)
+    if expand is not None:
+        params["expand"] = expand
+
+    path = f"collections/{collection_id_or_name}/records"
+    return _api_request("GET", path, params=params or None)
+
+
+def query_sql(
+    sql: str,
+    *,
+    params: Mapping[str, Any] | None = None,
+    args: Sequence[Any] | None = None,
+) -> dict[str, Any]:
+    """Execute a read-only PocketBase SQL query via ``POST /pb/sql``.
+
+    Args:
+        sql: The SQL statement to execute. Must be a SELECT/read-only
+            query; write operations are rejected by the API.
+        params: Optional dict of named parameters referenced in the SQL via
+            ``{{param}}`` placeholders. Mutually exclusive with ``args``.
+        args: Optional sequence of positional parameters for ``?``
+            placeholders. Mutually exclusive with ``params``.
+
+    Returns:
+        The JSON response from ``/pb/sql`` including ``columns`` and
+        ``rows`` (when applicable), ``rowsAffected``/``lastInsertId`` (for
+        compatible statements), and ``durationMs``.
+    """
 
-
+    sql_text = (sql or "").strip()
+    if not sql_text:
+        raise ValueError("sql is required")
+    if params and args:
+        raise ValueError("provide either params or args, not both")
 
+    payload: dict[str, Any] = {"sql": sql_text}
+    if params is not None:
+        if not isinstance(params, Mapping):
+            raise TypeError("params must be a mapping")
+        payload["params"] = dict(params)
+    if args is not None:
+        if isinstance(args, (str, bytes)):
+            raise TypeError("args must be a sequence of values, not a string")
+        payload["args"] = list(args)
 
-
-
-
+    response = _api_request("POST", "pb/sql", json_body=payload)
+    if isinstance(response, MutableMapping):
+        return dict(response)
+    raise RuntimeError("unexpected response payload")
 
-    return get_access_token("google", min_valid_seconds=min_valid_seconds)
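
A sketch of the two mutually exclusive parameter styles that query_sql accepts - named {{param}} placeholders or positional ? args (table and column names are illustrative):

    from lumera import sdk

    by_name = sdk.query_sql(
        "SELECT id, amount FROM deposits WHERE status = {{status}}",
        params={"status": "pending"},
    )
    by_position = sdk.query_sql(
        "SELECT id, amount FROM deposits WHERE status = ?",
        args=["pending"],
    )
    print(by_name["columns"], len(by_name["rows"]))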
 
+def get_record(collection_id_or_name: str, record_id: str) -> dict[str, Any]:
+    """Retrieve a single record by id."""
 
-
-
-
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    if not record_id:
+        raise ValueError("record_id is required")
 
+    path = f"collections/{collection_id_or_name}/records/{record_id}"
+    return _api_request("GET", path)
 
-_logger = _logging.getLogger(__name__)
 
+def get_record_by_external_id(collection_id_or_name: str, external_id: str) -> dict[str, Any]:
+    """Retrieve a record by its unique external_id."""
 
-
-
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    if not external_id:
+        raise ValueError("external_id is required")
 
-
+    response = list_records(
+        collection_id_or_name,
+        per_page=1,
+        filter={"external_id": external_id},
+    )
+    items = response.get("items") if isinstance(response, dict) else None
+    if not items:
+        url = _api_url(f"collections/{collection_id_or_name}/records")
+        raise LumeraAPIError(404, "record not found", url=url, payload=None)
+    first = items[0]
+    if not isinstance(first, dict):
+        raise RuntimeError("unexpected response payload")
+    return first
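
Since the lookup above is a filtered list_records call that raises a 404-style LumeraAPIError on a miss, a simple get-or-create wrapper follows naturally. A sketch (the external_id value is illustrative, and catching every LumeraAPIError is a simplification - a real caller would inspect the error's status first):

    from lumera import sdk
    from lumera.sdk import LumeraAPIError

    try:
        record = sdk.get_record_by_external_id("deposits", "bank-txn-0001")
    except LumeraAPIError:
        # Not found: the helper raised its 404 error, so create the record.
        record = sdk.create_record("deposits", {"external_id": "bank-txn-0001"})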
+
+
+def run_automation(
+    automation_id: str,
+    *,
+    inputs: Mapping[str, Any] | str | None = None,
+    files: Mapping[str, str | os.PathLike[str] | Sequence[str | os.PathLike[str]]] | None = None,
+    status: str | None = None,
+    error: str | None = None,
+    provenance: Mapping[str, Any] | None = None,
+    external_id: str | None = None,
+    metadata: Mapping[str, Any] | None = None,
+) -> dict[str, Any]:
+    """Create an automation run and optionally upload files for file inputs.
+
+    Args:
+        automation_id: The automation to run. Required.
+        inputs: Inputs payload (dict or JSON string). File refs are resolved automatically.
+        files: Mapping of input key -> path(s) to upload before run creation.
+        status: Optional initial status (defaults to ``queued``).
+        error: Optional error string to store alongside the initial status.
+        provenance: Custom provenance payload; falls back to environment-derived provenance.
+        external_id: Stable idempotency key. If provided, repeated calls with the same value
+            will return the existing run (server-side idempotency).
+        metadata: Arbitrary JSON metadata to persist with the run (e.g., callback_url).
     """
 
-
-
-
-        t0 = _time.perf_counter()
-        try:
-            return fn(*args, **kwargs)
-        finally:
-            dt = _time.perf_counter() - t0
-            _logger.info(f"Exiting {fn.__name__}() - took {dt:.3f}s")
-
-    return wrapper
+    automation_id = automation_id.strip()
+    if not automation_id:
+        raise ValueError("automation_id is required")
 
+    run_id: str | None = None
 
-
-# Unified FileRef helpers
-# ---------------------------------------------------------------------------
-
-
-class FileRef(TypedDict, total=False):
-    scope: str
-    id: str
-    name: str
-    path: str
-    run_path: str
-    object_name: str
-    mime: str
-    size: int
+    prepared_inputs = _prepare_automation_inputs(inputs) or {}
 
+    file_map = files or {}
+    run_id, upload_descriptors = _upload_automation_files(
+        run_id, file_map, api_request=_api_request
+    )
 
-
-
+    final_inputs = json.loads(json.dumps(prepared_inputs)) if prepared_inputs else {}
+    for key, descriptors in upload_descriptors.items():
+        if len(descriptors) == 1 and not _is_sequence(file_map.get(key)):
+            final_inputs[key] = descriptors[0]
+        else:
+            final_inputs[key] = descriptors
 
-
-
-
+    cleaned_status = status.strip() if isinstance(status, str) else ""
+    payload: dict[str, Any] = {
+        "automation_id": automation_id,
+        "inputs": json.dumps(final_inputs),
+        "status": cleaned_status or "queued",
+    }
+    if run_id:
+        payload["id"] = run_id
+    if error is not None:
+        payload["error"] = error
+    if external_id:
+        payload["external_id"] = external_id.strip()
+    if metadata is not None:
+        payload["metadata"] = _ensure_mapping(metadata, name="metadata")
+    payload["lm_provenance"] = _ensure_mapping(
+        provenance, name="provenance"
+    ) or _default_provenance(automation_id, run_id)
+
+    run = _api_request("POST", "automation-runs", json_body=payload)
+    if not isinstance(run, dict):
+        raise RuntimeError("unexpected response payload")
+    return run
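
A sketch of the file-upload and idempotency behavior documented above (the automation id, input keys, path, and callback URL are all illustrative):

    from lumera import sdk

    run = sdk.run_automation(
        "invoice_parser",
        inputs={"locale": "en-US"},
        files={"invoice_pdf": "/tmp/invoice-042.pdf"},  # uploaded, then injected as a file ref
        external_id="invoice-042",                      # a retry returns the same run
        metadata={"callback_url": "https://example.com/hooks/done"},
    )
    print(run["id"], run["status"])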
+
+
+def get_automation_run(
+    automation_id: str | None = None,
+    *,
+    run_id: str | None = None,
+    external_id: str | None = None,
+) -> dict[str, Any]:
+    """Fetch an automation run by id or by automation_id + external_id idempotency key.
+
+    Args:
+        automation_id: Automation id for external_id lookup.
+            Required when ``run_id`` is not provided.
+        run_id: Optional run id. When provided, takes precedence over external_id.
+        external_id: Optional idempotency key to look up the latest run for the automation.
+
+    Raises:
+        ValueError: If required identifiers are missing.
+        LumeraAPIError: If no matching run is found.
     """
 
-    if
-        return
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if run_id:
+        return _api_request("GET", f"automation-runs/{run_id}")
+
+    automation_id = automation_id.strip() if isinstance(automation_id, str) else ""
+    external_id = external_id.strip() if isinstance(external_id, str) else ""
+    if not automation_id:
+        raise ValueError("automation_id is required when run_id is not provided")
+    if not external_id:
+        raise ValueError("external_id is required when run_id is not provided")
+
+    resp = _api_request(
+        "GET",
+        "automation-runs",
+        params={
+            "automation_id": automation_id,
+            "external_id": external_id,
+            "limit": 1,
+        },
+    )
+    runs = resp.get("data") if isinstance(resp, dict) else None  # Backend returns "data" key
+    if runs and isinstance(runs, list) and runs and isinstance(runs[0], dict):
+        return runs[0]
+
+    url = _api_url("automation-runs")
+    raise _LumeraAPIError(404, "automation run not found", url=url, payload=None)
+
+
+def update_automation_run(
+    run_id: str,
+    *,
+    result: Mapping[str, Any] | None = None,
+    status: str | None = None,
+    error: str | None = None,
+    metadata: Mapping[str, Any] | None = None,
+) -> dict[str, Any]:
+    """Update an automation run with result, status, or other fields.
+
+    Args:
+        run_id: The run id to update. Required.
+        result: Optional result payload to store (max 20KB).
+        status: Optional status update.
+        error: Optional error string.
+        metadata: Optional metadata update.
+
+    Returns:
+        The updated automation run record.
     """
+    run_id = run_id.strip() if isinstance(run_id, str) else ""
+    if not run_id:
+        raise ValueError("run_id is required")
+
+    payload: dict[str, Any] = {}
+    if result is not None:
+        payload["result"] = _ensure_mapping(result, name="result")
+    if status is not None:
+        payload["status"] = status.strip()
+    if error is not None:
+        payload["error"] = error
+    if metadata is not None:
+        payload["metadata"] = _ensure_mapping(metadata, name="metadata")
+
+    if not payload:
+        raise ValueError("at least one field to update is required")
+
+    response = _api_request("PATCH", f"automation-runs/{run_id}", json_body=payload)
+    if not isinstance(response, dict):
+        raise RuntimeError("unexpected response payload")
+    return response
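
Together with run_automation, these two calls cover the worker side of a run's lifecycle. A sketch - note that only the default status "queued" is documented in this diff; "running", "done", and "failed" are assumed values here:

    from lumera import sdk

    run = sdk.get_automation_run("invoice_parser", external_id="invoice-042")
    sdk.update_automation_run(run["id"], status="running")
    try:
        sdk.update_automation_run(run["id"], result={"total": 1234.5}, status="done")
    except Exception as exc:
        sdk.update_automation_run(run["id"], status="failed", error=str(exc))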
+
+
+def create_record(
+    collection_id_or_name: str,
+    payload: Mapping[str, Any] | None = None,
+) -> dict[str, Any]:
+    """Create a record in the specified collection."""
+
+    return _record_mutation("POST", collection_id_or_name, payload, api_request=_api_request)
+
+
+def update_record(
+    collection_id_or_name: str,
+    record_id: str,
+    payload: Mapping[str, Any] | None = None,
+) -> dict[str, Any]:
+    """Update an existing record."""
+
+    return _record_mutation(
+        "PATCH",
+        collection_id_or_name,
+        payload,
+        record_id=record_id,
+        api_request=_api_request,
+    )
 
-    p = resolve_path(file_or_path)
-    return open(p, mode, **kwargs)
 
+def delete_record(collection_id_or_name: str, record_id: str) -> None:
+    """Delete a record from the specified collection."""
 
-
-
-
-
-) -> list[FileRef]:
-    """Convert a list of strings or partial dicts into FileRef-like dicts.
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    if not record_id:
+        raise ValueError("record_id is required")
 
-
-
-    """
+    path = f"collections/{collection_id_or_name}/records/{record_id}"
+    _api_request("DELETE", path)
 
-    out: list[FileRef] = []
-    for v in values:
-        if isinstance(v, str):
-            name = os.path.basename(v)
-            object_name = f"{scope}/{id}/{name}"
-            out.append(
-                {
-                    "scope": scope,
-                    "id": id,
-                    "name": name,
-                    "path": v,
-                    "object_name": object_name,
-                }
-            )
-        elif isinstance(v, dict):
-            # Fill minimal fields if missing
-            name = v.get("name") or os.path.basename(v.get("path") or v.get("run_path") or "")
-            path = v.get("path") or v.get("run_path") or ""
-            object_name = v.get("object_name") or f"{scope}/{id}/{name}"
-            out.append(
-                {
-                    "scope": v.get("scope", scope),
-                    "id": v.get("id", id),
-                    "name": name,
-                    "path": path,
-                    "object_name": object_name,
-                    **{k: v[k] for k in ("mime", "size") if k in v},
-                }
-            )
-        else:
-            raise TypeError("values must contain str or dict entries")
-    return out
 
+# =============================================================================
+# Bulk Record Operations
+# =============================================================================
 
-# ---------------------------------------------------------------------------
-# Document upload helper (unchanged apart from minor refactoring)
-# ---------------------------------------------------------------------------
 
+def bulk_delete_records(
+    collection_id_or_name: str,
+    record_ids: Sequence[str],
+    *,
+    transaction: bool = False,
+) -> dict[str, Any]:
+    """Bulk delete records by IDs.
 
-
-
+    Args:
+        collection_id_or_name: Collection name or ID
+        record_ids: List of record IDs to delete (max 1000)
+        transaction: If True, use all-or-nothing semantics (rollback on any failure)
 
-
-
+    Returns:
+        Result with succeeded/failed counts and any errors
     """
- [... removed lines from 0.4.6 not captured in this diff view ...]
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    if not record_ids:
+        raise ValueError("record_ids is required")
+
+    path = f"collections/{collection_id_or_name}/records/bulk/delete"
+    body: dict[str, Any] = {"ids": list(record_ids)}
+    if transaction:
+        body["transaction"] = True
+    result = _api_request("POST", path, json_body=body)
+    return result if isinstance(result, dict) else {}
+
+
+def bulk_update_records(
+    collection_id_or_name: str,
+    records: Sequence[Mapping[str, Any]],
+    *,
+    transaction: bool = False,
+) -> dict[str, Any]:
+    """Update multiple records with individual data per record.
+
+    Args:
+        collection_id_or_name: Collection name or ID
+        records: List of records to update (max 1000). Each record must have an 'id' field.
+        transaction: If True, use all-or-nothing semantics (rollback on any failure)
+
+    Returns:
+        Result with succeeded/failed counts
+    """
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    if not records:
+        raise ValueError("records is required")
+
+    path = f"collections/{collection_id_or_name}/records/bulk/update"
+    body: dict[str, Any] = {"records": [dict(r) for r in records]}
+    if transaction:
+        body["transaction"] = True
+    result = _api_request("POST", path, json_body=body)
+    return result if isinstance(result, dict) else {}
+
+
+def bulk_upsert_records(
+    collection_id_or_name: str,
+    records: Sequence[Mapping[str, Any]],
+    *,
+    transaction: bool = False,
+) -> dict[str, Any]:
+    """Upsert multiple records (create or update by ID).
+
+    Args:
+        collection_id_or_name: Collection name or ID
+        records: List of records (max 1000). Include 'id' field to update existing.
+        transaction: If True, use all-or-nothing semantics (rollback on any failure)
+
+    Returns:
+        Result with succeeded/failed counts and created record IDs
+    """
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    if not records:
+        raise ValueError("records is required")
+
+    path = f"collections/{collection_id_or_name}/records/bulk/upsert"
+    body: dict[str, Any] = {"records": [dict(r) for r in records]}
+    if transaction:
+        body["transaction"] = True
+    result = _api_request("POST", path, json_body=body)
+    return result if isinstance(result, dict) else {}
+
+
+def bulk_insert_records(
+    collection_id_or_name: str,
+    records: Sequence[Mapping[str, Any]],
+    *,
+    transaction: bool = False,
+) -> dict[str, Any]:
+    """Insert multiple new records.
+
+    Args:
+        collection_id_or_name: Collection name or ID
+        records: List of records to create (max 1000)
+        transaction: If True, use all-or-nothing semantics (rollback on any failure)
+
+    Returns:
+        Result with succeeded/failed counts and created record IDs
+    """
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+    if not records:
+        raise ValueError("records is required")
+
+    path = f"collections/{collection_id_or_name}/records/bulk/insert"
+    body: dict[str, Any] = {"records": [dict(r) for r in records]}
+    if transaction:
+        body["transaction"] = True
+    result = _api_request("POST", path, json_body=body)
+    return result if isinstance(result, dict) else {}
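
All four bulk endpoints share one shape - up to 1000 records per call plus an optional all-or-nothing transaction flag - so a single sketch covers them. The result keys follow the "succeeded/failed counts" wording in the docstrings; record contents here are illustrative:

    from lumera import sdk

    result = sdk.bulk_upsert_records(
        "deposits",
        [
            {"id": "rec_001", "status": "cleared"},  # has an id -> updates
            {"status": "pending", "amount": 42},     # no id -> creates
        ],
        transaction=True,  # roll everything back if any row fails
    )
    print(result.get("succeeded"), result.get("failed"))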
+
+
+def replay_hook(
+    collection_id_or_name: str,
+    event: str,
+    record_id: str,
+    *,
+    hook_ids: Sequence[str] | None = None,
+    original_event_id: str | None = None,
+) -> list[HookReplayResult]:
+    """Trigger PocketBase hooks for a record and return execution results."""
+
+    collection = collection_id_or_name.strip()
+    hook_event = event.strip()
+    record = record_id.strip()
+    if not collection:
+        raise ValueError("collection_id_or_name is required")
+    if not hook_event:
+        raise ValueError("event is required")
+    if not record:
+        raise ValueError("record_id is required")
+
+    payload: dict[str, Any] = {
+        "collection": collection,
+        "event": hook_event,
+        "record_id": record,
     }
 
+    if hook_ids:
+        trimmed = [value.strip() for value in hook_ids if isinstance(value, str) and value.strip()]
+        if trimmed:
+            payload["hook_ids"] = trimmed
+
+    if original_event_id and original_event_id.strip():
+        payload["original_event_id"] = original_event_id.strip()
+
+    response = _api_request("POST", "hooks/replay", json_body=payload)
+    if not isinstance(response, Mapping):
+        return []
+
+    raw_results = response.get("results")
+    if not isinstance(raw_results, list):
+        return []
+
+    results: list[HookReplayResult] = []
+    for item in raw_results:
+        if not isinstance(item, Mapping):
+            continue
+        result: HookReplayResult = {}
+        for key in (
+            "hook_id",
+            "hook_name",
+            "status",
+            "error",
+            "event_log_id",
+            "replay_id",
+        ):
+            value = item.get(key)
+            if isinstance(value, str):
+                result[key] = value
+        results.append(result)
+    return results
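
A sketch of replaying hooks after fixing a handler and inspecting the per-hook outcomes. The event name "record.updated" and the record id are assumptions for illustration; this diff does not enumerate valid event strings:

    from lumera import sdk

    for outcome in sdk.replay_hook("deposits", "record.updated", "rec_001"):
        print(outcome.get("hook_name"), outcome.get("status"), outcome.get("error"))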
+
+
+def claim_locks(
+    *,
+    job_type: str,
+    collection: str,
+    record_ids: Sequence[str],
+    job_id: str | None = None,
+    claimed_by: str | None = None,
+    ttl_seconds: int | None = None,
+    provenance: Mapping[str, Any] | None = None,
+) -> Mapping[str, Any]:
+    """Claim one or more records (or logical resources) in ``lm_locks``.
+
+    Args:
+        job_type: Logical workflow name (e.g. ``exports.ar``). Required.
+        collection: Namespace for the resource family. This is usually a
+            Lumera collection but it can be any identifier (e.g.
+            ``"cron:billing"``) as long as the combination of
+            ``collection`` + ``record_id`` is stable.
+        record_ids: Iterable of record/resource identifiers to lease. Each
+            entry is trimmed and empty values are ignored.
+        job_id: Optional run identifier. When supplied, releases and
+            reclaims can target the locks owned by this specific run without
+            disturbing other workers using the same ``job_type``.
+        claimed_by: Optional worker identifier recorded in the lock row.
+        ttl_seconds: Optional lease duration; defaults to the server TTL
+            (15 minutes) when omitted or non-positive.
+        provenance: Optional structured payload describing the actor/run. If
+            omitted we fall back to :func:`_default_provenance` using the
+            derived ``claimed_by`` and ``job_id`` inputs.
+
+    Returns:
+        The JSON body returned by ``/locks/claim`` describing the claim
+        outcome (typically the ``claimed``/``skipped`` ids and TTL).
+    """
 
- [... removed lines from 0.4.6 not captured in this diff view ...]
+    jt = job_type.strip()
+    coll = collection.strip()
+    if not jt:
+        raise ValueError("job_type is required")
+    if not coll:
+        raise ValueError("collection is required")
+    trimmed = [value.strip() for value in record_ids if isinstance(value, str) and value.strip()]
+    if not trimmed:
+        raise ValueError("record_ids must include at least one value")
+    body: dict[str, Any] = {"job_type": jt, "collection": coll, "record_ids": trimmed}
+    job_ref = job_id.strip() if isinstance(job_id, str) else ""
+    if job_ref:
+        body["job_id"] = job_ref
+
+    claimed_by_ref = claimed_by.strip() if isinstance(claimed_by, str) else ""
+    if claimed_by_ref:
+        body["claimed_by"] = claimed_by_ref
+    if ttl_seconds and ttl_seconds > 0:
+        body["ttl_seconds"] = ttl_seconds
+
+    if provenance is not None:
+        body["provenance"] = _ensure_mapping(provenance, name="provenance")
+    else:
+        body["provenance"] = _default_provenance(claimed_by_ref, job_ref or None)
+
+    response = _api_request("POST", "locks/claim", json_body=body)
+    if isinstance(response, MutableMapping):
+        return response
+    raise RuntimeError("unexpected response payload")
+
+
+def release_locks(
+    *,
+    job_type: str,
+    record_ids: Sequence[str] | None = None,
+    job_id: str | None = None,
+    collection: str | None = None,
+) -> int:
+    """Release previously claimed locks.
+
+    Provide whatever context you used when claiming (``job_type`` plus
+    optional ``job_id``/``collection``/``record_ids``) to target a subset of
+    locks for deletion. When only ``job_type`` is specified, every lock of
+    that type is released for the company; add finer filters to avoid
+    dropping other workers' leases.
+    """
+    jt = job_type.strip()
+    if not jt:
+        raise ValueError("job_type is required")
+    body: dict[str, Any] = {"job_type": jt}
+    if job_id and job_id.strip():
+        body["job_id"] = job_id.strip()
+    if collection and collection.strip():
+        body["collection"] = collection.strip()
+    if record_ids:
+        trimmed = [
+            value.strip() for value in record_ids if isinstance(value, str) and value.strip()
+        ]
+        if trimmed:
+            body["record_ids"] = trimmed
+
+    response = _api_request("POST", "locks/release", json_body=body)
+    if isinstance(response, MutableMapping):
+        released = response.get("released")
+        if isinstance(released, int):
+            return released
+    raise RuntimeError("unexpected response payload")
+
+
+def reclaim_locks(
+    *,
+    job_type: str,
+    collection: str | None = None,
+    ttl_seconds: int | None = None,
+) -> int:
+    """Delete stale locks whose leases have expired.
+
+    Typically run periodically (or before a new batch starts) to evict
+    locks older than ``ttl_seconds``. If ``collection`` is supplied only
+    that namespace is scanned, otherwise every lock for ``job_type`` is
+    considered.
+    """
+    jt = job_type.strip()
+    if not jt:
+        raise ValueError("job_type is required")
+    body: dict[str, Any] = {"job_type": jt}
+    if collection and collection.strip():
+        body["collection"] = collection.strip()
+    if ttl_seconds and ttl_seconds > 0:
+        body["ttl_seconds"] = ttl_seconds
+    response = _api_request("POST", "locks/reclaim", json_body=body)
+    if isinstance(response, MutableMapping):
+        reclaimed = response.get("reclaimed")
+        if isinstance(reclaimed, int):
+            return reclaimed
+    raise RuntimeError("unexpected response payload")
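
The three lock helpers form a lease lifecycle: reclaim stale leases, claim a batch, do the work, release. A sketch (job type, ids, and the process() worker function are illustrative placeholders):

    from lumera import sdk

    sdk.reclaim_locks(job_type="exports.ar")  # evict leases past the TTL

    claim = sdk.claim_locks(
        job_type="exports.ar",
        collection="deposits",
        record_ids=["rec_001", "rec_002"],
        job_id="run-2024-06-01",
        claimed_by="worker-7",
        ttl_seconds=300,
    )
    try:
        process(claim.get("claimed", []))  # hypothetical worker function
    finally:
        sdk.release_locks(job_type="exports.ar", job_id="run-2024-06-01")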
+
+
+def upsert_record(
+    collection_id_or_name: str,
+    payload: Mapping[str, Any] | None = None,
+) -> dict[str, Any]:
+    """Create or update a record identified by external_id."""
+
+    if not collection_id_or_name:
+        raise ValueError("collection_id_or_name is required")
+
+    data = _ensure_mapping(payload, name="payload")
+    external_id = str(data.get("external_id", "")).strip()
+    if not external_id:
+        raise ValueError("payload.external_id is required for upsert")
+    data["external_id"] = external_id
+
+    path = f"collections/{collection_id_or_name}/records/upsert"
+    response = _api_request("POST", path, json_body=data)
+
+    if not isinstance(response, dict):
+        raise RuntimeError("unexpected response payload")
+    return response
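
upsert_record keys on payload["external_id"], which makes ingestion from external systems replay-safe. A sketch (the external id and field values are illustrative):

    from lumera import sdk

    # Running this twice yields one record, updated in place the second time.
    sdk.upsert_record(
        "deposits",
        {"external_id": "bank-txn-0001", "status": "pending", "amount": 99.0},
    )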
 
 
 def save_to_lumera(file_path: str) -> dict:
@@ -459,7 +1006,7 @@ def save_to_lumera(file_path: str) -> dict:
 
     run_id = os.getenv("LUMERA_RUN_ID", "").strip()
     if run_id:
-        return
+        return _upload_automation_run_file(file_path, run_id)
 
     session_id = os.getenv("LUMERA_SESSION_ID", "").strip()
     if session_id: