lumera 0.9.0__tar.gz → 0.9.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lumera-0.9.0 → lumera-0.9.2}/PKG-INFO +1 -1
- lumera-0.9.2/lumera/integrations/__init__.py +34 -0
- lumera-0.9.2/lumera/integrations/google.py +338 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/pb.py +63 -11
- {lumera-0.9.0 → lumera-0.9.2}/lumera/sdk.py +28 -4
- {lumera-0.9.0 → lumera-0.9.2}/lumera.egg-info/PKG-INFO +1 -1
- {lumera-0.9.0 → lumera-0.9.2}/lumera.egg-info/SOURCES.txt +2 -0
- {lumera-0.9.0 → lumera-0.9.2}/pyproject.toml +4 -3
- {lumera-0.9.0 → lumera-0.9.2}/lumera/__init__.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/_utils.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/automations.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/exceptions.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/google.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/llm.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/locks.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/storage.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera/webhooks.py +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera.egg-info/dependency_links.txt +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera.egg-info/requires.txt +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/lumera.egg-info/top_level.txt +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/setup.cfg +0 -0
- {lumera-0.9.0 → lumera-0.9.2}/tests/test_sdk.py +0 -0
lumera-0.9.2/lumera/integrations/__init__.py

@@ -0,0 +1,34 @@
+"""
+Lumera SDK Integrations
+
+Third-party service integrations with Lumera credential management.
+
+Each integration module provides:
+- A `get_*_client()` or `get_*_service()` function that returns an authenticated client
+- Optional helper functions for common Lumera patterns
+
+Example:
+    from lumera.integrations import google, get_access_token
+
+    # Google Sheets with Lumera-managed OAuth
+    sheets = google.get_sheets_service()
+    data = sheets.spreadsheets().values().get(...)
+
+    # Google Drive
+    drive = google.get_drive_service()
+    files = drive.files().list().execute()
+
+    # Get raw access token for any provider
+    token = get_access_token("slack")
+
+Available integrations:
+- `google` - Google APIs (Sheets, Drive)
+
+Utilities:
+- `get_access_token(provider)` - Get OAuth token for any Lumera-connected provider
+"""
+
+from .._utils import get_access_token
+from . import google
+
+__all__ = ["get_access_token", "google"]
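A minimal usage sketch of the new integrations package, assuming a Lumera runtime with LUMERA_TOKEN set and Google/Slack accounts connected; the spreadsheet ID and range below are placeholders, not values from the diff:

    from lumera.integrations import google, get_access_token

    sheets = google.get_sheets_service()  # OAuth token is fetched from Lumera internally
    resp = (
        sheets.spreadsheets()
        .values()
        .get(spreadsheetId="YOUR_SPREADSHEET_ID", range="Sheet1!A1:B5")
        .execute()
    )
    print(resp.get("values", []))

    # Raw access token for any other Lumera-connected provider
    slack_token = get_access_token("slack")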
lumera-0.9.2/lumera/integrations/google.py

@@ -0,0 +1,338 @@
+"""
+Google API Integration
+
+Provides authenticated Google API clients using Lumera-managed OAuth credentials.
+
+Example:
+    from lumera.integrations import google
+
+    # Get authenticated Sheets service
+    sheets = google.get_sheets_service()
+    data = sheets.spreadsheets().values().get(
+        spreadsheetId="...",
+        range="Sheet1!A1:D10"
+    ).execute()
+
+    # Get authenticated Drive service
+    drive = google.get_drive_service()
+    files = drive.files().list().execute()
+
+    # Share credentials between services
+    creds = google.get_credentials()
+    sheets = google.get_sheets_service(credentials=creds)
+    drive = google.get_drive_service(credentials=creds)
+"""
+
+import io
+import logging
+import os
+import re
+from typing import TYPE_CHECKING, Optional, Tuple
+
+# When type checking we want access to the concrete ``Resource`` class that
+# ``googleapiclient.discovery.build`` returns. Importing it unconditionally
+# would require ``googleapiclient`` to be available in every execution
+# environment – something we cannot guarantee. By guarding the import with
+# ``TYPE_CHECKING`` we give static analysers (ruff, mypy, etc.) the
+# information they need without introducing a hard runtime dependency.
+# During static analysis we want to import ``Resource`` so that it is a known
+# name for type checkers, but we don't require this import at runtime. Guard
+# it with ``TYPE_CHECKING`` to avoid hard dependencies.
+if TYPE_CHECKING:  # pragma: no cover
+    from googleapiclient.discovery import Resource  # noqa: F401
+
+# Always ensure that the symbol ``Resource`` exists at runtime to placate static
+# analysers like ruff (F821) that inspect the AST without executing the code.
+try:  # pragma: no cover – optional runtime import
+    from googleapiclient.discovery import Resource  # type: ignore
+except ModuleNotFoundError:  # pragma: no cover – provide a stub fallback
+
+    class Resource:  # noqa: D401
+        """Stub replacement for ``googleapiclient.discovery.Resource``."""
+
+        pass
+
+
+from google.oauth2.credentials import Credentials
+from googleapiclient.discovery import build
+from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
+
+from .._utils import get_access_token
+
+# Module logger
+logger = logging.getLogger(__name__)
+
+# =====================================================================================
+# Configuration
+# =====================================================================================
+
+MIME_GOOGLE_SHEET = "application/vnd.google-apps.spreadsheet"
+MIME_EXCEL = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
+
+# =====================================================================================
+# Authentication & Service Initialization
+# =====================================================================================
+
+
+def get_credentials() -> Credentials:
+    """
+    Retrieves a Google OAuth token from Lumera and
+    converts it into a Credentials object usable by googleapiclient.
+
+    Returns:
+        google.oauth2.credentials.Credentials: Authenticated credentials object.
+
+    Raises:
+        RuntimeError: If LUMERA_TOKEN is not set or token fetch fails.
+    """
+    logger.debug("Fetching Google access token from Lumera…")
+    access_token = get_access_token("google")
+    logger.debug("Access token received.")
+    creds = Credentials(token=access_token)
+    logger.debug("Credentials object created.")
+    return creds
+
+
+# Backward compatibility alias
+get_google_credentials = get_credentials
+
+
+def get_sheets_service(credentials: Optional[Credentials] = None) -> "Resource":
+    """
+    Initializes and returns the Google Sheets API service.
+
+    If no credentials are provided, this function will automatically fetch a
+    Google access token from Lumera and construct the appropriate
+    ``google.oauth2.credentials.Credentials`` instance.
+
+    Args:
+        credentials: Optional pre-fetched credentials. If None, fetches from Lumera.
+
+    Returns:
+        Google Sheets API service resource.
+    """
+    if credentials is None:
+        logger.info("No credentials provided; fetching Google token…")
+        credentials = get_credentials()
+    logger.info("Google Sheets API service being initialized…")
+    return build("sheets", "v4", credentials=credentials)
+
+
+def get_drive_service(credentials: Optional[Credentials] = None) -> "Resource":
+    """
+    Initializes and returns the Google Drive API service.
+
+    If no credentials are provided, this function will automatically fetch a
+    Google access token from Lumera and construct the appropriate
+    ``google.oauth2.credentials.Credentials`` instance.
+
+    Args:
+        credentials: Optional pre-fetched credentials. If None, fetches from Lumera.
+
+    Returns:
+        Google Drive API service resource.
+    """
+    if credentials is None:
+        logger.info("No credentials provided; fetching Google token…")
+        credentials = get_credentials()
+    logger.info("Google Drive API service being initialized…")
+    return build("drive", "v3", credentials=credentials)
+
+
+# =====================================================================================
+# Google Sheets & Drive Utility Functions
+# =====================================================================================
+
+
+def get_spreadsheet_and_sheet_id(
+    service: "Resource", spreadsheet_url: str, tab_name: str
+) -> Tuple[Optional[str], Optional[int]]:
+    """
+    Given a Google Sheets URL and a tab (sheet) name, returns a tuple:
+    (spreadsheet_id, sheet_id)
+    """
+    spreadsheet_id = _extract_spreadsheet_id(spreadsheet_url)
+    if not spreadsheet_id:
+        return None, None
+
+    sheet_id = _get_sheet_id_from_name(service, spreadsheet_id, tab_name)
+    return spreadsheet_id, sheet_id
+
+
+def _extract_spreadsheet_id(spreadsheet_url: str) -> Optional[str]:
+    """Extracts the spreadsheet ID from a Google Sheets URL."""
+    logger.debug(f"Extracting spreadsheet ID from URL: {spreadsheet_url}")
+    pattern = r"/d/([a-zA-Z0-9-_]+)"
+    match = re.search(pattern, spreadsheet_url)
+    if match:
+        spreadsheet_id = match.group(1)
+        logger.debug(f"Spreadsheet ID extracted: {spreadsheet_id}")
+        return spreadsheet_id
+    logger.warning("Could not extract Spreadsheet ID.")
+    return None
+
+
+def _get_sheet_id_from_name(
+    service: "Resource", spreadsheet_id: str, tab_name: str
+) -> Optional[int]:
+    """Uses the Google Sheets API to fetch the sheet ID corresponding to 'tab_name'."""
+    logger.debug(f"Requesting sheet metadata for spreadsheet ID: {spreadsheet_id}")
+    response = (
+        service.spreadsheets()
+        .get(spreadsheetId=spreadsheet_id, fields="sheets.properties")
+        .execute()
+    )
+    logger.debug("Metadata received. Searching for tab…")
+
+    for sheet in response.get("sheets", []):
+        properties = sheet.get("properties", {})
+        if properties.get("title") == tab_name:
+            sheet_id = properties.get("sheetId")
+            logger.debug(f"Match found for tab '{tab_name}'. Sheet ID is {sheet_id}")
+            return sheet_id
+    logger.warning(f"No sheet found with tab name '{tab_name}'.")
+    return None
+
+
+def sheet_name_from_gid(service: "Resource", spreadsheet_id: str, gid: int) -> Optional[str]:
+    """Resolve a sheet's human-readable name (title) from its gid."""
+    logger.debug(f"Resolving sheet name from gid={gid} …")
+    meta = (
+        service.spreadsheets()
+        .get(
+            spreadsheetId=spreadsheet_id,
+            includeGridData=False,
+            fields="sheets(properties(sheetId,title))",
+        )
+        .execute()
+    )
+
+    for sheet in meta.get("sheets", []):
+        props = sheet.get("properties", {})
+        if props.get("sheetId") == gid:
+            title = props["title"]
+            logger.debug(f"Sheet gid={gid} corresponds to sheet name='{title}'.")
+            return title
+    logger.warning(f"No sheet found with gid={gid}")
+    return None
+
+
+def read_cell(service: "Resource", spreadsheet_id: str, range_a1: str) -> Optional[str]:
+    """Fetch a single cell value (as string); returns None if empty."""
+    logger.debug(f"Reading cell '{range_a1}' …")
+    resp = (
+        service.spreadsheets()
+        .values()
+        .get(spreadsheetId=spreadsheet_id, range=range_a1, majorDimension="ROWS")
+        .execute()
+    )
+
+    values = resp.get("values", [])
+    return values[0][0] if values and values[0] else None
+
+
+# NOTE: The function performs I/O side-effects and does not return a value.
+def download_file_direct(drive_service: "Resource", file_id: str, dest_path: str) -> None:
+    """
+    Downloads a file directly from Google Drive using files().get_media
+    without any format conversion.
+    """
+    logger.info(f"Initiating direct download for file ID: {file_id}")
+
+    request = drive_service.files().get_media(fileId=file_id)
+    fh = io.BytesIO()
+    downloader = MediaIoBaseDownload(fh, request)
+
+    done = False
+    while not done:
+        status, done = downloader.next_chunk()
+        if status:
+            logger.debug(f"Download progress: {int(status.progress() * 100)}%")
+
+    with open(dest_path, "wb") as f:
+        f.write(fh.getvalue())
+    logger.info(f"File saved to: {dest_path}")
+
+
+def upload_excel_as_google_sheet(
+    drive_service: "Resource", local_path: str, desired_name: str
+) -> Tuple[Optional[str], Optional[str]]:
+    """
+    Uploads a local XLSX file to Google Drive, converting it to Google Sheets format.
+    Returns the file ID and web link.
+    """
+    logger.info(f"Preparing to upload '{local_path}' as Google Sheet named '{desired_name}'")
+
+    if not os.path.isfile(local_path):
+        logger.error(f"Local file not found at '{local_path}'. Aborting.")
+        return None, None
+
+    media = MediaFileUpload(local_path, mimetype=MIME_EXCEL, resumable=True)
+    file_metadata = {"name": desired_name, "mimeType": MIME_GOOGLE_SHEET}
+
+    logger.info("Initiating Google Drive upload & conversion…")
+    request = drive_service.files().create(
+        body=file_metadata, media_body=media, fields="id, webViewLink"
+    )
+
+    response = None
+    while response is None:
+        status, response = request.next_chunk()
+        if status:
+            logger.debug(f"Upload progress: {int(status.progress() * 100)}%")
+
+    file_id = response.get("id")
+    web_view_link = response.get("webViewLink")
+    logger.info(f"Upload completed. File ID: {file_id}")
+    return file_id, web_view_link
+
+
+# Remove rows from a sheet. All parameters are 1-based (both *start_row* and
+# *end_row* are inclusive) mirroring the UI behaviour in Google Sheets.
+def delete_rows_api_call(
+    service: "Resource",
+    spreadsheet_id: str,
+    sheet_gid: int,
+    start_row: int,
+    end_row: int,
+) -> None:
+    """Executes the API call to delete rows."""
+    logger.info(f"Deleting rows {start_row}-{end_row} (1-based inclusive)…")
+
+    body = {
+        "requests": [
+            {
+                "deleteDimension": {
+                    "range": {
+                        "sheetId": sheet_gid,
+                        "dimension": "ROWS",
+                        "startIndex": start_row - 1,  # 0-based
+                        "endIndex": end_row,  # end-exclusive
+                    }
+                }
+            }
+        ]
+    }
+    service.spreadsheets().batchUpdate(spreadsheetId=spreadsheet_id, body=body).execute()
+    logger.info("Rows deleted.")
+
+
+__all__ = [
+    # Authentication
+    "get_credentials",
+    "get_google_credentials",  # backward compat alias
+    # Services
+    "get_sheets_service",
+    "get_drive_service",
+    # Sheets helpers
+    "get_spreadsheet_and_sheet_id",
+    "sheet_name_from_gid",
+    "read_cell",
+    "delete_rows_api_call",
+    # Drive helpers
+    "download_file_direct",
+    "upload_excel_as_google_sheet",
+    # Constants
+    "MIME_GOOGLE_SHEET",
+    "MIME_EXCEL",
+]
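The helpers above compose: one Lumera-issued credential can back both the Sheets and Drive services, and the URL/tab utilities feed the row-deletion call. A minimal sketch under that assumption; the URL, tab name, and row range are illustrative placeholders:

    from lumera.integrations import google

    creds = google.get_credentials()                       # one Lumera-managed token
    sheets = google.get_sheets_service(credentials=creds)  # reused by both services
    drive = google.get_drive_service(credentials=creds)

    url = "https://docs.google.com/spreadsheets/d/PLACEHOLDER_ID/edit"
    ss_id, gid = google.get_spreadsheet_and_sheet_id(sheets, url, "Sheet1")
    if ss_id is not None and gid is not None:
        print(google.read_cell(sheets, ss_id, "Sheet1!A1"))
        # Delete rows 2-4 of that tab (1-based, inclusive, per delete_rows_api_call)
        google.delete_rows_api_call(sheets, ss_id, gid, start_row=2, end_row=4)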
{lumera-0.9.0 → lumera-0.9.2}/lumera/pb.py

@@ -318,31 +318,44 @@ def delete(collection: str, record_id: str) -> None:
 # =============================================================================
 
 
-def bulk_delete(
+def bulk_delete(
+    collection: str,
+    record_ids: Sequence[str],
+    *,
+    transaction: bool = False,
+) -> dict[str, Any]:
     """Delete multiple records by ID.
 
     Args:
         collection: Collection name or ID
         record_ids: List of record IDs to delete (max 1000)
+        transaction: If True, use all-or-nothing semantics (rollback on any failure).
+            If False (default), partial success is allowed.
 
     Returns:
         Result with succeeded/failed counts and any errors:
         {
             "succeeded": 10,
             "failed": 0,
-            "errors": []
+            "errors": [],
+            "rolled_back": false  # only present if transaction=True and failed
         }
 
     Example:
         >>> result = pb.bulk_delete("deposits", ["id1", "id2", "id3"])
         >>> print(f"Deleted {result['succeeded']} records")
+
+        >>> # Use transaction mode for all-or-nothing
+        >>> result = pb.bulk_delete("deposits", ids, transaction=True)
     """
-    return _bulk_delete_records(collection, record_ids)
+    return _bulk_delete_records(collection, record_ids, transaction=transaction)
 
 
 def bulk_update(
     collection: str,
     records: Sequence[dict[str, Any]],
+    *,
+    transaction: bool = False,
 ) -> dict[str, Any]:
     """Update multiple records with individual data per record.
 

@@ -352,9 +365,17 @@ def bulk_update(
     Args:
         collection: Collection name or ID
         records: List of records to update (max 1000). Each must have 'id' field.
+        transaction: If True, use all-or-nothing semantics (rollback on any failure).
+            If False (default), partial success is allowed.
 
     Returns:
-        Result with succeeded/failed counts
+        Result with succeeded/failed counts:
+        {
+            "succeeded": 2,
+            "failed": 0,
+            "errors": [],
+            "rolled_back": false  # only present if transaction=True and failed
+        }
 
     Example:
         >>> result = pb.bulk_update("deposits", [

@@ -362,11 +383,19 @@ def bulk_update(
         ...     {"id": "rec2", "status": "rejected", "amount": 200},
         ... ])
         >>> print(f"Updated {result['succeeded']} records")
+
+        >>> # Use transaction mode for all-or-nothing
+        >>> result = pb.bulk_update("deposits", records, transaction=True)
     """
-    return _bulk_update_records(collection, records)
+    return _bulk_update_records(collection, records, transaction=transaction)
 
 
-def bulk_upsert(
+def bulk_upsert(
+    collection: str,
+    records: Sequence[dict[str, Any]],
+    *,
+    transaction: bool = False,
+) -> dict[str, Any]:
     """Create or update multiple records by ID.
 
     Each record can include an "id" field. Records with matching IDs will be

@@ -375,25 +404,44 @@ def bulk_upsert(collection: str, records: Sequence[dict[str, Any]]) -> dict[str,
     Args:
         collection: Collection name or ID
         records: List of records (max 1000)
+        transaction: If True, use all-or-nothing semantics (rollback on any failure).
+            If False (default), partial success is allowed.
 
     Returns:
-        Result with succeeded/failed counts and created record IDs
+        Result with succeeded/failed counts and created record IDs:
+        {
+            "succeeded": 2,
+            "failed": 0,
+            "errors": [],
+            "records": [{"id": "..."}, ...],
+            "rolled_back": false  # only present if transaction=True and failed
+        }
 
     Example:
         >>> result = pb.bulk_upsert("deposits", [
         ...     {"id": "existing_id", "amount": 100},
         ...     {"amount": 200},  # creates new record
         ... ])
+
+        >>> # Use transaction mode for all-or-nothing
+        >>> result = pb.bulk_upsert("deposits", records, transaction=True)
     """
-    return _bulk_upsert_records(collection, records)
+    return _bulk_upsert_records(collection, records, transaction=transaction)
 
 
-def bulk_insert(
+def bulk_insert(
+    collection: str,
+    records: Sequence[dict[str, Any]],
+    *,
+    transaction: bool = False,
+) -> dict[str, Any]:
     """Insert multiple new records.
 
     Args:
         collection: Collection name or ID
         records: List of records to create (max 1000)
+        transaction: If True, use all-or-nothing semantics (rollback on any failure).
+            If False (default), partial success is allowed.
 
     Returns:
         Result with succeeded/failed counts and created record IDs:

@@ -401,7 +449,8 @@ def bulk_insert(collection: str, records: Sequence[dict[str, Any]]) -> dict[str,
             "succeeded": 2,
             "failed": 0,
             "errors": [],
-            "records": [{"id": "..."}, {"id": "..."}]
+            "records": [{"id": "..."}, {"id": "..."}],
+            "rolled_back": false  # only present if transaction=True and failed
         }
 
     Example:

@@ -411,8 +460,11 @@ def bulk_insert(collection: str, records: Sequence[dict[str, Any]]) -> dict[str,
         ... ])
         >>> for rec in result["records"]:
         ...     print(f"Created: {rec['id']}")
+
+        >>> # Use transaction mode for all-or-nothing
+        >>> result = pb.bulk_insert("deposits", records, transaction=True)
     """
-    return _bulk_insert_records(collection, records)
+    return _bulk_insert_records(collection, records, transaction=transaction)
 
 
 def iter_all(
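In short, every pb bulk helper gains a keyword-only transaction flag that switches from partial-success to all-or-nothing semantics. A small sketch of the call pattern, following the docstrings above (the "deposits" collection comes from those docstrings; the record contents are illustrative):

    from lumera import pb

    records = [
        {"id": "rec1", "status": "approved"},
        {"id": "rec2", "status": "rejected"},
    ]

    # Default behaviour: partial success is allowed
    result = pb.bulk_update("deposits", records)
    print(result["succeeded"], result["failed"], result["errors"])

    # All-or-nothing: any failure rolls back the whole batch
    result = pb.bulk_update("deposits", records, transaction=True)
    if result["failed"] and result.get("rolled_back"):
        print("Batch rolled back; nothing was written:", result["errors"])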
{lumera-0.9.0 → lumera-0.9.2}/lumera/sdk.py

@@ -591,12 +591,15 @@ def delete_record(collection_id_or_name: str, record_id: str) -> None:
 def bulk_delete_records(
     collection_id_or_name: str,
     record_ids: Sequence[str],
+    *,
+    transaction: bool = False,
 ) -> dict[str, Any]:
     """Bulk delete records by IDs.
 
     Args:
         collection_id_or_name: Collection name or ID
         record_ids: List of record IDs to delete (max 1000)
+        transaction: If True, use all-or-nothing semantics (rollback on any failure)
 
     Returns:
         Result with succeeded/failed counts and any errors

@@ -607,19 +610,25 @@ def bulk_delete_records(
         raise ValueError("record_ids is required")
 
     path = f"collections/{collection_id_or_name}/records/bulk/delete"
-
+    body: dict[str, Any] = {"ids": list(record_ids)}
+    if transaction:
+        body["transaction"] = True
+    result = _api_request("POST", path, json_body=body)
     return result if isinstance(result, dict) else {}
 
 
 def bulk_update_records(
     collection_id_or_name: str,
     records: Sequence[Mapping[str, Any]],
+    *,
+    transaction: bool = False,
 ) -> dict[str, Any]:
     """Update multiple records with individual data per record.
 
     Args:
         collection_id_or_name: Collection name or ID
         records: List of records to update (max 1000). Each record must have an 'id' field.
+        transaction: If True, use all-or-nothing semantics (rollback on any failure)
 
     Returns:
         Result with succeeded/failed counts

@@ -630,19 +639,25 @@ def bulk_update_records(
         raise ValueError("records is required")
 
     path = f"collections/{collection_id_or_name}/records/bulk/update"
-
+    body: dict[str, Any] = {"records": [dict(r) for r in records]}
+    if transaction:
+        body["transaction"] = True
+    result = _api_request("POST", path, json_body=body)
     return result if isinstance(result, dict) else {}
 
 
 def bulk_upsert_records(
     collection_id_or_name: str,
     records: Sequence[Mapping[str, Any]],
+    *,
+    transaction: bool = False,
 ) -> dict[str, Any]:
     """Upsert multiple records (create or update by ID).
 
     Args:
         collection_id_or_name: Collection name or ID
         records: List of records (max 1000). Include 'id' field to update existing.
+        transaction: If True, use all-or-nothing semantics (rollback on any failure)
 
     Returns:
         Result with succeeded/failed counts and created record IDs

@@ -653,19 +668,25 @@ def bulk_upsert_records(
         raise ValueError("records is required")
 
     path = f"collections/{collection_id_or_name}/records/bulk/upsert"
-
+    body: dict[str, Any] = {"records": [dict(r) for r in records]}
+    if transaction:
+        body["transaction"] = True
+    result = _api_request("POST", path, json_body=body)
     return result if isinstance(result, dict) else {}
 
 
 def bulk_insert_records(
     collection_id_or_name: str,
     records: Sequence[Mapping[str, Any]],
+    *,
+    transaction: bool = False,
 ) -> dict[str, Any]:
     """Insert multiple new records.
 
     Args:
         collection_id_or_name: Collection name or ID
         records: List of records to create (max 1000)
+        transaction: If True, use all-or-nothing semantics (rollback on any failure)
 
     Returns:
         Result with succeeded/failed counts and created record IDs

@@ -676,7 +697,10 @@ def bulk_insert_records(
         raise ValueError("records is required")
 
     path = f"collections/{collection_id_or_name}/records/bulk/insert"
-
+    body: dict[str, Any] = {"records": [dict(r) for r in records]}
+    if transaction:
+        body["transaction"] = True
+    result = _api_request("POST", path, json_body=body)
     return result if isinstance(result, dict) else {}
 
 
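At the transport level the change is small: the sdk-level helpers now build the JSON body explicitly and attach the transaction key only when requested. A sketch of the resulting payloads (endpoint paths and body keys as in the diff above; variable names and record values are illustrative):

    # bulk/delete body; "transaction" appears only when transaction=True was passed
    delete_body = {"ids": ["id1", "id2"], "transaction": True}

    # bulk/update, bulk/upsert and bulk/insert share one body shape
    update_body = {
        "records": [{"id": "rec1", "status": "approved"}],
        "transaction": True,
    }
    # Each is POSTed to collections/<collection>/records/bulk/<op> via _api_request.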
{lumera-0.9.0 → lumera-0.9.2}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "lumera"
-version = "0.9.0"
+version = "0.9.2"
 description = "SDK for building on Lumera platform"
 requires-python = ">=3.11"
 dependencies = [

@@ -45,8 +45,9 @@ full = [
     "requests==2.32.4",
 ]
 
-[tool.setuptools]
-
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["lumera*"]
 
 [tool.pytest.ini_options]
 minversion = "8.0"
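The packaging change switches to automatic package discovery, presumably so the new lumera.integrations subpackage ships with the distribution. A rough Python equivalent of what the new find directive selects (the printed list is an expected, illustrative result):

    from setuptools import find_packages

    # Mirrors [tool.setuptools.packages.find] with where = ["."] and include = ["lumera*"]
    print(find_packages(where=".", include=["lumera*"]))
    # e.g. ['lumera', 'lumera.integrations']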
All remaining files listed above are unchanged between 0.9.0 and 0.9.2.