lumera 0.9.1__tar.gz → 0.9.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lumera-0.9.1 → lumera-0.9.3}/PKG-INFO +1 -1
- {lumera-0.9.1 → lumera-0.9.3}/lumera/automations.py +70 -1
- lumera-0.9.3/lumera/integrations/__init__.py +34 -0
- lumera-0.9.3/lumera/integrations/google.py +338 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera/pb.py +69 -88
- {lumera-0.9.1 → lumera-0.9.3}/lumera/sdk.py +100 -34
- {lumera-0.9.1 → lumera-0.9.3}/lumera.egg-info/PKG-INFO +1 -1
- {lumera-0.9.1 → lumera-0.9.3}/lumera.egg-info/SOURCES.txt +2 -0
- {lumera-0.9.1 → lumera-0.9.3}/pyproject.toml +4 -3
- {lumera-0.9.1 → lumera-0.9.3}/tests/test_sdk.py +42 -5
- {lumera-0.9.1 → lumera-0.9.3}/lumera/__init__.py +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera/_utils.py +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera/exceptions.py +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera/google.py +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera/llm.py +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera/locks.py +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera/storage.py +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera/webhooks.py +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera.egg-info/dependency_links.txt +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera.egg-info/requires.txt +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/lumera.egg-info/top_level.txt +0 -0
- {lumera-0.9.1 → lumera-0.9.3}/setup.cfg +0 -0
{lumera-0.9.1 → lumera-0.9.3}/lumera/automations.py

@@ -57,8 +57,9 @@ __all__ = [
     "create",
     "update",
     "upsert",
-    # Log streaming
+    # Log streaming and download
     "stream_logs",
+    "get_log_download_url",
     # Classes
     "Run",
    "Automation",

@@ -218,6 +219,33 @@ class Run:
         self._data = result
         return self

+    def get_log_download_url(self) -> str:
+        """Get a presigned URL to download the run's logs.
+
+        Logs are archived to S3 after the run completes. This method returns
+        a presigned URL that can be used to download the log file.
+
+        **Caution for coding agents:** Automation logs can be very large (up to 50MB).
+        Avoid reading entire log contents into context. Instead, download to a file
+        and use tools like `grep`, `tail`, or `head` to extract relevant portions.
+
+        Returns:
+            A presigned URL string for downloading the logs.
+
+        Raises:
+            ValueError: If the run has no ID.
+            RuntimeError: If logs are not yet available (run still in progress).
+
+        Example:
+            >>> run = automations.get_run("run_id")
+            >>> if run.is_terminal:
+            ...     url = run.get_log_download_url()
+            ...     # Download to file, then use grep/tail to inspect
+        """
+        if not self.id:
+            raise ValueError("Cannot get log URL without run id")
+        return get_log_download_url(self.id)
+
     def to_dict(self) -> dict[str, Any]:
         """Return the underlying data dict."""
         return self._data.copy()

@@ -833,3 +861,44 @@ def stream_logs(run_id: str, *, timeout: float = 30) -> Iterator[str]:
                return
        current_event = ""
        current_data = ""
+
+
+def get_log_download_url(run_id: str) -> str:
+    """Get a presigned URL to download the logs for a completed run.
+
+    Logs are archived to S3 after a run completes. This function returns
+    a presigned URL that can be used to download the log file directly.
+
+    **Caution for coding agents:** Automation logs can be very large (up to 50MB).
+    Avoid reading entire log contents into context. Instead, download to a file
+    and use tools like `grep`, `tail`, or `head` to extract relevant portions.
+
+    Args:
+        run_id: The run ID to get logs for.
+
+    Returns:
+        A presigned URL string for downloading the logs.
+
+    Raises:
+        ValueError: If run_id is empty.
+        LumeraAPIError: If the run doesn't exist or logs aren't available.
+
+    Example:
+        >>> url = automations.get_log_download_url("run_abc123")
+        >>> # Download to file, then inspect with shell tools
+        >>> import subprocess
+        >>> subprocess.run(["curl", "-o", "run.log", url])
+        >>> # Use grep/tail to extract relevant parts
+    """
+    run_id = run_id.strip()
+    if not run_id:
+        raise ValueError("run_id is required")
+
+    result = _api_request(
+        "GET",
+        f"automation-runs/{run_id}/files/download-url",
+        params={"name": "run.log"},
+    )
+    if isinstance(result, dict) and "url" in result:
+        return result["url"]
+    raise RuntimeError("Unexpected response: no download URL returned")
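The new download path complements `stream_logs` for runs that have already finished. A minimal usage sketch, assuming the module is imported as `from lumera import automations` (the import path is an assumption; the run ID is a placeholder), which follows the docstring's advice to scan on disk rather than load the whole file:

    # Sketch: fetch a completed run's logs to disk, then scan selectively.
    import urllib.request

    from lumera import automations  # assumed import path

    url = automations.get_log_download_url("run_abc123")  # placeholder run ID
    urllib.request.urlretrieve(url, "run.log")  # presigned URL, plain GET

    # Scan line by line instead of reading up to 50MB into memory
    with open("run.log", encoding="utf-8", errors="replace") as fh:
        errors = [line.rstrip() for line in fh if "ERROR" in line]
    print(f"{len(errors)} error lines found")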
lumera-0.9.3/lumera/integrations/__init__.py (new file)

@@ -0,0 +1,34 @@
+"""
+Lumera SDK Integrations
+
+Third-party service integrations with Lumera credential management.
+
+Each integration module provides:
+- A `get_*_client()` or `get_*_service()` function that returns an authenticated client
+- Optional helper functions for common Lumera patterns
+
+Example:
+    from lumera.integrations import google, get_access_token
+
+    # Google Sheets with Lumera-managed OAuth
+    sheets = google.get_sheets_service()
+    data = sheets.spreadsheets().values().get(...)
+
+    # Google Drive
+    drive = google.get_drive_service()
+    files = drive.files().list().execute()
+
+    # Get raw access token for any provider
+    token = get_access_token("slack")
+
+Available integrations:
+- `google` - Google APIs (Sheets, Drive)
+
+Utilities:
+- `get_access_token(provider)` - Get OAuth token for any Lumera-connected provider
+"""
+
+from .._utils import get_access_token
+from . import google
+
+__all__ = ["get_access_token", "google"]
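Beyond the bundled `google` module, `get_access_token` is the escape hatch for providers without a dedicated integration. A hedged sketch, assuming a 'slack' connection is configured in Lumera; the Slack Web API endpoint is standard, but the overall flow is illustrative only:

    # Sketch: back a plain HTTP call with a Lumera-managed OAuth token.
    import requests

    from lumera.integrations import get_access_token

    token = get_access_token("slack")  # assumes a connected "slack" provider
    resp = requests.post(
        "https://slack.com/api/chat.postMessage",
        headers={"Authorization": f"Bearer {token}"},
        json={"channel": "#alerts", "text": "Automation finished"},
        timeout=30,
    )
    resp.raise_for_status()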
lumera-0.9.3/lumera/integrations/google.py (new file)

@@ -0,0 +1,338 @@
+"""
+Google API Integration
+
+Provides authenticated Google API clients using Lumera-managed OAuth credentials.
+
+Example:
+    from lumera.integrations import google
+
+    # Get authenticated Sheets service
+    sheets = google.get_sheets_service()
+    data = sheets.spreadsheets().values().get(
+        spreadsheetId="...",
+        range="Sheet1!A1:D10"
+    ).execute()
+
+    # Get authenticated Drive service
+    drive = google.get_drive_service()
+    files = drive.files().list().execute()
+
+    # Share credentials between services
+    creds = google.get_credentials()
+    sheets = google.get_sheets_service(credentials=creds)
+    drive = google.get_drive_service(credentials=creds)
+"""
+
+import io
+import logging
+import os
+import re
+from typing import TYPE_CHECKING, Optional, Tuple
+
+# When type checking we want access to the concrete ``Resource`` class that
+# ``googleapiclient.discovery.build`` returns. Importing it unconditionally
+# would require ``googleapiclient`` to be available in every execution
+# environment – something we cannot guarantee. By guarding the import with
+# ``TYPE_CHECKING`` we give static analysers (ruff, mypy, etc.) the
+# information they need without introducing a hard runtime dependency.
+# During static analysis we want to import ``Resource`` so that it is a known
+# name for type checkers, but we don't require this import at runtime. Guard
+# it with ``TYPE_CHECKING`` to avoid hard dependencies.
+if TYPE_CHECKING:  # pragma: no cover
+    from googleapiclient.discovery import Resource  # noqa: F401
+
+# Always ensure that the symbol ``Resource`` exists at runtime to placate static
+# analysers like ruff (F821) that inspect the AST without executing the code.
+try:  # pragma: no cover – optional runtime import
+    from googleapiclient.discovery import Resource  # type: ignore
+except ModuleNotFoundError:  # pragma: no cover – provide a stub fallback
+
+    class Resource:  # noqa: D401
+        """Stub replacement for ``googleapiclient.discovery.Resource``."""
+
+        pass
+
+
+from google.oauth2.credentials import Credentials
+from googleapiclient.discovery import build
+from googleapiclient.http import MediaFileUpload, MediaIoBaseDownload
+
+from .._utils import get_access_token
+
+# Module logger
+logger = logging.getLogger(__name__)
+
+# =====================================================================================
+# Configuration
+# =====================================================================================
+
+MIME_GOOGLE_SHEET = "application/vnd.google-apps.spreadsheet"
+MIME_EXCEL = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
+
+# =====================================================================================
+# Authentication & Service Initialization
+# =====================================================================================
+
+
+def get_credentials() -> Credentials:
+    """
+    Retrieves a Google OAuth token from Lumera and
+    converts it into a Credentials object usable by googleapiclient.
+
+    Returns:
+        google.oauth2.credentials.Credentials: Authenticated credentials object.
+
+    Raises:
+        RuntimeError: If LUMERA_TOKEN is not set or token fetch fails.
+    """
+    logger.debug("Fetching Google access token from Lumera…")
+    access_token = get_access_token("google")
+    logger.debug("Access token received.")
+    creds = Credentials(token=access_token)
+    logger.debug("Credentials object created.")
+    return creds
+
+
+# Backward compatibility alias
+get_google_credentials = get_credentials
+
+
+def get_sheets_service(credentials: Optional[Credentials] = None) -> "Resource":
+    """
+    Initializes and returns the Google Sheets API service.
+
+    If no credentials are provided, this function will automatically fetch a
+    Google access token from Lumera and construct the appropriate
+    ``google.oauth2.credentials.Credentials`` instance.
+
+    Args:
+        credentials: Optional pre-fetched credentials. If None, fetches from Lumera.
+
+    Returns:
+        Google Sheets API service resource.
+    """
+    if credentials is None:
+        logger.info("No credentials provided; fetching Google token…")
+        credentials = get_credentials()
+    logger.info("Google Sheets API service being initialized…")
+    return build("sheets", "v4", credentials=credentials)
+
+
+def get_drive_service(credentials: Optional[Credentials] = None) -> "Resource":
+    """
+    Initializes and returns the Google Drive API service.
+
+    If no credentials are provided, this function will automatically fetch a
+    Google access token from Lumera and construct the appropriate
+    ``google.oauth2.credentials.Credentials`` instance.
+
+    Args:
+        credentials: Optional pre-fetched credentials. If None, fetches from Lumera.
+
+    Returns:
+        Google Drive API service resource.
+    """
+    if credentials is None:
+        logger.info("No credentials provided; fetching Google token…")
+        credentials = get_credentials()
+    logger.info("Google Drive API service being initialized…")
+    return build("drive", "v3", credentials=credentials)
+
+
+# =====================================================================================
+# Google Sheets & Drive Utility Functions
+# =====================================================================================
+
+
+def get_spreadsheet_and_sheet_id(
+    service: "Resource", spreadsheet_url: str, tab_name: str
+) -> Tuple[Optional[str], Optional[int]]:
+    """
+    Given a Google Sheets URL and a tab (sheet) name, returns a tuple:
+        (spreadsheet_id, sheet_id)
+    """
+    spreadsheet_id = _extract_spreadsheet_id(spreadsheet_url)
+    if not spreadsheet_id:
+        return None, None
+
+    sheet_id = _get_sheet_id_from_name(service, spreadsheet_id, tab_name)
+    return spreadsheet_id, sheet_id
+
+
+def _extract_spreadsheet_id(spreadsheet_url: str) -> Optional[str]:
+    """Extracts the spreadsheet ID from a Google Sheets URL."""
+    logger.debug(f"Extracting spreadsheet ID from URL: {spreadsheet_url}")
+    pattern = r"/d/([a-zA-Z0-9-_]+)"
+    match = re.search(pattern, spreadsheet_url)
+    if match:
+        spreadsheet_id = match.group(1)
+        logger.debug(f"Spreadsheet ID extracted: {spreadsheet_id}")
+        return spreadsheet_id
+    logger.warning("Could not extract Spreadsheet ID.")
+    return None
+
+
+def _get_sheet_id_from_name(
+    service: "Resource", spreadsheet_id: str, tab_name: str
+) -> Optional[int]:
+    """Uses the Google Sheets API to fetch the sheet ID corresponding to 'tab_name'."""
+    logger.debug(f"Requesting sheet metadata for spreadsheet ID: {spreadsheet_id}")
+    response = (
+        service.spreadsheets()
+        .get(spreadsheetId=spreadsheet_id, fields="sheets.properties")
+        .execute()
+    )
+    logger.debug("Metadata received. Searching for tab…")
+
+    for sheet in response.get("sheets", []):
+        properties = sheet.get("properties", {})
+        if properties.get("title") == tab_name:
+            sheet_id = properties.get("sheetId")
+            logger.debug(f"Match found for tab '{tab_name}'. Sheet ID is {sheet_id}")
+            return sheet_id
+    logger.warning(f"No sheet found with tab name '{tab_name}'.")
+    return None
+
+
+def sheet_name_from_gid(service: "Resource", spreadsheet_id: str, gid: int) -> Optional[str]:
+    """Resolve a sheet's human-readable name (title) from its gid."""
+    logger.debug(f"Resolving sheet name from gid={gid} …")
+    meta = (
+        service.spreadsheets()
+        .get(
+            spreadsheetId=spreadsheet_id,
+            includeGridData=False,
+            fields="sheets(properties(sheetId,title))",
+        )
+        .execute()
+    )
+
+    for sheet in meta.get("sheets", []):
+        props = sheet.get("properties", {})
+        if props.get("sheetId") == gid:
+            title = props["title"]
+            logger.debug(f"Sheet gid={gid} corresponds to sheet name='{title}'.")
+            return title
+    logger.warning(f"No sheet found with gid={gid}")
+    return None
+
+
+def read_cell(service: "Resource", spreadsheet_id: str, range_a1: str) -> Optional[str]:
+    """Fetch a single cell value (as string); returns None if empty."""
+    logger.debug(f"Reading cell '{range_a1}' …")
+    resp = (
+        service.spreadsheets()
+        .values()
+        .get(spreadsheetId=spreadsheet_id, range=range_a1, majorDimension="ROWS")
+        .execute()
+    )
+
+    values = resp.get("values", [])
+    return values[0][0] if values and values[0] else None
+
+
+# NOTE: The function performs I/O side-effects and does not return a value.
+def download_file_direct(drive_service: "Resource", file_id: str, dest_path: str) -> None:
+    """
+    Downloads a file directly from Google Drive using files().get_media
+    without any format conversion.
+    """
+    logger.info(f"Initiating direct download for file ID: {file_id}")
+
+    request = drive_service.files().get_media(fileId=file_id)
+    fh = io.BytesIO()
+    downloader = MediaIoBaseDownload(fh, request)
+
+    done = False
+    while not done:
+        status, done = downloader.next_chunk()
+        if status:
+            logger.debug(f"Download progress: {int(status.progress() * 100)}%")
+
+    with open(dest_path, "wb") as f:
+        f.write(fh.getvalue())
+    logger.info(f"File saved to: {dest_path}")
+
+
+def upload_excel_as_google_sheet(
+    drive_service: "Resource", local_path: str, desired_name: str
+) -> Tuple[Optional[str], Optional[str]]:
+    """
+    Uploads a local XLSX file to Google Drive, converting it to Google Sheets format.
+    Returns the file ID and web link.
+    """
+    logger.info(f"Preparing to upload '{local_path}' as Google Sheet named '{desired_name}'")
+
+    if not os.path.isfile(local_path):
+        logger.error(f"Local file not found at '{local_path}'. Aborting.")
+        return None, None
+
+    media = MediaFileUpload(local_path, mimetype=MIME_EXCEL, resumable=True)
+    file_metadata = {"name": desired_name, "mimeType": MIME_GOOGLE_SHEET}
+
+    logger.info("Initiating Google Drive upload & conversion…")
+    request = drive_service.files().create(
+        body=file_metadata, media_body=media, fields="id, webViewLink"
+    )
+
+    response = None
+    while response is None:
+        status, response = request.next_chunk()
+        if status:
+            logger.debug(f"Upload progress: {int(status.progress() * 100)}%")
+
+    file_id = response.get("id")
+    web_view_link = response.get("webViewLink")
+    logger.info(f"Upload completed. File ID: {file_id}")
+    return file_id, web_view_link
+
+
+# Remove rows from a sheet. All parameters are 1-based (both *start_row* and
+# *end_row* are inclusive) mirroring the UI behaviour in Google Sheets.
+def delete_rows_api_call(
+    service: "Resource",
+    spreadsheet_id: str,
+    sheet_gid: int,
+    start_row: int,
+    end_row: int,
+) -> None:
+    """Executes the API call to delete rows."""
+    logger.info(f"Deleting rows {start_row}-{end_row} (1-based inclusive)…")
+
+    body = {
+        "requests": [
+            {
+                "deleteDimension": {
+                    "range": {
+                        "sheetId": sheet_gid,
+                        "dimension": "ROWS",
+                        "startIndex": start_row - 1,  # 0-based
+                        "endIndex": end_row,  # end-exclusive
+                    }
+                }
+            }
+        ]
+    }
+    service.spreadsheets().batchUpdate(spreadsheetId=spreadsheet_id, body=body).execute()
+    logger.info("Rows deleted.")
+
+
+__all__ = [
+    # Authentication
+    "get_credentials",
+    "get_google_credentials",  # backward compat alias
+    # Services
+    "get_sheets_service",
+    "get_drive_service",
+    # Sheets helpers
+    "get_spreadsheet_and_sheet_id",
+    "sheet_name_from_gid",
+    "read_cell",
+    "delete_rows_api_call",
+    # Drive helpers
+    "download_file_direct",
+    "upload_excel_as_google_sheet",
+    # Constants
+    "MIME_GOOGLE_SHEET",
+    "MIME_EXCEL",
+]
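Taken together, the new helpers support a URL-in, value-out flow. A minimal sketch using only functions defined in the module above; the spreadsheet URL and tab name are placeholders:

    # Sketch: resolve a spreadsheet from its URL, find a tab, read one cell.
    from lumera.integrations import google

    sheets = google.get_sheets_service()

    SHEET_URL = "https://docs.google.com/spreadsheets/d/EXAMPLE_ID/edit"  # placeholder
    spreadsheet_id, sheet_id = google.get_spreadsheet_and_sheet_id(
        sheets, SHEET_URL, tab_name="Summary"
    )
    if spreadsheet_id is None or sheet_id is None:
        raise SystemExit("Spreadsheet or tab not found")

    # Single-cell read via the A1-range helper
    total = google.read_cell(sheets, spreadsheet_id, "Summary!B2")
    print(f"Summary!B2 = {total!r}")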
{lumera-0.9.1 → lumera-0.9.3}/lumera/pb.py

@@ -48,6 +48,7 @@ Example:
    >>> deposit = pb.get("deposits", "rec_abc123")
"""

+import warnings
from typing import Any, Iterator, Mapping, Sequence

__all__ = [

@@ -88,9 +89,6 @@ from .sdk import (
from .sdk import (
    bulk_upsert_records as _bulk_upsert_records,
)
-from .sdk import (
-    create_collection as _create_collection,
-)
from .sdk import (
    create_record as _create_record,
)

@@ -100,6 +98,9 @@ from .sdk import (
from .sdk import (
    delete_record as _delete_record,
)
+from .sdk import (
+    ensure_collection as _ensure_collection,
+)
from .sdk import (
    get_collection as _get_collection,
)

@@ -115,9 +116,6 @@ from .sdk import (
from .sdk import (
    list_records as _list_records,
)
-from .sdk import (
-    update_collection as _update_collection,
-)
from .sdk import (
    update_record as _update_record,
)

@@ -561,62 +559,61 @@ def get_collection(name: str) -> dict[str, Any] | None:
        raise


-def create_collection(
+def ensure_collection(
    name: str,
-    schema: Sequence[dict[str, Any]],
+    schema: Sequence[dict[str, Any]] | None = None,
    *,
    indexes: Sequence[str] | None = None,
) -> dict[str, Any]:
-    """
+    """Ensure a collection exists with the given schema (idempotent).

-
-
-        schema: List of field definitions. Each field is a dict with:
-            - name: Field name (required)
-            - type: Field type (required): text, number, bool, date, json,
-              relation, select, editor, lumera_file
-            - required: Whether field is required (default False)
-            - options: Type-specific options (e.g., collectionId for relations)
-        indexes: Optional list of index definitions
+    This is the recommended way to manage collections. It creates the collection
+    if it doesn't exist, or updates it if it does. Safe to call multiple times.

-
-
+    IMPORTANT: The schema and indexes are declarative:
+    - schema: The COMPLETE list of user fields you want (replaces all existing user fields)
+    - indexes: The COMPLETE list of user indexes you want (replaces all existing user indexes)
+    - System fields (id, created, updated, etc.) are automatically managed
+    - System indexes (external_id, updated) are automatically managed

-
-        >>> col = pb.create_collection("deposits", [
-        ...     {"name": "amount", "type": "number", "required": True},
-        ...     {"name": "status", "type": "text"},
-        ...     {"name": "account_id", "type": "relation",
-        ...      "options": {"collectionId": "accounts"}}
-        ... ])
-    """
-    return _create_collection(name, schema=schema, indexes=indexes)
-
-
-def update_collection(
-    name: str,
-    schema: Sequence[dict[str, Any]],
-    *,
-    indexes: Sequence[str] | None = None,
-) -> dict[str, Any]:
-    """Update a collection's schema.
+    If you omit schema or indexes, existing values are preserved.

    Args:
-        name: Collection name
-        schema:
-
+        name: Collection name (must not start with '_')
+        schema: List of field definitions. If provided, replaces all user fields.
+            Each field is a dict with:
+            - name: Field name (required)
+            - type: Field type (required): text, number, bool, date, json,
+              relation, select, editor, lumera_file
+            - required: Whether field is required (default False)
+            - options: Type-specific options (e.g., collectionId for relations)
+        indexes: Optional list of user index DDL statements. If provided,
+            replaces all user indexes.

    Returns:
-
+        Collection object with:
+        - schema: User-defined fields only (what you can modify)
+        - indexes: User-defined indexes only (what you can modify)
+        - systemInfo: Read-only system fields and indexes (automatically managed)

    Example:
-        >>> col = pb.update_collection("deposits", [
+        >>> # Create or update a collection
+        >>> col = pb.ensure_collection("deposits", [
        ...     {"name": "amount", "type": "number", "required": True},
        ...     {"name": "status", "type": "text"},
-        ...
+        ... ])
+        >>>
+        >>> # Add a field using copy-modify-send pattern
+        >>> col = pb.get_collection("deposits")
+        >>> col["schema"].append({"name": "notes", "type": "text"})
+        >>> pb.ensure_collection("deposits", col["schema"])
+        >>>
+        >>> # Add an index
+        >>> pb.ensure_collection("deposits", indexes=[
+        ...     "CREATE INDEX idx_status ON deposits (status)"
        ... ])
    """
-    return _update_collection(name, schema=schema, indexes=indexes)
+    return _ensure_collection(name, schema=schema, indexes=indexes)


def delete_collection(name: str) -> None:

@@ -634,56 +631,40 @@ def delete_collection(name: str) -> None:
    _delete_collection(name)


-def ensure_collection(
+# Backwards compatibility aliases
+def create_collection(
    name: str,
    schema: Sequence[dict[str, Any]],
    *,
-    update_schema: bool = False,
    indexes: Sequence[str] | None = None,
) -> dict[str, Any]:
-    """
-
-    This is the recommended way to set up collections in automation scripts.
-    Safe to call multiple times - will not fail if collection already exists.
-
-    Args:
-        name: Collection name
-        schema: List of field definitions
-        update_schema: If True, update existing collection's schema.
-            If False (default), leave existing collection unchanged.
-        indexes: Optional list of index definitions
-
-    Returns:
-        Collection object (created, updated, or existing)
-
-    Behavior:
-        - Collection doesn't exist → create it
-        - Collection exists, update_schema=False → return existing (no-op)
-        - Collection exists, update_schema=True → update schema
+    """Create a new collection.

-
-
-        >>> col = pb.ensure_collection("deposits", [
-        ...     {"name": "amount", "type": "number", "required": True},
-        ...     {"name": "status", "type": "text"},
-        ... ])
-        >>>
-        >>> # Update schema if collection exists
-        >>> col = pb.ensure_collection("deposits", [
-        ...     {"name": "amount", "type": "number", "required": True},
-        ...     {"name": "status", "type": "text"},
-        ...     {"name": "notes", "type": "text"},  # Add new field
-        ... ], update_schema=True)
+    .. deprecated::
+        Use :func:`ensure_collection` instead, which handles both create and update.
    """
-
+    warnings.warn(
+        "create_collection() is deprecated, use ensure_collection() instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return ensure_collection(name, schema, indexes=indexes)

-    if existing is None:
-        # Collection doesn't exist, create it
-        return create_collection(name, schema, indexes=indexes)

-
-
-
+def update_collection(
+    name: str,
+    schema: Sequence[dict[str, Any]],
+    *,
+    indexes: Sequence[str] | None = None,
+) -> dict[str, Any]:
+    """Update a collection's schema.

-
-
+    .. deprecated::
+        Use :func:`ensure_collection` instead, which handles both create and update.
+    """
+    warnings.warn(
+        "update_collection() is deprecated, use ensure_collection() instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return ensure_collection(name, schema, indexes=indexes)
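For code still on the 0.9.1 surface, migration is mechanical: both deprecated wrappers now delegate to `ensure_collection`. A sketch of the before and after, using the `pb` import style shown in the module docstring:

    # Sketch: migrating to the declarative ensure_collection API.
    from lumera import pb

    FIELDS = [
        {"name": "amount", "type": "number", "required": True},
        {"name": "status", "type": "text"},
    ]

    # 0.9.1 style (still works, but emits DeprecationWarning and delegates):
    #   pb.create_collection("deposits", FIELDS)
    #   pb.update_collection("deposits", FIELDS + [{"name": "notes", "type": "text"}])

    # 0.9.3 style: one idempotent call; the schema passed is the complete
    # set of user fields, so re-running the script is safe.
    pb.ensure_collection("deposits", FIELDS)
    pb.ensure_collection("deposits", FIELDS + [{"name": "notes", "type": "text"}])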
{lumera-0.9.1 → lumera-0.9.3}/lumera/sdk.py

@@ -23,6 +23,7 @@ Direct usage is discouraged unless you need low-level control.

import json
import os
+import warnings
from typing import Any, Iterable, Mapping, MutableMapping, Sequence, TypedDict

import requests as _requests

@@ -179,27 +180,99 @@ def get_collection(collection_id_or_name: str) -> dict[str, Any]:
    return _api_request("GET", f"collections/{collection_id_or_name}")


-def create_collection(
+def ensure_collection(
    name: str,
    *,
    collection_type: str = "base",
-    schema: Iterable[CollectionField] | None = None,
-    indexes: Iterable[str] | None = None,
+    schema: Iterable[CollectionField] | object = _UNSET,
+    indexes: Iterable[str] | object = _UNSET,
) -> dict[str, Any]:
-    """
+    """Ensure a collection exists with the given schema and indexes.
+
+    This is an idempotent operation - it creates the collection if it doesn't exist,
+    or updates it if it does. Safe to call multiple times with the same arguments.
+
+    The `schema` field should contain ONLY user-defined fields. System fields
+    (id, created, updated, created_by, updated_by, external_id, lm_provenance)
+    are automatically managed by Lumera and should not be included.

+    The `indexes` field should contain ONLY user-defined indexes. System indexes
+    (external_id unique index, updated index) are automatically managed.
+
+    Args:
+        name: Collection name. Required.
+        collection_type: Collection type, defaults to "base".
+        schema: List of field definitions. If provided, replaces all user fields.
+            If omitted, existing fields are preserved.
+        indexes: List of index DDL statements. If provided, replaces all user indexes.
+            If omitted, existing indexes are preserved.
+
+    Returns:
+        The collection data including:
+        - schema: User-defined fields only
+        - indexes: User-defined indexes only
+        - systemInfo: Object with system-managed fields and indexes (read-only)
+
+    Example:
+        # Create or update a collection
+        coll = ensure_collection(
+            "customers",
+            schema=[
+                {"name": "title", "type": "text", "required": True},
+                {"name": "email", "type": "text"},
+            ],
+            indexes=["CREATE INDEX idx_email ON customers (email)"]
+        )
+
+        # Add a field (copy-modify-send pattern)
+        coll = get_collection("customers")
+        coll["schema"].append({"name": "phone", "type": "text"})
+        ensure_collection("customers", schema=coll["schema"])
+    """
    if not name or not name.strip():
        raise ValueError("name is required")

-
+    name = name.strip()
+    payload: dict[str, Any] = {}
+
    if collection_type:
        payload["type"] = collection_type
-
+
+    if schema is not _UNSET:
+        if schema is None:
+            raise ValueError("schema cannot be None; provide an iterable of fields or omit")
        payload["schema"] = [dict(field) for field in schema]
-    if indexes is not None:
-        payload["indexes"] = list(indexes)

-
+    if indexes is not _UNSET:
+        payload["indexes"] = list(indexes) if indexes is not None else []
+
+    return _api_request("PUT", f"collections/{name}", json_body=payload)
+
+
+# Backwards compatibility aliases
+def create_collection(
+    name: str,
+    *,
+    collection_type: str = "base",
+    schema: Iterable[CollectionField] | None = None,
+    indexes: Iterable[str] | None = None,
+) -> dict[str, Any]:
+    """Create a new PocketBase collection.
+
+    .. deprecated::
+        Use :func:`ensure_collection` instead, which handles both create and update.
+    """
+    warnings.warn(
+        "create_collection() is deprecated, use ensure_collection() instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return ensure_collection(
+        name,
+        collection_type=collection_type,
+        schema=schema if schema is not None else [],
+        indexes=indexes if indexes is not None else [],
+    )


def update_collection(

@@ -210,33 +283,26 @@ def update_collection(
    schema: Iterable[CollectionField] | object = _UNSET,
    indexes: Iterable[str] | object = _UNSET,
) -> dict[str, Any]:
-    """Update a PocketBase collection.
-
-    if not collection_id_or_name:
-        raise ValueError("collection_id_or_name is required")
-
-    payload: dict[str, Any] = {}
-
-    if name is not _UNSET:
-        if name is None or not str(name).strip():
-            raise ValueError("name cannot be empty")
-        payload["name"] = str(name).strip()
-
-    if collection_type is not _UNSET:
-        payload["type"] = collection_type
+    """Update a PocketBase collection.

-
-
-
-
-
-
-
-
-
-
+    .. deprecated::
+        Use :func:`ensure_collection` instead, which handles both create and update.
+        Note: The 'name' parameter for renaming is no longer supported.
+    """
+    warnings.warn(
+        "update_collection() is deprecated, use ensure_collection() instead",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    if name is not _UNSET and name != collection_id_or_name:
+        raise ValueError("Renaming collections via 'name' parameter is no longer supported")

-    return
+    return ensure_collection(
+        collection_id_or_name,
+        collection_type=collection_type if collection_type is not _UNSET else "base",
+        schema=schema,
+        indexes=indexes,
+    )


def delete_collection(collection_id_or_name: str) -> None:
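The `_UNSET` sentinel in the new signatures is what lets `ensure_collection` tell "argument omitted, preserve what is stored" apart from "explicitly passed". A self-contained sketch of the pattern; the names below are illustrative, not the SDK's internals:

    # Sketch: a module-level sentinel distinguishes "omitted" from "passed",
    # including the case where the caller passes an empty list.
    from typing import Any

    _UNSET: Any = object()  # unique marker; never equal to a caller value

    def ensure(name: str, *, fields: list[dict] | object = _UNSET) -> dict:
        payload: dict[str, Any] = {}
        if fields is not _UNSET:  # caller passed something, even []
            payload["fields"] = list(fields)  # type: ignore[arg-type]
        # if omitted, "fields" stays out of the payload, so the server
        # preserves whatever is already stored
        return payload

    assert ensure("t") == {}
    assert ensure("t", fields=[]) == {"fields": []}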
{lumera-0.9.1 → lumera-0.9.3}/pyproject.toml

@@ -1,6 +1,6 @@
[project]
name = "lumera"
-version = "0.9.1"
+version = "0.9.3"
description = "SDK for building on Lumera platform"
requires-python = ">=3.11"
dependencies = [

@@ -45,8 +45,9 @@ full = [
    "requests==2.32.4",
]

-[tool.setuptools]
-
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["lumera*"]

[tool.pytest.ini_options]
minversion = "8.0"
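The move to `[tool.setuptools.packages.find]` ensures automatic discovery of the new `lumera.integrations` subpackage; with a static package list, new subpackages must be added by hand. The table is equivalent to this programmatic discovery, shown only to illustrate the effect:

    # Sketch: what the [tool.setuptools.packages.find] table resolves to.
    from setuptools import find_packages

    packages = find_packages(where=".", include=["lumera*"])
    # Expected to include both "lumera" and "lumera.integrations"
    print(packages)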
{lumera-0.9.1 → lumera-0.9.3}/tests/test_sdk.py

@@ -154,14 +154,17 @@ def test_list_collections_uses_token_and_returns_payload(monkeypatch: pytest.MonkeyPatch) -> None:
    assert headers["Authorization"] == "token tok"


-def test_create_collection_posts_payload(monkeypatch: pytest.MonkeyPatch) -> None:
+def test_ensure_collection_uses_put(monkeypatch: pytest.MonkeyPatch) -> None:
+    """Test that ensure_collection uses PUT method and includes schema in payload."""
    monkeypatch.setenv(sdk.TOKEN_ENV, "tok")

    captured: dict[str, object] = {}

-    def fake_request(
+    def fake_request(method: str, url: str, **kwargs: object) -> DummyResponse:
+        captured["method"] = method
+        captured["url"] = url
        captured["json"] = kwargs.get("json")
-        return DummyResponse(status_code=
+        return DummyResponse(status_code=200, json_data={"id": "new", "name": "example"})

    class MockSession:
        def request(self, method: str, url: str, **kwargs: object) -> DummyResponse:

@@ -172,18 +175,52 @@ def test_create_collection_posts_payload(monkeypatch: pytest.MonkeyPatch) -> None:

    monkeypatch.setattr(_utils, "_get_session", lambda: MockSession())

-    resp = sdk.create_collection(
+    resp = sdk.ensure_collection(
        "example", schema=[{"name": "field", "type": "text"}], indexes=["CREATE INDEX"]
    )

    assert resp["id"] == "new"
+    assert captured["method"] == "PUT"
+    assert str(captured["url"]).endswith("/collections/example")
    payload = captured["json"]
    assert isinstance(payload, dict)
-
+    # Name is not in payload (it's in URL path)
+    assert "name" not in payload
    assert payload["schema"][0]["name"] == "field"
    assert payload["indexes"] == ["CREATE INDEX"]


+def test_create_collection_uses_ensure(monkeypatch: pytest.MonkeyPatch) -> None:
+    """Test that create_collection (deprecated) calls ensure_collection."""
+    monkeypatch.setenv(sdk.TOKEN_ENV, "tok")
+
+    captured: dict[str, object] = {}
+
+    def fake_request(method: str, url: str, **kwargs: object) -> DummyResponse:
+        captured["method"] = method
+        captured["url"] = url
+        return DummyResponse(status_code=200, json_data={"id": "new"})
+
+    class MockSession:
+        def request(self, method: str, url: str, **kwargs: object) -> DummyResponse:
+            return fake_request(method, url, **kwargs)
+
+        def mount(self, prefix: str, adapter: object) -> None:
+            pass
+
+    monkeypatch.setattr(_utils, "_get_session", lambda: MockSession())
+
+    # Should emit deprecation warning
+    with pytest.warns(DeprecationWarning, match="create_collection.*deprecated"):
+        resp = create_collection(
+            "example", schema=[{"name": "field", "type": "text"}], indexes=["CREATE INDEX"]
+        )
+
+    assert resp["id"] == "new"
+    # Should use PUT (via ensure_collection)
+    assert captured["method"] == "PUT"
+
+
def test_create_record_sends_json_payload(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv(sdk.TOKEN_ENV, "tok")
The remaining 12 files listed above carry no changes between 0.9.1 and 0.9.3.