lumera 0.4.6__py3-none-any.whl → 0.9.6__py3-none-any.whl
This diff compares publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- lumera/__init__.py +99 -4
- lumera/_utils.py +782 -0
- lumera/automations.py +904 -0
- lumera/exceptions.py +72 -0
- lumera/files.py +97 -0
- lumera/google.py +47 -270
- lumera/integrations/__init__.py +34 -0
- lumera/integrations/google.py +338 -0
- lumera/llm.py +481 -0
- lumera/locks.py +216 -0
- lumera/pb.py +679 -0
- lumera/sdk.py +927 -380
- lumera/storage.py +270 -0
- lumera/webhooks.py +304 -0
- lumera-0.9.6.dist-info/METADATA +37 -0
- lumera-0.9.6.dist-info/RECORD +18 -0
- {lumera-0.4.6.dist-info → lumera-0.9.6.dist-info}/WHEEL +1 -1
- lumera-0.4.6.dist-info/METADATA +0 -11
- lumera-0.4.6.dist-info/RECORD +0 -7
- {lumera-0.4.6.dist-info → lumera-0.9.6.dist-info}/top_level.txt +0 -0
lumera/exceptions.py
ADDED
@@ -0,0 +1,72 @@
+"""
+Custom exceptions for the Lumera SDK.
+
+Exception hierarchy:
+    LumeraError (base)
+    ├── RecordNotFoundError - Record doesn't exist (404)
+    ├── ValidationError - Data doesn't match schema
+    ├── UniqueConstraintError - Unique field violation
+    └── LockHeldError - Lock already held by another process
+
+Example:
+    >>> from lumera import pb
+    >>> from lumera.exceptions import RecordNotFoundError
+    >>> try:
+    ...     deposit = pb.get("deposits", "invalid_id")
+    ... except RecordNotFoundError as e:
+    ...     print(f"Not found: {e.record_id}")
+"""
+
+__all__ = [
+    "LumeraError",
+    "RecordNotFoundError",
+    "ValidationError",
+    "UniqueConstraintError",
+    "LockHeldError",
+]
+
+
+class LumeraError(Exception):
+    """Base exception for all Lumera SDK errors."""
+
+    pass
+
+
+class RecordNotFoundError(LumeraError):
+    """Record doesn't exist in the collection."""
+
+    def __init__(self, collection: str, record_id: str) -> None:
+        super().__init__(f"Record '{record_id}' not found in collection '{collection}'")
+        self.collection = collection
+        self.record_id = record_id
+
+
+class ValidationError(LumeraError):
+    """Data doesn't match collection schema."""
+
+    def __init__(self, collection: str, errors: dict[str, str]) -> None:
+        super().__init__(f"Validation failed for '{collection}': {errors}")
+        self.collection = collection
+        self.errors = errors
+
+
+class UniqueConstraintError(LumeraError):
+    """Unique field constraint violation."""
+
+    def __init__(self, collection: str, field: str, value: object) -> None:
+        super().__init__(f"Record with {field}='{value}' already exists in '{collection}'")
+        self.collection = collection
+        self.field = field
+        self.value = value
+
+
+class LockHeldError(LumeraError):
+    """Lock is already held by another process."""
+
+    def __init__(self, lock_name: str, held_by: str | None = None) -> None:
+        msg = f"Lock '{lock_name}' is already held"
+        if held_by:
+            msg += f" by {held_by}"
+        super().__init__(msg)
+        self.lock_name = lock_name
+        self.held_by = held_by
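
Every class above derives from LumeraError, so callers can branch on the structured attributes of a specific subclass or fall back to the base class. A minimal sketch of that pattern (only pb.get appears in the module docstring; which exceptions other calls raise is an assumption here):

from lumera import pb
from lumera.exceptions import LumeraError, RecordNotFoundError

def fetch_deposit(record_id: str):
    try:
        return pb.get("deposits", record_id)
    except RecordNotFoundError as exc:
        # Structured attributes set by the constructor, no message parsing needed.
        print(f"missing record '{exc.record_id}' in collection '{exc.collection}'")
        return None
    except LumeraError:
        # Base class catches any other SDK failure (validation, unique
        # constraint, lock held, ...).
        raise
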
lumera/files.py
ADDED
@@ -0,0 +1,97 @@
+"""
+File input types for Lumera automations.
+
+These types are used with Pydantic models to define file inputs for automations.
+They generate the correct JSON schema format that the frontend recognizes for
+file picker UI.
+
+Example:
+    from pydantic import BaseModel, Field
+    from lumera import LumeraFile, LumeraFiles
+
+    class ProcessInputs(BaseModel):
+        report: LumeraFile = Field(..., description="Excel report to process")
+        attachments: LumeraFiles = Field(default=[], description="Additional files")
+
+    def main(inputs: ProcessInputs):
+        # inputs.report is a string path like "/tmp/lumera-files/report.xlsx"
+        with open(inputs.report) as f:
+            ...
+"""
+
+from typing import Annotated, Any
+
+# Check if Pydantic is available
+try:
+    from pydantic import GetJsonSchemaHandler
+    from pydantic.json_schema import JsonSchemaValue
+    from pydantic_core import CoreSchema
+
+    _HAS_PYDANTIC = True
+except ImportError:
+    _HAS_PYDANTIC = False
+
+
+if _HAS_PYDANTIC:
+
+    class _LumeraFileSchema:
+        """
+        Pydantic JSON schema handler for single file inputs.
+
+        Generates: {"type": "string", "format": "file"}
+
+        The frontend recognizes this schema and renders a file picker.
+        At runtime, the value is a string path to the downloaded file.
+        """
+
+        @classmethod
+        def __get_pydantic_json_schema__(
+            cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
+        ) -> JsonSchemaValue:
+            return {"type": "string", "format": "file"}
+
+    class _LumeraFilesSchema:
+        """
+        Pydantic JSON schema handler for multiple file inputs.
+
+        Generates: {"type": "array", "items": {"type": "string", "format": "file"}}
+
+        The frontend recognizes this schema and renders a multi-file picker.
+        At runtime, the value is a list of string paths to downloaded files.
+        """
+
+        @classmethod
+        def __get_pydantic_json_schema__(
+            cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
+        ) -> JsonSchemaValue:
+            return {"type": "array", "items": {"type": "string", "format": "file"}}
+
+    # Public types using Annotated to attach schema handlers
+    LumeraFile: Any = Annotated[str, _LumeraFileSchema()]
+    """
+    Type for single file input in Pydantic models.
+
+    At runtime, this is a string containing the local file path.
+    In JSON schema, generates {"type": "string", "format": "file"}.
+
+    Example:
+        class Inputs(BaseModel):
+            document: LumeraFile = Field(..., description="PDF to process")
+    """
+
+    LumeraFiles: Any = Annotated[list[str], _LumeraFilesSchema()]
+    """
+    Type for multiple file inputs in Pydantic models.
+
+    At runtime, this is a list of strings containing local file paths.
+    In JSON schema, generates {"type": "array", "items": {"type": "string", "format": "file"}}.
+
+    Example:
+        class Inputs(BaseModel):
+            documents: LumeraFiles = Field(default=[], description="PDFs to merge")
+    """
+
+else:
+    # Fallback when Pydantic is not installed - types are just aliases
+    LumeraFile: Any = str
+    LumeraFiles: Any = list
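
With Pydantic installed, the Annotated metadata above only swaps out the generated JSON schema; validation and runtime values stay plain strings (or lists of strings). A quick check of that behaviour, assuming Pydantic v2 (the MergeInputs model is illustrative):

import json

from pydantic import BaseModel, Field

from lumera import LumeraFile, LumeraFiles

class MergeInputs(BaseModel):
    cover: LumeraFile = Field(..., description="Cover page PDF")
    documents: LumeraFiles = Field(default=[], description="PDFs to merge")

# The frontend inspects this schema to decide when to render a file picker:
# "cover" should carry format "file", "documents" an array of such strings.
schema = MergeInputs.model_json_schema()
print(json.dumps(schema["properties"], indent=2))

# At runtime the fields are ordinary paths, so the model validates plain strings.
inputs = MergeInputs(
    cover="/tmp/lumera-files/cover.pdf",
    documents=["/tmp/lumera-files/a.pdf", "/tmp/lumera-files/b.pdf"],
)
print(inputs.cover)  # "/tmp/lumera-files/cover.pdf"
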
lumera/google.py
CHANGED
@@ -1,270 +1,47 @@
-# Authentication & Service Initialization
-# =====================================================================================
-
-
-def get_google_credentials() -> Credentials:
-    """
-    Retrieves a Google OAuth token from Lumera and
-    converts it into a Credentials object usable by googleapiclient.
-    """
-    logger.debug("Fetching Google access token from Lumera…")
-    access_token = get_access_token("google")
-    logger.debug("Access token received.")
-    creds = Credentials(token=access_token)
-    logger.debug("Credentials object created.")
-    return creds
-
-
-def get_sheets_service(credentials: Optional[Credentials] = None) -> 'Resource':
-    """
-    Initializes and returns the Google Sheets API service.
-
-    If no credentials are provided, this function will automatically fetch a
-    Google access token from Lumera and construct the appropriate
-    ``google.oauth2.credentials.Credentials`` instance.
-    """
-    if credentials is None:
-        logger.info("No credentials provided; fetching Google token…")
-        credentials = get_google_credentials()
-    logger.info("Google Sheets API service being initialized…")
-    return build('sheets', 'v4', credentials=credentials)
-
-
-def get_drive_service(credentials: Optional[Credentials] = None) -> 'Resource':
-    """
-    Initializes and returns the Google Drive API service.
-
-    If no credentials are provided, this function will automatically fetch a
-    Google access token from Lumera and construct the appropriate
-    ``google.oauth2.credentials.Credentials`` instance.
-    """
-    if credentials is None:
-        logger.info("No credentials provided; fetching Google token…")
-        credentials = get_google_credentials()
-    logger.info("Google Drive API service being initialized…")
-    return build('drive', 'v3', credentials=credentials)
-
-
-# =====================================================================================
-# Google Sheets & Drive Utility Functions
-# =====================================================================================
-
-
-def get_spreadsheet_and_sheet_id(
-    service: 'Resource', spreadsheet_url: str, tab_name: str
-) -> Tuple[Optional[str], Optional[int]]:
-    """
-    Given a Google Sheets URL and a tab (sheet) name, returns a tuple:
-        (spreadsheet_id, sheet_id)
-    """
-    spreadsheet_id = _extract_spreadsheet_id(spreadsheet_url)
-    if not spreadsheet_id:
-        return None, None
-
-    sheet_id = _get_sheet_id_from_name(service, spreadsheet_id, tab_name)
-    return spreadsheet_id, sheet_id
-
-
-def _extract_spreadsheet_id(spreadsheet_url: str) -> Optional[str]:
-    """Extracts the spreadsheet ID from a Google Sheets URL."""
-    logger.debug(f"Extracting spreadsheet ID from URL: {spreadsheet_url}")
-    pattern = r"/d/([a-zA-Z0-9-_]+)"
-    match = re.search(pattern, spreadsheet_url)
-    if match:
-        spreadsheet_id = match.group(1)
-        logger.debug(f"Spreadsheet ID extracted: {spreadsheet_id}")
-        return spreadsheet_id
-    logger.warning("Could not extract Spreadsheet ID.")
-    return None
-
-
-def _get_sheet_id_from_name(
-    service: 'Resource', spreadsheet_id: str, tab_name: str
-) -> Optional[int]:
-    """Uses the Google Sheets API to fetch the sheet ID corresponding to 'tab_name'."""
-    logger.debug(f"Requesting sheet metadata for spreadsheet ID: {spreadsheet_id}")
-    response = (
-        service.spreadsheets()
-        .get(spreadsheetId=spreadsheet_id, fields="sheets.properties")
-        .execute()
-    )
-    logger.debug("Metadata received. Searching for tab…")
-
-    for sheet in response.get("sheets", []):
-        properties = sheet.get("properties", {})
-        if properties.get("title") == tab_name:
-            sheet_id = properties.get("sheetId")
-            logger.debug(f"Match found for tab '{tab_name}'. Sheet ID is {sheet_id}")
-            return sheet_id
-    logger.warning(f"No sheet found with tab name '{tab_name}'.")
-    return None
-
-
-def sheet_name_from_gid(service: 'Resource', spreadsheet_id: str, gid: int) -> Optional[str]:
-    """Resolve a sheet's human-readable name (title) from its gid."""
-    logger.debug(f"Resolving sheet name from gid={gid} …")
-    meta = (
-        service.spreadsheets()
-        .get(
-            spreadsheetId=spreadsheet_id,
-            includeGridData=False,
-            fields="sheets(properties(sheetId,title))",
-        )
-        .execute()
-    )
-
-    for sheet in meta.get("sheets", []):
-        props = sheet.get("properties", {})
-        if props.get("sheetId") == gid:
-            title = props["title"]
-            logger.debug(f"Sheet gid={gid} corresponds to sheet name='{title}'.")
-            return title
-    logger.warning(f"No sheet found with gid={gid}")
-    return None
-
-
-def read_cell(service: 'Resource', spreadsheet_id: str, range_a1: str) -> Optional[str]:
-    """Fetch a single cell value (as string); returns None if empty."""
-    logger.debug(f"Reading cell '{range_a1}' …")
-    resp = (
-        service.spreadsheets()
-        .values()
-        .get(spreadsheetId=spreadsheet_id, range=range_a1, majorDimension="ROWS")
-        .execute()
-    )
-
-    values = resp.get("values", [])
-    return values[0][0] if values and values[0] else None
-
-
-# NOTE: The function performs I/O side-effects and does not return a value.
-def download_file_direct(drive_service: 'Resource', file_id: str, dest_path: str) -> None:
-    """
-    Downloads a file directly from Google Drive using files().get_media
-    without any format conversion.
-    """
-    logger.info(f"Initiating direct download for file ID: {file_id}")
-
-    request = drive_service.files().get_media(fileId=file_id)
-    fh = io.BytesIO()
-    downloader = MediaIoBaseDownload(fh, request)
-
-    done = False
-    while not done:
-        status, done = downloader.next_chunk()
-        if status:
-            logger.debug(f"Download progress: {int(status.progress() * 100)}%")
-
-    with open(dest_path, "wb") as f:
-        f.write(fh.getvalue())
-    logger.info(f"File saved to: {dest_path}")
-
-
-def upload_excel_as_google_sheet(
-    drive_service: 'Resource', local_path: str, desired_name: str
-) -> Tuple[Optional[str], Optional[str]]:
-    """
-    Uploads a local XLSX file to Google Drive, converting it to Google Sheets format.
-    Returns the file ID and web link.
-    """
-    logger.info(f"Preparing to upload '{local_path}' as Google Sheet named '{desired_name}'")
-
-    if not os.path.isfile(local_path):
-        logger.error(f"Local file not found at '{local_path}'. Aborting.")
-        return None, None
-
-    media = MediaFileUpload(local_path, mimetype=MIME_EXCEL, resumable=True)
-    file_metadata = {"name": desired_name, "mimeType": MIME_GOOGLE_SHEET}
-
-    logger.info("Initiating Google Drive upload & conversion…")
-    request = drive_service.files().create(
-        body=file_metadata, media_body=media, fields="id, webViewLink"
-    )
-
-    response = None
-    while response is None:
-        status, response = request.next_chunk()
-        if status:
-            logger.debug(f"Upload progress: {int(status.progress() * 100)}%")
-
-    file_id = response.get("id")
-    web_view_link = response.get("webViewLink")
-    logger.info(f"Upload completed. File ID: {file_id}")
-    return file_id, web_view_link
-
-
-# Remove rows from a sheet. All parameters are 1-based (both *start_row* and
-# *end_row* are inclusive) mirroring the UI behaviour in Google Sheets.
-def delete_rows_api_call(
-    service: 'Resource',
-    spreadsheet_id: str,
-    sheet_gid: int,
-    start_row: int,
-    end_row: int,
-) -> None:
-    """Executes the API call to delete rows."""
-    logger.info(f"Deleting rows {start_row}-{end_row} (1-based inclusive)…")
-
-    body = {
-        "requests": [
-            {
-                "deleteDimension": {
-                    "range": {
-                        "sheetId": sheet_gid,
-                        "dimension": "ROWS",
-                        "startIndex": start_row - 1,  # 0-based
-                        "endIndex": end_row,  # end-exclusive
-                    }
-                }
-            }
-        ]
-    }
-    service.spreadsheets().batchUpdate(spreadsheetId=spreadsheet_id, body=body).execute()
-    logger.info("Rows deleted.")
+"""
+Backward compatibility shim for lumera.google.
+
+This module has moved to lumera.integrations.google.
+All imports are re-exported here for backward compatibility.
+
+New code should use:
+    from lumera.integrations import google
+    # or
+    from lumera.integrations.google import get_sheets_service, get_drive_service
+"""
+
+# Re-export everything from the new location
+from .integrations.google import (
+    MIME_EXCEL,
+    MIME_GOOGLE_SHEET,
+    delete_rows_api_call,
+    download_file_direct,
+    get_credentials,
+    get_drive_service,
+    get_google_credentials,
+    get_sheets_service,
+    get_spreadsheet_and_sheet_id,
+    read_cell,
+    sheet_name_from_gid,
+    upload_excel_as_google_sheet,
+)
+
+__all__ = [
+    # Authentication
+    "get_credentials",
+    "get_google_credentials",
+    # Services
+    "get_sheets_service",
+    "get_drive_service",
+    # Sheets helpers
+    "get_spreadsheet_and_sheet_id",
+    "sheet_name_from_gid",
+    "read_cell",
+    "delete_rows_api_call",
+    # Drive helpers
+    "download_file_direct",
+    "upload_excel_as_google_sheet",
+    # Constants
+    "MIME_GOOGLE_SHEET",
+    "MIME_EXCEL",
+]
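
Because the shim only re-exports names from lumera.integrations.google, the old and new import paths resolve to the same objects, so existing automations keep working unchanged. A small check of that equivalence (a sketch; actually constructing the services requires a Lumera-connected Google account):

from lumera import google as legacy
from lumera.integrations import google as current

# Both module paths expose the same function objects and constants.
assert legacy.get_sheets_service is current.get_sheets_service
assert legacy.MIME_GOOGLE_SHEET == current.MIME_GOOGLE_SHEET

# Old call sites keep working; new code should prefer the integrations path.
sheets = current.get_sheets_service()  # uses Lumera-managed Google OAuth
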
lumera/integrations/__init__.py
ADDED
@@ -0,0 +1,34 @@
+"""
+Lumera SDK Integrations
+
+Third-party service integrations with Lumera credential management.
+
+Each integration module provides:
+- A `get_*_client()` or `get_*_service()` function that returns an authenticated client
+- Optional helper functions for common Lumera patterns
+
+Example:
+    from lumera.integrations import google, get_access_token
+
+    # Google Sheets with Lumera-managed OAuth
+    sheets = google.get_sheets_service()
+    data = sheets.spreadsheets().values().get(...)
+
+    # Google Drive
+    drive = google.get_drive_service()
+    files = drive.files().list().execute()
+
+    # Get raw access token for any provider
+    token = get_access_token("slack")
+
+Available integrations:
+- `google` - Google APIs (Sheets, Drive)
+
+Utilities:
+- `get_access_token(provider)` - Get OAuth token for any Lumera-connected provider
+"""
+
+from .._utils import get_access_token
+from . import google
+
+__all__ = ["get_access_token", "google"]