lumera 0.4.20__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lumera/sdk.py CHANGED
@@ -1,3 +1,26 @@
+ """
+ Low-level SDK implementation - prefer high-level modules instead.
+
+ For most use cases, import from these modules instead of sdk.py:
+
+ from lumera import pb # Record operations (pb.search, pb.create, pb.update, etc.)
+ from lumera import storage # File uploads (storage.upload, storage.upload_file)
+ from lumera import llm # LLM completions (llm.complete, llm.chat, llm.embed)
+ from lumera import locks # Locking (locks.claim_record_locks, locks.release_record_locks)
+
+ Example:
+ # Instead of:
+ from lumera.sdk import list_records, create_record
+ result = list_records("deposits", filter={"status": "pending"})
+
+ # Use:
+ from lumera import pb
+ result = pb.search("deposits", filter={"status": "pending"})
+
+ The functions in this module are used internally by the high-level modules.
+ Direct usage is discouraged unless you need low-level control.
+ """
+
  import json
  import os
  from typing import Any, Iterable, Mapping, MutableMapping, Sequence, TypedDict
@@ -233,8 +256,9 @@ def list_records(
  offset: int | None = None,
  sort: str | None = None,
  filter: Mapping[str, Any] | Sequence[Any] | None = None,
+ expand: str | None = None,
  ) -> dict[str, Any]:
- """List records for the given PocketBase collection.
+ """List records for the given collection.

  Args:
  collection_id_or_name: Collection name or ID. Required.
@@ -244,7 +268,7 @@ def list_records(
  limit: Alternative to ``per_page`` for cursor-style queries.
  offset: Starting offset for cursor-style queries.
  sort: Optional sort expression (e.g. ``"-created"``).
- filter: Accepts either a raw PocketBase filter string (e.g.
+ filter: Accepts either a raw filter string (e.g.
  ``"status = 'ok'"``) or a structured filter encoded as a mapping/
  sequence. Structured filters mirror the Page Builder helpers, e.g.:

@@ -253,6 +277,9 @@ def list_records(

  The SDK JSON-encodes structured filters so the API can build
  tenant-aware expressions automatically.
+ expand: Optional comma-separated list of relation fields to expand.
+ Expanded relations are included inline in the record response.
+ Example: ``"user_id,company_id"`` or ``"line_items_via_deposit_id"``

  Returns:
  The raw response from ``GET /collections/{id}/records`` including
@@ -275,6 +302,8 @@ def list_records(
  params["sort"] = sort
  if filter is not None:
  params["filter"] = json.dumps(filter)
+ if expand is not None:
+ params["expand"] = expand

  path = f"collections/{collection_id_or_name}/records"
  return _api_request("GET", path, params=params or None)
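For reference, a minimal sketch of how the new ``expand`` parameter combines with the existing ``filter`` and ``sort`` arguments. The collection and relation names are illustrative, and the ``items``/``expand`` keys assume the usual PocketBase-style list response described in the docstring above:

    from lumera.sdk import list_records

    # Hypothetical collection: pending deposits with related user/company records inlined.
    resp = list_records(
        "deposits",
        filter={"status": "pending"},
        sort="-created",
        expand="user_id,company_id",  # new in 0.5.0: comma-separated relation fields
    )

    for record in resp.get("items", []):
        # Expanded relations are assumed to appear under the record's "expand" key.
        user = record.get("expand", {}).get("user_id")
        print(record["id"], user)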
@@ -367,8 +396,22 @@ def run_agent(
  status: str | None = None,
  error: str | None = None,
  provenance: Mapping[str, Any] | None = None,
+ external_id: str | None = None,
+ metadata: Mapping[str, Any] | None = None,
  ) -> dict[str, Any]:
- """Create an agent run and optionally upload files for file inputs."""
+ """Create an agent run and optionally upload files for file inputs.
+
+ Args:
+ agent_id: The automation/agent to run. Required.
+ inputs: Inputs payload (dict or JSON string). File refs are resolved automatically.
+ files: Mapping of input key -> path(s) to upload before run creation.
+ status: Optional initial status (defaults to ``queued``).
+ error: Optional error string to store alongside the initial status.
+ provenance: Custom provenance payload; falls back to environment-derived provenance.
+ external_id: Stable idempotency key. If provided, repeated calls with the same value
+ will return the existing run (server-side idempotency).
+ metadata: Arbitrary JSON metadata to persist with the run (e.g., callback_url).
+ """

  agent_id = agent_id.strip()
  if not agent_id:
@@ -398,6 +441,10 @@ def run_agent(
  payload["id"] = run_id
  if error is not None:
  payload["error"] = error
+ if external_id:
+ payload["external_id"] = external_id.strip()
+ if metadata is not None:
+ payload["metadata"] = _ensure_mapping(metadata, name="metadata")
  payload["lm_provenance"] = _ensure_mapping(
  provenance, name="provenance"
  ) or _default_provenance(agent_id, run_id)
@@ -408,6 +455,90 @@ def run_agent(
  return run


+ def get_agent_run(
+ agent_id: str | None = None,
+ *,
+ run_id: str | None = None,
+ external_id: str | None = None,
+ ) -> dict[str, Any]:
+ """Fetch an agent run by id or by agent_id + external_id idempotency key.
+
+ Args:
+ agent_id: Agent id for external_id lookup. Required when ``run_id`` is not provided.
+ run_id: Optional run id. When provided, this takes precedence over external_id lookup.
+ external_id: Optional idempotency key to look up the latest run for the agent.
+
+ Raises:
+ ValueError: If required identifiers are missing.
+ LumeraAPIError: If no matching run is found.
+ """
+
+ if run_id:
+ return _api_request("GET", f"agent-runs/{run_id}")
+
+ agent_id = agent_id.strip() if isinstance(agent_id, str) else ""
+ external_id = external_id.strip() if isinstance(external_id, str) else ""
+ if not agent_id:
+ raise ValueError("agent_id is required when run_id is not provided")
+ if not external_id:
+ raise ValueError("external_id is required when run_id is not provided")
+
+ resp = _api_request(
+ "GET",
+ "agent-runs",
+ params={"agent_id": agent_id, "external_id": external_id, "limit": 1},
+ )
+ runs = resp.get("agent_runs") if isinstance(resp, dict) else None
+ if runs and isinstance(runs, list) and runs and isinstance(runs[0], dict):
+ return runs[0]
+
+ url = _api_url("agent-runs")
+ raise _LumeraAPIError(404, "agent run not found", url=url, payload=None)
+
+
+ def update_agent_run(
+ run_id: str,
+ *,
+ result: Mapping[str, Any] | None = None,
+ status: str | None = None,
+ error: str | None = None,
+ metadata: Mapping[str, Any] | None = None,
+ ) -> dict[str, Any]:
+ """Update an agent run with result, status, or other fields.
+
+ Args:
+ run_id: The run id to update. Required.
+ result: Optional result payload to store (max 20KB).
+ status: Optional status update.
+ error: Optional error string.
+ metadata: Optional metadata update.
+
+ Returns:
+ The updated agent run record.
+ """
+ run_id = run_id.strip() if isinstance(run_id, str) else ""
+ if not run_id:
+ raise ValueError("run_id is required")
+
+ payload: dict[str, Any] = {}
+ if result is not None:
+ payload["result"] = _ensure_mapping(result, name="result")
+ if status is not None:
+ payload["status"] = status.strip()
+ if error is not None:
+ payload["error"] = error
+ if metadata is not None:
+ payload["metadata"] = _ensure_mapping(metadata, name="metadata")
+
+ if not payload:
+ raise ValueError("at least one field to update is required")
+
+ response = _api_request("PATCH", f"agent-runs/{run_id}", json_body=payload)
+ if not isinstance(response, dict):
+ raise RuntimeError("unexpected response payload")
+ return response
+
+
  def create_record(
  collection_id_or_name: str,
  payload: Mapping[str, Any] | None = None,
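To show how the new idempotency pieces fit together, here is a hedged sketch using ``run_agent``, ``get_agent_run``, and ``update_agent_run``; the agent id, external_id, status value, and callback URL are made up for the example:

    from lumera.sdk import run_agent, get_agent_run, update_agent_run

    # Creating the run twice with the same external_id returns the existing run
    # (server-side idempotency), so retries are safe.
    run = run_agent(
        "agent_abc123",
        inputs={"invoice_id": "inv_42"},
        external_id="inv_42-2024-06-01",
        metadata={"callback_url": "https://example.com/hooks/lumera"},
    )

    # Look the run up again later by the same idempotency key (or by run_id directly).
    same_run = get_agent_run("agent_abc123", external_id="inv_42-2024-06-01")

    # Record the outcome; result payloads are capped at 20KB.
    update_agent_run(same_run["id"], status="completed", result={"ok": True})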
lumera/storage.py ADDED
@@ -0,0 +1,269 @@
+ """
+ Storage operations for uploading and managing files.
+
+ Files are automatically namespaced by the current automation run:
+ /agent_runs/{run_id}/{path}
+
+ This prevents collisions between different runs while allowing logical
+ organization within a run.
+
+ Available functions:
+ upload() - Upload bytes/string content to storage
+ upload_file() - Upload a local file to storage
+ download_url() - Get download URL for an uploaded file
+ list_files() - List files uploaded in current run
+
+ Environment requirements:
+ LUMERA_RUN_ID - Set automatically when running in automation context
+ LUMERA_TOKEN - API authentication token
+
+ Example:
+ >>> from lumera import storage
+ >>> result = storage.upload("exports/report.csv", csv_data, content_type="text/csv")
+ >>> print(result["url"])
+ """
+
+ __all__ = ["upload", "upload_file", "download_url", "list_files", "UploadResult"]
+
+ import mimetypes
+ import os
+ import pathlib
+ from typing import Any, Required, TypedDict
+
+ import requests
+
+ from ._utils import API_BASE, get_lumera_token
+
+
+ class UploadResult(TypedDict, total=False):
+ """Result of file upload.
+
+ Required fields (always present):
+ url: Public download URL
+ path: Relative path within run namespace
+ size: File size in bytes
+ content_type: MIME type
+
+ Optional fields:
+ object_key: Storage object key (platform implementation detail)
+ """
+
+ url: Required[str] # Public download URL (always present)
+ path: Required[str] # Relative path within run namespace (always present)
+ size: Required[int] # File size in bytes (always present)
+ content_type: Required[str] # MIME type (always present)
+ object_key: str # Storage object key (optional, platform detail)
+
+
+ def upload(
+ path: str,
+ content: bytes | str,
+ *,
+ content_type: str,
+ metadata: dict[str, Any] | None = None, # noqa: ARG001 - Reserved for future use
+ ) -> UploadResult:
+ """Upload content to storage.
+
+ Files are automatically namespaced by the current automation run.
+ Storage location: /agent_runs/{run_id}/{path}
+
+ Args:
+ path: Relative path within this run's namespace (e.g., "exports/daily.csv")
+ Can include subfolders for organization
+ content: File content as bytes or string (strings will be encoded as UTF-8)
+ content_type: MIME type (e.g., "text/csv", "application/json")
+ metadata: Optional searchable metadata (reserved for future use)
+
+ Returns:
+ Upload result with URL and metadata
+
+ Raises:
+ ValueError: If path or content_type is empty
+ RuntimeError: If LUMERA_RUN_ID environment variable is not set
+ requests.HTTPError: If upload fails
+
+ Example:
+ >>> result = storage.upload(
+ ... path="exports/report.csv",
+ ... content=csv_data.encode("utf-8"),
+ ... content_type="text/csv"
+ ... )
+ >>> print(result["url"])
+ https://storage.lumerahq.com/download/abc123
+ """
+ if not path or not path.strip():
+ raise ValueError("path is required and cannot be empty")
+ if not content_type or not content_type.strip():
+ raise ValueError("content_type is required and cannot be empty")
+
+ # Get current run ID from environment
+ run_id = os.getenv("LUMERA_RUN_ID", "").strip()
+ if not run_id:
+ raise RuntimeError(
+ "LUMERA_RUN_ID environment variable not set. "
+ "This function must be called from within an automation run."
+ )
+
+ # Convert string content to bytes
+ if isinstance(content, str):
+ content = content.encode("utf-8")
+
+ filename = os.path.basename(path)
+ size = len(content)
+
+ token = get_lumera_token()
+ headers = {"Authorization": f"token {token}", "Content-Type": "application/json"}
+
+ # Request presigned upload URL
+ resp = requests.post(
+ f"{API_BASE}/agent-runs/{run_id}/files/upload-url",
+ json={"filename": filename, "content_type": content_type, "size": size},
+ headers=headers,
+ timeout=30,
+ )
+ resp.raise_for_status()
+ data = resp.json()
+
+ upload_url = data["upload_url"]
+
+ # Upload content to presigned URL
+ put_resp = requests.put(
+ upload_url, data=content, headers={"Content-Type": content_type}, timeout=300
+ )
+ put_resp.raise_for_status()
+
+ # Construct result
+ result: UploadResult = {
+ "url": data.get("download_url", ""),
+ "object_key": data.get("object_key", ""),
+ "path": path,
+ "size": size,
+ "content_type": content_type,
+ }
+
+ return result
+
+
+ def upload_file(
+ path: str,
+ file_path: str,
+ *,
+ content_type: str | None = None,
+ metadata: dict[str, Any] | None = None,
+ ) -> UploadResult:
+ """Upload a file from disk.
+
+ Args:
+ path: Relative path in run namespace (e.g., "exports/report.pdf")
+ file_path: Local file path to upload
+ content_type: MIME type (auto-detected from extension if not provided)
+ metadata: Optional searchable metadata (reserved for future use)
+
+ Returns:
+ Upload result with URL and metadata
+
+ Raises:
+ FileNotFoundError: If file_path doesn't exist
+ ValueError: If path is empty
+
+ Example:
+ >>> result = storage.upload_file(
+ ... path="exports/report.pdf",
+ ... file_path="/tmp/generated_report.pdf"
+ ... )
+ """
+ file_path_obj = pathlib.Path(file_path).expanduser().resolve()
+ if not file_path_obj.is_file():
+ raise FileNotFoundError(f"File not found: {file_path}")
+
+ # Auto-detect content type if not provided
+ if not content_type:
+ content_type = mimetypes.guess_type(file_path)[0] or "application/octet-stream"
+
+ # Read file content
+ with open(file_path_obj, "rb") as f:
+ content = f.read()
+
+ return upload(path, content, content_type=content_type, metadata=metadata)
+
+
+ def download_url(path: str) -> str:
+ """Get download URL for a file uploaded in this run.
+
+ Args:
+ path: Relative path within this run (same as used in upload)
+
+ Returns:
+ Download URL
+
+ Raises:
+ RuntimeError: If LUMERA_RUN_ID not set
+ requests.HTTPError: If file doesn't exist
+
+ Example:
+ >>> url = storage.download_url("exports/report.csv")
+ """
+ run_id = os.getenv("LUMERA_RUN_ID", "").strip()
+ if not run_id:
+ raise RuntimeError(
+ "LUMERA_RUN_ID environment variable not set. "
+ "This function must be called from within an automation run."
+ )
+
+ filename = os.path.basename(path)
+ token = get_lumera_token()
+ headers = {"Authorization": f"token {token}"}
+
+ resp = requests.get(
+ f"{API_BASE}/agent-runs/{run_id}/files/download-url",
+ params={"name": filename},
+ headers=headers,
+ timeout=30,
+ )
+ resp.raise_for_status()
+
+ data = resp.json()
+ return data.get("download_url", "")
+
+
+ def list_files(prefix: str | None = None) -> list[dict[str, Any]]:
+ """List files uploaded in this run.
+
+ Args:
+ prefix: Optional path prefix filter (e.g., "exports/")
+
+ Returns:
+ List of file metadata dicts with keys:
+ - name: filename
+ - size: size in bytes
+ - content_type: MIME type
+ - created: creation timestamp
+
+ Raises:
+ RuntimeError: If LUMERA_RUN_ID not set
+
+ Example:
+ >>> files = storage.list_files(prefix="exports/")
+ >>> for file in files:
+ ... print(file["name"], file["size"])
+ """
+ run_id = os.getenv("LUMERA_RUN_ID", "").strip()
+ if not run_id:
+ raise RuntimeError(
+ "LUMERA_RUN_ID environment variable not set. "
+ "This function must be called from within an automation run."
+ )
+
+ token = get_lumera_token()
+ headers = {"Authorization": f"token {token}"}
+
+ resp = requests.get(f"{API_BASE}/agent-runs/{run_id}/files", headers=headers, timeout=30)
+ resp.raise_for_status()
+
+ files = resp.json()
+
+ # Filter by prefix if provided
+ if prefix:
+ files = [f for f in files if f.get("name", "").startswith(prefix)]
+
+ return files
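A short usage sketch for the new storage module; the paths and CSV content below are invented, and the calls only work inside an automation run where LUMERA_RUN_ID and LUMERA_TOKEN are set:

    from lumera import storage

    # Upload in-memory content; strings are UTF-8 encoded before the presigned PUT.
    csv_data = "id,amount\n1,100\n"
    result = storage.upload("exports/daily.csv", csv_data, content_type="text/csv")
    print(result["url"], result["size"])

    # Upload an existing local file; the MIME type is guessed from the extension.
    storage.upload_file("exports/report.pdf", "/tmp/report.pdf")

    # List what this run has produced and fetch a fresh download URL per file.
    for f in storage.list_files(prefix="exports/"):
        print(f["name"], storage.download_url(f["name"]))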
lumera-0.4.20.dist-info/METADATA → lumera-0.5.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lumera
- Version: 0.4.20
+ Version: 0.5.0
  Summary: SDK for building on Lumera platform
  Requires-Python: >=3.11
  Requires-Dist: requests
@@ -21,8 +21,8 @@ Requires-Dist: matplotlib==3.10.3; extra == "full"
  Requires-Dist: notion-client==2.4.0; extra == "full"
  Requires-Dist: numpy==2.3.0; extra == "full"
  Requires-Dist: office365-rest-python-client; extra == "full"
- Requires-Dist: openai; extra == "full"
- Requires-Dist: openai-agents; extra == "full"
+ Requires-Dist: openai<3.0.0,>=2.15.0; extra == "full"
+ Requires-Dist: openai-agents<1.0.0,>=0.6.5; extra == "full"
  Requires-Dist: openpyxl==3.1.5; extra == "full"
  Requires-Dist: pandas==2.3.0; extra == "full"
  Requires-Dist: pdfplumber; extra == "full"
lumera-0.5.0.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+ lumera/__init__.py,sha256=AtbfmFoPuLEIcjBS4L-XWL-ARpjUIfR370-A7PAfsC8,2188
+ lumera/_utils.py,sha256=QyAaphxXGEK8XNPO0ghKLgTOYhAxcF_j3W0T8StzjxA,23610
+ lumera/exceptions.py,sha256=bNsx4iYaroAAGsYxErfELC2B5ZJ3w5lVa1kKdIx5s9g,2173
+ lumera/google.py,sha256=3IVNL1HaOtsTmunl0alnGFuUAkzQQRyCEA3CKjlPqO0,10183
+ lumera/llm.py,sha256=pUTZK7t3GTK0vfxMI1PJgJwNendyuiJc5MB1pUj2vxE,14412
+ lumera/locks.py,sha256=8l_qxb8nrxge7YJ-ApUTJ5MeYpIdxDeEa94Eim9O-YM,6806
+ lumera/pb.py,sha256=nYD8veZWUxPK3pKImoP72VZMBkBQvCLbJErxRBPVykk,8858
+ lumera/sdk.py,sha256=ibkf85HFMUQPaFyhCaV4DZf0IwZf1yrEW9XFPjsGZOY,26875
+ lumera/storage.py,sha256=Eo4HrasZ0DKt-qVHT8UsrtfbD7hNQUU13oosjto7C-k,8157
+ lumera-0.5.0.dist-info/METADATA,sha256=wFXpJ082TIaPLR_Q1Kday5DL_i4fMx_IpmqN74_Jm0A,1604
+ lumera-0.5.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lumera-0.5.0.dist-info/top_level.txt,sha256=HgfK4XQkpMTnM2E5iWM4kB711FnYqUY9dglzib3pWlE,7
+ lumera-0.5.0.dist-info/RECORD,,
lumera-0.4.20.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
- lumera/__init__.py,sha256=wiSDU8oEsyLWPx4aKjrOdRuG9xRCU2hx-2tl-0usQCI,1412
- lumera/_utils.py,sha256=QyAaphxXGEK8XNPO0ghKLgTOYhAxcF_j3W0T8StzjxA,23610
- lumera/google.py,sha256=3IVNL1HaOtsTmunl0alnGFuUAkzQQRyCEA3CKjlPqO0,10183
- lumera/sdk.py,sha256=ULWrBjRggaXtesmeBxq_eXM5a9hQMhZ9_f6-sOj4gKs,21827
- lumera-0.4.20.dist-info/METADATA,sha256=-oZpdGCUinqh6dvdK803pLbiL1a_e4SLk75wou7utH8,1576
- lumera-0.4.20.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lumera-0.4.20.dist-info/top_level.txt,sha256=HgfK4XQkpMTnM2E5iWM4kB711FnYqUY9dglzib3pWlE,7
- lumera-0.4.20.dist-info/RECORD,,