lumera 0.9.9.tar.gz → 0.10.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lumera
- Version: 0.9.9
+ Version: 0.10.1
  Summary: SDK for building on Lumera platform
  Requires-Python: >=3.11
  Requires-Dist: requests
@@ -57,18 +57,148 @@ __all__ = [
      "create",
      "update",
      "upsert",
-     # Log streaming and download
+     # Log functions
      "stream_logs",
+     "get_logs",
      "get_log_download_url",
      # Classes
      "Run",
      "Automation",
+     "LogEntry",
+     "LogsResponse",
  ]

  from ._utils import LumeraAPIError, _api_request
  from .sdk import get_automation_run as _get_automation_run
  from .sdk import run_automation as _run_automation

+ # ============================================================================
+ # LogEntry Class
+ # ============================================================================
+
+
+ class LogEntry:
+     """A single log entry from an automation run.
+
+     Attributes:
+         content: The log message content.
+         type: Log type ("stream_stdout", "stream_stderr", "warning", "image_png", "image_jpeg").
+         timestamp: ISO timestamp when the log was emitted.
+         error: True if this is an error entry.
+     """
+
+     def __init__(self, data: dict[str, Any]) -> None:
+         self._data = data
+
+     @property
+     def content(self) -> str:
+         return self._data.get("content", "")
+
+     @property
+     def type(self) -> str:
+         return self._data.get("type", "")
+
+     @property
+     def timestamp(self) -> str | None:
+         return self._data.get("timestamp")
+
+     @property
+     def error(self) -> bool:
+         return self._data.get("error", False)
+
+     @property
+     def is_image(self) -> bool:
+         """True if this entry contains image data (base64 encoded in content)."""
+         return self.type in ("image_png", "image_jpeg")
+
+     def __repr__(self) -> str:
+         preview = self.content[:50] + "..." if len(self.content) > 50 else self.content
+         return f"LogEntry(type={self.type!r}, content={preview!r})"
+
+
+ def _parse_ndjson_entries(data: str) -> list[LogEntry]:
+     """Parse NDJSON log data into LogEntry objects."""
+     entries = []
+     for line in data.splitlines():
+         line = line.strip()
+         if not line:
+             continue
+         try:
+             parsed = json.loads(line)
+             if isinstance(parsed, dict):
+                 entries.append(LogEntry(parsed))
+         except (json.JSONDecodeError, ValueError):
+             # Skip malformed lines or lines with huge numbers
+             pass
+     return entries
+
+
+ # ============================================================================
+ # LogsResponse Class
+ # ============================================================================
+
+
+ class LogsResponse:
+     """Response from fetching automation run logs.
+
+     Attributes:
+         data: Raw log content as a string (NDJSON format).
+         entries: Parsed log entries as LogEntry objects.
+         offset: Byte offset where this chunk starts.
+         size: Number of bytes in this chunk.
+         total_size: Total size of the log file.
+         has_more: True if there are more logs after this chunk.
+         source: Where logs came from ("live" or "archived").
+         truncated: True if logs were truncated at storage time (>50MB).
+     """
+
+     def __init__(self, data: dict[str, Any]) -> None:
+         self._data = data
+         self._entries: list[LogEntry] | None = None
+
+     @property
+     def data(self) -> str:
+         """Raw NDJSON log content."""
+         return self._data.get("data", "")
+
+     @property
+     def entries(self) -> list[LogEntry]:
+         """Parsed log entries. Lazily parsed from NDJSON data."""
+         if self._entries is None:
+             self._entries = _parse_ndjson_entries(self.data)
+         return self._entries
+
+     @property
+     def offset(self) -> int:
+         return self._data.get("offset", 0)
+
+     @property
+     def size(self) -> int:
+         return self._data.get("size", 0)
+
+     @property
+     def total_size(self) -> int:
+         return self._data.get("total_size", 0)
+
+     @property
+     def has_more(self) -> bool:
+         return self._data.get("has_more", False)
+
+     @property
+     def source(self) -> str:
+         return self._data.get("source", "")
+
+     @property
+     def truncated(self) -> bool:
+         return self._data.get("truncated", False)
+
+     def __repr__(self) -> str:
+         return (
+             f"LogsResponse(offset={self.offset}, size={self.size}, "
+             f"total_size={self.total_size}, has_more={self.has_more})"
+         )
+
+
  # ============================================================================
  # Run Class
  # ============================================================================
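The LogsResponse.entries property above lazily parses the raw NDJSON payload into LogEntry objects, and LogEntry.is_image flags entries whose content field holds base64-encoded image bytes. A minimal usage sketch, assuming the classes are importable as lumera.automations (the docstrings in this diff refer to the module simply as automations) and that a valid LUMERA_TOKEN is configured; the run id is a placeholder:

    # Sketch only: the import path and run id are assumptions, not confirmed by this diff.
    import base64

    from lumera import automations  # assumed import path

    resp = automations.get_logs("run_id")      # returns a LogsResponse
    for i, entry in enumerate(resp.entries):   # lazily parsed LogEntry objects
        if entry.is_image:
            # Image entries carry base64-encoded bytes in `content`.
            ext = "png" if entry.type == "image_png" else "jpg"
            with open(f"log_image_{i}.{ext}", "wb") as fh:
                fh.write(base64.b64decode(entry.content))
        elif entry.error:
            print(f"ERROR {entry.timestamp}: {entry.content}")
        else:
            print(f"[{entry.type}] {entry.content}")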
@@ -246,6 +376,66 @@ class Run:
              raise ValueError("Cannot get log URL without run id")
          return get_log_download_url(self.id)

+     def logs(
+         self,
+         *,
+         offset: int = 0,
+         limit: int = 1024 * 1024,
+         all: bool = False,
+     ) -> LogsResponse:
+         """Fetch logs for this run.
+
+         Works for both live (running) and archived (completed) runs.
+         Returns raw log data as a string (NDJSON format).
+
+         Args:
+             offset: Byte offset to start from. Negative values read from end
+                 (e.g., -1048576 = last 1MB).
+             limit: Maximum bytes to return (default 1MB).
+             all: If True, fetch all logs at once. Returns 400 if logs > 10MB.
+
+         Returns:
+             A LogsResponse object with data, offset, size, total_size, has_more,
+             source ("live" or "archived"), and truncated flag.
+
+         Raises:
+             ValueError: If the run has no ID.
+             LumeraAPIError: If logs are not available or request fails.
+
+         Example:
+             >>> run = automations.get_run("run_id")
+             >>> resp = run.logs()
+             >>> print(resp.data)  # Raw NDJSON log content
+             >>> while resp.has_more:
+             ...     resp = run.logs(offset=resp.offset + resp.size)
+             ...     print(resp.data)
+         """
+         if not self.id:
+             raise ValueError("Cannot fetch logs without run id")
+         return get_logs(self.id, offset=offset, limit=limit, all=all)
+
+     def stream_logs(self, *, timeout: float = 30) -> Iterator[LogEntry]:
+         """Stream logs from this run.
+
+         Works for both live (running) and archived (completed) runs.
+         For live runs, streams in real-time as logs are produced.
+         For archived runs, streams the entire log from S3.
+
+         Args:
+             timeout: HTTP connection timeout in seconds.
+
+         Yields:
+             LogEntry objects with content, type, timestamp, and error fields.
+
+         Example:
+             >>> run = automations.run("automation_id", inputs={})
+             >>> for entry in run.stream_logs():
+             ...     print(f"[{entry.type}] {entry.content}")
+         """
+         if not self.id:
+             raise ValueError("Cannot stream logs without run id")
+         return stream_logs(self.id, timeout=timeout)
+
      def to_dict(self) -> dict[str, Any]:
          """Return the underlying data dict."""
          return self._data.copy()
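The Run.logs and Run.stream_logs methods added above offer two complementary access patterns: paged byte-range reads (including negative offsets that read from the end of the log) and an iterator of parsed entries. A short sketch under the same assumptions as the docstring examples (module imported as lumera.automations, placeholder run id):

    # Sketch only: mirrors the docstring examples above; the import path is an assumption.
    from lumera import automations

    run = automations.get_run("run_id")

    # Tail the log: a negative offset reads from the end (here the last 64 KiB).
    tail = run.logs(offset=-65536)
    print(tail.data)

    # Follow the log: yields LogEntry objects for live and archived runs alike.
    for entry in run.stream_logs(timeout=60):
        print(f"[{entry.type}] {entry.content}")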
@@ -794,22 +984,24 @@ def delete(automation_id: str) -> None:
  # ============================================================================


- def stream_logs(run_id: str, *, timeout: float = 30) -> Iterator[str]:
-     """Stream live logs from a running automation.
+ def stream_logs(run_id: str, *, timeout: float = 30) -> Iterator[LogEntry]:
+     """Stream logs from an automation run.

-     Connects to the server-sent events endpoint and yields log lines
-     as they arrive. Stops when the run completes.
+     Works for both live (running) and archived (completed) runs.
+     Connects to the server-sent events endpoint and yields LogEntry objects
+     as they arrive. For live runs, streams in real-time. For archived
+     runs, streams the entire log from storage.

      Args:
          run_id: The run ID to stream logs from.
          timeout: HTTP connection timeout in seconds.

      Yields:
-         Log lines as strings.
+         LogEntry objects with content, type, timestamp, and error fields.

      Example:
-         >>> for line in automations.stream_logs("run_id"):
-         ...     print(line)
+         >>> for entry in automations.stream_logs("run_id"):
+         ...     print(f"[{entry.type}] {entry.content}")
      """
      import base64
      import os
@@ -825,7 +1017,7 @@ def stream_logs(run_id: str, *, timeout: float = 30) -> Iterator[str]:
      if not token:
          raise ValueError("LUMERA_TOKEN environment variable is required")

-     url = f"{base_url}/automation-runs/{run_id}/logs/live"
+     url = f"{base_url}/automation-runs/{run_id}/logs?stream=true"
      headers = {
          "Authorization": f"token {token}",
          "Accept": "text/event-stream",
@@ -851,10 +1043,20 @@ def stream_logs(run_id: str, *, timeout: float = 30) -> Iterator[str]:
              try:
                  data = json.loads(current_data)
                  if "data" in data:
-                     # Data is base64-encoded
+                     # Data is base64-encoded NDJSON
                      raw = base64.b64decode(data["data"])
                      decoded = raw.decode("utf-8", errors="replace")
-                     yield from decoded.splitlines()
+                     for ndjson_line in decoded.splitlines():
+                         ndjson_line = ndjson_line.strip()
+                         if not ndjson_line:
+                             continue
+                         try:
+                             entry_data = json.loads(ndjson_line)
+                             if isinstance(entry_data, dict):
+                                 yield LogEntry(entry_data)
+                         except (json.JSONDecodeError, ValueError):
+                             # Skip malformed lines or lines with huge numbers
+                             pass
              except (json.JSONDecodeError, KeyError):
                  pass
          elif current_event == "complete":
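For callers outside the SDK, the two hunks above also document the wire format: a single streaming endpoint (`/automation-runs/{run_id}/logs?stream=true`), token-based auth, and SSE `data:` events whose JSON payload carries base64-encoded NDJSON under a `data` key. A rough sketch of consuming that endpoint directly with `requests`; the base-URL environment variable name is an assumption, and the SSE handling is deliberately minimal (it ignores event names, including the `complete` event):

    # Sketch only: LUMERA_API_URL is an assumed variable name; the token header and
    # payload decoding mirror the SDK code shown in this diff.
    import base64
    import json
    import os

    import requests

    base_url = os.environ["LUMERA_API_URL"]  # assumed name
    token = os.environ["LUMERA_TOKEN"]
    run_id = "run_id"  # placeholder

    url = f"{base_url}/automation-runs/{run_id}/logs?stream=true"
    headers = {"Authorization": f"token {token}", "Accept": "text/event-stream"}

    with requests.get(url, headers=headers, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        for raw in resp.iter_lines(decode_unicode=True):
            if not raw or not raw.startswith("data:"):
                continue  # skip event names, comments, and keep-alives
            try:
                payload = json.loads(raw[len("data:"):].strip())
            except ValueError:
                continue
            if isinstance(payload, dict) and "data" in payload:
                ndjson = base64.b64decode(payload["data"]).decode("utf-8", errors="replace")
                print(ndjson, end="")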
@@ -902,3 +1104,51 @@ def get_log_download_url(run_id: str) -> str:
      if isinstance(result, dict) and "url" in result:
          return result["url"]
      raise RuntimeError("Unexpected response: no download URL returned")
+
+
+ def get_logs(
+     run_id: str,
+     *,
+     offset: int = 0,
+     limit: int = 1024 * 1024,
+     all: bool = False,
+ ) -> LogsResponse:
+     """Fetch logs for an automation run.
+
+     Works for both live (running) and archived (completed) runs.
+     Returns raw log data as a string (NDJSON format).
+
+     Args:
+         run_id: The run ID to get logs for.
+         offset: Byte offset to start from. Negative values read from end
+             (e.g., -1048576 = last 1MB).
+         limit: Maximum bytes to return (default 1MB).
+         all: If True, fetch all logs at once. Returns 400 if logs > 10MB.
+
+     Returns:
+         A LogsResponse object with data, offset, size, total_size, has_more,
+         source ("live" or "archived"), and truncated flag.
+
+     Raises:
+         ValueError: If run_id is empty.
+         LumeraAPIError: If logs are not available or request fails.
+
+     Example:
+         >>> resp = automations.get_logs("run_id")
+         >>> print(resp.data)  # Raw NDJSON log content
+         >>> while resp.has_more:
+         ...     resp = automations.get_logs("run_id", offset=resp.offset + resp.size)
+         ...     print(resp.data)
+     """
+     run_id = run_id.strip()
+     if not run_id:
+         raise ValueError("run_id is required")
+
+     params: dict[str, Any] = {"offset": offset, "limit": limit}
+     if all:
+         params["all"] = "true"
+
+     result = _api_request("GET", f"automation-runs/{run_id}/logs", params=params)
+     if isinstance(result, dict):
+         return LogsResponse(result)
+     raise RuntimeError("Unexpected response from logs endpoint")
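Together with the has_more, offset, and size fields on LogsResponse, get_logs supports straightforward pagination for logs larger than a single chunk. A sketch of fetching a complete log in 1 MB chunks, under the same assumptions as the earlier sketches (module imported as lumera.automations, placeholder run id):

    # Sketch only: a pagination loop built from the fields documented above.
    from lumera import automations

    offset = 0
    chunks: list[str] = []
    while True:
        resp = automations.get_logs("run_id", offset=offset, limit=1024 * 1024)
        chunks.append(resp.data)
        if not resp.has_more:
            break
        offset = resp.offset + resp.size  # advance past the chunk just read

    full_log = "".join(chunks)
    if resp.truncated:
        print("note: logs were truncated at storage time (>50MB)")
    print(f"fetched {len(full_log)} characters from {resp.source} logs ({resp.total_size} bytes total)")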
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lumera
- Version: 0.9.9
+ Version: 0.10.1
  Summary: SDK for building on Lumera platform
  Requires-Python: >=3.11
  Requires-Dist: requests
@@ -1,6 +1,6 @@
  [project]
  name = "lumera"
- version = "0.9.9"
+ version = "0.10.1"
  description = "SDK for building on Lumera platform"
  requires-python = ">=3.11"
  dependencies = [
14 additional files without changes