AbstractRuntime 0.4.0__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. abstractruntime/__init__.py +76 -1
  2. abstractruntime/core/config.py +68 -1
  3. abstractruntime/core/models.py +5 -0
  4. abstractruntime/core/policy.py +74 -3
  5. abstractruntime/core/runtime.py +1002 -126
  6. abstractruntime/core/vars.py +8 -2
  7. abstractruntime/evidence/recorder.py +1 -1
  8. abstractruntime/history_bundle.py +772 -0
  9. abstractruntime/integrations/abstractcore/__init__.py +3 -0
  10. abstractruntime/integrations/abstractcore/default_tools.py +127 -3
  11. abstractruntime/integrations/abstractcore/effect_handlers.py +2440 -99
  12. abstractruntime/integrations/abstractcore/embeddings_client.py +69 -0
  13. abstractruntime/integrations/abstractcore/factory.py +68 -20
  14. abstractruntime/integrations/abstractcore/llm_client.py +447 -15
  15. abstractruntime/integrations/abstractcore/mcp_worker.py +1 -0
  16. abstractruntime/integrations/abstractcore/session_attachments.py +946 -0
  17. abstractruntime/integrations/abstractcore/tool_executor.py +31 -10
  18. abstractruntime/integrations/abstractcore/workspace_scoped_tools.py +561 -0
  19. abstractruntime/integrations/abstractmemory/__init__.py +3 -0
  20. abstractruntime/integrations/abstractmemory/effect_handlers.py +946 -0
  21. abstractruntime/memory/active_context.py +6 -1
  22. abstractruntime/memory/kg_packets.py +164 -0
  23. abstractruntime/memory/memact_composer.py +175 -0
  24. abstractruntime/memory/recall_levels.py +163 -0
  25. abstractruntime/memory/token_budget.py +86 -0
  26. abstractruntime/storage/__init__.py +4 -1
  27. abstractruntime/storage/artifacts.py +158 -30
  28. abstractruntime/storage/base.py +17 -1
  29. abstractruntime/storage/commands.py +339 -0
  30. abstractruntime/storage/in_memory.py +41 -1
  31. abstractruntime/storage/json_files.py +195 -12
  32. abstractruntime/storage/observable.py +38 -1
  33. abstractruntime/storage/offloading.py +433 -0
  34. abstractruntime/storage/sqlite.py +836 -0
  35. abstractruntime/visualflow_compiler/__init__.py +29 -0
  36. abstractruntime/visualflow_compiler/adapters/__init__.py +11 -0
  37. abstractruntime/visualflow_compiler/adapters/agent_adapter.py +126 -0
  38. abstractruntime/visualflow_compiler/adapters/context_adapter.py +109 -0
  39. abstractruntime/visualflow_compiler/adapters/control_adapter.py +615 -0
  40. abstractruntime/visualflow_compiler/adapters/effect_adapter.py +1051 -0
  41. abstractruntime/visualflow_compiler/adapters/event_adapter.py +307 -0
  42. abstractruntime/visualflow_compiler/adapters/function_adapter.py +97 -0
  43. abstractruntime/visualflow_compiler/adapters/memact_adapter.py +114 -0
  44. abstractruntime/visualflow_compiler/adapters/subflow_adapter.py +74 -0
  45. abstractruntime/visualflow_compiler/adapters/variable_adapter.py +316 -0
  46. abstractruntime/visualflow_compiler/compiler.py +3832 -0
  47. abstractruntime/visualflow_compiler/flow.py +247 -0
  48. abstractruntime/visualflow_compiler/visual/__init__.py +13 -0
  49. abstractruntime/visualflow_compiler/visual/agent_ids.py +29 -0
  50. abstractruntime/visualflow_compiler/visual/builtins.py +1376 -0
  51. abstractruntime/visualflow_compiler/visual/code_executor.py +214 -0
  52. abstractruntime/visualflow_compiler/visual/executor.py +2804 -0
  53. abstractruntime/visualflow_compiler/visual/models.py +211 -0
  54. abstractruntime/workflow_bundle/__init__.py +52 -0
  55. abstractruntime/workflow_bundle/models.py +236 -0
  56. abstractruntime/workflow_bundle/packer.py +317 -0
  57. abstractruntime/workflow_bundle/reader.py +87 -0
  58. abstractruntime/workflow_bundle/registry.py +587 -0
  59. abstractruntime-0.4.1.dist-info/METADATA +177 -0
  60. abstractruntime-0.4.1.dist-info/RECORD +86 -0
  61. abstractruntime-0.4.0.dist-info/METADATA +0 -167
  62. abstractruntime-0.4.0.dist-info/RECORD +0 -49
  63. {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/WHEEL +0 -0
  64. {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/entry_points.txt +0 -0
  65. {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,433 @@
1
+ """abstractruntime.storage.offloading
2
+
3
+ Storage decorators for keeping durable JSON payloads small by offloading large values
4
+ to the ArtifactStore (store refs inline).
5
+
6
+ This is intentionally opt-in and layered at the persistence boundary (RunStore/LedgerStore)
7
+ so node/tool code does not need to remember to offload manually.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import json
13
+ import os
14
+ from dataclasses import replace
15
+ from typing import Any, Callable, Dict, List, Optional, Tuple
16
+
17
+ from .artifacts import ArtifactStore, artifact_ref, is_artifact_ref
18
+ from .base import LedgerStore, RunStore
19
+ from ..core.models import RunState, RunStatus, StepRecord
20
+
21
# Default cap on inline JSON payload size (256 KiB) before values are
# offloaded to the ArtifactStore.
DEFAULT_MAX_INLINE_BYTES = 256 * 1024


def _default_max_inline_bytes() -> int:
    """Return the inline-size threshold in bytes.

    Reads the ``ABSTRACTRUNTIME_MAX_INLINE_BYTES`` environment variable and
    falls back to ``DEFAULT_MAX_INLINE_BYTES`` when it is unset, blank, or
    not a valid integer.
    """
    raw = str(os.getenv("ABSTRACTRUNTIME_MAX_INLINE_BYTES", "")).strip()
    if not raw:
        return DEFAULT_MAX_INLINE_BYTES
    try:
        return int(raw)
    except ValueError:
        # Narrowed from `except Exception`: int() on a str can only raise
        # ValueError, and a malformed value should fall back, not crash.
        return DEFAULT_MAX_INLINE_BYTES
32
+
33
+
34
def _json_dumps_bytes(value: Any) -> Optional[bytes]:
    """Serialize *value* to compact UTF-8 JSON bytes.

    Returns ``None`` when the value cannot be serialized (or encoded), so
    callers can treat "unknown size" as "leave inline".
    """
    try:
        text = json.dumps(value, ensure_ascii=False, separators=(",", ":"))
        return text.encode("utf-8")
    except Exception:
        return None
39
+
40
+
41
def offload_large_values(
    value: Any,
    *,
    artifact_store: ArtifactStore,
    run_id: str,
    max_inline_bytes: int,
    base_tags: Optional[Dict[str, str]] = None,
    root_path: str = "",
    allow_offload: Optional[Callable[[str, Any], bool]] = None,
    allow_root_replace: bool = False,
) -> Any:
    """Replace large JSON leaves/subtrees with ArtifactStore-backed refs.

    This is best-effort: values that cannot be serialized are either left untouched
    (for non-bytes) or offloaded (bytes/bytearray) to preserve JSON-safe persistence.

    Args:
        value: Arbitrary JSON-ish structure (dicts/lists/strings/bytes/leaves).
        artifact_store: Destination store for offloaded content.
        run_id: Run the stored artifacts are associated with.
        max_inline_bytes: Threshold above which a value is offloaded; ``<= 0``
            disables offloading entirely (value is returned unchanged).
        base_tags: Tags merged into every stored artifact's tags.
        root_path: Dotted path prefix used in ``path`` tags and callbacks.
        allow_offload: Optional predicate ``(path, value) -> bool``; a raising
            predicate is treated as "deny".
        allow_root_replace: When False, the top-level value itself is never
            replaced by a ref (only values nested inside it).

    Returns:
        The (possibly rewritten) structure; unchanged subtrees are returned
        by identity so callers can use ``is`` to detect modification.

    Raises:
        ValueError: If a reference cycle is detected among dicts/lists.
    """

    if max_inline_bytes <= 0:
        return value

    tags0 = dict(base_tags or {})

    def _can_offload(path: str, v: Any) -> bool:
        # Predicate wrapper: absent predicate means "always allowed";
        # a predicate that raises is treated as "not allowed".
        if allow_offload is None:
            return True
        try:
            return bool(allow_offload(path, v))
        except Exception:
            return False

    # Identity-based ancestor set for cycle detection during the walk.
    stack: set[int] = set()

    def _offload_bytes(*, content: bytes, content_type: str, path: str, kind: str) -> Dict[str, str]:
        # Store the payload and return an inline JSON-safe artifact ref.
        tags = dict(tags0)
        tags["path"] = str(path)
        tags["kind"] = str(kind)
        meta = artifact_store.store(content, content_type=content_type, run_id=run_id, tags=tags)
        return artifact_ref(str(getattr(meta, "artifact_id", "") or ""))

    def _walk(cur: Any, *, path: str, root: bool) -> Tuple[Any, bool]:
        # Returns (new_value, changed). `changed` is False when `cur` is
        # returned by identity, letting parents avoid rebuilding containers.
        if cur is None:
            return None, False
        if is_artifact_ref(cur):
            # Already a ref (e.g. from a previous save) — never re-offload.
            return cur, False

        if isinstance(cur, (bytes, bytearray)):
            # Bytes are not JSON-serializable; offload unconditionally.
            ref = _offload_bytes(content=bytes(cur), content_type="application/octet-stream", path=path, kind="bytes")
            return ref, True

        if isinstance(cur, str):
            # Size is measured in UTF-8 bytes, matching the persisted form.
            n = len(cur.encode("utf-8"))
            if n > max_inline_bytes and _can_offload(path, cur) and (allow_root_replace or not root):
                ref = _offload_bytes(content=cur.encode("utf-8"), content_type="text/plain", path=path, kind="text")
                return ref, True
            return cur, False

        if isinstance(cur, dict):
            oid = id(cur)
            if oid in stack:
                raise ValueError(f"Cycle detected while offloading at {path}")
            stack.add(oid)
            try:
                changed = False
                out: Dict[str, Any] = {}
                for k, v in cur.items():
                    # Keys are coerced to str to keep the result JSON-safe.
                    key = str(k)
                    child_path = f"{path}.{key}" if path else key
                    new_v, ch = _walk(v, path=child_path, root=False)
                    if ch:
                        changed = True
                    out[key] = new_v

                # If still large, optionally offload the whole subtree (shape changes).
                if _can_offload(path, out) and (allow_root_replace or not root):
                    payload = _json_dumps_bytes(out)
                    if payload is not None and len(payload) > max_inline_bytes:
                        ref = _offload_bytes(
                            content=payload,
                            content_type="application/json",
                            path=path,
                            kind="json",
                        )
                        return ref, True

                return (out, True) if changed else (cur, False)
            finally:
                stack.discard(oid)

        if isinstance(cur, list):
            oid = id(cur)
            if oid in stack:
                raise ValueError(f"Cycle detected while offloading at {path}")
            stack.add(oid)
            try:
                changed = False
                out_list: List[Any] = []
                for i, item in enumerate(cur):
                    child_path = f"{path}[{i}]" if path else f"[{i}]"
                    new_item, ch = _walk(item, path=child_path, root=False)
                    if ch:
                        changed = True
                    out_list.append(new_item)

                # Same subtree fallback as for dicts: children first, then
                # the list as a whole if it is still over the threshold.
                if _can_offload(path, out_list) and (allow_root_replace or not root):
                    payload = _json_dumps_bytes(out_list)
                    if payload is not None and len(payload) > max_inline_bytes:
                        ref = _offload_bytes(
                            content=payload,
                            content_type="application/json",
                            path=path,
                            kind="json",
                        )
                        return ref, True

                return (out_list, True) if changed else (cur, False)
            finally:
                stack.discard(oid)

        # Any other scalar (int, float, bool, custom object) is left inline.
        # NOTE(review): tuples also fall through here untouched — presumably
        # acceptable since inputs are expected to be JSON-shaped; confirm.
        return cur, False

    start_path = str(root_path or "").strip()
    out, _ = _walk(value, path=start_path, root=True)
    return out
165
+
166
+
167
def _offload_run_state(
    run: RunState,
    *,
    artifact_store: ArtifactStore,
    max_inline_bytes: int,
) -> RunState:
    """Create a persisted RunState copy with oversized internal payloads offloaded.

    Returns *run* unchanged (by identity) when offloading is disabled, the
    run has no id, or nothing exceeded the threshold; otherwise returns a
    ``dataclasses.replace`` copy with rewritten ``vars``/``output``.
    """

    if max_inline_bytes <= 0:
        return run

    rid = str(run.run_id or "").strip()
    if not rid:
        # Artifacts are tagged by run_id; without one we cannot offload safely.
        return run

    # Safety: run.vars contains execution-critical state (e.g. VisualFlow persisted node_outputs).
    # Offloading those values during a non-terminal run can break crash recovery if a downstream
    # node expects the original value (e.g. large strings) after a restart.
    #
    # We therefore only offload run-owned/private vars once the run is terminal. Ledger offloading
    # remains always-on because ledger records are not used for execution.
    terminal = run.status in (RunStatus.COMPLETED, RunStatus.FAILED, RunStatus.CANCELLED)

    # Only offload runtime-owned/private namespaces in vars to avoid breaking user semantics.
    vars_obj: Dict[str, Any] = dict(run.vars or {})

    def _allow_vars(path: str, v: Any) -> bool:
        # Root namespaces must remain dict-shaped; allow offloading only *within* private keys.
        # e.g. vars._temp.<...>, vars._runtime.<...>, vars._last_output.<...>
        p = str(path or "")
        if p == "vars":
            return False
        # Two dots means we are at least one level below the namespace root.
        if p.startswith("vars._") and p.count(".") >= 2:
            return True
        return False

    # Apply per-private-namespace so we preserve the root dict objects.
    changed = False
    if terminal:
        for k, v in list(vars_obj.items()):
            key = str(k)
            if not key.startswith("_"):
                # User-owned vars are never offloaded here.
                continue
            root_path = f"vars.{key}"
            try:
                new_v = offload_large_values(
                    v,
                    artifact_store=artifact_store,
                    run_id=rid,
                    max_inline_bytes=max_inline_bytes,
                    base_tags={"source": "run_store_offload"},
                    root_path=root_path,
                    allow_offload=_allow_vars,
                    allow_root_replace=False,
                )
            except Exception:
                # Best-effort: on any failure keep the original value inline.
                new_v = v
            if new_v is not v:
                vars_obj[key] = new_v
                changed = True

    output = run.output
    output_changed = False
    if terminal and output is not None:
        def _allow_output(path: str, v: Any) -> bool:
            # Output is terminal-facing; safe to offload anywhere, including root if needed.
            return True

        try:
            new_out = offload_large_values(
                output,
                artifact_store=artifact_store,
                run_id=rid,
                max_inline_bytes=max_inline_bytes,
                base_tags={"source": "run_output_offload"},
                root_path="output",
                allow_offload=_allow_output,
                allow_root_replace=True,
            )
        except Exception:
            new_out = output
        if new_out is not output:
            output = new_out  # type: ignore[assignment]
            output_changed = True

    if not changed and not output_changed:
        # Nothing rewritten: return the original object untouched.
        return run

    return replace(run, vars=vars_obj, output=output)
256
+
257
+
258
def _offload_step_record(
    record: StepRecord,
    *,
    artifact_store: ArtifactStore,
    max_inline_bytes: int,
) -> StepRecord:
    """Return *record* with oversized effect/result payloads offloaded.

    Ledger records are observability/provenance, so nested payloads may be
    offloaded freely; the effect/result object shape is preserved by
    disallowing root replacement. Returns the original record by identity
    when nothing changed.
    """
    if max_inline_bytes <= 0:
        return record

    rid = str(record.run_id or "").strip()
    if not rid:
        return record

    def _try_offload(payload: Any, *, source: str, root_path: str) -> Any:
        # Best-effort: on any failure the original payload is kept inline.
        try:
            return offload_large_values(
                payload,
                artifact_store=artifact_store,
                run_id=rid,
                max_inline_bytes=max_inline_bytes,
                base_tags={"source": source},
                root_path=root_path,
                allow_offload=lambda path, v: True,
                allow_root_replace=False,
            )
        except Exception:
            return payload

    eff = record.effect
    res = record.result
    dirty = False

    if eff is not None:
        candidate = _try_offload(eff, source="ledger_effect_offload", root_path="ledger.effect")
        if candidate is not eff:
            eff = candidate
            dirty = True

    if res is not None:
        candidate = _try_offload(res, source="ledger_result_offload", root_path="ledger.result")
        if candidate is not res:
            res = candidate
            dirty = True

    return replace(record, effect=eff, result=res) if dirty else record
317
+
318
+
319
class OffloadingRunStore(RunStore):
    """RunStore decorator that offloads oversized payloads to the ArtifactStore."""

    def __init__(
        self,
        inner: RunStore,
        *,
        artifact_store: ArtifactStore,
        max_inline_bytes: Optional[int] = None,
    ) -> None:
        self._inner = inner
        self._artifact_store = artifact_store
        # None means "use the env-configurable default threshold".
        if max_inline_bytes is None:
            self._max_inline_bytes = _default_max_inline_bytes()
        else:
            self._max_inline_bytes = int(max_inline_bytes)

    @property
    def inner(self) -> RunStore:
        """The wrapped RunStore."""
        return self._inner

    def save(self, run: RunState) -> None:
        """Persist *run*, first offloading oversized payloads to the artifact store."""
        slimmed = _offload_run_state(
            run,
            artifact_store=self._artifact_store,
            max_inline_bytes=self._max_inline_bytes,
        )
        self._inner.save(slimmed)

    def load(self, run_id: str) -> Optional[RunState]:
        """Delegate loading unchanged to the wrapped store."""
        return self._inner.load(run_id)

    # --- QueryableRunStore passthrough (best-effort) ---

    def _delegate(self, method_name: str):
        # Resolve an optional method on the wrapped store, raising the same
        # NotImplementedError the per-method passthroughs always raised.
        fn = getattr(self._inner, method_name, None)
        if not callable(fn):
            raise NotImplementedError(f"Inner RunStore does not support {method_name}")
        return fn

    def list_runs(self, *, status=None, wait_reason=None, workflow_id=None, limit: int = 100):  # type: ignore[override]
        return self._delegate("list_runs")(status=status, wait_reason=wait_reason, workflow_id=workflow_id, limit=limit)

    def list_run_index(self, *, status=None, workflow_id=None, session_id=None, root_only: bool = False, limit: int = 100):  # type: ignore[override]
        return self._delegate("list_run_index")(status=status, workflow_id=workflow_id, session_id=session_id, root_only=root_only, limit=limit)

    def list_due_wait_until(self, *, now_iso: str, limit: int = 100):  # type: ignore[override]
        return self._delegate("list_due_wait_until")(now_iso=now_iso, limit=limit)

    def list_children(self, *, parent_run_id: str, status=None):  # type: ignore[override]
        return self._delegate("list_children")(parent_run_id=parent_run_id, status=status)
369
+
370
+
371
class OffloadingLedgerStore(LedgerStore):
    """LedgerStore decorator that offloads oversized effect/result payloads to the ArtifactStore."""

    def __init__(
        self,
        inner: LedgerStore,
        *,
        artifact_store: ArtifactStore,
        max_inline_bytes: Optional[int] = None,
    ) -> None:
        self._inner = inner
        self._artifact_store = artifact_store
        self._max_inline_bytes = _default_max_inline_bytes() if max_inline_bytes is None else int(max_inline_bytes)

    @property
    def inner(self) -> LedgerStore:
        """The wrapped LedgerStore."""
        return self._inner

    def append(self, record: StepRecord) -> None:
        """Append *record*, offloading oversized effect/result payloads first."""
        persisted = _offload_step_record(record, artifact_store=self._artifact_store, max_inline_bytes=self._max_inline_bytes)
        self._inner.append(persisted)

    def list(self, run_id: str) -> List[Dict[str, Any]]:
        """Delegate listing unchanged to the wrapped store."""
        return self._inner.list(run_id)

    def subscribe(self, callback, *, run_id: Optional[str] = None):  # type: ignore[override]
        """Delegate subscription; raise if the inner store has no subscribe()."""
        fn = getattr(self._inner, "subscribe", None)
        if not callable(fn):
            # NotImplementedError (a RuntimeError subclass, so backward-compatible
            # for callers catching RuntimeError) for consistency with
            # OffloadingRunStore's passthrough errors.
            raise NotImplementedError("Inner LedgerStore does not support subscribe()")
        return fn(callback, run_id=run_id)

    def count(self, run_id: str) -> int:
        """Count records for *run_id*; falls back to len(list(...)), then 0."""
        fn = getattr(self._inner, "count", None)
        if callable(fn):
            try:
                return int(fn(run_id))
            except Exception:
                pass
        try:
            records = self._inner.list(run_id)
            return int(len(records) if isinstance(records, list) else 0)
        except Exception:
            return 0

    def count_many(self, run_ids: List[str]) -> Dict[str, int]:  # type: ignore[override]
        """Batch count; prefers the inner store's count_many, else counts one by one."""
        fn = getattr(self._inner, "count_many", None)
        if callable(fn):
            try:
                out = fn(run_ids)
                return out if isinstance(out, dict) else {}
            except Exception:
                return {}
        # Fallback: normalize ids once instead of stripping twice per entry.
        cleaned = [str(r or "").strip() for r in (run_ids or [])]
        return {rid: self.count(rid) for rid in cleaned if rid}

    def metrics_many(self, run_ids: List[str]) -> Dict[str, Dict[str, int]]:  # type: ignore[override]
        """Batch metrics passthrough; returns {} when unsupported or failing."""
        fn = getattr(self._inner, "metrics_many", None)
        if callable(fn):
            try:
                out = fn(run_ids)
                return out if isinstance(out, dict) else {}
            except Exception:
                return {}
        return {}