AbstractRuntime 0.2.0__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. abstractruntime/__init__.py +83 -3
  2. abstractruntime/core/config.py +82 -2
  3. abstractruntime/core/event_keys.py +62 -0
  4. abstractruntime/core/models.py +17 -1
  5. abstractruntime/core/policy.py +74 -3
  6. abstractruntime/core/runtime.py +3334 -28
  7. abstractruntime/core/vars.py +103 -2
  8. abstractruntime/evidence/__init__.py +10 -0
  9. abstractruntime/evidence/recorder.py +325 -0
  10. abstractruntime/history_bundle.py +772 -0
  11. abstractruntime/integrations/abstractcore/__init__.py +6 -0
  12. abstractruntime/integrations/abstractcore/constants.py +19 -0
  13. abstractruntime/integrations/abstractcore/default_tools.py +258 -0
  14. abstractruntime/integrations/abstractcore/effect_handlers.py +2622 -32
  15. abstractruntime/integrations/abstractcore/embeddings_client.py +69 -0
  16. abstractruntime/integrations/abstractcore/factory.py +149 -16
  17. abstractruntime/integrations/abstractcore/llm_client.py +891 -55
  18. abstractruntime/integrations/abstractcore/mcp_worker.py +587 -0
  19. abstractruntime/integrations/abstractcore/observability.py +80 -0
  20. abstractruntime/integrations/abstractcore/session_attachments.py +946 -0
  21. abstractruntime/integrations/abstractcore/summarizer.py +154 -0
  22. abstractruntime/integrations/abstractcore/tool_executor.py +509 -31
  23. abstractruntime/integrations/abstractcore/workspace_scoped_tools.py +561 -0
  24. abstractruntime/integrations/abstractmemory/__init__.py +3 -0
  25. abstractruntime/integrations/abstractmemory/effect_handlers.py +946 -0
  26. abstractruntime/memory/__init__.py +21 -0
  27. abstractruntime/memory/active_context.py +751 -0
  28. abstractruntime/memory/active_memory.py +452 -0
  29. abstractruntime/memory/compaction.py +105 -0
  30. abstractruntime/memory/kg_packets.py +164 -0
  31. abstractruntime/memory/memact_composer.py +175 -0
  32. abstractruntime/memory/recall_levels.py +163 -0
  33. abstractruntime/memory/token_budget.py +86 -0
  34. abstractruntime/rendering/__init__.py +17 -0
  35. abstractruntime/rendering/agent_trace_report.py +256 -0
  36. abstractruntime/rendering/json_stringify.py +136 -0
  37. abstractruntime/scheduler/scheduler.py +93 -2
  38. abstractruntime/storage/__init__.py +7 -2
  39. abstractruntime/storage/artifacts.py +175 -32
  40. abstractruntime/storage/base.py +17 -1
  41. abstractruntime/storage/commands.py +339 -0
  42. abstractruntime/storage/in_memory.py +41 -1
  43. abstractruntime/storage/json_files.py +210 -14
  44. abstractruntime/storage/observable.py +136 -0
  45. abstractruntime/storage/offloading.py +433 -0
  46. abstractruntime/storage/sqlite.py +836 -0
  47. abstractruntime/visualflow_compiler/__init__.py +29 -0
  48. abstractruntime/visualflow_compiler/adapters/__init__.py +11 -0
  49. abstractruntime/visualflow_compiler/adapters/agent_adapter.py +126 -0
  50. abstractruntime/visualflow_compiler/adapters/context_adapter.py +109 -0
  51. abstractruntime/visualflow_compiler/adapters/control_adapter.py +615 -0
  52. abstractruntime/visualflow_compiler/adapters/effect_adapter.py +1051 -0
  53. abstractruntime/visualflow_compiler/adapters/event_adapter.py +307 -0
  54. abstractruntime/visualflow_compiler/adapters/function_adapter.py +97 -0
  55. abstractruntime/visualflow_compiler/adapters/memact_adapter.py +114 -0
  56. abstractruntime/visualflow_compiler/adapters/subflow_adapter.py +74 -0
  57. abstractruntime/visualflow_compiler/adapters/variable_adapter.py +316 -0
  58. abstractruntime/visualflow_compiler/compiler.py +3832 -0
  59. abstractruntime/visualflow_compiler/flow.py +247 -0
  60. abstractruntime/visualflow_compiler/visual/__init__.py +13 -0
  61. abstractruntime/visualflow_compiler/visual/agent_ids.py +29 -0
  62. abstractruntime/visualflow_compiler/visual/builtins.py +1376 -0
  63. abstractruntime/visualflow_compiler/visual/code_executor.py +214 -0
  64. abstractruntime/visualflow_compiler/visual/executor.py +2804 -0
  65. abstractruntime/visualflow_compiler/visual/models.py +211 -0
  66. abstractruntime/workflow_bundle/__init__.py +52 -0
  67. abstractruntime/workflow_bundle/models.py +236 -0
  68. abstractruntime/workflow_bundle/packer.py +317 -0
  69. abstractruntime/workflow_bundle/reader.py +87 -0
  70. abstractruntime/workflow_bundle/registry.py +587 -0
  71. abstractruntime-0.4.1.dist-info/METADATA +177 -0
  72. abstractruntime-0.4.1.dist-info/RECORD +86 -0
  73. abstractruntime-0.4.1.dist-info/entry_points.txt +2 -0
  74. abstractruntime-0.2.0.dist-info/METADATA +0 -163
  75. abstractruntime-0.2.0.dist-info/RECORD +0 -32
  76. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/WHEEL +0 -0
  77. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/licenses/LICENSE +0 -0
@@ -58,6 +58,47 @@ class InMemoryRunStore(RunStore):
58
58
 
59
59
  return results[:limit]
60
60
 
61
def list_run_index(
    self,
    *,
    status: Optional[RunStatus] = None,
    workflow_id: Optional[str] = None,
    session_id: Optional[str] = None,
    root_only: bool = False,
    limit: int = 100,
) -> List[Dict[str, Any]]:
    """Return lightweight index rows for stored runs, newest-updated first.

    Filters: status, workflow_id, session_id (whitespace-insensitive string
    compare) and optionally root runs only (no parent_run_id). At most
    ``limit`` rows are returned (a falsy limit falls back to 100).
    """
    max_rows = max(1, int(limit or 100))
    wanted_session = str(session_id or "").strip() if session_id is not None else None

    def _row(run: Any) -> Dict[str, Any]:
        # Flatten the run into a JSON-safe summary dict.
        waiting = run.waiting
        if waiting is not None:
            reason = getattr(waiting, "reason", None)
            wait_reason = str(getattr(reason, "value", waiting.reason))
            wait_until = str(getattr(waiting, "until", None))
        else:
            wait_reason = None
            wait_until = None
        return {
            "run_id": str(run.run_id),
            "workflow_id": str(run.workflow_id),
            "status": str(getattr(run.status, "value", run.status)),
            "wait_reason": wait_reason,
            "wait_until": wait_until,
            "parent_run_id": str(run.parent_run_id) if run.parent_run_id else None,
            "actor_id": str(run.actor_id) if run.actor_id else None,
            "session_id": str(run.session_id) if run.session_id else None,
            "created_at": str(run.created_at) if run.created_at else None,
            "updated_at": str(run.updated_at) if run.updated_at else None,
        }

    rows: List[Dict[str, Any]] = []
    for run in self._runs.values():
        if status is not None and run.status != status:
            continue
        if workflow_id is not None and run.workflow_id != workflow_id:
            continue
        if wanted_session is not None and str(run.session_id or "").strip() != wanted_session:
            continue
        if bool(root_only) and str(run.parent_run_id or "").strip():
            continue
        rows.append(_row(run))

    rows.sort(key=lambda r: str(r.get("updated_at") or ""), reverse=True)
    return rows[:max_rows]
101
+
61
102
  def list_due_wait_until(
62
103
  self,
63
104
  *,
@@ -116,4 +157,3 @@ class InMemoryLedgerStore(LedgerStore):
116
157
def list(self, run_id: str) -> List[Dict[str, Any]]:
    """Return a shallow copy of the ledger records stored for *run_id*."""
    return [*self._records.get(run_id, [])]
118
159
 
119
-
@@ -10,6 +10,8 @@ This is meant as a straightforward MVP backend.
10
10
  from __future__ import annotations
11
11
 
12
12
  import json
13
+ import threading
14
+ import uuid
13
15
  from dataclasses import asdict
14
16
  from pathlib import Path
15
17
  from typing import Any, Dict, List, Optional
@@ -24,20 +26,105 @@ class JsonFileRunStore(RunStore):
24
26
  Implements both RunStore (ABC) and QueryableRunStore (Protocol).
25
27
 
26
28
  Query operations scan all run_*.json files, which is acceptable for MVP
27
- but may need indexing for large deployments.
29
+ but needs lightweight indexing for interactive workloads (e.g. WS tick loops)
30
+ once the run directory grows.
28
31
  """
29
32
 
30
33
  def __init__(self, base_dir: str | Path):
31
34
  self._base = Path(base_dir)
32
35
  self._base.mkdir(parents=True, exist_ok=True)
36
+ self._index_lock = threading.Lock()
37
+ self._children_index: Optional[Dict[str, set[str]]] = None
38
+ self._run_parent_index: Dict[str, Optional[str]] = {}
39
+ self._run_cache_lock = threading.Lock()
40
+ # run_id -> (mtime_ns, RunState)
41
+ self._run_cache: Dict[str, tuple[int, RunState]] = {}
33
42
 
34
43
  def _path(self, run_id: str) -> Path:
35
44
  return self._base / f"run_{run_id}.json"
36
45
 
46
+ def _run_id_from_path(self, p: Path) -> str:
47
+ name = str(getattr(p, "name", "") or "")
48
+ if not name.startswith("run_") or not name.endswith(".json"):
49
+ return ""
50
+ return name[len("run_") : -len(".json")]
51
+
52
+ def _ensure_children_index(self) -> None:
53
+ if self._children_index is not None:
54
+ return
55
+ with self._index_lock:
56
+ if self._children_index is not None:
57
+ return
58
+
59
+ children: Dict[str, set[str]] = {}
60
+ run_parent: Dict[str, Optional[str]] = {}
61
+
62
+ for run in self._iter_all_runs():
63
+ parent = run.parent_run_id
64
+ run_parent[run.run_id] = parent
65
+ if isinstance(parent, str) and parent:
66
+ children.setdefault(parent, set()).add(run.run_id)
67
+
68
+ self._children_index = children
69
+ self._run_parent_index = run_parent
70
+
71
+ def _drop_from_children_index(self, run_id: str) -> None:
72
+ with self._index_lock:
73
+ if self._children_index is None:
74
+ return
75
+ parent = self._run_parent_index.pop(run_id, None)
76
+ if isinstance(parent, str) and parent:
77
+ siblings = self._children_index.get(parent)
78
+ if siblings is not None:
79
+ siblings.discard(run_id)
80
+ if not siblings:
81
+ self._children_index.pop(parent, None)
82
+
83
+ def _update_children_index_on_save(self, run: RunState) -> None:
84
+ run_id = run.run_id
85
+ new_parent = run.parent_run_id
86
+
87
+ with self._index_lock:
88
+ if self._children_index is None:
89
+ return
90
+
91
+ old_parent = self._run_parent_index.get(run_id)
92
+ if isinstance(old_parent, str) and old_parent and old_parent != new_parent:
93
+ siblings = self._children_index.get(old_parent)
94
+ if siblings is not None:
95
+ siblings.discard(run_id)
96
+ if not siblings:
97
+ self._children_index.pop(old_parent, None)
98
+
99
+ self._run_parent_index[run_id] = new_parent
100
+ if isinstance(new_parent, str) and new_parent:
101
+ self._children_index.setdefault(new_parent, set()).add(run_id)
102
+
37
103
def save(self, run: RunState) -> None:
    """Persist *run* as JSON atomically and refresh in-memory indexes/cache.

    The payload is written to a uniquely-named temp file first and then moved
    over the target with ``Path.replace()``, so readers never observe partial
    JSON even when several threads/processes write the same run concurrently.
    """
    target = self._path(run.run_id)
    scratch = target.with_name(f"{target.name}.{uuid.uuid4().hex}.tmp")
    try:
        with scratch.open("w", encoding="utf-8") as fh:
            json.dump(asdict(run), fh, ensure_ascii=False, indent=2)
        scratch.replace(target)
    finally:
        # Best-effort cleanup if replace() failed.
        try:
            if scratch.exists():
                scratch.unlink()
        except Exception:
            pass

    self._update_children_index_on_save(run)

    try:
        mtime_ns = int(getattr(target.stat(), "st_mtime_ns", 0) or 0)
    except Exception:
        mtime_ns = 0
    if mtime_ns > 0:
        with self._run_cache_lock:
            # Cache keyed by mtime so loads can skip re-parsing unchanged files.
            self._run_cache[str(run.run_id)] = (mtime_ns, run)
41
128
 
42
129
  def load(self, run_id: str) -> Optional[RunState]:
43
130
  p = self._path(run_id)
@@ -47,6 +134,17 @@ class JsonFileRunStore(RunStore):
47
134
 
48
135
  def _load_from_path(self, p: Path) -> Optional[RunState]:
49
136
  """Load a RunState from a file path."""
137
+ rid_hint = self._run_id_from_path(p)
138
+ try:
139
+ st = p.stat()
140
+ mtime_ns = int(getattr(st, "st_mtime_ns", 0) or 0)
141
+ except Exception:
142
+ mtime_ns = 0
143
+ if rid_hint and mtime_ns > 0:
144
+ with self._run_cache_lock:
145
+ cached = self._run_cache.get(rid_hint)
146
+ if cached is not None and int(cached[0]) == int(mtime_ns):
147
+ return cached[1]
50
148
  try:
51
149
  with p.open("r", encoding="utf-8") as f:
52
150
  data = json.load(f)
@@ -76,7 +174,7 @@ class JsonFileRunStore(RunStore):
76
174
  details=raw_waiting.get("details"),
77
175
  )
78
176
 
79
- return RunState(
177
+ run = RunState(
80
178
  run_id=data["run_id"],
81
179
  workflow_id=data["workflow_id"],
82
180
  status=status,
@@ -91,6 +189,11 @@ class JsonFileRunStore(RunStore):
91
189
  session_id=data.get("session_id"),
92
190
  parent_run_id=data.get("parent_run_id"),
93
191
  )
192
+ rid = str(getattr(run, "run_id", "") or "").strip() or rid_hint
193
+ if rid and mtime_ns > 0:
194
+ with self._run_cache_lock:
195
+ self._run_cache[rid] = (mtime_ns, run)
196
+ return run
94
197
 
95
198
  def _iter_all_runs(self) -> List[RunState]:
96
199
  """Iterate over all stored runs."""
@@ -111,11 +214,28 @@ class JsonFileRunStore(RunStore):
111
214
  workflow_id: Optional[str] = None,
112
215
  limit: int = 100,
113
216
  ) -> List[RunState]:
114
- """List runs matching the given filters."""
115
- results: List[RunState] = []
217
+ """List runs matching the given filters.
218
+
219
+ Performance note:
220
+ - We order by run file mtime (close to updated_at) and stop once we have `limit` matches.
221
+ - This avoids parsing every historical run JSON file on large runtimes.
222
+ """
223
+ lim = max(1, int(limit or 100))
224
+ ranked: list[tuple[int, Path]] = []
225
+ for p in self._base.glob("run_*.json"):
226
+ try:
227
+ st = p.stat()
228
+ mtime_ns = int(getattr(st, "st_mtime_ns", 0) or 0)
229
+ except Exception:
230
+ continue
231
+ ranked.append((mtime_ns, p))
232
+ ranked.sort(key=lambda x: x[0], reverse=True)
116
233
 
117
- for run in self._iter_all_runs():
118
- # Apply filters
234
+ results: List[RunState] = []
235
+ for _mtime_ns, p in ranked:
236
+ run = self._load_from_path(p)
237
+ if run is None:
238
+ continue
119
239
  if status is not None and run.status != status:
120
240
  continue
121
241
  if workflow_id is not None and run.workflow_id != workflow_id:
@@ -123,13 +243,70 @@ class JsonFileRunStore(RunStore):
123
243
  if wait_reason is not None:
124
244
  if run.waiting is None or run.waiting.reason != wait_reason:
125
245
  continue
126
-
127
246
  results.append(run)
247
+ if len(results) >= lim:
248
+ break
128
249
 
129
- # Sort by updated_at descending (most recent first)
130
250
  results.sort(key=lambda r: r.updated_at or "", reverse=True)
251
+ return results[:lim]
131
252
 
132
- return results[:limit]
253
def list_run_index(
    self,
    *,
    status: Optional[RunStatus] = None,
    workflow_id: Optional[str] = None,
    session_id: Optional[str] = None,
    root_only: bool = False,
    limit: int = 100,
) -> List[Dict[str, Any]]:
    """Build lightweight run index rows without handing out full RunState objects.

    Performance note: run files are visited newest-mtime-first so iteration
    can stop after ``limit`` matches instead of parsing every historical run
    JSON file.
    """
    max_rows = max(1, int(limit or 100))
    wanted_session = str(session_id or "").strip() if session_id is not None else None

    candidates: list[tuple[int, Path]] = []
    for path in self._base.glob("run_*.json"):
        try:
            mtime_ns = int(getattr(path.stat(), "st_mtime_ns", 0) or 0)
        except Exception:
            continue  # File vanished or is unreadable; skip it.
        candidates.append((mtime_ns, path))
    candidates.sort(key=lambda item: item[0], reverse=True)

    rows: List[Dict[str, Any]] = []
    for _mtime, path in candidates:
        run = self._load_from_path(path)
        if run is None:
            continue
        if status is not None and run.status != status:
            continue
        if workflow_id is not None and run.workflow_id != workflow_id:
            continue
        if wanted_session is not None and str(run.session_id or "").strip() != wanted_session:
            continue
        if bool(root_only) and str(run.parent_run_id or "").strip():
            continue

        waiting = run.waiting
        if waiting is not None:
            reason = getattr(waiting, "reason", None)
            wait_reason = str(getattr(reason, "value", waiting.reason))
            wait_until = str(getattr(waiting, "until", None))
        else:
            wait_reason = None
            wait_until = None

        rows.append(
            {
                "run_id": str(run.run_id),
                "workflow_id": str(run.workflow_id),
                "status": str(getattr(run.status, "value", run.status)),
                "wait_reason": wait_reason,
                "wait_until": wait_until,
                "parent_run_id": str(run.parent_run_id) if run.parent_run_id else None,
                "actor_id": str(run.actor_id) if run.actor_id else None,
                "session_id": str(run.session_id) if run.session_id else None,
                "created_at": str(run.created_at) if run.created_at else None,
                "updated_at": str(run.updated_at) if run.updated_at else None,
            }
        )
        if len(rows) >= max_rows:
            break

    rows.sort(key=lambda row: str(row.get("updated_at") or ""), reverse=True)
    return rows[:max_rows]
133
310
 
134
311
  def list_due_wait_until(
135
312
  self,
@@ -167,10 +344,15 @@ class JsonFileRunStore(RunStore):
167
344
  status: Optional[RunStatus] = None,
168
345
  ) -> List[RunState]:
169
346
  """List child runs of a parent."""
170
- results: List[RunState] = []
347
+ self._ensure_children_index()
348
+ with self._index_lock:
349
+ child_ids = list((self._children_index or {}).get(parent_run_id, set()))
171
350
 
172
- for run in self._iter_all_runs():
173
- if run.parent_run_id != parent_run_id:
351
+ results: List[RunState] = []
352
+ for run_id in sorted(child_ids):
353
+ run = self.load(run_id)
354
+ if run is None:
355
+ self._drop_from_children_index(run_id)
174
356
  continue
175
357
  if status is not None and run.status != status:
176
358
  continue
@@ -206,3 +388,17 @@ class JsonlLedgerStore(LedgerStore):
206
388
  out.append(json.loads(line))
207
389
  return out
208
390
 
391
def count(self, run_id: str) -> int:
    """Return the number of ledger records for *run_id* (fast path).

    Counts non-blank lines in the JSONL file without parsing JSON — useful
    when only a count is needed (e.g. UI dropdowns). Returns 0 when the
    ledger file does not exist.
    """
    ledger_path = self._path(run_id)
    if not ledger_path.exists():
        return 0
    with ledger_path.open("r", encoding="utf-8") as fh:
        return sum(1 for line in fh if line.strip())
@@ -0,0 +1,136 @@
1
+ """abstractruntime.storage.observable
2
+
3
+ In-process pub/sub for ledger append events.
4
+
5
+ Design intent:
6
+ - Keep AbstractRuntime kernel dependency-light (stdlib only).
7
+ - Treat the ledger as the durable source of truth (replay via `list(run_id)`).
8
+ - Provide a small, optional subscription surface for live UX (WS/CLI) without
9
+ introducing a second global event system.
10
+
11
+ This is intentionally *process-local* (no cross-process guarantees).
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ from dataclasses import asdict
17
+ import threading
18
+ from typing import Any, Callable, Dict, List, Optional, Protocol, runtime_checkable
19
+
20
+ from .base import LedgerStore
21
+ from ..core.models import StepRecord
22
+
23
+
24
+ LedgerRecordDict = Dict[str, Any]
25
+ LedgerSubscriber = Callable[[LedgerRecordDict], None]
26
+
27
+
28
@runtime_checkable
class ObservableLedgerStoreProtocol(Protocol):
    """Structural interface for ledger stores offering in-process subscriptions."""

    def subscribe(
        self,
        callback: LedgerSubscriber,
        *,
        run_id: Optional[str] = None,
    ) -> Callable[[], None]:
        """Register *callback* for records appended after this call.

        Args:
            callback: Invoked once per appended record with a JSON-safe dict.
            run_id: When given, restricts delivery to that run's records.

        Returns:
            A zero-argument callable that removes the subscription.
        """
        ...
48
+
49
+
50
class ObservableLedgerStore(LedgerStore):
    """LedgerStore decorator that fans appended records out to subscribers.

    Durability always comes first: each record is written to the inner store
    before any subscriber runs, and subscriber exceptions are swallowed so
    observability can never break execution.
    """

    def __init__(self, inner: LedgerStore):
        self._inner = inner
        self._lock = threading.Lock()
        # (run_id filter or None, callback) pairs; guarded by _lock.
        self._subscribers: list[tuple[Optional[str], LedgerSubscriber]] = []

    def append(self, record: StepRecord) -> None:
        # Durable write first; notifications are best-effort afterwards.
        self._inner.append(record)

        payload: LedgerRecordDict = asdict(record)
        with self._lock:
            snapshot = list(self._subscribers)

        # Dispatch outside the lock so a slow callback cannot block subscribe().
        for wanted_run, notify in snapshot:
            if wanted_run is not None and wanted_run != record.run_id:
                continue
            try:
                notify(payload)
            except Exception:
                # Observability must never compromise durability/execution.
                continue

    def list(self, run_id: str) -> List[LedgerRecordDict]:
        """Replay all records for *run_id* from the inner store."""
        return self._inner.list(run_id)

    def count(self, run_id: str) -> int:
        """Best-effort record count for *run_id*.

        Prefers a fast inner ``count()`` (e.g. JsonlLedgerStore); falls back
        to ``len(list(run_id))``, and finally to 0 when even that fails.
        """
        delegate = self._inner
        try:
            fast = getattr(delegate, "count", None)
            if callable(fast):
                return int(fast(run_id))
        except Exception:
            pass
        try:
            return len(delegate.list(run_id))
        except Exception:
            return 0

    def count_many(self, run_ids: List[str]) -> Dict[str, int]:  # type: ignore[override]
        """Batch count; delegates when the inner store supports it."""
        batched = getattr(self._inner, "count_many", None)
        if callable(batched):
            try:
                result = batched(run_ids)
                return result if isinstance(result, dict) else {}
            except Exception:
                return {}
        cleaned = [str(r or "").strip() for r in (run_ids or [])]
        return {rid: self.count(rid) for rid in cleaned if rid}

    def metrics_many(self, run_ids: List[str]) -> Dict[str, Dict[str, int]]:  # type: ignore[override]
        """Batch metrics; only available when the inner store implements it."""
        batched = getattr(self._inner, "metrics_many", None)
        if not callable(batched):
            return {}
        try:
            result = batched(run_ids)
        except Exception:
            return {}
        return result if isinstance(result, dict) else {}

    def subscribe(
        self,
        callback: LedgerSubscriber,
        *,
        run_id: Optional[str] = None,
    ) -> Callable[[], None]:
        """Register *callback*; returns an unsubscribe callable (safe to call twice)."""
        entry = (run_id, callback)
        with self._lock:
            self._subscribers.append(entry)

        def _unsubscribe() -> None:
            with self._lock:
                try:
                    self._subscribers.remove(entry)
                except ValueError:
                    return

        return _unsubscribe

    def clear_subscribers(self) -> None:
        """Drop every subscriber (test utility)."""
        with self._lock:
            self._subscribers.clear()