AbstractRuntime 0.2.0__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. abstractruntime/__init__.py +83 -3
  2. abstractruntime/core/config.py +82 -2
  3. abstractruntime/core/event_keys.py +62 -0
  4. abstractruntime/core/models.py +17 -1
  5. abstractruntime/core/policy.py +74 -3
  6. abstractruntime/core/runtime.py +3334 -28
  7. abstractruntime/core/vars.py +103 -2
  8. abstractruntime/evidence/__init__.py +10 -0
  9. abstractruntime/evidence/recorder.py +325 -0
  10. abstractruntime/history_bundle.py +772 -0
  11. abstractruntime/integrations/abstractcore/__init__.py +6 -0
  12. abstractruntime/integrations/abstractcore/constants.py +19 -0
  13. abstractruntime/integrations/abstractcore/default_tools.py +258 -0
  14. abstractruntime/integrations/abstractcore/effect_handlers.py +2622 -32
  15. abstractruntime/integrations/abstractcore/embeddings_client.py +69 -0
  16. abstractruntime/integrations/abstractcore/factory.py +149 -16
  17. abstractruntime/integrations/abstractcore/llm_client.py +891 -55
  18. abstractruntime/integrations/abstractcore/mcp_worker.py +587 -0
  19. abstractruntime/integrations/abstractcore/observability.py +80 -0
  20. abstractruntime/integrations/abstractcore/session_attachments.py +946 -0
  21. abstractruntime/integrations/abstractcore/summarizer.py +154 -0
  22. abstractruntime/integrations/abstractcore/tool_executor.py +509 -31
  23. abstractruntime/integrations/abstractcore/workspace_scoped_tools.py +561 -0
  24. abstractruntime/integrations/abstractmemory/__init__.py +3 -0
  25. abstractruntime/integrations/abstractmemory/effect_handlers.py +946 -0
  26. abstractruntime/memory/__init__.py +21 -0
  27. abstractruntime/memory/active_context.py +751 -0
  28. abstractruntime/memory/active_memory.py +452 -0
  29. abstractruntime/memory/compaction.py +105 -0
  30. abstractruntime/memory/kg_packets.py +164 -0
  31. abstractruntime/memory/memact_composer.py +175 -0
  32. abstractruntime/memory/recall_levels.py +163 -0
  33. abstractruntime/memory/token_budget.py +86 -0
  34. abstractruntime/rendering/__init__.py +17 -0
  35. abstractruntime/rendering/agent_trace_report.py +256 -0
  36. abstractruntime/rendering/json_stringify.py +136 -0
  37. abstractruntime/scheduler/scheduler.py +93 -2
  38. abstractruntime/storage/__init__.py +7 -2
  39. abstractruntime/storage/artifacts.py +175 -32
  40. abstractruntime/storage/base.py +17 -1
  41. abstractruntime/storage/commands.py +339 -0
  42. abstractruntime/storage/in_memory.py +41 -1
  43. abstractruntime/storage/json_files.py +210 -14
  44. abstractruntime/storage/observable.py +136 -0
  45. abstractruntime/storage/offloading.py +433 -0
  46. abstractruntime/storage/sqlite.py +836 -0
  47. abstractruntime/visualflow_compiler/__init__.py +29 -0
  48. abstractruntime/visualflow_compiler/adapters/__init__.py +11 -0
  49. abstractruntime/visualflow_compiler/adapters/agent_adapter.py +126 -0
  50. abstractruntime/visualflow_compiler/adapters/context_adapter.py +109 -0
  51. abstractruntime/visualflow_compiler/adapters/control_adapter.py +615 -0
  52. abstractruntime/visualflow_compiler/adapters/effect_adapter.py +1051 -0
  53. abstractruntime/visualflow_compiler/adapters/event_adapter.py +307 -0
  54. abstractruntime/visualflow_compiler/adapters/function_adapter.py +97 -0
  55. abstractruntime/visualflow_compiler/adapters/memact_adapter.py +114 -0
  56. abstractruntime/visualflow_compiler/adapters/subflow_adapter.py +74 -0
  57. abstractruntime/visualflow_compiler/adapters/variable_adapter.py +316 -0
  58. abstractruntime/visualflow_compiler/compiler.py +3832 -0
  59. abstractruntime/visualflow_compiler/flow.py +247 -0
  60. abstractruntime/visualflow_compiler/visual/__init__.py +13 -0
  61. abstractruntime/visualflow_compiler/visual/agent_ids.py +29 -0
  62. abstractruntime/visualflow_compiler/visual/builtins.py +1376 -0
  63. abstractruntime/visualflow_compiler/visual/code_executor.py +214 -0
  64. abstractruntime/visualflow_compiler/visual/executor.py +2804 -0
  65. abstractruntime/visualflow_compiler/visual/models.py +211 -0
  66. abstractruntime/workflow_bundle/__init__.py +52 -0
  67. abstractruntime/workflow_bundle/models.py +236 -0
  68. abstractruntime/workflow_bundle/packer.py +317 -0
  69. abstractruntime/workflow_bundle/reader.py +87 -0
  70. abstractruntime/workflow_bundle/registry.py +587 -0
  71. abstractruntime-0.4.1.dist-info/METADATA +177 -0
  72. abstractruntime-0.4.1.dist-info/RECORD +86 -0
  73. abstractruntime-0.4.1.dist-info/entry_points.txt +2 -0
  74. abstractruntime-0.2.0.dist-info/METADATA +0 -163
  75. abstractruntime-0.2.0.dist-info/RECORD +0 -32
  76. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/WHEEL +0 -0
  77. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/licenses/LICENSE +0 -0
@@ -21,6 +21,12 @@ SCRATCHPAD = "scratchpad"
21
21
  RUNTIME = "_runtime"
22
22
  TEMP = "_temp"
23
23
  LIMITS = "_limits" # Canonical storage for runtime resource limits
24
+ NODE_TRACES = "node_traces" # _runtime namespace key for per-node execution traces
25
+
26
+ # Fallback context window size used only when model capabilities are unavailable.
27
+ # In normal operation (AbstractCore-integrated runtimes), this is derived from
28
+ # `abstractcore.architectures.detection.get_model_capabilities(...)`.
29
+ DEFAULT_MAX_TOKENS = 32768
24
30
 
25
31
 
26
32
  def ensure_namespaces(vars: Dict[str, Any]) -> Dict[str, Any]:
@@ -69,7 +75,7 @@ def ensure_limits(vars: Dict[str, Any]) -> Dict[str, Any]:
69
75
 
70
76
  This is the canonical location for runtime resource limits:
71
77
  - max_iterations / current_iteration: Iteration control
72
- - max_tokens / estimated_tokens_used: Token/context window management
78
+ - max_tokens / max_input_tokens / max_output_tokens / estimated_tokens_used: Token/context window management
73
79
  - max_history_messages: Conversation history limit (-1 = unlimited)
74
80
  - warn_*_pct: Warning thresholds for proactive notifications
75
81
 
@@ -79,16 +85,111 @@ def ensure_limits(vars: Dict[str, Any]) -> Dict[str, Any]:
79
85
  return get_limits(vars)
80
86
 
81
87
 
88
def get_node_traces(vars: Dict[str, Any]) -> Dict[str, Any]:
    """Return the runtime-owned mapping of per-node execution traces.

    The mapping lives at `run.vars["_runtime"]["node_traces"]`; it is created
    and persisted back into the namespace on first access (or when a non-dict
    value is found there). Intended for host UX/debugging and for exposing
    traces to higher layers.
    """
    namespace = get_runtime(vars)
    existing = namespace.get(NODE_TRACES)
    if isinstance(existing, dict):
        return existing
    fresh: Dict[str, Any] = {}
    namespace[NODE_TRACES] = fresh
    return fresh
100
+
101
+
102
def get_node_trace(vars: Dict[str, Any], node_id: str) -> Dict[str, Any]:
    """Return a single node trace object (always a dict).

    When no trace exists for `node_id` (or the stored value is not a dict),
    an empty placeholder is returned; the placeholder is deliberately NOT
    written back — reading a trace never mutates run state.
    """
    candidate = get_node_traces(vars).get(node_id)
    if not isinstance(candidate, dict):
        return {"node_id": node_id, "steps": []}
    return candidate
109
+
110
+
82
111
def _default_limits() -> Dict[str, Any]:
    """Build the default runtime-limits dict (canonical `_limits` shape).

    `max_tokens` falls back to DEFAULT_MAX_TOKENS; in normal operation the
    real context window is derived from model capabilities elsewhere.
    `max_history_messages == -1` means unlimited history.
    """
    return dict(
        max_iterations=25,
        current_iteration=0,
        max_tokens=DEFAULT_MAX_TOKENS,
        max_output_tokens=None,
        max_input_tokens=None,
        max_history_messages=-1,
        estimated_tokens_used=0,
        warn_iterations_pct=80,
        warn_tokens_pct=80,
    )
94
124
 
125
+
126
def parse_vars_path(path: str) -> list[Any]:
    """Parse a path for inspecting `RunState.vars`.

    Supports:
    - dot paths: "scratchpad.research.sources[0].title"
    - JSON pointer-ish paths: "/scratchpad/research/sources/0/title"

    Returns a flat token list: str tokens are dict keys, int tokens are list
    indexes. Empty/None input yields an empty list.
    """
    import re

    text = str(path or "").strip()
    if not text:
        return []

    out: list[Any] = []

    if text.startswith("/"):
        # JSON-pointer style. Unescape ~1 -> "/" before ~0 -> "~" (RFC 6901
        # order, so "~01" decodes to the literal "~1").
        for segment in text.split("/"):
            if not segment:
                continue
            decoded = segment.replace("~1", "/").replace("~0", "~")
            out.append(int(decoded) if decoded.isdigit() else decoded)
        return out

    bracket = re.compile(r"([^\[\]]+)|\[(\d+)\]")
    for segment in text.split("."):
        if not segment:
            continue
        # A bare numeric segment means list indexing: `foo.0.bar`
        if "[" not in segment and segment.isdigit():
            out.append(int(segment))
            continue
        # Break `foo[0][1]` into the key "foo" plus indexes 0 and 1.
        for match in bracket.finditer(segment):
            name, index = match.group(1), match.group(2)
            if name is not None:
                out.append(name)
            elif index is not None:
                out.append(int(index))

    return out
166
+
167
+
168
def resolve_vars_path(root: Any, tokens: list[Any]) -> Any:
    """Resolve tokens against nested dict/list structures.

    Int tokens index lists; every other token is coerced to str and used as a
    dict key. Raises ValueError (mentioning the path walked so far) when a
    container has the wrong type, an index is out of range, or a key is
    missing. An empty token list returns `root` itself.
    """
    node: Any = root
    walked: list[str] = []

    def _where() -> str:
        # Human-readable location of `node` for error messages.
        return ".".join(p for p in walked if p) or "(root)"

    for token in tokens:
        if isinstance(token, int):
            if not isinstance(node, list):
                raise ValueError(f"Expected list at {_where()} but found {type(node).__name__}")
            if not (0 <= token < len(node)):
                raise ValueError(f"Index {token} out of range at {_where()} (len={len(node)})")
            node = node[token]
            walked.append(str(token))
        else:
            key = str(token)
            if not isinstance(node, dict):
                raise ValueError(f"Expected object at {_where()} but found {type(node).__name__}")
            if key not in node:
                raise ValueError(f"Missing key '{key}' at {_where()}")
            node = node[key]
            walked.append(key)

    return node
@@ -0,0 +1,10 @@
1
+ """abstractruntime.evidence
2
+
3
+ Runtime-owned evidence capture and indexing.
4
+ """
5
+
6
+ from .recorder import EvidenceRecorder, DEFAULT_EVIDENCE_TOOL_NAMES
7
+
8
+ __all__ = ["EvidenceRecorder", "DEFAULT_EVIDENCE_TOOL_NAMES"]
9
+
10
+
@@ -0,0 +1,325 @@
1
+ """abstractruntime.evidence.recorder
2
+
3
+ Evidence is "provenance-first": a durable record of what the system actually observed at
4
+ external boundaries (web + process execution), stored as artifacts with a small JSON index
5
+ in run state.
6
+
7
+ Design goals:
8
+ - Always-on capture for a small default set of tools (web_search/fetch_url/execute_command).
9
+ - Keep RunState.vars JSON-safe and bounded: store large payloads in ArtifactStore and keep refs.
10
+ - Make later indexing/storage upgrades possible (Elastic/vector/etc) without changing semantics.
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import json
16
+ from dataclasses import dataclass
17
+ from datetime import datetime, timezone
18
+ from typing import Any, Dict, List, Optional, Sequence
19
+
20
+ from ..core.models import RunState
21
+ from ..storage.artifacts import ArtifactStore, artifact_ref, is_artifact_ref
22
+
23
+
24
+ DEFAULT_EVIDENCE_TOOL_NAMES: tuple[str, ...] = ("web_search", "fetch_url", "execute_command")
25
+
26
+
27
def utc_now_iso() -> str:
    """Current UTC time as a timezone-aware ISO-8601 string."""
    now = datetime.now(timezone.utc)
    return now.isoformat()
29
+
30
+
31
def _ensure_memory_spans(run: RunState) -> list[dict[str, Any]]:
    """Return the mutable `memory_spans` list under `run.vars["_runtime"]`.

    Both the `_runtime` namespace and the spans list are created and written
    back into `run.vars` when absent or of the wrong type, so the returned
    list is always the one held by run state.
    """
    ns = run.vars.get("_runtime")
    if not isinstance(ns, dict):
        ns = {}
        run.vars["_runtime"] = ns
    existing = ns.get("memory_spans")
    if isinstance(existing, list):
        return existing
    fresh: list[dict[str, Any]] = []
    ns["memory_spans"] = fresh
    return fresh
41
+
42
+
43
def _preview(text: str, *, limit: int = 160) -> str:
    """Trim `text` to at most `limit` characters, ending with an ellipsis when cut."""
    snippet = str(text or "").strip()
    if len(snippet) > limit:
        # Reserve one slot for the ellipsis character itself.
        return snippet[: max(0, limit - 1)] + "…"
    return snippet
48
+
49
+
50
def _json_loads_maybe(text: str) -> Optional[Any]:
    """Best-effort JSON parse.

    Returns None unless `text` is a string whose stripped form starts with
    '{' or '[' AND parses cleanly; parse errors are swallowed on purpose
    (callers fall back to storing the raw text).
    """
    if not isinstance(text, str):
        return None
    candidate = text.strip()
    if not candidate:
        return None
    if candidate[0] not in "{[":
        return None
    try:
        return json.loads(candidate)
    except Exception:
        return None
62
+
63
+
64
def _store_text(
    store: ArtifactStore,
    *,
    text: str,
    run_id: str,
    tags: Dict[str, str],
    content_type: str = "text/plain",
) -> Optional[Dict[str, str]]:
    """Persist non-empty text in the artifact store and return an artifact ref.

    Returns None — storing nothing — when `text` is empty or None.
    """
    payload = str(text or "")
    if not payload:
        return None
    stored = store.store_text(payload, content_type=content_type, run_id=run_id, tags=tags)
    return artifact_ref(stored.artifact_id)
77
+
78
+
79
def _store_json(
    store: ArtifactStore,
    *,
    data: Any,
    run_id: str,
    tags: Dict[str, str],
) -> Optional[Dict[str, str]]:
    """Persist `data` as a JSON artifact and return an artifact ref.

    Returns None (nothing stored) when `data` is None.
    """
    if data is None:
        return None
    stored = store.store_json(data, run_id=run_id, tags=tags)
    return artifact_ref(stored.artifact_id)
90
+
91
+
92
@dataclass(frozen=True)
class EvidenceCaptureStats:
    """Immutable summary of one `EvidenceRecorder.record_tool_calls` pass."""

    # Number of tool results for which an evidence record was persisted.
    recorded: int = 0
95
+
96
+
97
class EvidenceRecorder:
    """Runtime-side recorder for always-on evidence.

    Captures provenance for a configured set of tools by persisting large
    payloads to the ArtifactStore, replacing them in tool results with
    artifact refs, and appending a small span record to run state.
    """

    def __init__(
        self,
        *,
        artifact_store: ArtifactStore,
        tool_names: Sequence[str] = DEFAULT_EVIDENCE_TOOL_NAMES,
    ):
        # Store receives both the large payloads and the small JSON records.
        self._store = artifact_store
        # Normalized set of tool names to capture; non-string or blank
        # entries are silently dropped.
        self._tool_names = {str(n).strip() for n in tool_names if isinstance(n, str) and n.strip()}

    def record_tool_calls(
        self,
        *,
        run: RunState,
        node_id: str,
        tool_calls: list[Any],
        tool_results: Dict[str, Any],
    ) -> EvidenceCaptureStats:
        """Capture evidence for the tool results of one node step.

        Deliberately mutates its inputs:
        - large text fields are popped from each matching result's `output`
          dict and replaced with artifact refs;
        - `meta["evidence_id"]` is attached to each recorded result;
        - a span record is appended to `run.vars["_runtime"]["memory_spans"]`.

        Returns an `EvidenceCaptureStats` with the count of results recorded.
        Malformed inputs (non-dict results container, non-list results) yield
        a zero count rather than raising.
        """
        if not isinstance(tool_results, dict):
            return EvidenceCaptureStats(recorded=0)
        results = tool_results.get("results", [])
        if not isinstance(results, list) or not results:
            return EvidenceCaptureStats(recorded=0)
        if not isinstance(tool_calls, list):
            tool_calls = []

        spans = _ensure_memory_spans(run)
        recorded = 0

        for idx, r in enumerate(results):
            if not isinstance(r, dict):
                continue
            # Pair each result with its originating call by position; a
            # missing or malformed call degrades to an empty dict.
            call = tool_calls[idx] if idx < len(tool_calls) and isinstance(tool_calls[idx], dict) else {}
            tool_name = str(r.get("name") or call.get("name") or "").strip()
            if not tool_name or tool_name not in self._tool_names:
                continue

            ok = bool(r.get("success") is True)
            call_id = str(r.get("call_id") or call.get("call_id") or "")
            error = r.get("error")
            error_text = str(error).strip() if isinstance(error, str) and error.strip() else None
            args = call.get("arguments") if isinstance(call, dict) else None
            args_dict = dict(args) if isinstance(args, dict) else {}

            output = r.get("output")
            # Tool executors vary: output may be str/dict/None.
            output_dict = dict(output) if isinstance(output, dict) else None
            output_text = str(output or "") if isinstance(output, str) else None

            created_at = utc_now_iso()
            tags: Dict[str, str] = {"kind": "evidence", "tool": tool_name}

            # Small JSON record describing what was observed; large payloads
            # are referenced via artifact refs added below.
            evidence_payload: Dict[str, Any] = {
                "tool_name": tool_name,
                "call_id": call_id,
                "success": ok,
                "error": error_text,
                "created_at": created_at,
                "run_id": run.run_id,
                "workflow_id": run.workflow_id,
                "node_id": node_id,
                "arguments": args_dict,
            }
            if run.actor_id:
                evidence_payload["actor_id"] = str(run.actor_id)
            if getattr(run, "session_id", None):
                evidence_payload["session_id"] = str(run.session_id)

            artifacts: Dict[str, Any] = {}

            if tool_name == "fetch_url":
                url = str(args_dict.get("url") or "")
                if url:
                    # Truncated: tag values are used for indexing and kept bounded.
                    tags["url"] = url[:200]

                if isinstance(output_dict, dict):
                    # Store and strip large text fields from the tool output dict.
                    raw_text = output_dict.pop("raw_text", None)
                    norm_text = output_dict.pop("normalized_text", None)
                    content_type = output_dict.get("content_type")
                    content_type_str = str(content_type) if isinstance(content_type, str) else ""

                    raw_ref = None
                    if isinstance(raw_text, str) and raw_text:
                        raw_ref = _store_text(
                            self._store,
                            text=raw_text,
                            run_id=run.run_id,
                            tags={**tags, "part": "raw"},
                            content_type=content_type_str or "text/plain",
                        )
                        output_dict["raw_artifact"] = raw_ref
                        artifacts["raw"] = raw_ref

                    norm_ref = None
                    if isinstance(norm_text, str) and norm_text:
                        norm_ref = _store_text(
                            self._store,
                            text=norm_text,
                            run_id=run.run_id,
                            tags={**tags, "part": "normalized"},
                            content_type="text/plain",
                        )
                        output_dict["normalized_artifact"] = norm_ref
                        artifacts["normalized_text"] = norm_ref

                    evidence_payload["url"] = str(output_dict.get("url") or url)
                    evidence_payload["final_url"] = str(output_dict.get("final_url") or "")
                    evidence_payload["content_type"] = content_type_str
                    evidence_payload["size_bytes"] = output_dict.get("size_bytes")
                    if artifacts:
                        evidence_payload["artifacts"] = artifacts

                    # Write back the stripped/augmented dict into the tool result so run state stays small.
                    r["output"] = output_dict

            elif tool_name == "execute_command":
                cmd = str(args_dict.get("command") or "")
                if cmd:
                    tags["command"] = _preview(cmd, limit=200)

                if isinstance(output_dict, dict):
                    # stdout/stderr can be arbitrarily large: offload to artifacts.
                    stdout = output_dict.pop("stdout", None)
                    stderr = output_dict.pop("stderr", None)

                    stdout_ref = None
                    if isinstance(stdout, str) and stdout:
                        stdout_ref = _store_text(
                            self._store,
                            text=stdout,
                            run_id=run.run_id,
                            tags={**tags, "part": "stdout"},
                        )
                        output_dict["stdout_artifact"] = stdout_ref
                        artifacts["stdout"] = stdout_ref

                    stderr_ref = None
                    if isinstance(stderr, str) and stderr:
                        stderr_ref = _store_text(
                            self._store,
                            text=stderr,
                            run_id=run.run_id,
                            tags={**tags, "part": "stderr"},
                        )
                        output_dict["stderr_artifact"] = stderr_ref
                        artifacts["stderr"] = stderr_ref

                    evidence_payload["command"] = str(output_dict.get("command") or cmd)
                    evidence_payload["return_code"] = output_dict.get("return_code")
                    evidence_payload["duration_s"] = output_dict.get("duration_s")
                    evidence_payload["working_directory"] = output_dict.get("working_directory")
                    evidence_payload["platform"] = output_dict.get("platform")
                    if artifacts:
                        evidence_payload["artifacts"] = artifacts

                    r["output"] = output_dict

                elif isinstance(output_text, str) and output_text:
                    # Some executors return plain text: store it whole.
                    out_ref = _store_text(
                        self._store,
                        text=output_text,
                        run_id=run.run_id,
                        tags={**tags, "part": "output"},
                    )
                    if out_ref is not None:
                        artifacts["output"] = out_ref
                        evidence_payload["artifacts"] = artifacts

            elif tool_name == "web_search":
                query = str(args_dict.get("query") or "")
                if query:
                    tags["query"] = _preview(query, limit=200)
                    evidence_payload["query"] = query

                if isinstance(output_text, str) and output_text:
                    # Prefer a structured JSON artifact when the output parses.
                    parsed = _json_loads_maybe(output_text)
                    if parsed is not None:
                        out_ref = _store_json(self._store, data=parsed, run_id=run.run_id, tags={**tags, "part": "results"})
                    else:
                        out_ref = _store_text(self._store, text=output_text, run_id=run.run_id, tags={**tags, "part": "results"})
                    if out_ref is not None:
                        artifacts["results"] = out_ref
                        evidence_payload["artifacts"] = artifacts
                elif isinstance(output_dict, dict):
                    out_ref = _store_json(self._store, data=output_dict, run_id=run.run_id, tags={**tags, "part": "results"})
                    if out_ref is not None:
                        artifacts["results"] = out_ref
                        evidence_payload["artifacts"] = artifacts

            # Store the evidence record itself (small JSON with artifact refs).
            record_ref = _store_json(self._store, data=evidence_payload, run_id=run.run_id, tags=tags)
            if not (isinstance(record_ref, dict) and is_artifact_ref(record_ref)):
                # Nothing persisted: skip without touching spans/meta.
                continue
            evidence_id = record_ref["$artifact"]

            # Append to span-like index for fast listing.
            span_record: Dict[str, Any] = {
                "kind": "evidence",
                "artifact_id": evidence_id,
                "created_at": created_at,
                "from_timestamp": created_at,
                "to_timestamp": created_at,
                "message_count": 0,
                "tool_name": tool_name,
                "call_id": call_id,
                "success": ok,
            }
            if tool_name == "fetch_url":
                span_record["url"] = evidence_payload.get("url") or str(args_dict.get("url") or "")
            elif tool_name == "web_search":
                span_record["query"] = str(args_dict.get("query") or "")
            elif tool_name == "execute_command":
                span_record["command_preview"] = _preview(str(args_dict.get("command") or ""))

            # Attach span id back to the tool result entry for easy linking in traces/UIs.
            meta = r.get("meta")
            if not isinstance(meta, dict):
                meta = {}
                r["meta"] = meta
            meta["evidence_id"] = evidence_id

            spans.append(span_record)
            recorded += 1

        return EvidenceCaptureStats(recorded=recorded)
325
+