AbstractRuntime 0.0.1-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. abstractruntime/__init__.py +7 -2
  2. abstractruntime/core/__init__.py +9 -2
  3. abstractruntime/core/config.py +114 -0
  4. abstractruntime/core/event_keys.py +62 -0
  5. abstractruntime/core/models.py +55 -1
  6. abstractruntime/core/runtime.py +2609 -24
  7. abstractruntime/core/vars.py +189 -0
  8. abstractruntime/evidence/__init__.py +10 -0
  9. abstractruntime/evidence/recorder.py +325 -0
  10. abstractruntime/integrations/abstractcore/__init__.py +9 -2
  11. abstractruntime/integrations/abstractcore/constants.py +19 -0
  12. abstractruntime/integrations/abstractcore/default_tools.py +134 -0
  13. abstractruntime/integrations/abstractcore/effect_handlers.py +288 -9
  14. abstractruntime/integrations/abstractcore/factory.py +133 -11
  15. abstractruntime/integrations/abstractcore/llm_client.py +547 -42
  16. abstractruntime/integrations/abstractcore/mcp_worker.py +586 -0
  17. abstractruntime/integrations/abstractcore/observability.py +80 -0
  18. abstractruntime/integrations/abstractcore/summarizer.py +154 -0
  19. abstractruntime/integrations/abstractcore/tool_executor.py +544 -8
  20. abstractruntime/memory/__init__.py +21 -0
  21. abstractruntime/memory/active_context.py +746 -0
  22. abstractruntime/memory/active_memory.py +452 -0
  23. abstractruntime/memory/compaction.py +105 -0
  24. abstractruntime/rendering/__init__.py +17 -0
  25. abstractruntime/rendering/agent_trace_report.py +256 -0
  26. abstractruntime/rendering/json_stringify.py +136 -0
  27. abstractruntime/scheduler/scheduler.py +93 -2
  28. abstractruntime/storage/__init__.py +3 -1
  29. abstractruntime/storage/artifacts.py +51 -5
  30. abstractruntime/storage/json_files.py +16 -3
  31. abstractruntime/storage/observable.py +99 -0
  32. {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/METADATA +5 -1
  33. abstractruntime-0.4.0.dist-info/RECORD +49 -0
  34. abstractruntime-0.4.0.dist-info/entry_points.txt +2 -0
  35. abstractruntime-0.0.1.dist-info/RECORD +0 -30
  36. {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/WHEEL +0 -0
  37. {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/licenses/LICENSE +0 -0
abstractruntime/integrations/abstractcore/tool_executor.py
@@ -13,7 +13,12 @@ pause until the host resumes with the tool results.
 from __future__ import annotations

 from dataclasses import asdict, is_dataclass
-from typing import Any, Dict, List, Optional, Protocol
+import inspect
+import json
+import re
+import threading
+import uuid
+from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence

 from .logging import get_logger

@@ -24,6 +29,327 @@ class ToolExecutor(Protocol):
     def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]: ...


+def _normalize_timeout_s(value: Optional[float]) -> Optional[float]:
+    if value is None:
+        return None
+    try:
+        f = float(value)
+    except Exception:
+        return None
+    # Contract: non-positive values are treated as "unlimited".
+    return None if f <= 0 else f
+
+
+def _call_with_timeout(func: Callable[[], Any], *, timeout_s: Optional[float]) -> tuple[bool, Any, Optional[str]]:
+    """Execute a callable with a best-effort timeout.
+
+    Important limitation (Python semantics): we cannot forcibly stop a running function
+    without process isolation. On timeout we return an error, but the underlying callable
+    may still finish later (daemon thread).
+    """
+    timeout_s = _normalize_timeout_s(timeout_s)
+    if timeout_s is None:
+        try:
+            return True, func(), None
+        except Exception as e:
+            return False, None, str(e)
+
+    result: Dict[str, Any] = {"done": False, "ok": False, "value": None, "error": None}
+
+    def _runner() -> None:
+        try:
+            result["value"] = func()
+            result["ok"] = True
+        except Exception as e:
+            result["error"] = str(e)
+            result["ok"] = False
+        finally:
+            result["done"] = True
+
+    t = threading.Thread(target=_runner, daemon=True)
+    t.start()
+    t.join(timeout_s)
+
+    if not result.get("done", False):
+        return False, None, f"Tool execution timed out after {timeout_s}s"
+    if result.get("ok", False):
+        return True, result.get("value"), None
+    return False, None, str(result.get("error") or "Tool execution failed")
+
+
+class MappingToolExecutor:
+    """Executes tool calls using an explicit {tool_name -> callable} mapping.
+
+    This is the recommended durable execution path: the mapping is held by the
+    host/runtime process and is never persisted inside RunState.
+    """
+
+    def __init__(self, tool_map: Dict[str, Callable[..., Any]], *, timeout_s: Optional[float] = None):
+        self._tool_map = dict(tool_map)
+        self._timeout_s = _normalize_timeout_s(timeout_s)
+
+    @classmethod
+    def from_tools(cls, tools: Sequence[Callable[..., Any]], *, timeout_s: Optional[float] = None) -> "MappingToolExecutor":
+        tool_map: Dict[str, Callable[..., Any]] = {}
+        for t in tools:
+            tool_def = getattr(t, "_tool_definition", None)
+            if tool_def is not None:
+                name = str(getattr(tool_def, "name", "") or "")
+                func = getattr(tool_def, "function", None) or t
+            else:
+                name = str(getattr(t, "__name__", "") or "")
+                func = t
+
+            if not name:
+                raise ValueError("Tool is missing a name")
+            if not callable(func):
+                raise ValueError(f"Tool '{name}' is not callable")
+            if name in tool_map:
+                raise ValueError(f"Duplicate tool name '{name}'")
+
+            tool_map[name] = func
+
+        return cls(tool_map, timeout_s=timeout_s)
+
+    def set_timeout_s(self, timeout_s: Optional[float]) -> None:
+        self._timeout_s = _normalize_timeout_s(timeout_s)
+
+    def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
+        results: List[Dict[str, Any]] = []
+
+        def _loads_dict_like(value: Any) -> Optional[Dict[str, Any]]:
+            if value is None:
+                return None
+            if isinstance(value, dict):
+                return dict(value)
+            if not isinstance(value, str):
+                return None
+            text = value.strip()
+            if not text:
+                return None
+            try:
+                parsed = json.loads(text)
+            except Exception:
+                return None
+            return parsed if isinstance(parsed, dict) else None
+
+        def _unwrap_wrapper_args(kwargs: Dict[str, Any]) -> Dict[str, Any]:
+            """Unwrap common wrapper shapes like {"name":..., "arguments":{...}}.
+
+            Some models emit tool kwargs wrapped inside an "arguments" object and may
+            mistakenly place real kwargs alongside wrapper fields. We unwrap and merge
+            (inner args take precedence).
+            """
+            current: Dict[str, Any] = dict(kwargs or {})
+            wrapper_keys = {"name", "arguments", "call_id", "id"}
+            for _ in range(4):
+                inner = current.get("arguments")
+                inner_dict = _loads_dict_like(inner)
+                if not isinstance(inner_dict, dict):
+                    break
+                extras = {k: v for k, v in current.items() if k not in wrapper_keys}
+                merged = dict(inner_dict)
+                for k, v in extras.items():
+                    merged.setdefault(k, v)
+                current = merged
+            return current
+
+        def _filter_kwargs(func: Callable[..., Any], kwargs: Dict[str, Any]) -> Dict[str, Any]:
+            """Best-effort filtering of unexpected kwargs for callables without **kwargs."""
+            try:
+                sig = inspect.signature(func)
+            except Exception:
+                return kwargs
+
+            params = list(sig.parameters.values())
+            if any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params):
+                return kwargs
+
+            allowed = {
+                p.name
+                for p in params
+                if p.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)
+            }
+            return {k: v for k, v in kwargs.items() if k in allowed}
+
+        def _normalize_key(key: str) -> str:
+            # Lowercase and remove common separators so `file_path`, `filePath`,
+            # `file-path`, `file path` all normalize to the same token.
+            return re.sub(r"[\s_\-]+", "", str(key or "").strip().lower())
+
+        _SYNONYM_ALIASES: Dict[str, List[str]] = {
+            # Common semantic drift across many tools
+            "path": ["file_path", "directory_path", "path"],
+            # Common CLI/media naming drift
+            "filename": ["file_path"],
+            "filepath": ["file_path"],
+            "dir": ["directory_path", "path"],
+            "directory": ["directory_path", "path"],
+            "folder": ["directory_path", "path"],
+            "query": ["pattern", "query"],
+            "regex": ["pattern", "regex"],
+            # Range drift (used by multiple tools)
+            "start": ["start_line", "start"],
+            "end": ["end_line", "end"],
+            "startlineoneindexed": ["start_line"],
+            "endlineoneindexedinclusive": ["end_line"],
+        }
+
+        def _canonicalize_kwargs(func: Callable[..., Any], kwargs: Dict[str, Any]) -> Dict[str, Any]:
+            """Best-effort canonicalization of kwarg names.
+
+            Strategy:
+            - Unwrap common wrapper shapes (nested `arguments`)
+            - Map keys by normalized form (case + separators)
+            - Apply a small, tool-agnostic synonym table (path/query/start/end)
+            - Finally, filter unexpected kwargs for callables without **kwargs
+            """
+            if not isinstance(kwargs, dict) or not kwargs:
+                return {}
+
+            # 1) Unwrap wrapper shapes early.
+            current = _unwrap_wrapper_args(kwargs)
+
+            try:
+                sig = inspect.signature(func)
+            except Exception:
+                return current
+
+            params = list(sig.parameters.values())
+            allowed_names = {
+                p.name
+                for p in params
+                if p.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)
+            }
+            norm_to_param = { _normalize_key(n): n for n in allowed_names }
+
+            out: Dict[str, Any] = dict(current)
+
+            # 2) Normalized (morphological) key mapping.
+            for k in list(out.keys()):
+                if k in allowed_names:
+                    continue
+                nk = _normalize_key(k)
+                target = norm_to_param.get(nk)
+                if target and target not in out:
+                    out[target] = out.pop(k)
+
+            # 3) Synonym mapping (semantic).
+            for k in list(out.keys()):
+                if k in allowed_names:
+                    continue
+                nk = _normalize_key(k)
+                candidates = _SYNONYM_ALIASES.get(nk, [])
+                for cand in candidates:
+                    if cand in allowed_names and cand not in out:
+                        out[cand] = out.pop(k)
+                        break
+
+            # 4) Filter unexpected kwargs when callable doesn't accept **kwargs.
+            return _filter_kwargs(func, out)
+
+        def _error_from_output(value: Any) -> Optional[str]:
+            """Detect tool failures reported as string outputs (instead of exceptions)."""
+            # Structured tool outputs may explicitly report failure without raising.
+            # Only treat as error when the tool declares failure.
+            if isinstance(value, dict):
+                success = value.get("success")
+                ok = value.get("ok")
+                if success is False or ok is False:
+                    err = value.get("error") or value.get("message") or "Tool reported failure"
+                    text = str(err).strip()
+                    return text or "Tool reported failure"
+                return None
+            if not isinstance(value, str):
+                return None
+            text = value.strip()
+            if not text:
+                return None
+            if text.startswith("Error:"):
+                cleaned = text[len("Error:") :].strip()
+                return cleaned or text
+            if text.startswith(("❌", "🚫", "⏰")):
+                cleaned = text.lstrip("❌🚫⏰").strip()
+                if cleaned.startswith("Error:"):
+                    cleaned = cleaned[len("Error:") :].strip()
+                return cleaned or text
+            return None
+
+        def _append_result(*, call_id: str, name: str, output: Any) -> None:
+            error = _error_from_output(output)
+            if error is not None:
+                # Preserve structured outputs for provenance/evidence. For string-only error outputs
+                # (the historical convention), keep output empty and store the message in `error`.
+                output_json = None if isinstance(output, str) else _jsonable(output)
+                results.append(
+                    {
+                        "call_id": call_id,
+                        "name": name,
+                        "success": False,
+                        "output": output_json,
+                        "error": error,
+                    }
+                )
+                return
+
+            results.append(
+                {
+                    "call_id": call_id,
+                    "name": name,
+                    "success": True,
+                    "output": _jsonable(output),
+                    "error": None,
+                }
+            )
+
+        for tc in tool_calls:
+            name = str(tc.get("name", "") or "")
+            raw_arguments = tc.get("arguments") or {}
+            arguments = dict(raw_arguments) if isinstance(raw_arguments, dict) else (_loads_dict_like(raw_arguments) or {})
+            call_id = str(tc.get("call_id") or "")
+
+            func = self._tool_map.get(name)
+            if func is None:
+                results.append(
+                    {
+                        "call_id": call_id,
+                        "name": name,
+                        "success": False,
+                        "output": None,
+                        "error": f"Tool '{name}' not found",
+                    }
+                )
+                continue
+
+            arguments = _canonicalize_kwargs(func, arguments)
+
+            def _invoke() -> Any:
+                try:
+                    return func(**arguments)
+                except TypeError:
+                    # Retry once with sanitized kwargs for common wrapper/extra-arg failures.
+                    filtered = _canonicalize_kwargs(func, arguments)
+                    if filtered != arguments:
+                        return func(**filtered)
+                    raise
+
+            ok, output, err = _call_with_timeout(_invoke, timeout_s=self._timeout_s)
+            if ok:
+                _append_result(call_id=call_id, name=name, output=output)
+            else:
+                results.append(
+                    {
+                        "call_id": call_id,
+                        "name": name,
+                        "success": False,
+                        "output": None,
+                        "error": str(err or "Tool execution failed"),
+                    }
+                )

+        return {"mode": "executed", "results": results}
+
+
 def _jsonable(value: Any) -> Any:
     if value is None:
         return None
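For orientation between hunks: a minimal usage sketch of the new MappingToolExecutor added above. The read_file tool, its parameters, and the file name are hypothetical; the import path is taken from file 19 in the list at the top.

    from abstractruntime.integrations.abstractcore.tool_executor import MappingToolExecutor

    def read_file(file_path: str, start_line: int = 1) -> str:
        # Hypothetical host-side tool; any plain callable with named parameters works.
        with open(file_path, "r", encoding="utf-8") as f:
            return "".join(f.readlines()[start_line - 1:])

    executor = MappingToolExecutor.from_tools([read_file], timeout_s=10.0)

    # Drifted kwarg names ("filepath", "start") are canonicalized to the real
    # parameter names (file_path, start_line) before the call.
    outcome = executor.execute(tool_calls=[
        {"call_id": "call-1", "name": "read_file", "arguments": {"filepath": "notes.txt", "start": 3}},
    ])
    print(outcome["mode"])                   # "executed"
    print(outcome["results"][0]["success"])  # True when notes.txt exists and is readable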
@@ -50,9 +376,15 @@ def _jsonable(value: Any) -> Any:
 class AbstractCoreToolExecutor:
     """Executes tool calls using AbstractCore's global tool registry."""

+    def __init__(self, *, timeout_s: Optional[float] = None):
+        self._timeout_s = _normalize_timeout_s(timeout_s)
+
+    def set_timeout_s(self, timeout_s: Optional[float]) -> None:
+        self._timeout_s = _normalize_timeout_s(timeout_s)
+
     def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
         from abstractcore.tools.core import ToolCall
-        from abstractcore.tools.registry import execute_tools
+        from abstractcore.tools.registry import execute_tool

         calls = [
             ToolCall(
@@ -63,15 +395,29 @@ class AbstractCoreToolExecutor:
             for tc in tool_calls
         ]

-        results = execute_tools(calls)
         normalized = []
-        for r in results:
+        for call in calls:
+            ok, out, err = _call_with_timeout(lambda c=call: execute_tool(c), timeout_s=self._timeout_s)
+            if ok:
+                r = out
+                normalized.append(
+                    {
+                        "call_id": getattr(r, "call_id", "") if r is not None else "",
+                        "name": getattr(call, "name", ""),
+                        "success": bool(getattr(r, "success", False)) if r is not None else True,
+                        "output": _jsonable(getattr(r, "output", None)) if r is not None else None,
+                        "error": getattr(r, "error", None) if r is not None else None,
+                    }
+                )
+                continue
+
             normalized.append(
                 {
-                    "call_id": getattr(r, "call_id", ""),
-                    "success": bool(getattr(r, "success", False)),
-                    "output": _jsonable(getattr(r, "output", None)),
-                    "error": getattr(r, "error", None),
+                    "call_id": str(getattr(call, "call_id", "") or ""),
+                    "name": getattr(call, "name", ""),
+                    "success": False,
+                    "output": None,
+                    "error": str(err or "Tool execution failed"),
                 }
             )

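Both the mapping-based and AbstractCore-backed executors now return the same normalized envelope (note the added "name" field and the timeout error path). An illustrative sketch of that shape, with hypothetical values:

    # Illustrative only; the tool name and timeout value are hypothetical.
    outcome = {
        "mode": "executed",
        "results": [
            {
                "call_id": "call-7",
                "name": "web_search",
                "success": False,
                "output": None,
                "error": "Tool execution timed out after 30.0s",
            },
        ],
    }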
@@ -87,3 +433,193 @@ class PassthroughToolExecutor:
     def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
         return {"mode": self._mode, "tool_calls": _jsonable(tool_calls)}

+
+def _mcp_result_to_output(result: Any) -> Any:
+    if not isinstance(result, dict):
+        return _jsonable(result)
+
+    content = result.get("content")
+    if isinstance(content, list):
+        texts: list[str] = []
+        for item in content:
+            if not isinstance(item, dict):
+                continue
+            if item.get("type") != "text":
+                continue
+            text = item.get("text")
+            if isinstance(text, str) and text.strip():
+                texts.append(text.strip())
+        if texts:
+            joined = "\n".join(texts).strip()
+            if joined:
+                try:
+                    return _jsonable(json.loads(joined))
+                except Exception:
+                    return joined
+
+    return _jsonable(result)
+
+
+def _mcp_result_to_error(result: Any) -> Optional[str]:
+    if not isinstance(result, dict):
+        return None
+    output = _mcp_result_to_output(result)
+
+    # MCP-native error flag.
+    if result.get("isError") is True:
+        if isinstance(output, str) and output.strip():
+            return output.strip()
+        return "MCP tool call reported error"
+
+    # Some real MCP servers return error strings inside content while leaving `isError=false`.
+    # Match the local executor's convention for string error outputs.
+    if isinstance(output, str):
+        text = output.strip()
+        if not text:
+            return None
+        if text.startswith("Error:"):
+            cleaned = text[len("Error:") :].strip()
+            return cleaned or text
+        if text.startswith(("❌", "🚫", "⏰")):
+            cleaned = text.lstrip("❌🚫⏰").strip()
+            if cleaned.startswith("Error:"):
+                cleaned = cleaned[len("Error:") :].strip()
+            return cleaned or text
+        if text.lower().startswith("traceback"):
+            return text
+    return None
+
+
+class McpToolExecutor:
+    """Executes tool calls remotely via an MCP server (Streamable HTTP / JSON-RPC)."""
+
+    def __init__(
+        self,
+        *,
+        server_id: str,
+        mcp_url: str,
+        timeout_s: Optional[float] = 30.0,
+        mcp_client: Optional[Any] = None,
+    ):
+        self._server_id = str(server_id or "").strip()
+        if not self._server_id:
+            raise ValueError("McpToolExecutor requires a non-empty server_id")
+        self._mcp_url = str(mcp_url or "").strip()
+        if not self._mcp_url:
+            raise ValueError("McpToolExecutor requires a non-empty mcp_url")
+        self._timeout_s = _normalize_timeout_s(timeout_s)
+        self._mcp_client = mcp_client
+
+    def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
+        from abstractcore.mcp import McpClient, parse_namespaced_tool_name
+
+        results: List[Dict[str, Any]] = []
+        client = self._mcp_client or McpClient(url=self._mcp_url, timeout_s=self._timeout_s)
+        close_client = self._mcp_client is None
+        try:
+            for tc in tool_calls:
+                name = str(tc.get("name", "") or "")
+                call_id = str(tc.get("call_id") or "")
+                raw_arguments = tc.get("arguments") or {}
+                arguments = dict(raw_arguments) if isinstance(raw_arguments, dict) else {}
+
+                remote_name = name
+                parsed = parse_namespaced_tool_name(name)
+                if parsed is not None:
+                    server_id, tool_name = parsed
+                    if server_id != self._server_id:
+                        results.append(
+                            {
+                                "call_id": call_id,
+                                "name": name,
+                                "success": False,
+                                "output": None,
+                                "error": f"MCP tool '{name}' targets server '{server_id}', expected '{self._server_id}'",
+                            }
+                        )
+                        continue
+                    remote_name = tool_name
+
+                try:
+                    mcp_result = client.call_tool(name=remote_name, arguments=arguments)
+                    err = _mcp_result_to_error(mcp_result)
+                    if err is not None:
+                        results.append(
+                            {
+                                "call_id": call_id,
+                                "name": name,
+                                "success": False,
+                                "output": None,
+                                "error": err,
+                            }
+                        )
+                        continue
+                    results.append(
+                        {
+                            "call_id": call_id,
+                            "name": name,
+                            "success": True,
+                            "output": _mcp_result_to_output(mcp_result),
+                            "error": None,
+                        }
+                    )
+                except Exception as e:
+                    results.append(
+                        {
+                            "call_id": call_id,
+                            "name": name,
+                            "success": False,
+                            "output": None,
+                            "error": str(e),
+                        }
+                    )
+
+        finally:
+            if close_client:
+                try:
+                    client.close()
+                except Exception:
+                    pass
+
+        return {"mode": "executed", "results": results}
+
+
+class DelegatingMcpToolExecutor:
+    """Delegates tool calls to an MCP server by returning a durable JOB wait payload.
+
+    This executor does not execute tools directly; it packages the tool calls plus
+    MCP endpoint metadata into a `WAITING` state so an external worker can execute
+    them and resume the run with results.
+    """
+
+    def __init__(
+        self,
+        *,
+        server_id: str,
+        mcp_url: str,
+        transport: str = "streamable_http",
+        wait_key_factory: Optional[Callable[[], str]] = None,
+    ):
+        self._server_id = str(server_id or "").strip()
+        if not self._server_id:
+            raise ValueError("DelegatingMcpToolExecutor requires a non-empty server_id")
+        self._mcp_url = str(mcp_url or "").strip()
+        if not self._mcp_url:
+            raise ValueError("DelegatingMcpToolExecutor requires a non-empty mcp_url")
+        self._transport = str(transport or "").strip() or "streamable_http"
+        self._wait_key_factory = wait_key_factory or (lambda: f"mcp_job:{uuid.uuid4().hex}")
+
+    def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
+        return {
+            "mode": "delegated",
+            "wait_reason": "job",
+            "wait_key": self._wait_key_factory(),
+            "tool_calls": _jsonable(tool_calls),
+            "details": {
+                "protocol": "mcp",
+                "transport": self._transport,
+                "url": self._mcp_url,
+                "server_id": self._server_id,
+                "tool_name_prefix": f"mcp::{self._server_id}::",
+            },
+        }
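A hedged sketch of how the new DelegatingMcpToolExecutor is meant to be driven; the server id, URL, and tool name below are hypothetical, and the import path is taken from file 19 in the list at the top.

    from abstractruntime.integrations.abstractcore.tool_executor import DelegatingMcpToolExecutor

    executor = DelegatingMcpToolExecutor(server_id="files", mcp_url="http://localhost:8000/mcp")
    payload = executor.execute(tool_calls=[
        {"call_id": "call-1", "name": "mcp::files::list_dir", "arguments": {"path": "."}},
    ])
    # payload["mode"] == "delegated" and payload["wait_reason"] == "job";
    # payload["wait_key"] looks like "mcp_job:<hex>", and payload["details"] carries
    # protocol/transport/url/server_id plus the "mcp::files::" tool-name prefix, so an
    # external worker (see mcp_worker.py in the file list) can run the calls and
    # resume the waiting run with the results.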
abstractruntime/memory/__init__.py (new file)
@@ -0,0 +1,21 @@
+"""abstractruntime.memory
+
+Runtime-owned memory utilities.
+
+The runtime stores *everything* durably (RunStore/LedgerStore/ArtifactStore), but
+only a selected view is sent to the LLM as **active context**:
+
+    RunState.vars["context"]["messages"]
+
+This package provides minimal, JSON-safe helpers to:
+- list and filter archived spans (metadata/time range)
+- rehydrate archived spans back into active context deterministically
+- derive the LLM-visible view from active context under simple limits
+
+Semantic retrieval and graph-level memory live in AbstractMemory/AbstractFlow.
+"""
+
+from .active_context import ActiveContextPolicy, TimeRange
+
+__all__ = ["ActiveContextPolicy", "TimeRange"]
+
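To make the docstring's pointer concrete, here is a hedged illustration of the slot it refers to. The message fields shown are assumptions for illustration only; the diff documents the location of the list, not its schema.

    run_vars = {
        "context": {
            "messages": [
                # Assumed chat-style entries; only the location of this list is given above.
                {"role": "system", "content": "You are a coding agent."},
                {"role": "user", "content": "Summarize the repository layout."},
            ],
        },
    }
    # ActiveContextPolicy (exported above) derives the LLM-visible view from this
    # active context under simple limits; archived spans can be rehydrated back in.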