AbstractRuntime 0.0.1__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractruntime/__init__.py +7 -2
- abstractruntime/core/__init__.py +9 -2
- abstractruntime/core/config.py +114 -0
- abstractruntime/core/event_keys.py +62 -0
- abstractruntime/core/models.py +55 -1
- abstractruntime/core/runtime.py +2609 -24
- abstractruntime/core/vars.py +189 -0
- abstractruntime/evidence/__init__.py +10 -0
- abstractruntime/evidence/recorder.py +325 -0
- abstractruntime/integrations/abstractcore/__init__.py +9 -2
- abstractruntime/integrations/abstractcore/constants.py +19 -0
- abstractruntime/integrations/abstractcore/default_tools.py +134 -0
- abstractruntime/integrations/abstractcore/effect_handlers.py +288 -9
- abstractruntime/integrations/abstractcore/factory.py +133 -11
- abstractruntime/integrations/abstractcore/llm_client.py +547 -42
- abstractruntime/integrations/abstractcore/mcp_worker.py +586 -0
- abstractruntime/integrations/abstractcore/observability.py +80 -0
- abstractruntime/integrations/abstractcore/summarizer.py +154 -0
- abstractruntime/integrations/abstractcore/tool_executor.py +544 -8
- abstractruntime/memory/__init__.py +21 -0
- abstractruntime/memory/active_context.py +746 -0
- abstractruntime/memory/active_memory.py +452 -0
- abstractruntime/memory/compaction.py +105 -0
- abstractruntime/rendering/__init__.py +17 -0
- abstractruntime/rendering/agent_trace_report.py +256 -0
- abstractruntime/rendering/json_stringify.py +136 -0
- abstractruntime/scheduler/scheduler.py +93 -2
- abstractruntime/storage/__init__.py +3 -1
- abstractruntime/storage/artifacts.py +51 -5
- abstractruntime/storage/json_files.py +16 -3
- abstractruntime/storage/observable.py +99 -0
- {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/METADATA +5 -1
- abstractruntime-0.4.0.dist-info/RECORD +49 -0
- abstractruntime-0.4.0.dist-info/entry_points.txt +2 -0
- abstractruntime-0.0.1.dist-info/RECORD +0 -30
- {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/WHEEL +0 -0
- {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/licenses/LICENSE +0 -0
abstractruntime/integrations/abstractcore/default_tools.py

@@ -0,0 +1,134 @@
+"""Default toolsets for AbstractRuntime's AbstractCore integration.
+
+This module provides a *host-side* convenience list of common, safe(ish) tools
+that can be wired into a Runtime via MappingToolExecutor.
+
+Design notes:
+- We keep the runtime kernel dependency-light; this lives under
+  `integrations/abstractcore/` which is the explicit opt-in to AbstractCore.
+- Tool callables are never persisted in RunState; only ToolSpecs (dicts) are.
+"""
+
+from __future__ import annotations
+
+from typing import Any, Callable, Dict, List, Sequence
+
+
+ToolCallable = Callable[..., Any]
+
+
+def _tool_name(func: ToolCallable) -> str:
+    tool_def = getattr(func, "_tool_definition", None)
+    if tool_def is not None:
+        name = getattr(tool_def, "name", None)
+        if isinstance(name, str) and name.strip():
+            return name.strip()
+    name = getattr(func, "__name__", "")
+    return str(name or "").strip()
+
+
+def _tool_spec(func: ToolCallable) -> Dict[str, Any]:
+    tool_def = getattr(func, "_tool_definition", None)
+    if tool_def is not None and hasattr(tool_def, "to_dict"):
+        return dict(tool_def.to_dict())
+
+    from abstractcore.tools.core import ToolDefinition
+
+    return dict(ToolDefinition.from_function(func).to_dict())
+
+
+def get_default_toolsets() -> Dict[str, Dict[str, Any]]:
+    """Return default toolsets {id -> {label, tools:[callables]}}."""
+    from abstractcore.tools.common_tools import (
+        list_files,
+        read_file,
+        search_files,
+        analyze_code,
+        write_file,
+        edit_file,
+        web_search,
+        fetch_url,
+        execute_command,
+    )
+
+    return {
+        "files": {
+            "id": "files",
+            "label": "Files",
+            "tools": [list_files, search_files, analyze_code, read_file, write_file, edit_file],
+        },
+        "web": {
+            "id": "web",
+            "label": "Web",
+            "tools": [web_search, fetch_url],
+        },
+        "system": {
+            "id": "system",
+            "label": "System",
+            "tools": [execute_command],
+        },
+    }
+
+
+def get_default_tools() -> List[ToolCallable]:
+    """Return the flattened list of all default tool callables."""
+    toolsets = get_default_toolsets()
+    out: list[ToolCallable] = []
+    seen: set[str] = set()
+    for spec in toolsets.values():
+        for tool in spec.get("tools", []):
+            if not callable(tool):
+                continue
+            name = _tool_name(tool)
+            if not name or name in seen:
+                continue
+            seen.add(name)
+            out.append(tool)
+    return out
+
+
+def list_default_tool_specs() -> List[Dict[str, Any]]:
+    """Return ToolSpecs for UI and LLM payloads (JSON-safe)."""
+    toolsets = get_default_toolsets()
+    toolset_by_name: Dict[str, str] = {}
+    for tid, spec in toolsets.items():
+        for tool in spec.get("tools", []):
+            if callable(tool):
+                name = _tool_name(tool)
+                if name:
+                    toolset_by_name[name] = tid
+
+    out: list[Dict[str, Any]] = []
+    for tool in get_default_tools():
+        spec = _tool_spec(tool)
+        name = str(spec.get("name") or "").strip()
+        if not name:
+            continue
+        spec["toolset"] = toolset_by_name.get(name) or "other"
+        out.append(spec)
+
+    # Stable ordering: toolset then name
+    out.sort(key=lambda s: (str(s.get("toolset") or ""), str(s.get("name") or "")))
+    return out
+
+
+def build_default_tool_map() -> Dict[str, ToolCallable]:
+    """Return {tool_name -> callable} for MappingToolExecutor."""
+    tool_map: Dict[str, ToolCallable] = {}
+    for tool in get_default_tools():
+        name = _tool_name(tool)
+        if not name:
+            continue
+        tool_map[name] = tool
+    return tool_map
+
+
+def filter_tool_specs(tool_names: Sequence[str]) -> List[Dict[str, Any]]:
+    """Return ToolSpecs for the requested tool names (order preserved)."""
+    available = {str(s.get("name")): s for s in list_default_tool_specs() if isinstance(s.get("name"), str)}
+    out: list[Dict[str, Any]] = []
+    for name in tool_names:
+        spec = available.get(name)
+        if spec is not None:
+            out.append(spec)
+    return out
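The new module only exposes plain callables and JSON-safe ToolSpec dicts, so hosts decide how to wire them. A minimal usage sketch (only `build_default_tool_map` and `filter_tool_specs` come from the module above; everything else is the caller's code):

```python
# Sketch only: the returned map is the {tool_name -> callable} shape the docstring
# says MappingToolExecutor expects; the specs are JSON-safe dicts for UI/LLM payloads.
from abstractruntime.integrations.abstractcore.default_tools import (
    build_default_tool_map,
    filter_tool_specs,
)

tool_map = build_default_tool_map()                      # {"list_files": <callable>, ...}
specs = filter_tool_specs(["read_file", "web_search"])   # JSON-safe ToolSpecs, order preserved
```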
abstractruntime/integrations/abstractcore/effect_handlers.py

@@ -11,7 +11,8 @@ They are designed to keep `RunState.vars` JSON-safe.
 
 from __future__ import annotations
 
-
+import json
+from typing import Any, Dict, Optional, Set, Tuple, Type
 
 from ...core.models import Effect, EffectType, RunState, WaitReason, WaitState
 from ...core.runtime import EffectOutcome, EffectHandler
@@ -22,19 +23,159 @@ from .logging import get_logger
 logger = get_logger(__name__)
 
 
+def _jsonable(value: Any) -> Any:
+    """Best-effort conversion to JSON-safe objects.
+
+    Runtime traces and effect outcomes are persisted in RunState.vars and must remain JSON-safe.
+    """
+    if value is None:
+        return None
+    if isinstance(value, (str, int, float, bool)):
+        return value
+    if isinstance(value, dict):
+        return {str(k): _jsonable(v) for k, v in value.items()}
+    if isinstance(value, list):
+        return [_jsonable(v) for v in value]
+    try:
+        json.dumps(value)
+        return value
+    except Exception:
+        return str(value)
+
+
+def _pydantic_model_from_json_schema(schema: Dict[str, Any], *, name: str) -> Type[Any]:
+    """Best-effort conversion from a JSON schema dict to a Pydantic model.
+
+    This exists so structured output requests can remain JSON-safe in durable
+    effect payloads (we persist the schema, not the Python class).
+    """
+    try:
+        from pydantic import BaseModel, create_model
+    except Exception as e:  # pragma: no cover
+        raise RuntimeError(f"Pydantic is required for structured outputs: {e}")
+
+    def _python_type(sub_schema: Any, *, nested_name: str) -> Any:
+        if not isinstance(sub_schema, dict):
+            return Any
+        t = sub_schema.get("type")
+        if t == "string":
+            return str
+        if t == "integer":
+            return int
+        if t == "number":
+            return float
+        if t == "boolean":
+            return bool
+        if t == "array":
+            items = sub_schema.get("items")
+            return list[_python_type(items, nested_name=f"{nested_name}Item")]  # type: ignore[index]
+        if t == "object":
+            props = sub_schema.get("properties")
+            if isinstance(props, dict) and props:
+                return _model(sub_schema, name=nested_name)
+            return Dict[str, Any]
+        return Any
+
+    def _model(obj_schema: Dict[str, Any], *, name: str) -> Type[BaseModel]:
+        if obj_schema.get("type") != "object":
+            raise ValueError("response_schema must be a JSON schema object")
+        props = obj_schema.get("properties")
+        if not isinstance(props, dict) or not props:
+            raise ValueError("response_schema must define properties")
+        required_raw = obj_schema.get("required")
+        required: Set[str] = set()
+        if isinstance(required_raw, list):
+            required = {str(x) for x in required_raw if isinstance(x, str)}
+
+        fields: Dict[str, Tuple[Any, Any]] = {}
+        for prop_name, prop_schema in props.items():
+            if not isinstance(prop_name, str) or not prop_name.strip():
+                continue
+            # Keep things simple: only support identifier-like names to avoid aliasing issues.
+            if not prop_name.isidentifier():
+                raise ValueError(
+                    f"Invalid property name '{prop_name}'. Use identifier-style names (letters, digits, underscore)."
+                )
+            t = _python_type(prop_schema, nested_name=f"{name}_{prop_name}")
+            if prop_name in required:
+                fields[prop_name] = (t, ...)
+            else:
+                fields[prop_name] = (Optional[t], None)
+
+        return create_model(name, **fields)  # type: ignore[call-arg]
+
+    return _model(schema, name=name)
+
+
+def _trace_context(run: RunState) -> Dict[str, str]:
+    ctx: Dict[str, str] = {
+        "run_id": run.run_id,
+        "workflow_id": str(run.workflow_id),
+        "node_id": str(run.current_node),
+    }
+    if run.actor_id:
+        ctx["actor_id"] = str(run.actor_id)
+    session_id = getattr(run, "session_id", None)
+    if session_id:
+        ctx["session_id"] = str(session_id)
+    if run.parent_run_id:
+        ctx["parent_run_id"] = str(run.parent_run_id)
+    return ctx
+
+
 def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
     def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
         payload = dict(effect.payload or {})
         prompt = payload.get("prompt")
         messages = payload.get("messages")
         system_prompt = payload.get("system_prompt")
+        provider = payload.get("provider")
+        model = payload.get("model")
         tools = payload.get("tools")
-
+        response_schema = payload.get("response_schema")
+        response_schema_name = payload.get("response_schema_name")
+        raw_params = payload.get("params")
+        params = dict(raw_params) if isinstance(raw_params, dict) else {}
+
+        # Propagate durable trace context into AbstractCore calls.
+        trace_metadata = params.get("trace_metadata")
+        if not isinstance(trace_metadata, dict):
+            trace_metadata = {}
+        trace_metadata.update(_trace_context(run))
+        params["trace_metadata"] = trace_metadata
+
+        # Support per-effect routing: allow the payload to override provider/model.
+        # These reserved keys are consumed by MultiLocalAbstractCoreLLMClient and
+        # ignored by LocalAbstractCoreLLMClient.
+        if isinstance(provider, str) and provider.strip():
+            params["_provider"] = provider.strip()
+        if isinstance(model, str) and model.strip():
+            params["_model"] = model.strip()
 
         if not prompt and not messages:
             return EffectOutcome.failed("llm_call requires payload.prompt or payload.messages")
 
         try:
+            if isinstance(response_schema, dict) and response_schema:
+                model_name = (
+                    str(response_schema_name).strip()
+                    if isinstance(response_schema_name, str) and response_schema_name.strip()
+                    else "StructuredOutput"
+                )
+                params["response_model"] = _pydantic_model_from_json_schema(response_schema, name=model_name)
+
+            runtime_observability = {
+                "llm_generate_kwargs": _jsonable(
+                    {
+                        "prompt": str(prompt or ""),
+                        "messages": messages,
+                        "system_prompt": system_prompt,
+                        "tools": tools,
+                        "params": params,
+                    }
+                ),
+            }
+
             result = llm.generate(
                 prompt=str(prompt or ""),
                 messages=messages,
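The handler now reads everything it needs from a JSON-safe payload: `provider`/`model` become the reserved `_provider`/`_model` routing params, and `response_schema` is only turned into a Pydantic class at call time. A sketch of such a payload (the key names mirror the `payload.get(...)` calls above; the concrete provider/model/schema values and how the payload is attached to an Effect are illustrative assumptions):

```python
# Illustrative LLM_CALL payload; only the key names are taken from the handler above.
payload = {
    "prompt": "Summarize the latest run.",
    "provider": "lmstudio",              # optional per-effect routing override
    "model": "qwen3-4b",                 # consumed via params["_provider"] / params["_model"]
    "response_schema": {                 # persisted as a dict, never as a Python class
        "type": "object",
        "properties": {
            "summary": {"type": "string"},
            "score": {"type": "number"},
        },
        "required": ["summary"],
    },
    "response_schema_name": "RunSummary",
    "params": {"temperature": 0.2},
}
```

Under `_pydantic_model_from_json_schema`, `summary` becomes a required `str` field and `score` an `Optional[float]` defaulting to `None`.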
@@ -42,6 +183,16 @@ def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
                 tools=tools,
                 params=params,
             )
+            if isinstance(result, dict):
+                meta = result.get("metadata")
+                if not isinstance(meta, dict):
+                    meta = {}
+                result["metadata"] = meta
+                existing = meta.get("_runtime_observability")
+                if not isinstance(existing, dict):
+                    existing = {}
+                meta["_runtime_observability"] = existing
+                existing.update(runtime_observability)
             return EffectOutcome.completed(result=result)
         except Exception as e:
             logger.error("LLM_CALL failed", error=str(e))
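With this change the exact arguments sent to the LLM survive next to the provider's response, so a later reader can recover them from the merged metadata. A read-path sketch (what else `result` contains depends on the LLM client and is not shown in this hunk):

```python
# Illustrative read path for the observability block merged in above.
obs = result["metadata"]["_runtime_observability"]
llm_kwargs = obs["llm_generate_kwargs"]            # JSON-safe prompt/messages/system_prompt/tools/params
trace = llm_kwargs["params"]["trace_metadata"]     # run_id / workflow_id / node_id from _trace_context()
```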
@@ -50,15 +201,93 @@ def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
     return _handler
 
 
-def make_tool_calls_handler(*, tools: ToolExecutor) -> EffectHandler:
+def make_tool_calls_handler(*, tools: Optional[ToolExecutor] = None) -> EffectHandler:
+    """Create a TOOL_CALLS effect handler.
+
+    Tool execution is performed exclusively via the host-configured ToolExecutor.
+    This keeps `RunState.vars` and ledger payloads JSON-safe (durable execution).
+    """
     def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
         payload = dict(effect.payload or {})
         tool_calls = payload.get("tool_calls")
         if not isinstance(tool_calls, list):
             return EffectOutcome.failed("tool_calls requires payload.tool_calls (list)")
+        allowed_tools_raw = payload.get("allowed_tools")
+        allowlist_enabled = isinstance(allowed_tools_raw, list)
+        allowed_tools: Set[str] = set()
+        if allowlist_enabled:
+            allowed_tools = {str(t) for t in allowed_tools_raw if isinstance(t, str) and t.strip()}
+
+        if tools is None:
+            return EffectOutcome.failed(
+                "TOOL_CALLS requires a ToolExecutor; configure Runtime with "
+                "MappingToolExecutor/AbstractCoreToolExecutor/PassthroughToolExecutor."
+            )
+
+        original_call_count = len(tool_calls)
+
+        # Always block non-dict tool call entries: passthrough hosts expect dicts and may crash otherwise.
+        blocked_by_index: Dict[int, Dict[str, Any]] = {}
+        filtered_tool_calls: list[Dict[str, Any]] = []
+
+        # For evidence and deterministic resume merging, keep a positional tool call list aligned to the
+        # *original* tool call order. Blocked entries are represented as empty-args stubs.
+        tool_calls_for_evidence: list[Dict[str, Any]] = []
+
+        for idx, tc in enumerate(tool_calls):
+            if not isinstance(tc, dict):
+                blocked_by_index[idx] = {
+                    "call_id": "",
+                    "name": "",
+                    "success": False,
+                    "output": None,
+                    "error": "Invalid tool call (expected an object)",
+                }
+                tool_calls_for_evidence.append({})
+                continue
+
+            name_raw = tc.get("name")
+            name = name_raw.strip() if isinstance(name_raw, str) else ""
+            call_id = str(tc.get("call_id") or "")
+
+            if allowlist_enabled:
+                if not name:
+                    blocked_by_index[idx] = {
+                        "call_id": call_id,
+                        "name": "",
+                        "success": False,
+                        "output": None,
+                        "error": "Tool call missing a valid name",
+                    }
+                    tool_calls_for_evidence.append({"call_id": call_id, "name": "", "arguments": {}})
+                    continue
+                if name not in allowed_tools:
+                    blocked_by_index[idx] = {
+                        "call_id": call_id,
+                        "name": name,
+                        "success": False,
+                        "output": None,
+                        "error": f"Tool '{name}' is not allowed for this node",
+                    }
+                    # Do not leak arguments for disallowed tools into the durable wait payload.
+                    tool_calls_for_evidence.append({"call_id": call_id, "name": name, "arguments": {}})
+                    continue
+
+            # Allowed (or allowlist disabled): include for execution and keep full args for evidence.
+            filtered_tool_calls.append(tc)
+            tool_calls_for_evidence.append(tc)
+
+        # If everything was blocked, complete immediately with blocked results (no waiting/execution).
+        if not filtered_tool_calls and blocked_by_index:
+            return EffectOutcome.completed(
+                result={
+                    "mode": "executed",
+                    "results": [blocked_by_index[i] for i in sorted(blocked_by_index.keys())],
+                }
+            )
 
         try:
-            result = tools.execute(tool_calls=
+            result = tools.execute(tool_calls=filtered_tool_calls)
         except Exception as e:
             logger.error("TOOL_CALLS execution failed", error=str(e))
             return EffectOutcome.failed(str(e))
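The allowlist is applied per effect: only calls whose `name` appears in `payload["allowed_tools"]` reach the executor, and every blocked entry is replaced by a failed result at its original position. An example payload (the key names and the blocked-result shape come from the code above; the tool names and arguments are illustrative):

```python
# Illustrative TOOL_CALLS payload with a per-node allowlist.
payload = {
    "tool_calls": [
        {"call_id": "1", "name": "read_file", "arguments": {"file_path": "README.md"}},
        {"call_id": "2", "name": "execute_command", "arguments": {"command": "rm -rf /"}},
    ],
    "allowed_tools": ["read_file", "list_files"],
}
# Only the read_file call is passed to tools.execute(); call 2 yields a blocked result:
# {"call_id": "2", "name": "execute_command", "success": False, "output": None,
#  "error": "Tool 'execute_command' is not allowed for this node"}
```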
@@ -66,24 +295,74 @@ def make_tool_calls_handler(*, tools: ToolExecutor) -> EffectHandler:
         mode = result.get("mode")
         if mode and mode != "executed":
             # Passthrough/untrusted mode: pause until an external host resumes with tool results.
-
+            #
+            # Correctness/security: persist only allowlist-safe tool calls in the wait payload.
+            wait_key = payload.get("wait_key") or result.get("wait_key") or f"tool_calls:{run.run_id}:{run.current_node}"
+            raw_wait_reason = result.get("wait_reason")
+            wait_reason = WaitReason.EVENT
+            if isinstance(raw_wait_reason, str) and raw_wait_reason.strip():
+                try:
+                    wait_reason = WaitReason(raw_wait_reason.strip())
+                except ValueError:
+                    wait_reason = WaitReason.EVENT
+            elif str(mode).strip().lower() == "delegated":
+                wait_reason = WaitReason.JOB
+
+            tool_calls_for_wait = result.get("tool_calls")
+            if not isinstance(tool_calls_for_wait, list):
+                tool_calls_for_wait = filtered_tool_calls
+
+            details: Dict[str, Any] = {"mode": mode, "tool_calls": _jsonable(tool_calls_for_wait)}
+            executor_details = result.get("details")
+            if isinstance(executor_details, dict) and executor_details:
+                # Avoid collisions with our reserved keys.
+                details["executor"] = _jsonable(executor_details)
+            if blocked_by_index:
+                details["original_call_count"] = original_call_count
+                details["blocked_by_index"] = {str(k): _jsonable(v) for k, v in blocked_by_index.items()}
+                details["tool_calls_for_evidence"] = _jsonable(tool_calls_for_evidence)
+
             wait = WaitState(
-                reason=
+                reason=wait_reason,
                 wait_key=str(wait_key),
                 resume_to_node=payload.get("resume_to_node") or default_next_node,
                 result_key=effect.result_key,
-                details=
+                details=details,
             )
             return EffectOutcome.waiting(wait)
 
+        if blocked_by_index:
+            existing_results = result.get("results")
+            if isinstance(existing_results, list):
+                merged_results: list[Any] = []
+                executed_iter = iter(existing_results)
+                for idx in range(len(tool_calls)):
+                    blocked = blocked_by_index.get(idx)
+                    if blocked is not None:
+                        merged_results.append(blocked)
+                        continue
+                    try:
+                        merged_results.append(next(executed_iter))
+                    except StopIteration:
+                        merged_results.append(
+                            {
+                                "call_id": "",
+                                "name": "",
+                                "success": False,
+                                "output": None,
+                                "error": "Missing tool result",
+                            }
+                        )
+                result = dict(result)
+                result["results"] = merged_results
+
         return EffectOutcome.completed(result=result)
 
     return _handler
 
 
-def build_effect_handlers(*, llm: AbstractCoreLLMClient, tools: ToolExecutor) -> Dict[EffectType, Any]:
+def build_effect_handlers(*, llm: AbstractCoreLLMClient, tools: ToolExecutor = None) -> Dict[EffectType, Any]:
     return {
         EffectType.LLM_CALL: make_llm_call_handler(llm=llm),
         EffectType.TOOL_CALLS: make_tool_calls_handler(tools=tools),
     }
-
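Taken together with the default toolsets above, host wiring stays small. A sketch under the assumption that `MappingToolExecutor` accepts the name-to-callable map (only `build_default_tool_map` and `build_effect_handlers` are defined in this diff, so the executor and runtime lines are left as comments):

```python
from abstractruntime.integrations.abstractcore.default_tools import build_default_tool_map
from abstractruntime.integrations.abstractcore.effect_handlers import build_effect_handlers

# executor = MappingToolExecutor(build_default_tool_map())   # assumed constructor
# handlers = build_effect_handlers(llm=my_llm_client, tools=executor)
#
# Omitting `tools` is now tolerated: build_effect_handlers(llm=my_llm_client)
# still returns both handlers, and TOOL_CALLS effects then fail with a clear
# "requires a ToolExecutor" outcome instead of raising at wiring time.
```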