AbstractRuntime 0.2.0__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractruntime/__init__.py +83 -3
- abstractruntime/core/config.py +82 -2
- abstractruntime/core/event_keys.py +62 -0
- abstractruntime/core/models.py +17 -1
- abstractruntime/core/policy.py +74 -3
- abstractruntime/core/runtime.py +3334 -28
- abstractruntime/core/vars.py +103 -2
- abstractruntime/evidence/__init__.py +10 -0
- abstractruntime/evidence/recorder.py +325 -0
- abstractruntime/history_bundle.py +772 -0
- abstractruntime/integrations/abstractcore/__init__.py +6 -0
- abstractruntime/integrations/abstractcore/constants.py +19 -0
- abstractruntime/integrations/abstractcore/default_tools.py +258 -0
- abstractruntime/integrations/abstractcore/effect_handlers.py +2622 -32
- abstractruntime/integrations/abstractcore/embeddings_client.py +69 -0
- abstractruntime/integrations/abstractcore/factory.py +149 -16
- abstractruntime/integrations/abstractcore/llm_client.py +891 -55
- abstractruntime/integrations/abstractcore/mcp_worker.py +587 -0
- abstractruntime/integrations/abstractcore/observability.py +80 -0
- abstractruntime/integrations/abstractcore/session_attachments.py +946 -0
- abstractruntime/integrations/abstractcore/summarizer.py +154 -0
- abstractruntime/integrations/abstractcore/tool_executor.py +509 -31
- abstractruntime/integrations/abstractcore/workspace_scoped_tools.py +561 -0
- abstractruntime/integrations/abstractmemory/__init__.py +3 -0
- abstractruntime/integrations/abstractmemory/effect_handlers.py +946 -0
- abstractruntime/memory/__init__.py +21 -0
- abstractruntime/memory/active_context.py +751 -0
- abstractruntime/memory/active_memory.py +452 -0
- abstractruntime/memory/compaction.py +105 -0
- abstractruntime/memory/kg_packets.py +164 -0
- abstractruntime/memory/memact_composer.py +175 -0
- abstractruntime/memory/recall_levels.py +163 -0
- abstractruntime/memory/token_budget.py +86 -0
- abstractruntime/rendering/__init__.py +17 -0
- abstractruntime/rendering/agent_trace_report.py +256 -0
- abstractruntime/rendering/json_stringify.py +136 -0
- abstractruntime/scheduler/scheduler.py +93 -2
- abstractruntime/storage/__init__.py +7 -2
- abstractruntime/storage/artifacts.py +175 -32
- abstractruntime/storage/base.py +17 -1
- abstractruntime/storage/commands.py +339 -0
- abstractruntime/storage/in_memory.py +41 -1
- abstractruntime/storage/json_files.py +210 -14
- abstractruntime/storage/observable.py +136 -0
- abstractruntime/storage/offloading.py +433 -0
- abstractruntime/storage/sqlite.py +836 -0
- abstractruntime/visualflow_compiler/__init__.py +29 -0
- abstractruntime/visualflow_compiler/adapters/__init__.py +11 -0
- abstractruntime/visualflow_compiler/adapters/agent_adapter.py +126 -0
- abstractruntime/visualflow_compiler/adapters/context_adapter.py +109 -0
- abstractruntime/visualflow_compiler/adapters/control_adapter.py +615 -0
- abstractruntime/visualflow_compiler/adapters/effect_adapter.py +1051 -0
- abstractruntime/visualflow_compiler/adapters/event_adapter.py +307 -0
- abstractruntime/visualflow_compiler/adapters/function_adapter.py +97 -0
- abstractruntime/visualflow_compiler/adapters/memact_adapter.py +114 -0
- abstractruntime/visualflow_compiler/adapters/subflow_adapter.py +74 -0
- abstractruntime/visualflow_compiler/adapters/variable_adapter.py +316 -0
- abstractruntime/visualflow_compiler/compiler.py +3832 -0
- abstractruntime/visualflow_compiler/flow.py +247 -0
- abstractruntime/visualflow_compiler/visual/__init__.py +13 -0
- abstractruntime/visualflow_compiler/visual/agent_ids.py +29 -0
- abstractruntime/visualflow_compiler/visual/builtins.py +1376 -0
- abstractruntime/visualflow_compiler/visual/code_executor.py +214 -0
- abstractruntime/visualflow_compiler/visual/executor.py +2804 -0
- abstractruntime/visualflow_compiler/visual/models.py +211 -0
- abstractruntime/workflow_bundle/__init__.py +52 -0
- abstractruntime/workflow_bundle/models.py +236 -0
- abstractruntime/workflow_bundle/packer.py +317 -0
- abstractruntime/workflow_bundle/reader.py +87 -0
- abstractruntime/workflow_bundle/registry.py +587 -0
- abstractruntime-0.4.1.dist-info/METADATA +177 -0
- abstractruntime-0.4.1.dist-info/RECORD +86 -0
- abstractruntime-0.4.1.dist-info/entry_points.txt +2 -0
- abstractruntime-0.2.0.dist-info/METADATA +0 -163
- abstractruntime-0.2.0.dist-info/RECORD +0 -32
- {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/WHEEL +0 -0
- {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.1.dist-info}/licenses/LICENSE +0 -0
|
@@ -11,19 +11,381 @@ They are designed to keep `RunState.vars` JSON-safe.
|
|
|
11
11
|
|
|
12
12
|
from __future__ import annotations
|
|
13
13
|
|
|
14
|
-
|
|
14
|
+
import json
|
|
15
|
+
import hashlib
|
|
16
|
+
import os
|
|
17
|
+
import mimetypes
|
|
18
|
+
import re
|
|
19
|
+
import tempfile
|
|
20
|
+
import datetime
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
from typing import Any, Dict, Optional, Set, Tuple, Type
|
|
15
23
|
|
|
16
|
-
from ...core.models import Effect, EffectType, RunState, WaitReason, WaitState
|
|
24
|
+
from ...core.models import Effect, EffectType, RunState, RunStatus, WaitReason, WaitState
|
|
17
25
|
from ...core.runtime import EffectOutcome, EffectHandler
|
|
26
|
+
from ...storage.base import RunStore
|
|
27
|
+
from ...storage.artifacts import ArtifactStore, is_artifact_ref, get_artifact_id
|
|
18
28
|
from .llm_client import AbstractCoreLLMClient
|
|
19
29
|
from .tool_executor import ToolExecutor
|
|
20
30
|
from .logging import get_logger
|
|
31
|
+
from .session_attachments import (
|
|
32
|
+
dedup_messages_view,
|
|
33
|
+
execute_open_attachment,
|
|
34
|
+
list_session_attachments,
|
|
35
|
+
render_active_attachments_system_message,
|
|
36
|
+
render_session_attachments_system_message,
|
|
37
|
+
session_memory_owner_run_id,
|
|
38
|
+
)
|
|
39
|
+
from .workspace_scoped_tools import WorkspaceScope, rewrite_tool_arguments
|
|
21
40
|
|
|
22
41
|
logger = get_logger(__name__)
|
|
23
42
|
|
|
43
|
+
_JSON_SCHEMA_PRIMITIVE_TYPES: Set[str] = {"string", "integer", "number", "boolean", "array", "object", "null"}
|
|
44
|
+
|
|
45
|
+
_ABS_PATH_RE = re.compile(r"^[a-zA-Z]:[\\\\/]")
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _is_abs_path_like(path: str) -> bool:
|
|
49
|
+
pth = str(path or "").strip()
|
|
50
|
+
if not pth:
|
|
51
|
+
return False
|
|
52
|
+
if pth.startswith("/"):
|
|
53
|
+
return True
|
|
54
|
+
return bool(_ABS_PATH_RE.match(pth))
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _guess_ext_from_content_type(content_type: str) -> str:
|
|
58
|
+
ct = str(content_type or "").strip().lower()
|
|
59
|
+
if not ct:
|
|
60
|
+
return ""
|
|
61
|
+
ext = mimetypes.guess_extension(ct) or ""
|
|
62
|
+
if ext == ".jpe":
|
|
63
|
+
return ".jpg"
|
|
64
|
+
return ext
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _safe_materialized_filename(*, desired: str, artifact_id: str, ext: str) -> str:
|
|
68
|
+
"""Return a filesystem-safe filename for a materialized artifact.
|
|
69
|
+
|
|
70
|
+
Used for temp-file materialization only; avoids leaking absolute paths and keeps names
|
|
71
|
+
conservative for cross-platform filesystem limits.
|
|
72
|
+
"""
|
|
73
|
+
label = str(desired or "").replace("\\", "/").strip()
|
|
74
|
+
if "/" in label or _is_abs_path_like(label):
|
|
75
|
+
label = label.rsplit("/", 1)[-1]
|
|
76
|
+
label = label.strip().strip("/")
|
|
77
|
+
if not label:
|
|
78
|
+
label = str(artifact_id or "").strip() or "attachment"
|
|
79
|
+
if ext and not Path(label).suffix:
|
|
80
|
+
label = f"{label}{ext}"
|
|
81
|
+
|
|
82
|
+
safe = re.sub(r"[^a-zA-Z0-9._-]+", "_", label).strip("._") or "attachment"
|
|
83
|
+
try:
|
|
84
|
+
stem = Path(safe).stem
|
|
85
|
+
suf = Path(safe).suffix
|
|
86
|
+
except Exception:
|
|
87
|
+
stem, suf = safe, ""
|
|
88
|
+
short = str(artifact_id or "").strip()[:8]
|
|
89
|
+
if short:
|
|
90
|
+
safe = f"{stem}__{short}{suf}"
|
|
91
|
+
|
|
92
|
+
max_len = 220
|
|
93
|
+
if len(safe) > max_len:
|
|
94
|
+
try:
|
|
95
|
+
suf = Path(safe).suffix
|
|
96
|
+
except Exception:
|
|
97
|
+
suf = ""
|
|
98
|
+
keep = max_len - len(suf)
|
|
99
|
+
safe = safe[: max(1, keep)] + suf
|
|
100
|
+
return safe
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _jsonable(value: Any) -> Any:
|
|
104
|
+
"""Best-effort conversion to JSON-safe objects.
|
|
105
|
+
|
|
106
|
+
Runtime traces and effect outcomes are persisted in RunState.vars and must remain JSON-safe.
|
|
107
|
+
"""
|
|
108
|
+
if value is None:
|
|
109
|
+
return None
|
|
110
|
+
if isinstance(value, (str, int, float, bool)):
|
|
111
|
+
return value
|
|
112
|
+
if isinstance(value, dict):
|
|
113
|
+
return {str(k): _jsonable(v) for k, v in value.items()}
|
|
114
|
+
if isinstance(value, list):
|
|
115
|
+
return [_jsonable(v) for v in value]
|
|
116
|
+
try:
|
|
117
|
+
json.dumps(value)
|
|
118
|
+
return value
|
|
119
|
+
except Exception:
|
|
120
|
+
return str(value)
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
def _normalize_response_schema(raw: Any) -> Optional[Dict[str, Any]]:
|
|
124
|
+
"""Normalize user-provided schema inputs into a JSON Schema dict (object root).
|
|
125
|
+
|
|
126
|
+
Supported inputs (best-effort):
|
|
127
|
+
- JSON Schema object:
|
|
128
|
+
{"type":"object","properties":{...}, "required":[...], ...}
|
|
129
|
+
(also accepts missing type when properties exist)
|
|
130
|
+
- OpenAI/LMStudio wrapper shapes:
|
|
131
|
+
{"type":"json_schema","json_schema":{"schema":{...}}}
|
|
132
|
+
{"json_schema":{"schema":{...}}}
|
|
133
|
+
{"schema":{...}} (inner wrapper copy/pasted from provider docs)
|
|
134
|
+
- "Field map" shortcut (common authoring mistake but unambiguous intent):
|
|
135
|
+
{"choice":"...", "score": 0, "meta": {"foo":"bar"}, ...}
|
|
136
|
+
Coerces into a JSON Schema object with properties inferred from values.
|
|
137
|
+
"""
|
|
138
|
+
|
|
139
|
+
def _is_schema_type(value: Any) -> bool:
|
|
140
|
+
if isinstance(value, str):
|
|
141
|
+
return value in _JSON_SCHEMA_PRIMITIVE_TYPES
|
|
142
|
+
if isinstance(value, list) and value and all(isinstance(x, str) for x in value):
|
|
143
|
+
return all(x in _JSON_SCHEMA_PRIMITIVE_TYPES for x in value)
|
|
144
|
+
return False
|
|
145
|
+
|
|
146
|
+
def _looks_like_json_schema(obj: Dict[str, Any]) -> bool:
|
|
147
|
+
if "$schema" in obj or "$id" in obj or "$ref" in obj or "$defs" in obj or "definitions" in obj:
|
|
148
|
+
return True
|
|
149
|
+
if "oneOf" in obj or "anyOf" in obj or "allOf" in obj:
|
|
150
|
+
return True
|
|
151
|
+
if "enum" in obj or "const" in obj:
|
|
152
|
+
return True
|
|
153
|
+
if "items" in obj:
|
|
154
|
+
return True
|
|
155
|
+
if "required" in obj and isinstance(obj.get("required"), list):
|
|
156
|
+
return True
|
|
157
|
+
props = obj.get("properties")
|
|
158
|
+
if isinstance(props, dict):
|
|
159
|
+
return True
|
|
160
|
+
if "type" in obj and _is_schema_type(obj.get("type")):
|
|
161
|
+
return True
|
|
162
|
+
return False
|
|
163
|
+
|
|
164
|
+
def _unwrap_wrapper(obj: Dict[str, Any]) -> Dict[str, Any]:
|
|
165
|
+
current = dict(obj)
|
|
166
|
+
|
|
167
|
+
# If someone pasted an enclosing request object, tolerate common keys.
|
|
168
|
+
for wrapper_key in ("response_format", "responseFormat"):
|
|
169
|
+
inner = current.get(wrapper_key)
|
|
170
|
+
if isinstance(inner, dict):
|
|
171
|
+
current = dict(inner)
|
|
172
|
+
|
|
173
|
+
# OpenAI/LMStudio wrapper: {type:"json_schema", json_schema:{schema:{...}}}
|
|
174
|
+
if current.get("type") == "json_schema" and isinstance(current.get("json_schema"), dict):
|
|
175
|
+
inner = current.get("json_schema")
|
|
176
|
+
if isinstance(inner, dict) and isinstance(inner.get("schema"), dict):
|
|
177
|
+
return dict(inner.get("schema") or {})
|
|
178
|
+
|
|
179
|
+
# Slightly-less-wrapped: {json_schema:{schema:{...}}}
|
|
180
|
+
if isinstance(current.get("json_schema"), dict):
|
|
181
|
+
inner = current.get("json_schema")
|
|
182
|
+
if isinstance(inner, dict) and isinstance(inner.get("schema"), dict):
|
|
183
|
+
return dict(inner.get("schema") or {})
|
|
184
|
+
|
|
185
|
+
# Inner wrapper copy/paste: {schema:{...}} or {name,strict,schema:{...}}
|
|
186
|
+
if "schema" in current and isinstance(current.get("schema"), dict) and not _looks_like_json_schema(current):
|
|
187
|
+
return dict(current.get("schema") or {})
|
|
188
|
+
|
|
189
|
+
return current
|
|
190
|
+
|
|
191
|
+
def _infer_schema_from_value(value: Any) -> Dict[str, Any]:
|
|
192
|
+
if value is None:
|
|
193
|
+
return {}
|
|
194
|
+
if isinstance(value, bool):
|
|
195
|
+
return {"type": "boolean"}
|
|
196
|
+
if isinstance(value, int) and not isinstance(value, bool):
|
|
197
|
+
return {"type": "integer"}
|
|
198
|
+
if isinstance(value, float):
|
|
199
|
+
return {"type": "number"}
|
|
200
|
+
if isinstance(value, str):
|
|
201
|
+
return {"type": "string", "description": value}
|
|
202
|
+
if isinstance(value, list):
|
|
203
|
+
# Prefer a simple, safe array schema; do not attempt enum constraints here.
|
|
204
|
+
item_schema: Dict[str, Any] = {}
|
|
205
|
+
for item in value:
|
|
206
|
+
if item is None:
|
|
207
|
+
continue
|
|
208
|
+
if isinstance(item, bool):
|
|
209
|
+
item_schema = {"type": "boolean"}
|
|
210
|
+
break
|
|
211
|
+
if isinstance(item, int) and not isinstance(item, bool):
|
|
212
|
+
item_schema = {"type": "integer"}
|
|
213
|
+
break
|
|
214
|
+
if isinstance(item, float):
|
|
215
|
+
item_schema = {"type": "number"}
|
|
216
|
+
break
|
|
217
|
+
if isinstance(item, str):
|
|
218
|
+
item_schema = {"type": "string"}
|
|
219
|
+
break
|
|
220
|
+
if isinstance(item, dict):
|
|
221
|
+
item_schema = _coerce_object_schema(item)
|
|
222
|
+
break
|
|
223
|
+
out: Dict[str, Any] = {"type": "array"}
|
|
224
|
+
if item_schema:
|
|
225
|
+
out["items"] = item_schema
|
|
226
|
+
return out
|
|
227
|
+
if isinstance(value, dict):
|
|
228
|
+
# If it already looks like a schema, keep it as-is (with minor fixes).
|
|
229
|
+
if _looks_like_json_schema(value):
|
|
230
|
+
return _coerce_object_schema(value)
|
|
231
|
+
# Otherwise treat nested dict as another field-map object.
|
|
232
|
+
return _coerce_object_schema(value)
|
|
233
|
+
return {"type": "string", "description": str(value)}
|
|
234
|
+
|
|
235
|
+
def _coerce_object_schema(obj: Dict[str, Any]) -> Dict[str, Any]:
|
|
236
|
+
# If it's already a JSON schema, normalize the minimal invariants we need.
|
|
237
|
+
if _looks_like_json_schema(obj):
|
|
238
|
+
out = dict(obj)
|
|
239
|
+
props = out.get("properties")
|
|
240
|
+
if isinstance(props, dict) and out.get("type") is None:
|
|
241
|
+
out["type"] = "object"
|
|
242
|
+
# Nothing else to do here; deeper normalization is handled by the pydantic conversion.
|
|
243
|
+
return out
|
|
244
|
+
|
|
245
|
+
# Otherwise, interpret as "properties map" (field → schema/description/example).
|
|
246
|
+
properties: Dict[str, Any] = {}
|
|
247
|
+
required: list[str] = []
|
|
248
|
+
for k, v in obj.items():
|
|
249
|
+
if not isinstance(k, str) or not k.strip():
|
|
250
|
+
continue
|
|
251
|
+
key = k.strip()
|
|
252
|
+
required.append(key)
|
|
253
|
+
properties[key] = _infer_schema_from_value(v)
|
|
254
|
+
|
|
255
|
+
schema: Dict[str, Any] = {"type": "object", "properties": properties}
|
|
256
|
+
if required:
|
|
257
|
+
schema["required"] = required
|
|
258
|
+
return schema
|
|
259
|
+
|
|
260
|
+
if raw is None:
|
|
261
|
+
return None
|
|
262
|
+
|
|
263
|
+
if isinstance(raw, str) and raw.strip():
|
|
264
|
+
try:
|
|
265
|
+
parsed = json.loads(raw)
|
|
266
|
+
raw = parsed
|
|
267
|
+
except Exception:
|
|
268
|
+
# Keep raw string; caller will treat as absent/invalid.
|
|
269
|
+
return None
|
|
270
|
+
|
|
271
|
+
if not isinstance(raw, dict) or not raw:
|
|
272
|
+
return None
|
|
273
|
+
|
|
274
|
+
candidate = _unwrap_wrapper(raw)
|
|
275
|
+
if not isinstance(candidate, dict) or not candidate:
|
|
276
|
+
return None
|
|
277
|
+
|
|
278
|
+
normalized = _coerce_object_schema(candidate)
|
|
279
|
+
return normalized if isinstance(normalized, dict) and normalized else None
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
def _pydantic_model_from_json_schema(schema: Dict[str, Any], *, name: str) -> Type[Any]:
|
|
283
|
+
"""Best-effort conversion from a JSON schema dict to a Pydantic model.
|
|
284
|
+
|
|
285
|
+
This exists so structured output requests can remain JSON-safe in durable
|
|
286
|
+
effect payloads (we persist the schema, not the Python class).
|
|
287
|
+
"""
|
|
288
|
+
try:
|
|
289
|
+
from pydantic import BaseModel, create_model
|
|
290
|
+
except Exception as e: # pragma: no cover
|
|
291
|
+
raise RuntimeError(f"Pydantic is required for structured outputs: {e}")
|
|
292
|
+
|
|
293
|
+
from typing import Literal, Union
|
|
294
|
+
|
|
295
|
+
NoneType = type(None)
|
|
296
|
+
|
|
297
|
+
def _python_type(sub_schema: Any, *, nested_name: str) -> Any:
|
|
298
|
+
if not isinstance(sub_schema, dict):
|
|
299
|
+
return Any
|
|
300
|
+
# Enums: represent as Literal[...] so Pydantic can enforce allowed values.
|
|
301
|
+
enum_raw = sub_schema.get("enum")
|
|
302
|
+
if isinstance(enum_raw, list) and enum_raw:
|
|
303
|
+
try:
|
|
304
|
+
return Literal.__getitem__(tuple(enum_raw)) # type: ignore[attr-defined]
|
|
305
|
+
except Exception:
|
|
306
|
+
return Any
|
|
307
|
+
|
|
308
|
+
t = sub_schema.get("type")
|
|
309
|
+
if isinstance(t, list) and t:
|
|
310
|
+
# Union types (e.g. ["string","null"]).
|
|
311
|
+
variants: list[Any] = []
|
|
312
|
+
for tt in t:
|
|
313
|
+
if tt == "null":
|
|
314
|
+
variants.append(NoneType)
|
|
315
|
+
continue
|
|
316
|
+
if isinstance(tt, str) and tt:
|
|
317
|
+
variants.append(_python_type(dict(sub_schema, type=tt), nested_name=nested_name))
|
|
318
|
+
# Drop Any from unions to avoid masking concrete variants.
|
|
319
|
+
variants2 = [v for v in variants if v is not Any]
|
|
320
|
+
variants = variants2 or variants
|
|
321
|
+
if not variants:
|
|
322
|
+
return Any
|
|
323
|
+
if len(variants) == 1:
|
|
324
|
+
return variants[0]
|
|
325
|
+
try:
|
|
326
|
+
return Union.__getitem__(tuple(variants)) # type: ignore[attr-defined]
|
|
327
|
+
except Exception:
|
|
328
|
+
return Any
|
|
329
|
+
if t == "string":
|
|
330
|
+
return str
|
|
331
|
+
if t == "integer":
|
|
332
|
+
return int
|
|
333
|
+
if t == "number":
|
|
334
|
+
return float
|
|
335
|
+
if t == "boolean":
|
|
336
|
+
return bool
|
|
337
|
+
if t == "array":
|
|
338
|
+
items = sub_schema.get("items")
|
|
339
|
+
return list[_python_type(items, nested_name=f"{nested_name}Item")] # type: ignore[index]
|
|
340
|
+
if t == "object":
|
|
341
|
+
props = sub_schema.get("properties")
|
|
342
|
+
if isinstance(props, dict) and props:
|
|
343
|
+
return _model(sub_schema, name=nested_name)
|
|
344
|
+
return Dict[str, Any]
|
|
345
|
+
return Any
|
|
346
|
+
|
|
347
|
+
def _model(obj_schema: Dict[str, Any], *, name: str) -> Type[BaseModel]:
|
|
348
|
+
schema_type = obj_schema.get("type")
|
|
349
|
+
if schema_type is None and isinstance(obj_schema.get("properties"), dict):
|
|
350
|
+
schema_type = "object"
|
|
351
|
+
if isinstance(schema_type, list) and "object" in schema_type:
|
|
352
|
+
schema_type = "object"
|
|
353
|
+
if schema_type != "object":
|
|
354
|
+
raise ValueError("response_schema must be a JSON schema object")
|
|
355
|
+
props = obj_schema.get("properties")
|
|
356
|
+
if not isinstance(props, dict) or not props:
|
|
357
|
+
raise ValueError("response_schema must define properties")
|
|
358
|
+
required_raw = obj_schema.get("required")
|
|
359
|
+
required: Set[str] = set()
|
|
360
|
+
if isinstance(required_raw, list):
|
|
361
|
+
required = {str(x) for x in required_raw if isinstance(x, str)}
|
|
362
|
+
|
|
363
|
+
fields: Dict[str, Tuple[Any, Any]] = {}
|
|
364
|
+
for prop_name, prop_schema in props.items():
|
|
365
|
+
if not isinstance(prop_name, str) or not prop_name.strip():
|
|
366
|
+
continue
|
|
367
|
+
# Keep things simple: only support identifier-like names to avoid aliasing issues.
|
|
368
|
+
if not prop_name.isidentifier():
|
|
369
|
+
raise ValueError(
|
|
370
|
+
f"Invalid property name '{prop_name}'. Use identifier-style names (letters, digits, underscore)."
|
|
371
|
+
)
|
|
372
|
+
t = _python_type(prop_schema, nested_name=f"{name}_{prop_name}")
|
|
373
|
+
if prop_name in required:
|
|
374
|
+
fields[prop_name] = (t, ...)
|
|
375
|
+
else:
|
|
376
|
+
fields[prop_name] = (Optional[t], None)
|
|
377
|
+
|
|
378
|
+
return create_model(name, **fields) # type: ignore[call-arg]
|
|
379
|
+
|
|
380
|
+
return _model(schema, name=name)
|
|
381
|
+
|
|
24
382
|
|
|
25
383
|
def _trace_context(run: RunState) -> Dict[str, str]:
|
|
26
|
-
ctx: Dict[str, str] = {
|
|
384
|
+
ctx: Dict[str, str] = {
|
|
385
|
+
"run_id": run.run_id,
|
|
386
|
+
"workflow_id": str(run.workflow_id),
|
|
387
|
+
"node_id": str(run.current_node),
|
|
388
|
+
}
|
|
27
389
|
if run.actor_id:
|
|
28
390
|
ctx["actor_id"] = str(run.actor_id)
|
|
29
391
|
session_id = getattr(run, "session_id", None)
|
|
@@ -34,13 +396,350 @@ def _trace_context(run: RunState) -> Dict[str, str]:
|
|
|
34
396
|
return ctx
|
|
35
397
|
|
|
36
398
|
|
|
37
|
-
def
|
|
399
|
+
def _truthy_env(name: str) -> bool:
|
|
400
|
+
raw = os.getenv(name)
|
|
401
|
+
if raw is None:
|
|
402
|
+
return False
|
|
403
|
+
s = str(raw).strip().lower()
|
|
404
|
+
return s in {"1", "true", "yes", "y", "on"}
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
def _derive_prompt_cache_key(
|
|
408
|
+
*,
|
|
409
|
+
namespace: str,
|
|
410
|
+
session_id: str,
|
|
411
|
+
provider: str,
|
|
412
|
+
model: str,
|
|
413
|
+
workflow_id: str,
|
|
414
|
+
node_id: str,
|
|
415
|
+
version: int = 1,
|
|
416
|
+
) -> str:
|
|
417
|
+
ns = str(namespace or "").strip() or "session"
|
|
418
|
+
raw = f"v{int(version)}|{session_id}|{provider}|{model}|{workflow_id}|{node_id}"
|
|
419
|
+
digest = hashlib.sha256(raw.encode("utf-8")).hexdigest()[:24]
|
|
420
|
+
return f"{ns}:{digest}"
|
|
421
|
+
|
|
422
|
+
|
|
423
|
+
def _maybe_inject_prompt_cache_key(*, run: RunState, params: Dict[str, Any]) -> None:
|
|
424
|
+
# Explicit caller override wins (including None/empty for "disable").
|
|
425
|
+
if "prompt_cache_key" in params:
|
|
426
|
+
return
|
|
427
|
+
|
|
428
|
+
runtime_ns = run.vars.get("_runtime") if isinstance(run.vars, dict) else None
|
|
429
|
+
cfg = runtime_ns.get("prompt_cache") if isinstance(runtime_ns, dict) else None
|
|
430
|
+
|
|
431
|
+
enabled: bool
|
|
432
|
+
namespace: str = "session"
|
|
433
|
+
if isinstance(cfg, bool):
|
|
434
|
+
enabled = cfg
|
|
435
|
+
elif isinstance(cfg, dict):
|
|
436
|
+
enabled_raw = cfg.get("enabled")
|
|
437
|
+
enabled = bool(enabled_raw) if enabled_raw is not None else True
|
|
438
|
+
ns = cfg.get("namespace")
|
|
439
|
+
if isinstance(ns, str) and ns.strip():
|
|
440
|
+
namespace = ns.strip()
|
|
441
|
+
key_override = cfg.get("key")
|
|
442
|
+
if isinstance(key_override, str) and key_override.strip():
|
|
443
|
+
params["prompt_cache_key"] = key_override.strip()
|
|
444
|
+
return
|
|
445
|
+
else:
|
|
446
|
+
# Operator-wide opt-in (e.g. enable caching for AbstractGateway sessions).
|
|
447
|
+
enabled = _truthy_env("ABSTRACTRUNTIME_PROMPT_CACHE") or _truthy_env("ABSTRACTGATEWAY_PROMPT_CACHE")
|
|
448
|
+
|
|
449
|
+
if not enabled:
|
|
450
|
+
return
|
|
451
|
+
|
|
452
|
+
# Require a session_id to avoid cross-run accidental reuse.
|
|
453
|
+
session_id = str(getattr(run, "session_id", "") or "").strip()
|
|
454
|
+
if not session_id:
|
|
455
|
+
trace_md = params.get("trace_metadata")
|
|
456
|
+
if isinstance(trace_md, dict):
|
|
457
|
+
sid = trace_md.get("session_id")
|
|
458
|
+
if isinstance(sid, str) and sid.strip():
|
|
459
|
+
session_id = sid.strip()
|
|
460
|
+
if not session_id:
|
|
461
|
+
return
|
|
462
|
+
|
|
463
|
+
trace_md = params.get("trace_metadata") if isinstance(params.get("trace_metadata"), dict) else {}
|
|
464
|
+
workflow_id = str(trace_md.get("workflow_id") or run.workflow_id or "").strip()
|
|
465
|
+
node_id = str(trace_md.get("node_id") or run.current_node or "").strip()
|
|
466
|
+
|
|
467
|
+
provider = str(params.get("_provider") or "").strip().lower()
|
|
468
|
+
model = str(params.get("_model") or "").strip()
|
|
469
|
+
if not provider or not model:
|
|
470
|
+
return
|
|
471
|
+
|
|
472
|
+
params["prompt_cache_key"] = _derive_prompt_cache_key(
|
|
473
|
+
namespace=namespace,
|
|
474
|
+
session_id=session_id,
|
|
475
|
+
provider=provider,
|
|
476
|
+
model=model,
|
|
477
|
+
workflow_id=workflow_id,
|
|
478
|
+
node_id=node_id,
|
|
479
|
+
)
|
|
480
|
+
|
|
481
|
+
|
|
482
|
+
def _resolve_llm_call_media(
|
|
483
|
+
media: Any,
|
|
484
|
+
*,
|
|
485
|
+
artifact_store: Optional[ArtifactStore],
|
|
486
|
+
temp_dir: Optional[Path] = None,
|
|
487
|
+
) -> tuple[Optional[list[Any]], Optional[str]]:
|
|
488
|
+
"""Resolve a JSON-safe media list into inputs suitable for AbstractCore `generate(media=...)`.
|
|
489
|
+
|
|
490
|
+
Supported media item shapes (best-effort):
|
|
491
|
+
- str: treated as a local file path (passthrough)
|
|
492
|
+
- {"$artifact": "...", ...}: ArtifactStore-backed attachment (materialized to a temp file)
|
|
493
|
+
- {"artifact_id": "...", ...}: alternate artifact ref form (materialized)
|
|
494
|
+
|
|
495
|
+
Returns:
|
|
496
|
+
(resolved_media, error)
|
|
497
|
+
"""
|
|
498
|
+
if media is None:
|
|
499
|
+
return None, None
|
|
500
|
+
if isinstance(media, tuple):
|
|
501
|
+
media_items = list(media)
|
|
502
|
+
else:
|
|
503
|
+
media_items = media
|
|
504
|
+
if not isinstance(media_items, list) or not media_items:
|
|
505
|
+
return None, None
|
|
506
|
+
|
|
507
|
+
def _artifact_id_from_item(item: Any) -> Optional[str]:
|
|
508
|
+
if isinstance(item, dict):
|
|
509
|
+
if is_artifact_ref(item):
|
|
510
|
+
try:
|
|
511
|
+
aid = get_artifact_id(item)
|
|
512
|
+
except Exception:
|
|
513
|
+
aid = None
|
|
514
|
+
if isinstance(aid, str) and aid.strip():
|
|
515
|
+
return aid.strip()
|
|
516
|
+
raw = item.get("artifact_id")
|
|
517
|
+
if isinstance(raw, str) and raw.strip():
|
|
518
|
+
return raw.strip()
|
|
519
|
+
return None
|
|
520
|
+
|
|
521
|
+
out: list[Any] = []
|
|
522
|
+
for item in media_items:
|
|
523
|
+
if isinstance(item, str):
|
|
524
|
+
path = item.strip()
|
|
525
|
+
if path:
|
|
526
|
+
out.append(path)
|
|
527
|
+
continue
|
|
528
|
+
|
|
529
|
+
artifact_id = _artifact_id_from_item(item)
|
|
530
|
+
if artifact_id is None:
|
|
531
|
+
return None, f"Unsupported media item (expected path or artifact ref): {type(item).__name__}"
|
|
532
|
+
if artifact_store is None:
|
|
533
|
+
return None, "Artifact-backed media requires an ArtifactStore (missing artifact_store)"
|
|
534
|
+
if temp_dir is None:
|
|
535
|
+
return None, "Internal error: temp_dir is required for artifact-backed media"
|
|
536
|
+
|
|
537
|
+
artifact = artifact_store.load(str(artifact_id))
|
|
538
|
+
if artifact is None:
|
|
539
|
+
return None, f"Artifact '{artifact_id}' not found"
|
|
540
|
+
|
|
541
|
+
content = getattr(artifact, "content", None)
|
|
542
|
+
if not isinstance(content, (bytes, bytearray)):
|
|
543
|
+
return None, f"Artifact '{artifact_id}' content is not bytes"
|
|
544
|
+
|
|
545
|
+
# Preserve best-effort filename extension for downstream media detection and label
|
|
546
|
+
# attachments with a safe, non-absolute path identifier.
|
|
547
|
+
filename = ""
|
|
548
|
+
source_path = ""
|
|
549
|
+
if isinstance(item, dict):
|
|
550
|
+
raw_source = item.get("source_path") or item.get("path")
|
|
551
|
+
if isinstance(raw_source, str) and raw_source.strip():
|
|
552
|
+
source_path = raw_source.strip()
|
|
553
|
+
raw_name = source_path or item.get("filename") or item.get("name")
|
|
554
|
+
if isinstance(raw_name, str) and raw_name.strip():
|
|
555
|
+
filename = raw_name.strip()
|
|
556
|
+
ext = Path(filename).suffix if filename else ""
|
|
557
|
+
if not ext:
|
|
558
|
+
ct = str(getattr(getattr(artifact, "metadata", None), "content_type", "") or "")
|
|
559
|
+
ext = _guess_ext_from_content_type(ct)
|
|
560
|
+
|
|
561
|
+
desired = source_path or filename or artifact_id
|
|
562
|
+
safe_name = _safe_materialized_filename(desired=desired, artifact_id=str(artifact_id), ext=str(ext))
|
|
563
|
+
p = temp_dir / safe_name
|
|
564
|
+
try:
|
|
565
|
+
p.write_bytes(bytes(content))
|
|
566
|
+
except Exception as e:
|
|
567
|
+
return None, f"Failed to materialize artifact '{artifact_id}': {e}"
|
|
568
|
+
out.append(str(p))
|
|
569
|
+
|
|
570
|
+
return (out or None), None
|
|
571
|
+
|
|
572
|
+
|
|
573
|
+
def _inline_active_text_attachments(
|
|
574
|
+
*,
|
|
575
|
+
messages: Any,
|
|
576
|
+
media: Any,
|
|
577
|
+
artifact_store: Optional[ArtifactStore],
|
|
578
|
+
temp_dir: Optional[Path],
|
|
579
|
+
max_inline_text_bytes: int,
|
|
580
|
+
) -> tuple[Any, Any]:
|
|
581
|
+
"""Inline small text-like artifact media into the last user message.
|
|
582
|
+
|
|
583
|
+
Returns: (updated_messages, remaining_media)
|
|
584
|
+
|
|
585
|
+
This is a derived view: it does not mutate durable run context.
|
|
586
|
+
"""
|
|
587
|
+
if not isinstance(messages, list) or not messages:
|
|
588
|
+
return messages, media
|
|
589
|
+
if media is None:
|
|
590
|
+
return messages, media
|
|
591
|
+
media_items = list(media) if isinstance(media, (list, tuple)) else None
|
|
592
|
+
if not media_items:
|
|
593
|
+
return messages, media
|
|
594
|
+
if artifact_store is None or temp_dir is None:
|
|
595
|
+
return messages, media
|
|
596
|
+
|
|
597
|
+
user_idx: Optional[int] = None
|
|
598
|
+
for i in range(len(messages) - 1, -1, -1):
|
|
599
|
+
m = messages[i]
|
|
600
|
+
if not isinstance(m, dict):
|
|
601
|
+
continue
|
|
602
|
+
if m.get("role") != "user":
|
|
603
|
+
continue
|
|
604
|
+
if isinstance(m.get("content"), str):
|
|
605
|
+
user_idx = i
|
|
606
|
+
break
|
|
607
|
+
if user_idx is None:
|
|
608
|
+
return messages, media
|
|
609
|
+
|
|
610
|
+
base_text = str(messages[user_idx].get("content") or "")
|
|
611
|
+
|
|
612
|
+
def _is_text_like_content_type(ct: str) -> bool:
|
|
613
|
+
ct_low = str(ct or "").lower().strip()
|
|
614
|
+
if not ct_low:
|
|
615
|
+
return False
|
|
616
|
+
if ct_low.startswith("text/"):
|
|
617
|
+
return True
|
|
618
|
+
return ct_low in {
|
|
619
|
+
"application/json",
|
|
620
|
+
"application/yaml",
|
|
621
|
+
"application/x-yaml",
|
|
622
|
+
"application/xml",
|
|
623
|
+
"application/javascript",
|
|
624
|
+
"application/typescript",
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
def _filename_for_item(item: Dict[str, Any], *, artifact_id: str) -> str:
|
|
628
|
+
name = str(item.get("filename") or "").strip()
|
|
629
|
+
if name:
|
|
630
|
+
return name
|
|
631
|
+
src = str(item.get("source_path") or item.get("path") or "").strip()
|
|
632
|
+
if src:
|
|
633
|
+
try:
|
|
634
|
+
return Path(src).name or src.rsplit("/", 1)[-1]
|
|
635
|
+
except Exception:
|
|
636
|
+
return src.rsplit("/", 1)[-1]
|
|
637
|
+
return artifact_id
|
|
638
|
+
|
|
639
|
+
inline_blocks: list[str] = []
|
|
640
|
+
remaining: list[Any] = []
|
|
641
|
+
|
|
642
|
+
# Import lazily to avoid making media processing a hard dependency of the runtime kernel.
|
|
643
|
+
try:
|
|
644
|
+
from abstractcore.media.auto_handler import AutoMediaHandler # type: ignore
|
|
645
|
+
except Exception:
|
|
646
|
+
AutoMediaHandler = None # type: ignore[assignment]
|
|
647
|
+
|
|
648
|
+
handler = None
|
|
649
|
+
if AutoMediaHandler is not None:
|
|
650
|
+
try:
|
|
651
|
+
handler = AutoMediaHandler(enable_events=False)
|
|
652
|
+
except Exception:
|
|
653
|
+
handler = None
|
|
654
|
+
|
|
655
|
+
for item in media_items:
|
|
656
|
+
if not isinstance(item, dict):
|
|
657
|
+
remaining.append(item)
|
|
658
|
+
continue
|
|
659
|
+
aid = item.get("$artifact") or item.get("artifact_id")
|
|
660
|
+
if not isinstance(aid, str) or not aid.strip():
|
|
661
|
+
remaining.append(item)
|
|
662
|
+
continue
|
|
663
|
+
artifact_id = aid.strip()
|
|
664
|
+
|
|
665
|
+
meta = artifact_store.get_metadata(artifact_id)
|
|
666
|
+
if meta is None:
|
|
667
|
+
remaining.append(item)
|
|
668
|
+
continue
|
|
669
|
+
|
|
670
|
+
ct = str(item.get("content_type") or getattr(meta, "content_type", "") or "")
|
|
671
|
+
if not _is_text_like_content_type(ct):
|
|
672
|
+
remaining.append(item)
|
|
673
|
+
continue
|
|
674
|
+
try:
|
|
675
|
+
size_bytes = int(getattr(meta, "size_bytes", 0) or 0)
|
|
676
|
+
except Exception:
|
|
677
|
+
remaining.append(item)
|
|
678
|
+
continue
|
|
679
|
+
if size_bytes > int(max_inline_text_bytes):
|
|
680
|
+
remaining.append(item)
|
|
681
|
+
continue
|
|
682
|
+
|
|
683
|
+
artifact = artifact_store.load(artifact_id)
|
|
684
|
+
if artifact is None:
|
|
685
|
+
remaining.append(item)
|
|
686
|
+
continue
|
|
687
|
+
content = getattr(artifact, "content", None)
|
|
688
|
+
if not isinstance(content, (bytes, bytearray)):
|
|
689
|
+
remaining.append(item)
|
|
690
|
+
continue
|
|
691
|
+
|
|
692
|
+
# Materialize into temp_dir (required by AbstractCore media processors).
|
|
693
|
+
name = _filename_for_item(item, artifact_id=artifact_id)
|
|
694
|
+
ext = Path(name).suffix or _guess_ext_from_content_type(ct)
|
|
695
|
+
p = temp_dir / _safe_materialized_filename(desired=name, artifact_id=artifact_id, ext=ext)
|
|
696
|
+
try:
|
|
697
|
+
p.write_bytes(bytes(content))
|
|
698
|
+
except Exception:
|
|
699
|
+
remaining.append(item)
|
|
700
|
+
continue
|
|
701
|
+
|
|
702
|
+
processed = ""
|
|
703
|
+
if handler is not None:
|
|
704
|
+
try:
|
|
705
|
+
res = handler.process_file(p, max_inline_tabular_bytes=int(max_inline_text_bytes), format_output="structured")
|
|
706
|
+
if getattr(res, "success", False) and getattr(res, "media_content", None) is not None:
|
|
707
|
+
processed = str(getattr(res.media_content, "content", "") or "")
|
|
708
|
+
except Exception:
|
|
709
|
+
processed = ""
|
|
710
|
+
|
|
711
|
+
if not processed:
|
|
712
|
+
try:
|
|
713
|
+
processed = bytes(content).decode("utf-8")
|
|
714
|
+
except Exception:
|
|
715
|
+
remaining.append(item)
|
|
716
|
+
continue
|
|
717
|
+
|
|
718
|
+
label = _filename_for_item(item, artifact_id=artifact_id)
|
|
719
|
+
inline_blocks.append(f"\n\n--- Content from {label} ---\n{processed}\n--- End of {label} ---")
|
|
720
|
+
|
|
721
|
+
if not inline_blocks:
|
|
722
|
+
return messages, media
|
|
723
|
+
|
|
724
|
+
updated = list(messages)
|
|
725
|
+
updated[user_idx] = dict(updated[user_idx], content=base_text + "".join(inline_blocks))
|
|
726
|
+
return updated, (remaining or None)
|
|
727
|
+
|
|
728
|
+
|
|
729
|
+
def make_llm_call_handler(*, llm: AbstractCoreLLMClient, artifact_store: Optional[ArtifactStore] = None) -> EffectHandler:
|
|
38
730
|
def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
|
|
39
731
|
payload = dict(effect.payload or {})
|
|
40
732
|
prompt = payload.get("prompt")
|
|
41
733
|
messages = payload.get("messages")
|
|
42
734
|
system_prompt = payload.get("system_prompt")
|
|
43
|
-
|
|
735
|
+
media = payload.get("media")
|
|
736
|
+
provider = payload.get("provider")
|
|
737
|
+
model = payload.get("model")
|
|
738
|
+
tools_raw = payload.get("tools")
|
|
739
|
+
tools = tools_raw if isinstance(tools_raw, list) and len(tools_raw) > 0 else None
|
|
740
|
+
response_schema = _normalize_response_schema(payload.get("response_schema"))
|
|
741
|
+
response_schema_name = payload.get("response_schema_name")
|
|
742
|
+
structured_output_fallback = payload.get("structured_output_fallback")
|
|
44
743
|
raw_params = payload.get("params")
|
|
45
744
|
params = dict(raw_params) if isinstance(raw_params, dict) else {}
|
|
46
745
|
|
|
@@ -51,17 +750,691 @@ def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
|
|
|
51
750
|
trace_metadata.update(_trace_context(run))
|
|
52
751
|
params["trace_metadata"] = trace_metadata
|
|
53
752
|
|
|
54
|
-
|
|
55
|
-
|
|
753
|
+
# Support per-effect routing: allow the payload to override provider/model.
|
|
754
|
+
# These reserved keys are consumed by MultiLocalAbstractCoreLLMClient and
|
|
755
|
+
# ignored by LocalAbstractCoreLLMClient.
|
|
756
|
+
if isinstance(provider, str) and provider.strip():
|
|
757
|
+
params["_provider"] = provider.strip()
|
|
758
|
+
if isinstance(model, str) and model.strip():
|
|
759
|
+
params["_model"] = model.strip()
|
|
56
760
|
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
761
|
+
_maybe_inject_prompt_cache_key(run=run, params=params)
|
|
762
|
+
|
|
763
|
+
def _nonempty_str(value: Any) -> Optional[str]:
|
|
764
|
+
if not isinstance(value, str):
|
|
765
|
+
return None
|
|
766
|
+
text = value.strip()
|
|
767
|
+
return text if text else None
|
|
768
|
+
|
|
769
|
+
prompt = _nonempty_str(prompt)
|
|
770
|
+
|
|
771
|
+
has_messages = isinstance(messages, list) and len(messages) > 0
|
|
772
|
+
has_prompt = isinstance(prompt, str) and bool(prompt)
|
|
773
|
+
if not has_prompt and not has_messages:
|
|
774
|
+
return EffectOutcome.failed(
|
|
775
|
+
"llm_call requires payload.prompt or payload.messages"
|
|
64
776
|
)
|
|
777
|
+
|
|
778
|
+
# Some agent loops (notably ReAct) require a strict "no in-loop truncation" policy for
|
|
779
|
+
# correctness: every iteration must see the full scratchpad/history accumulated so far.
|
|
780
|
+
# These runs can opt out of runtime-level input trimming via `_runtime.disable_input_trimming`.
|
|
781
|
+
runtime_ns = run.vars.get("_runtime") if isinstance(run.vars, dict) else None
|
|
782
|
+
disable_input_trimming = bool(runtime_ns.get("disable_input_trimming")) if isinstance(runtime_ns, dict) else False
|
|
783
|
+
if isinstance(runtime_ns, dict):
|
|
784
|
+
pending_media = runtime_ns.get("pending_media")
|
|
785
|
+
if isinstance(pending_media, list) and pending_media:
|
|
786
|
+
combined: list[Any] = []
|
|
787
|
+
if isinstance(media, tuple):
|
|
788
|
+
combined.extend(list(media))
|
|
789
|
+
elif isinstance(media, list):
|
|
790
|
+
combined.extend(list(media))
|
|
791
|
+
combined.extend(pending_media)
|
|
792
|
+
|
|
793
|
+
def _media_key(item: Any) -> Optional[Tuple[str, str]]:
|
|
794
|
+
if isinstance(item, str):
|
|
795
|
+
s = item.strip()
|
|
796
|
+
return ("path", s) if s else None
|
|
797
|
+
if isinstance(item, dict):
|
|
798
|
+
aid = item.get("$artifact") or item.get("artifact_id")
|
|
799
|
+
if isinstance(aid, str) and aid.strip():
|
|
800
|
+
return ("artifact", aid.strip())
|
|
801
|
+
return None
|
|
802
|
+
|
|
803
|
+
merged: list[Any] = []
|
|
804
|
+
seen: set[Tuple[str, str]] = set()
|
|
805
|
+
for it in combined:
|
|
806
|
+
k = _media_key(it)
|
|
807
|
+
if k is None or k in seen:
|
|
808
|
+
continue
|
|
809
|
+
merged.append(dict(it) if isinstance(it, dict) else it)
|
|
810
|
+
seen.add(k)
|
|
811
|
+
|
|
812
|
+
media = merged
|
|
813
|
+
if isinstance(runtime_ns.get("pending_media"), list):
|
|
814
|
+
runtime_ns["pending_media"] = []
|
|
815
|
+
|
|
816
|
+
# Enforce a per-call (or per-run) input-token budget by trimming oldest non-system messages.
|
|
817
|
+
#
|
|
818
|
+
# This is separate from provider limits: it protects reasoning quality and latency by keeping
|
|
819
|
+
# the active context window bounded even when the model supports very large contexts.
|
|
820
|
+
max_input_tokens: Optional[int] = None
|
|
821
|
+
try:
|
|
822
|
+
raw_max_in = payload.get("max_input_tokens")
|
|
823
|
+
if raw_max_in is None:
|
|
824
|
+
limits = run.vars.get("_limits") if isinstance(run.vars, dict) else None
|
|
825
|
+
raw_max_in = limits.get("max_input_tokens") if isinstance(limits, dict) else None
|
|
826
|
+
if raw_max_in is not None and not isinstance(raw_max_in, bool):
|
|
827
|
+
parsed = int(raw_max_in)
|
|
828
|
+
if parsed > 0:
|
|
829
|
+
max_input_tokens = parsed
|
|
830
|
+
except Exception:
|
|
831
|
+
max_input_tokens = None
|
|
832
|
+
|
|
833
|
+
if not disable_input_trimming and isinstance(max_input_tokens, int) and max_input_tokens > 0 and isinstance(messages, list) and messages:
|
|
834
|
+
try:
|
|
835
|
+
from abstractruntime.memory.token_budget import trim_messages_to_max_input_tokens
|
|
836
|
+
|
|
837
|
+
model_name = model if isinstance(model, str) and model.strip() else None
|
|
838
|
+
messages = trim_messages_to_max_input_tokens(messages, max_input_tokens=int(max_input_tokens), model=model_name)
|
|
839
|
+
except Exception:
|
|
840
|
+
# Never fail an LLM call due to trimming.
|
|
841
|
+
pass
|
|
842
|
+
|
|
843
|
+
# Enforce output token budgets (max_output_tokens) when configured.
|
|
844
|
+
#
|
|
845
|
+
# Priority:
|
|
846
|
+
# 1) explicit params (payload.params.max_output_tokens / max_tokens)
|
|
847
|
+
# 2) explicit payload field (payload.max_output_tokens / max_out_tokens)
|
|
848
|
+
# 3) run-level default limits (run.vars._limits.max_output_tokens)
|
|
849
|
+
max_output_tokens: Optional[int] = None
|
|
850
|
+
try:
|
|
851
|
+
raw_max_out = None
|
|
852
|
+
if "max_output_tokens" in params:
|
|
853
|
+
raw_max_out = params.get("max_output_tokens")
|
|
854
|
+
elif "max_tokens" in params:
|
|
855
|
+
raw_max_out = params.get("max_tokens")
|
|
856
|
+
if raw_max_out is None:
|
|
857
|
+
raw_max_out = payload.get("max_output_tokens")
|
|
858
|
+
if raw_max_out is None:
|
|
859
|
+
raw_max_out = payload.get("max_out_tokens")
|
|
860
|
+
# Treat 0/negative as "unset" so we still fall back to run-level limits.
|
|
861
|
+
parsed_max_out: Optional[int] = None
|
|
862
|
+
if raw_max_out is not None and not isinstance(raw_max_out, bool):
|
|
863
|
+
try:
|
|
864
|
+
parsed_max_out = int(raw_max_out)
|
|
865
|
+
except Exception:
|
|
866
|
+
parsed_max_out = None
|
|
867
|
+
if parsed_max_out is None or parsed_max_out <= 0:
|
|
868
|
+
raw_max_out = None
|
|
869
|
+
|
|
870
|
+
if raw_max_out is None:
|
|
871
|
+
limits = run.vars.get("_limits") if isinstance(run.vars, dict) else None
|
|
872
|
+
raw_max_out = limits.get("max_output_tokens") if isinstance(limits, dict) else None
|
|
873
|
+
if raw_max_out is not None and not isinstance(raw_max_out, bool):
|
|
874
|
+
parsed = int(raw_max_out)
|
|
875
|
+
if parsed > 0:
|
|
876
|
+
max_output_tokens = parsed
|
|
877
|
+
except Exception:
|
|
878
|
+
max_output_tokens = None
|
|
879
|
+
|
|
880
|
+
if (
|
|
881
|
+
isinstance(max_output_tokens, int)
|
|
882
|
+
and max_output_tokens > 0
|
|
883
|
+
and "max_output_tokens" not in params
|
|
884
|
+
and "max_tokens" not in params
|
|
885
|
+
):
|
|
886
|
+
params["max_output_tokens"] = int(max_output_tokens)
|
|
887
|
+
|
|
888
|
+
def _coerce_boolish(value: Any) -> bool:
|
|
889
|
+
if isinstance(value, bool):
|
|
890
|
+
return value
|
|
891
|
+
if isinstance(value, (int, float)) and not isinstance(value, bool):
|
|
892
|
+
return value != 0
|
|
893
|
+
if isinstance(value, str):
|
|
894
|
+
return value.strip().lower() in {"1", "true", "yes", "y", "on"}
|
|
895
|
+
return False
|
|
896
|
+
|
|
897
|
+
# Optional attachment registry injections (active + session index).
|
|
898
|
+
#
|
|
899
|
+
# These are derived views: they do not mutate the durable run context.
|
|
900
|
+
session_attachments: Optional[list[Dict[str, Any]]] = None
|
|
901
|
+
try:
|
|
902
|
+
include_raw = params.get("include_session_attachments_index") if isinstance(params, dict) else None
|
|
903
|
+
if include_raw is None:
|
|
904
|
+
# Run-level override (467):
|
|
905
|
+
# Allow hosts/workflows to control attachment index injection without having to
|
|
906
|
+
# touch every individual LLM_CALL payload (especially inside Agent subworkflows).
|
|
907
|
+
runtime_ns = run.vars.get("_runtime") if isinstance(run.vars, dict) else None
|
|
908
|
+
control = runtime_ns.get("control") if isinstance(runtime_ns, dict) else None
|
|
909
|
+
override = control.get("include_session_attachments_index") if isinstance(control, dict) else None
|
|
910
|
+
if override is not None:
|
|
911
|
+
include_raw = override
|
|
912
|
+
|
|
913
|
+
if include_raw is None:
|
|
914
|
+
# Default heuristic:
|
|
915
|
+
# - Agents (tools present): include.
|
|
916
|
+
# - Raw LLM calls: include only when explicitly using context.
|
|
917
|
+
#
|
|
918
|
+
# Exception: when active attachments are present for this call, skip the stored
|
|
919
|
+
# session attachment index by default (it is redundant and encourages re-opening).
|
|
920
|
+
has_active_media = bool(list(media)) if isinstance(media, (list, tuple)) else bool(media)
|
|
921
|
+
if has_active_media:
|
|
922
|
+
include_raw = False
|
|
923
|
+
else:
|
|
924
|
+
inc_ctx = payload.get("include_context")
|
|
925
|
+
if inc_ctx is None:
|
|
926
|
+
inc_ctx = payload.get("use_context")
|
|
927
|
+
include_raw = True if tools is not None else _coerce_boolish(inc_ctx)
|
|
928
|
+
|
|
929
|
+
include_index = _coerce_boolish(include_raw)
|
|
930
|
+
sid = getattr(run, "session_id", None)
|
|
931
|
+
sid_str = str(sid or "").strip() if isinstance(sid, str) or sid is not None else ""
|
|
932
|
+
|
|
933
|
+
active_msg = ""
|
|
934
|
+
try:
|
|
935
|
+
active_msg = render_active_attachments_system_message(media, max_entries=12, max_chars=2000)
|
|
936
|
+
except Exception:
|
|
937
|
+
active_msg = ""
|
|
938
|
+
|
|
939
|
+
session_msg = ""
|
|
940
|
+
if include_index and artifact_store is not None and sid_str:
|
|
941
|
+
session_attachments = list_session_attachments(
|
|
942
|
+
artifact_store=artifact_store, session_id=sid_str, limit=20
|
|
943
|
+
)
|
|
944
|
+
has_open_attachment_tool = any(
|
|
945
|
+
isinstance(t, dict) and str(t.get("name") or "").strip() == "open_attachment" for t in (tools or [])
|
|
946
|
+
)
|
|
947
|
+
session_msg = render_session_attachments_system_message(
|
|
948
|
+
session_attachments,
|
|
949
|
+
max_entries=20,
|
|
950
|
+
max_chars=4000,
|
|
951
|
+
include_open_attachment_hint=has_open_attachment_tool,
|
|
952
|
+
)
|
|
953
|
+
|
|
954
|
+
if active_msg or session_msg:
|
|
955
|
+
if not isinstance(messages, list):
|
|
956
|
+
messages = []
|
|
957
|
+
# Remove any previous injected attachment system messages to avoid staleness/duplication.
|
|
958
|
+
cleaned: list[Dict[str, Any]] = []
|
|
959
|
+
for m in messages:
|
|
960
|
+
if not isinstance(m, dict):
|
|
961
|
+
continue
|
|
962
|
+
if m.get("role") != "system":
|
|
963
|
+
cleaned.append(m)
|
|
964
|
+
continue
|
|
965
|
+
c = m.get("content")
|
|
966
|
+
c_str = str(c or "")
|
|
967
|
+
if c_str.strip().startswith("Active attachments") or c_str.strip().startswith("Stored session attachments") or c_str.strip().startswith("Session attachments"):
|
|
968
|
+
continue
|
|
969
|
+
cleaned.append(m)
|
|
970
|
+
|
|
971
|
+
injected: list[Dict[str, Any]] = []
|
|
972
|
+
if active_msg:
|
|
973
|
+
injected.append({"role": "system", "content": active_msg})
|
|
974
|
+
if session_msg:
|
|
975
|
+
injected.append({"role": "system", "content": session_msg})
|
|
976
|
+
messages = injected + cleaned
|
|
977
|
+
except Exception:
|
|
978
|
+
session_attachments = None
|
|
979
|
+
|
|
980
|
+
fallback_enabled = _coerce_boolish(structured_output_fallback)
|
|
981
|
+
base_params = dict(params)
|
|
982
|
+
|
|
983
|
+
try:
|
|
984
|
+
# View-time dedup of repeated document reads (keeps LLM-visible context lean).
|
|
985
|
+
if isinstance(messages, list) and messages:
|
|
986
|
+
messages = dedup_messages_view(list(messages), session_attachments=session_attachments)
|
|
987
|
+
|
|
988
|
+
def _extract_user_text_for_context(*, prompt_value: Any, messages_value: Any) -> str:
|
|
989
|
+
if isinstance(prompt_value, str) and prompt_value.strip():
|
|
990
|
+
return prompt_value.strip()
|
|
991
|
+
if isinstance(messages_value, list):
|
|
992
|
+
for m in reversed(messages_value):
|
|
993
|
+
if not isinstance(m, dict):
|
|
994
|
+
continue
|
|
995
|
+
if m.get("role") != "user":
|
|
996
|
+
continue
|
|
997
|
+
c = m.get("content")
|
|
998
|
+
if isinstance(c, str) and c.strip():
|
|
999
|
+
return c.strip()
|
|
1000
|
+
return ""
|
|
1001
|
+
|
|
1002
|
+
# Preserve the "real" user text for `/use_context` without including inlined attachment blocks.
|
|
1003
|
+
user_text_for_context = _extract_user_text_for_context(prompt_value=prompt, messages_value=messages)
|
|
1004
|
+
|
|
1005
|
+
structured_requested = isinstance(response_schema, dict) and response_schema
|
|
1006
|
+
params_for_call = dict(params)
|
|
1007
|
+
|
|
1008
|
+
# Runtime-owned defaults (run-scoped): allow workflows/clients to set once at
|
|
1009
|
+
# run start via `run.vars["_runtime"]` and have all LLM calls inherit them.
|
|
1010
|
+
#
|
|
1011
|
+
# This is intentionally narrow: only copy explicit policy keys we support.
|
|
1012
|
+
try:
|
|
1013
|
+
runtime_ns = run.vars.get("_runtime") if isinstance(run.vars, dict) else None
|
|
1014
|
+
except Exception:
|
|
1015
|
+
runtime_ns = None
|
|
1016
|
+
if isinstance(runtime_ns, dict):
|
|
1017
|
+
audio_policy = runtime_ns.get("audio_policy")
|
|
1018
|
+
if "audio_policy" not in params_for_call and isinstance(audio_policy, str) and audio_policy.strip():
|
|
1019
|
+
params_for_call["audio_policy"] = audio_policy.strip()
|
|
1020
|
+
|
|
1021
|
+
stt_language = runtime_ns.get("stt_language")
|
|
1022
|
+
if stt_language is None:
|
|
1023
|
+
stt_language = runtime_ns.get("audio_language")
|
|
1024
|
+
if "stt_language" not in params_for_call and isinstance(stt_language, str) and stt_language.strip():
|
|
1025
|
+
params_for_call["stt_language"] = stt_language.strip()
|
|
1026
|
+
|
|
1027
|
+
if structured_requested:
|
|
1028
|
+
model_name = (
|
|
1029
|
+
str(response_schema_name).strip()
|
|
1030
|
+
if isinstance(response_schema_name, str) and response_schema_name.strip()
|
|
1031
|
+
else "StructuredOutput"
|
|
1032
|
+
)
|
|
1033
|
+
params_for_call["response_model"] = _pydantic_model_from_json_schema(response_schema, name=model_name)
|
|
1034
|
+
|
|
1035
|
+
structured_failed = False
|
|
1036
|
+
structured_error: Optional[str] = None
|
|
1037
|
+
|
|
1038
|
+
messages_for_call = messages
|
|
1039
|
+
media_for_call = media
|
|
1040
|
+
|
|
1041
|
+
resolved_media: Optional[list[Any]] = None
|
|
1042
|
+
tmpdir: Optional[tempfile.TemporaryDirectory] = None
|
|
1043
|
+
if media_for_call is not None:
|
|
1044
|
+
tmpdir = tempfile.TemporaryDirectory(prefix="abstractruntime_media_")
|
|
1045
|
+
try:
|
|
1046
|
+
max_inline_text_bytes = 120_000
|
|
1047
|
+
raw_max_inline = params.get("max_inline_attachment_bytes")
|
|
1048
|
+
if raw_max_inline is None:
|
|
1049
|
+
raw_max_inline = params.get("max_inline_text_attachment_bytes")
|
|
1050
|
+
if raw_max_inline is not None and not isinstance(raw_max_inline, bool):
|
|
1051
|
+
try:
|
|
1052
|
+
max_inline_text_bytes = max(0, int(raw_max_inline))
|
|
1053
|
+
except Exception:
|
|
1054
|
+
max_inline_text_bytes = 120_000
|
|
1055
|
+
|
|
1056
|
+
messages_for_call, media_for_call = _inline_active_text_attachments(
|
|
1057
|
+
messages=messages_for_call,
|
|
1058
|
+
media=media_for_call,
|
|
1059
|
+
artifact_store=artifact_store,
|
|
1060
|
+
temp_dir=Path(tmpdir.name),
|
|
1061
|
+
max_inline_text_bytes=max_inline_text_bytes,
|
|
1062
|
+
)
|
|
1063
|
+
resolved_media, err = _resolve_llm_call_media(
|
|
1064
|
+
media_for_call,
|
|
1065
|
+
artifact_store=artifact_store,
|
|
1066
|
+
temp_dir=Path(tmpdir.name),
|
|
1067
|
+
)
|
|
1068
|
+
if err:
|
|
1069
|
+
tmpdir.cleanup()
|
|
1070
|
+
return EffectOutcome.failed(err)
|
|
1071
|
+
except Exception as e:
|
|
1072
|
+
tmpdir.cleanup()
|
|
1073
|
+
return EffectOutcome.failed(str(e))
|
|
1074
|
+
|
|
1075
|
+
# Framework default: Glyph compression is experimental and opt-in.
|
|
1076
|
+
#
|
|
1077
|
+
# Avoid noisy warnings and unnecessary decision overhead for non-vision models unless
|
|
1078
|
+
# the caller explicitly requests compression via `params.glyph_compression`.
|
|
1079
|
+
if isinstance(media_for_call, list) and media_for_call and "glyph_compression" not in params_for_call:
|
|
1080
|
+
params_for_call["glyph_compression"] = "never"
|
|
1081
|
+
|
|
1082
|
+
runtime_observability = {
|
|
1083
|
+
"llm_generate_kwargs": _jsonable(
|
|
1084
|
+
{
|
|
1085
|
+
"prompt": str(prompt or ""),
|
|
1086
|
+
"messages": messages_for_call,
|
|
1087
|
+
"system_prompt": system_prompt,
|
|
1088
|
+
"media": media_for_call,
|
|
1089
|
+
"tools": tools,
|
|
1090
|
+
"params": params_for_call,
|
|
1091
|
+
"structured_output_fallback": fallback_enabled,
|
|
1092
|
+
}
|
|
1093
|
+
),
|
|
1094
|
+
}
|
|
1095
|
+
truncation_attempts: list[dict[str, Any]] = []
|
|
1096
|
+
had_truncation = False
|
|
1097
|
+
|
|
1098
|
+
def _finish_reason_is_truncation(value: Any) -> bool:
|
|
1099
|
+
if not isinstance(value, str):
|
|
1100
|
+
return False
|
|
1101
|
+
return value.strip().lower() in {"length", "max_tokens", "max_output_tokens"}
|
|
1102
|
+
|
|
1103
|
+
def _bump_max_output_tokens(current: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of *current* params with an increased output-token budget.

    Called after a generation finished with a truncation reason. The new
    budget is ``max(cur * 2, cur + 500)``, clamped to a cap resolved in
    priority order:

    1. payload overrides (``max_output_tokens_cap`` or
       ``max_truncation_max_output_tokens``),
    2. run-level limits (``run.vars["_limits"]["max_output_tokens"]``),
    3. the model's advertised capabilities via ``llm.get_model_capabilities``,
    4. an effectively-unbounded default of 1_000_000.

    NOTE(review): reads ``payload``, ``run`` and ``llm`` from the enclosing
    handler closure. Also normalizes the result to carry the budget only
    under "max_output_tokens" (any legacy "max_tokens" key is dropped).
    """
    updated = dict(current)
    # Current budget: prefer the canonical key, fall back to legacy "max_tokens".
    raw = updated.get("max_output_tokens")
    if raw is None:
        raw = updated.get("max_tokens")
    cur = 0
    # bool is excluded explicitly because bool is a subclass of int.
    if raw is not None and not isinstance(raw, bool):
        try:
            cur = int(raw)
        except Exception:
            cur = 0
    if cur <= 0:
        # If the caller didn't specify an output budget, assume we're at least at the
        # runtime/provider default (often ~2k). Use the run limits as a better hint.
        hinted = None
        try:
            limits = run.vars.get("_limits") if isinstance(run.vars, dict) else None
            hinted = limits.get("max_output_tokens") if isinstance(limits, dict) else None
        except Exception:
            hinted = None
        if hinted is not None and not isinstance(hinted, bool):
            try:
                hinted_i = int(hinted)
            except Exception:
                hinted_i = 0
            if hinted_i > 0:
                cur = hinted_i
            else:
                cur = 2048
        else:
            cur = 2048

    # Grow geometrically, but guarantee a minimum absolute increase of 500.
    bumped = max(cur * 2, cur + 500)
    cap_raw = payload.get("max_output_tokens_cap")
    if cap_raw is None:
        cap_raw = payload.get("max_truncation_max_output_tokens")

    cap: int | None = None
    if cap_raw is not None and not isinstance(cap_raw, bool):
        try:
            # Explicit payload caps are floored at 256 tokens.
            cap = max(256, int(cap_raw))
        except Exception:
            cap = None

    if cap is None:
        # No explicit payload cap: fall back to the run-level output limit.
        hinted_cap = None
        try:
            limits = run.vars.get("_limits") if isinstance(run.vars, dict) else None
            hinted_cap = limits.get("max_output_tokens") if isinstance(limits, dict) else None
        except Exception:
            hinted_cap = None
        if hinted_cap is not None and not isinstance(hinted_cap, bool):
            try:
                hinted_i = int(hinted_cap)
            except Exception:
                hinted_i = 0
            if hinted_i > 0:
                cap = hinted_i

    if cap is None:
        # Prefer model capabilities over arbitrary "unbounded" caps.
        try:
            caps = getattr(llm, "get_model_capabilities", None)
            model_caps = caps() if callable(caps) else None
            # NOTE: `raw` is reused here to hold the capability lookup result.
            raw = model_caps.get("max_output_tokens") if isinstance(model_caps, dict) else None
            if raw is None and isinstance(model_caps, dict):
                raw = model_caps.get("max_tokens")
            cap_i = int(raw) if raw is not None and not isinstance(raw, bool) else 0
            if cap_i > 0:
                cap = cap_i
        except Exception:
            pass

    # If the model capabilities are unknown, keep the cap effectively unbounded.
    if cap is None:
        cap = 1_000_000

    updated["max_output_tokens"] = min(bumped, cap)
    # Normalize: the bumped budget lives only under "max_output_tokens".
    updated.pop("max_tokens", None)
    return updated
|
|
1183
|
+
|
|
1184
|
+
retry_on_truncation_raw = payload.get("retry_on_truncation")
|
|
1185
|
+
if retry_on_truncation_raw is None:
|
|
1186
|
+
retry_on_truncation_raw = payload.get("no_truncation")
|
|
1187
|
+
retry_on_truncation = True
|
|
1188
|
+
if retry_on_truncation_raw is not None:
|
|
1189
|
+
retry_on_truncation = _coerce_boolish(retry_on_truncation_raw)
|
|
1190
|
+
|
|
1191
|
+
allow_truncation_raw = payload.get("allow_truncation")
|
|
1192
|
+
if allow_truncation_raw is None:
|
|
1193
|
+
allow_truncation_raw = payload.get("allow_truncated")
|
|
1194
|
+
allow_truncation = _coerce_boolish(allow_truncation_raw) if allow_truncation_raw is not None else False
|
|
1195
|
+
|
|
1196
|
+
max_truncation_attempts = 3
|
|
1197
|
+
raw_attempts = payload.get("max_truncation_attempts")
|
|
1198
|
+
if raw_attempts is None:
|
|
1199
|
+
raw_attempts = payload.get("truncation_max_attempts")
|
|
1200
|
+
if raw_attempts is not None and not isinstance(raw_attempts, bool):
|
|
1201
|
+
try:
|
|
1202
|
+
max_truncation_attempts = max(1, int(raw_attempts))
|
|
1203
|
+
except Exception:
|
|
1204
|
+
max_truncation_attempts = 3
|
|
1205
|
+
|
|
1206
|
+
params_attempt = dict(params_for_call)
|
|
1207
|
+
base_params_attempt = dict(base_params)
|
|
1208
|
+
|
|
1209
|
+
try:
|
|
1210
|
+
last_finish_reason: Optional[str] = None
|
|
1211
|
+
for attempt in range(1, max_truncation_attempts + 1):
|
|
1212
|
+
try:
|
|
1213
|
+
result = llm.generate(
|
|
1214
|
+
prompt=str(prompt or ""),
|
|
1215
|
+
messages=messages_for_call,
|
|
1216
|
+
system_prompt=system_prompt,
|
|
1217
|
+
media=resolved_media,
|
|
1218
|
+
tools=tools,
|
|
1219
|
+
params=params_attempt,
|
|
1220
|
+
)
|
|
1221
|
+
except Exception as e:
|
|
1222
|
+
looks_like_validation = False
|
|
1223
|
+
try:
|
|
1224
|
+
from pydantic import ValidationError as PydanticValidationError # type: ignore
|
|
1225
|
+
|
|
1226
|
+
looks_like_validation = isinstance(e, PydanticValidationError)
|
|
1227
|
+
except Exception:
|
|
1228
|
+
looks_like_validation = False
|
|
1229
|
+
|
|
1230
|
+
msg = str(e)
|
|
1231
|
+
if not looks_like_validation:
|
|
1232
|
+
lowered = msg.lower()
|
|
1233
|
+
if "validation errors for" in lowered or "structured output generation failed" in lowered:
|
|
1234
|
+
looks_like_validation = True
|
|
1235
|
+
|
|
1236
|
+
if not (fallback_enabled and structured_requested and looks_like_validation):
|
|
1237
|
+
raise
|
|
1238
|
+
|
|
1239
|
+
logger.warning(
|
|
1240
|
+
"LLM_CALL structured output failed; retrying without schema",
|
|
1241
|
+
error=msg,
|
|
1242
|
+
)
|
|
1243
|
+
|
|
1244
|
+
result = llm.generate(
|
|
1245
|
+
prompt=str(prompt or ""),
|
|
1246
|
+
messages=messages_for_call,
|
|
1247
|
+
system_prompt=system_prompt,
|
|
1248
|
+
media=resolved_media,
|
|
1249
|
+
tools=tools,
|
|
1250
|
+
params=base_params_attempt,
|
|
1251
|
+
)
|
|
1252
|
+
structured_failed = True
|
|
1253
|
+
structured_error = msg
|
|
1254
|
+
|
|
1255
|
+
finish_reason = None
|
|
1256
|
+
if isinstance(result, dict):
|
|
1257
|
+
fr = result.get("finish_reason")
|
|
1258
|
+
finish_reason = fr if isinstance(fr, str) else None
|
|
1259
|
+
last_finish_reason = finish_reason
|
|
1260
|
+
|
|
1261
|
+
truncation_attempts.append(
|
|
1262
|
+
{
|
|
1263
|
+
"attempt": attempt,
|
|
1264
|
+
"finish_reason": finish_reason,
|
|
1265
|
+
"max_output_tokens": params_attempt.get("max_output_tokens"),
|
|
1266
|
+
"structured_fallback": bool(structured_failed),
|
|
1267
|
+
}
|
|
1268
|
+
)
|
|
1269
|
+
|
|
1270
|
+
if not _finish_reason_is_truncation(finish_reason):
|
|
1271
|
+
break
|
|
1272
|
+
had_truncation = True
|
|
1273
|
+
|
|
1274
|
+
if allow_truncation:
|
|
1275
|
+
break
|
|
1276
|
+
|
|
1277
|
+
if not retry_on_truncation or attempt >= max_truncation_attempts:
|
|
1278
|
+
break
|
|
1279
|
+
|
|
1280
|
+
bumped = _bump_max_output_tokens(params_attempt)
|
|
1281
|
+
logger.warning(
|
|
1282
|
+
"LLM_CALL output truncated; retrying with higher max_output_tokens",
|
|
1283
|
+
finish_reason=finish_reason,
|
|
1284
|
+
attempt=attempt,
|
|
1285
|
+
max_output_tokens=params_attempt.get("max_output_tokens"),
|
|
1286
|
+
next_max_output_tokens=bumped.get("max_output_tokens"),
|
|
1287
|
+
)
|
|
1288
|
+
params_attempt = bumped
|
|
1289
|
+
base_params_attempt = _bump_max_output_tokens(base_params_attempt)
|
|
1290
|
+
|
|
1291
|
+
if _finish_reason_is_truncation(last_finish_reason) and not allow_truncation:
|
|
1292
|
+
budgets = ", ".join(
|
|
1293
|
+
[
|
|
1294
|
+
str(a.get("max_output_tokens"))
|
|
1295
|
+
for a in truncation_attempts
|
|
1296
|
+
if a.get("max_output_tokens") is not None
|
|
1297
|
+
][:6]
|
|
1298
|
+
)
|
|
1299
|
+
suffix = " …" if len(truncation_attempts) > 6 else ""
|
|
1300
|
+
raise RuntimeError(
|
|
1301
|
+
"LLM_CALL output was truncated (finish_reason=length). "
|
|
1302
|
+
f"Attempted max_output_tokens: {budgets}{suffix}. "
|
|
1303
|
+
"Increase max_output_tokens/max_out_tokens (or set allow_truncation=true)."
|
|
1304
|
+
)
|
|
1305
|
+
|
|
1306
|
+
if had_truncation and not _finish_reason_is_truncation(last_finish_reason):
|
|
1307
|
+
logger.warning(
|
|
1308
|
+
"LLM_CALL output truncation resolved after retries",
|
|
1309
|
+
attempts=len(truncation_attempts),
|
|
1310
|
+
max_output_tokens=params_attempt.get("max_output_tokens"),
|
|
1311
|
+
)
|
|
1312
|
+
|
|
1313
|
+
# Keep observability aligned with the actual params used.
|
|
1314
|
+
if had_truncation or len(truncation_attempts) > 1:
|
|
1315
|
+
runtime_observability["llm_generate_kwargs"] = _jsonable(
|
|
1316
|
+
{
|
|
1317
|
+
"prompt": str(prompt or ""),
|
|
1318
|
+
"messages": messages_for_call,
|
|
1319
|
+
"system_prompt": system_prompt,
|
|
1320
|
+
"media": media_for_call,
|
|
1321
|
+
"tools": tools,
|
|
1322
|
+
"params": params_attempt,
|
|
1323
|
+
"structured_output_fallback": fallback_enabled,
|
|
1324
|
+
"truncation_attempts": truncation_attempts,
|
|
1325
|
+
}
|
|
1326
|
+
)
|
|
1327
|
+
finally:
|
|
1328
|
+
if tmpdir is not None:
|
|
1329
|
+
tmpdir.cleanup()
|
|
1330
|
+
|
|
1331
|
+
if structured_requested and isinstance(result, dict):
|
|
1332
|
+
# Best-effort: when structured outputs fail (or providers ignore response_model),
|
|
1333
|
+
# try to parse the returned text into `data` so downstream nodes can consume it.
|
|
1334
|
+
try:
|
|
1335
|
+
existing_data = result.get("data")
|
|
1336
|
+
except Exception:
|
|
1337
|
+
existing_data = None
|
|
1338
|
+
|
|
1339
|
+
content_value = result.get("content") if isinstance(result.get("content"), str) else None
|
|
1340
|
+
|
|
1341
|
+
if existing_data is None and isinstance(content_value, str) and content_value.strip():
|
|
1342
|
+
parsed: Any = None
|
|
1343
|
+
parse_error: Optional[str] = None
|
|
1344
|
+
try:
|
|
1345
|
+
from abstractruntime.visualflow_compiler.visual.builtins import data_parse_json
|
|
1346
|
+
|
|
1347
|
+
parsed = data_parse_json({"text": content_value, "wrap_scalar": True})
|
|
1348
|
+
except Exception as e:
|
|
1349
|
+
parse_error = str(e)
|
|
1350
|
+
parsed = None
|
|
1351
|
+
|
|
1352
|
+
# Response schemas in this system are object-only; wrap non-dicts for safety.
|
|
1353
|
+
if parsed is not None and not isinstance(parsed, dict):
|
|
1354
|
+
parsed = {"value": parsed}
|
|
1355
|
+
if parsed is not None:
|
|
1356
|
+
result["data"] = parsed
|
|
1357
|
+
|
|
1358
|
+
if parse_error is not None:
|
|
1359
|
+
meta = result.get("metadata")
|
|
1360
|
+
if not isinstance(meta, dict):
|
|
1361
|
+
meta = {}
|
|
1362
|
+
result["metadata"] = meta
|
|
1363
|
+
meta["_structured_output_parse_error"] = parse_error
|
|
1364
|
+
|
|
1365
|
+
if isinstance(result, dict):
|
|
1366
|
+
meta = result.get("metadata")
|
|
1367
|
+
if not isinstance(meta, dict):
|
|
1368
|
+
meta = {}
|
|
1369
|
+
result["metadata"] = meta
|
|
1370
|
+
if structured_failed:
|
|
1371
|
+
meta["_structured_output_fallback"] = {"used": True, "error": structured_error or ""}
|
|
1372
|
+
if had_truncation or len(truncation_attempts) > 1:
|
|
1373
|
+
meta["_truncation"] = {
|
|
1374
|
+
"attempts": truncation_attempts,
|
|
1375
|
+
"resolved": not _finish_reason_is_truncation(result.get("finish_reason") if isinstance(result.get("finish_reason"), str) else None),
|
|
1376
|
+
}
|
|
1377
|
+
existing = meta.get("_runtime_observability")
|
|
1378
|
+
if not isinstance(existing, dict):
|
|
1379
|
+
existing = {}
|
|
1380
|
+
meta["_runtime_observability"] = existing
|
|
1381
|
+
existing.update(runtime_observability)
|
|
1382
|
+
|
|
1383
|
+
# VisualFlow "Use context" UX: when requested, persist the turn into the run's
|
|
1384
|
+
# active context (`vars.context.messages`) so subsequent LLM/Agent/Subflow nodes
|
|
1385
|
+
# can see the interaction history without extra wiring.
|
|
1386
|
+
#
|
|
1387
|
+
# IMPORTANT: This is opt-in via payload.include_context/use_context; AbstractRuntime
|
|
1388
|
+
# does not implicitly store all LLM calls in context.
|
|
1389
|
+
try:
|
|
1390
|
+
inc_raw = payload.get("include_context")
|
|
1391
|
+
if inc_raw is None:
|
|
1392
|
+
inc_raw = payload.get("use_context")
|
|
1393
|
+
if _coerce_boolish(inc_raw):
|
|
1394
|
+
from abstractruntime.core.vars import get_context
|
|
1395
|
+
|
|
1396
|
+
ctx_ns = get_context(run.vars)
|
|
1397
|
+
msgs_any = ctx_ns.get("messages")
|
|
1398
|
+
if not isinstance(msgs_any, list):
|
|
1399
|
+
msgs_any = []
|
|
1400
|
+
ctx_ns["messages"] = msgs_any
|
|
1401
|
+
|
|
1402
|
+
def _extract_assistant_text() -> str:
|
|
1403
|
+
if isinstance(result, dict):
|
|
1404
|
+
c = result.get("content")
|
|
1405
|
+
if isinstance(c, str) and c.strip():
|
|
1406
|
+
return c.strip()
|
|
1407
|
+
d = result.get("data")
|
|
1408
|
+
if isinstance(d, (dict, list)):
|
|
1409
|
+
import json as _json
|
|
1410
|
+
|
|
1411
|
+
return _json.dumps(d, ensure_ascii=False, indent=2)
|
|
1412
|
+
return ""
|
|
1413
|
+
|
|
1414
|
+
user_text = user_text_for_context
|
|
1415
|
+
assistant_text = _extract_assistant_text()
|
|
1416
|
+
node_id = str(getattr(run, "current_node", None) or "").strip() or "unknown"
|
|
1417
|
+
|
|
1418
|
+
if user_text:
|
|
1419
|
+
msgs_any.append(
|
|
1420
|
+
{
|
|
1421
|
+
"role": "user",
|
|
1422
|
+
"content": user_text,
|
|
1423
|
+
"metadata": {"kind": "llm_turn", "node_id": node_id},
|
|
1424
|
+
}
|
|
1425
|
+
)
|
|
1426
|
+
if assistant_text:
|
|
1427
|
+
msgs_any.append(
|
|
1428
|
+
{
|
|
1429
|
+
"role": "assistant",
|
|
1430
|
+
"content": assistant_text,
|
|
1431
|
+
"metadata": {"kind": "llm_turn", "node_id": node_id},
|
|
1432
|
+
}
|
|
1433
|
+
)
|
|
1434
|
+
if isinstance(getattr(run, "output", None), dict):
|
|
1435
|
+
run.output["messages"] = msgs_any
|
|
1436
|
+
except Exception:
|
|
1437
|
+
pass
|
|
65
1438
|
return EffectOutcome.completed(result=result)
|
|
66
1439
|
except Exception as e:
|
|
67
1440
|
logger.error("LLM_CALL failed", error=str(e))
|
|
@@ -70,7 +1443,12 @@ def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
|
|
|
70
1443
|
return _handler
|
|
71
1444
|
|
|
72
1445
|
|
|
73
|
-
def make_tool_calls_handler(
|
|
1446
|
+
def make_tool_calls_handler(
|
|
1447
|
+
*,
|
|
1448
|
+
tools: Optional[ToolExecutor] = None,
|
|
1449
|
+
artifact_store: Optional[ArtifactStore] = None,
|
|
1450
|
+
run_store: Optional[RunStore] = None,
|
|
1451
|
+
) -> EffectHandler:
|
|
74
1452
|
"""Create a TOOL_CALLS effect handler.
|
|
75
1453
|
|
|
76
1454
|
Tool execution is performed exclusively via the host-configured ToolExecutor.
|
|
@@ -81,6 +1459,11 @@ def make_tool_calls_handler(*, tools: Optional[ToolExecutor] = None) -> EffectHa
|
|
|
81
1459
|
tool_calls = payload.get("tool_calls")
|
|
82
1460
|
if not isinstance(tool_calls, list):
|
|
83
1461
|
return EffectOutcome.failed("tool_calls requires payload.tool_calls (list)")
|
|
1462
|
+
allowed_tools_raw = payload.get("allowed_tools")
|
|
1463
|
+
allowlist_enabled = isinstance(allowed_tools_raw, list)
|
|
1464
|
+
allowed_tools: Set[str] = set()
|
|
1465
|
+
if allowlist_enabled:
|
|
1466
|
+
allowed_tools = {str(t) for t in allowed_tools_raw if isinstance(t, str) and t.strip()}
|
|
84
1467
|
|
|
85
1468
|
if tools is None:
|
|
86
1469
|
return EffectOutcome.failed(
|
|
@@ -88,32 +1471,1239 @@ def make_tool_calls_handler(*, tools: Optional[ToolExecutor] = None) -> EffectHa
|
|
|
88
1471
|
"MappingToolExecutor/AbstractCoreToolExecutor/PassthroughToolExecutor."
|
|
89
1472
|
)
|
|
90
1473
|
|
|
1474
|
+
original_call_count = len(tool_calls)
|
|
1475
|
+
|
|
1476
|
+
# Always block non-dict tool call entries: passthrough hosts expect dicts and may crash otherwise.
|
|
1477
|
+
blocked_by_index: Dict[int, Dict[str, Any]] = {}
|
|
1478
|
+
pre_results_by_index: Dict[int, Dict[str, Any]] = {}
|
|
1479
|
+
planned: list[Dict[str, Any]] = []
|
|
1480
|
+
|
|
1481
|
+
# For evidence and deterministic resume merging, keep a positional tool call list aligned to the
|
|
1482
|
+
# *original* tool call order. Blocked entries are represented as empty-args stubs.
|
|
1483
|
+
tool_calls_for_evidence: list[Dict[str, Any]] = []
|
|
1484
|
+
|
|
1485
|
+
# Optional workspace policy (run.vars-driven). When configured, this rewrites/blocks
|
|
1486
|
+
# filesystem-ish tool arguments before they reach the ToolExecutor.
|
|
1487
|
+
scope: Optional[WorkspaceScope] = None
|
|
91
1488
|
try:
|
|
92
|
-
|
|
1489
|
+
vars0 = getattr(run, "vars", None)
|
|
1490
|
+
scope = WorkspaceScope.from_input_data(vars0) if isinstance(vars0, dict) else None
|
|
93
1491
|
except Exception as e:
|
|
94
|
-
logger.error("TOOL_CALLS execution failed", error=str(e))
|
|
95
1492
|
return EffectOutcome.failed(str(e))
|
|
96
1493
|
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
1494
|
+
sid_str = str(getattr(run, "session_id", "") or "").strip()
|
|
1495
|
+
session_attachments_cache: Optional[list[Dict[str, Any]]] = None
|
|
1496
|
+
|
|
1497
|
+
def _loads_dict_like(value: Any) -> Optional[Dict[str, Any]]:
|
|
1498
|
+
if value is None:
|
|
1499
|
+
return None
|
|
1500
|
+
if isinstance(value, dict):
|
|
1501
|
+
return dict(value)
|
|
1502
|
+
if not isinstance(value, str):
|
|
1503
|
+
return None
|
|
1504
|
+
text = value.strip()
|
|
1505
|
+
if not text:
|
|
1506
|
+
return None
|
|
1507
|
+
try:
|
|
1508
|
+
parsed = json.loads(text)
|
|
1509
|
+
except Exception:
|
|
1510
|
+
return None
|
|
1511
|
+
return parsed if isinstance(parsed, dict) else None
|
|
1512
|
+
|
|
1513
|
+
def _ensure_session_memory_run_exists(*, session_id: str) -> None:
    """Create the per-session memory owner run if it does not already exist.

    No-op when there is no run store or the session id is blank. Both the
    load probe and the save are best-effort: failures never propagate.
    """
    if run_store is None:
        return
    normalized_sid = str(session_id or "").strip()
    if not normalized_sid:
        return
    owner_run_id = session_memory_owner_run_id(normalized_sid)
    try:
        already_present = run_store.load(str(owner_run_id)) is not None
    except Exception:
        already_present = False
    if already_present:
        return
    timestamp = datetime.datetime.now(datetime.timezone.utc).isoformat()
    placeholder = RunState(
        run_id=str(owner_run_id),
        workflow_id="__session_memory__",
        status=RunStatus.COMPLETED,
        current_node="done",
        vars={
            "context": {"task": "", "messages": []},
            "scratchpad": {},
            "_runtime": {"memory_spans": []},
            "_temp": {},
            "_limits": {},
        },
        waiting=None,
        output={"messages": []},
        error=None,
        created_at=timestamp,
        updated_at=timestamp,
        actor_id=None,
        session_id=normalized_sid,
        parent_run_id=None,
    )
    try:
        run_store.save(placeholder)
    except Exception:
        # Best-effort: artifacts can still be stored, but run-scoped APIs may 404.
        pass
|
|
1553
|
+
|
|
1554
|
+
def _max_attachment_bytes() -> int:
|
|
1555
|
+
raw = str(os.getenv("ABSTRACTGATEWAY_MAX_ATTACHMENT_BYTES", "") or "").strip()
|
|
1556
|
+
if raw:
|
|
1557
|
+
try:
|
|
1558
|
+
v = int(raw)
|
|
1559
|
+
if v > 0:
|
|
1560
|
+
return v
|
|
1561
|
+
except Exception:
|
|
1562
|
+
pass
|
|
1563
|
+
return 25 * 1024 * 1024
|
|
1564
|
+
|
|
1565
|
+
def _enqueue_pending_media(media_items: Any) -> None:
    """Append new media references to run.vars['_runtime']['pending_media'].

    Deduplicates by (kind, identifier) — string paths, or dicts keyed by
    their '$artifact'/'artifact_id' — against both the existing queue and
    earlier items in this batch. Items that are neither strings nor dicts,
    or that carry no usable key, are dropped silently.
    """
    if not isinstance(media_items, list) or not media_items:
        return
    run_vars = getattr(run, "vars", None)
    if not isinstance(run_vars, dict):
        return
    runtime_ns = run_vars.get("_runtime")
    if not isinstance(runtime_ns, dict):
        runtime_ns = {}
        run_vars["_runtime"] = runtime_ns

    queue = runtime_ns.get("pending_media")
    if not isinstance(queue, list):
        queue = []

    def _dedupe_key(item: Any) -> Optional[Tuple[str, str]]:
        # Strings are treated as paths; dicts are keyed by their artifact id.
        if isinstance(item, str):
            trimmed = item.strip()
            return ("path", trimmed) if trimmed else None
        if isinstance(item, dict):
            artifact_id = item.get("$artifact") or item.get("artifact_id")
            if isinstance(artifact_id, str) and artifact_id.strip():
                return ("artifact", artifact_id.strip())
        return None

    seen_keys: set[Tuple[str, str]] = {
        key for key in (_dedupe_key(existing) for existing in queue) if key is not None
    }

    for candidate in media_items:
        key = _dedupe_key(candidate)
        if key is None or key in seen_keys:
            continue
        if isinstance(candidate, str):
            queue.append(candidate.strip())
        elif isinstance(candidate, dict):
            queue.append(dict(candidate))
        else:
            continue
        seen_keys.add(key)

    runtime_ns["pending_media"] = queue
|
|
1609
|
+
|
|
1610
|
+
def _register_read_file_as_attachment(*, session_id: str, file_path: str) -> Optional[Dict[str, Any]]:
    """Best-effort: persist a file read via `read_file` as a session attachment.

    Returns a descriptor dict (artifact_id, handle, sha256, content_type,
    size_bytes) on success, or None when anything prevents registration:
    no artifact store, blank session id or path, unreadable file, size over
    the configured cap, or a store failure. Never raises.
    """
    if artifact_store is None:
        return None
    sid = str(session_id or "").strip()
    if not sid:
        return None
    fp_raw = str(file_path or "").strip()
    if not fp_raw:
        return None

    try:
        p = Path(fp_raw).expanduser()
    except Exception:
        return None
    try:
        resolved = p.resolve()
    except Exception:
        # Fall back to the unresolved path (e.g. broken symlink targets).
        resolved = p

    # Cheap size check via stat before reading; -1 means "stat failed",
    # in which case the size is re-checked after reading the bytes.
    try:
        size = int(resolved.stat().st_size)
    except Exception:
        size = -1
    max_bytes = _max_attachment_bytes()
    if size >= 0 and size > max_bytes:
        return None

    try:
        content = resolved.read_bytes()
    except Exception:
        return None
    if len(content) > max_bytes:
        return None

    sha256 = hashlib.sha256(bytes(content)).hexdigest()

    # Prefer a workspace-relative handle when a scope is configured.
    handle = resolved.as_posix()
    if scope is not None:
        try:
            handle = resolved.relative_to(scope.root).as_posix()
        except Exception:
            handle = resolved.as_posix()

    filename = resolved.name or (handle.split("/")[-1] if handle else "")
    guessed, _enc = mimetypes.guess_type(filename)
    content_type = str(guessed or "text/plain")

    # Dedupe: reuse an existing attachment artifact when both the handle and
    # the content hash match one already stored for this session's owner run.
    rid = session_memory_owner_run_id(sid)
    try:
        existing = artifact_store.list_by_run(str(rid))
    except Exception:
        existing = []
    for m in existing or []:
        tags = getattr(m, "tags", None)
        if not isinstance(tags, dict):
            continue
        if str(tags.get("kind") or "") != "attachment":
            continue
        if str(tags.get("path") or "") != str(handle):
            continue
        if str(tags.get("sha256") or "") == sha256:
            return {
                "artifact_id": str(getattr(m, "artifact_id", "") or ""),
                "handle": str(handle),
                "sha256": sha256,
                "content_type": content_type,
                "size_bytes": len(content),
            }

    # Make sure the owner run exists so run-scoped artifact APIs resolve.
    _ensure_session_memory_run_exists(session_id=sid)

    tags: Dict[str, str] = {
        "kind": "attachment",
        "source": "tool.read_file",
        "path": str(handle),
        "filename": str(filename),
        "session_id": sid,
        "sha256": sha256,
    }
    try:
        meta = artifact_store.store(bytes(content), content_type=str(content_type), run_id=str(rid), tags=tags)
    except Exception:
        return None
    return {
        "artifact_id": str(getattr(meta, "artifact_id", "") or ""),
        "handle": str(handle),
        "sha256": sha256,
        "content_type": content_type,
        "size_bytes": len(content),
    }
|
|
1700
|
+
|
|
1701
|
+
def _normalize_attachment_query(raw: Any) -> str:
|
|
1702
|
+
text = str(raw or "").strip()
|
|
1703
|
+
if not text:
|
|
1704
|
+
return ""
|
|
1705
|
+
if text.startswith("@"):
|
|
1706
|
+
text = text[1:].strip()
|
|
1707
|
+
text = text.replace("\\", "/")
|
|
1708
|
+
if text.lower().startswith("file://"):
|
|
1709
|
+
try:
|
|
1710
|
+
from urllib.parse import unquote, urlparse
|
|
1711
|
+
|
|
1712
|
+
parsed = urlparse(text)
|
|
1713
|
+
if parsed.scheme == "file":
|
|
1714
|
+
text = unquote(parsed.path)
|
|
1715
|
+
except Exception:
|
|
1716
|
+
# Best-effort fallback: strip the prefix.
|
|
1717
|
+
text = text[7:]
|
|
1718
|
+
text = str(text or "").strip()
|
|
1719
|
+
while text.startswith("./"):
|
|
1720
|
+
text = text[2:]
|
|
1721
|
+
return text
|
|
1722
|
+
|
|
1723
|
+
def _get_session_attachments() -> list[Dict[str, Any]]:
|
|
1724
|
+
nonlocal session_attachments_cache
|
|
1725
|
+
if session_attachments_cache is not None:
|
|
1726
|
+
return list(session_attachments_cache)
|
|
1727
|
+
if artifact_store is None or not sid_str:
|
|
1728
|
+
session_attachments_cache = []
|
|
1729
|
+
return []
|
|
1730
|
+
try:
|
|
1731
|
+
session_attachments_cache = list_session_attachments(
|
|
1732
|
+
artifact_store=artifact_store, session_id=sid_str, limit=5000
|
|
1733
|
+
)
|
|
1734
|
+
except Exception:
|
|
1735
|
+
session_attachments_cache = []
|
|
1736
|
+
return list(session_attachments_cache)
|
|
1737
|
+
|
|
1738
|
+
def _read_file_output_from_open_attachment(*, file_path: str, opened: Dict[str, Any]) -> Optional[str]:
|
|
1739
|
+
rendered = opened.get("rendered")
|
|
1740
|
+
if not isinstance(rendered, str) or not rendered.strip():
|
|
1741
|
+
return None
|
|
1742
|
+
|
|
1743
|
+
start2 = opened.get("start_line")
|
|
1744
|
+
end2 = opened.get("end_line")
|
|
1745
|
+
count = 0
|
|
1746
|
+
try:
|
|
1747
|
+
if isinstance(start2, int) and isinstance(end2, int) and start2 >= 1 and end2 >= start2:
|
|
1748
|
+
count = end2 - start2 + 1
|
|
1749
|
+
except Exception:
|
|
1750
|
+
count = 0
|
|
1751
|
+
|
|
1752
|
+
if count <= 0:
|
|
1753
|
+
try:
|
|
1754
|
+
count = len([ln for ln in rendered.splitlines() if re.match(r"^\\s*\\d+:\\s", ln)])
|
|
1755
|
+
except Exception:
|
|
1756
|
+
count = 0
|
|
1757
|
+
|
|
1758
|
+
header = f"File: {str(file_path)} ({max(0, int(count))} lines)"
|
|
1759
|
+
aid2 = str(opened.get("artifact_id") or "").strip()
|
|
1760
|
+
sha2 = str(opened.get("sha256") or "").strip()
|
|
1761
|
+
handle2 = str(opened.get("handle") or "").strip()
|
|
1762
|
+
bits2: list[str] = []
|
|
1763
|
+
if handle2:
|
|
1764
|
+
bits2.append(f"@{handle2}")
|
|
1765
|
+
if aid2:
|
|
1766
|
+
bits2.append(f"id={aid2}")
|
|
1767
|
+
if sha2:
|
|
1768
|
+
bits2.append(f"sha={sha2[:8]}…")
|
|
1769
|
+
info = f"(from attachment: {', '.join(bits2)})" if bits2 else "(from attachment)"
|
|
1770
|
+
|
|
1771
|
+
body = "\n".join(rendered.splitlines()[1:]).lstrip("\n")
|
|
1772
|
+
return header + "\n" + info + ("\n" + body if body else "")
|
|
1773
|
+
|
|
1774
|
+
def _attachment_backed_result_for_scope_error(
    *,
    tool_name: str,
    arguments: Dict[str, Any],
    call_id: str,
    runtime_call_id_out: Optional[str],
) -> Optional[Dict[str, Any]]:
    """Serve filesystem-ish tool calls from session attachments instead.

    Used when workspace scoping blocked a filesystem tool: read_file,
    list_files, skim_folders, skim_files, and search_files are answered from
    the session's attachment artifacts only. Returns a tool-result dict, or
    None to signal "no attachment-backed answer" so the original scope error
    stands. Requires an artifact store and a session id.
    """
    if artifact_store is None or not sid_str:
        return None

    def _as_result(*, success: bool, output: Any, error: Optional[str]) -> Dict[str, Any]:
        # Shape matches the executor's per-call result entries.
        return {
            "call_id": call_id,
            "runtime_call_id": runtime_call_id_out,
            "name": str(tool_name or ""),
            "success": bool(success),
            "output": output,
            "error": error if not success else None,
        }

    if tool_name == "read_file":
        # Accept the common aliases for the path argument.
        fp = (
            arguments.get("file_path")
            or arguments.get("path")
            or arguments.get("filename")
            or arguments.get("file")
        )
        fp_norm = _normalize_attachment_query(fp)
        if not fp_norm:
            return None

        start_line = arguments.get("start_line") or arguments.get("startLine") or arguments.get("start_line_one_indexed") or 1
        end_line = (
            arguments.get("end_line")
            or arguments.get("endLine")
            or arguments.get("end_line_one_indexed_inclusive")
            or arguments.get("end_line_one_indexed")
        )
        # Booleans are valid ints in Python; exclude them explicitly.
        try:
            start_i = int(start_line) if start_line is not None and not isinstance(start_line, bool) else 1
        except Exception:
            start_i = 1
        end_i: Optional[int] = None
        try:
            if end_line is not None and not isinstance(end_line, bool):
                end_i = int(end_line)
        except Exception:
            end_i = None

        ok, opened, err = execute_open_attachment(
            artifact_store=artifact_store,
            session_id=sid_str,
            artifact_id=None,
            handle=str(fp_norm),
            expected_sha256=None,
            start_line=int(start_i),
            end_line=int(end_i) if end_i is not None else None,
            max_chars=8000,
        )
        # "not found" → let the original scope error surface instead.
        if err == "attachment not found":
            return None
        if isinstance(opened, dict):
            _enqueue_pending_media(opened.get("media"))
        if ok:
            output_text = _read_file_output_from_open_attachment(file_path=str(fp), opened=opened)
            if output_text is not None:
                return _as_result(success=True, output=output_text, error=None)
        rendered = opened.get("rendered")
        if isinstance(rendered, str) and rendered.strip():
            return _as_result(success=False, output=rendered, error=str(err or "Failed to open attachment"))
        return _as_result(success=False, output=None, error=str(err or "Failed to open attachment"))

    if tool_name == "list_files":
        dir_path = arguments.get("directory_path") or arguments.get("path") or arguments.get("folder")
        prefix = _normalize_attachment_query(dir_path)
        if not prefix:
            return None

        entries = _get_session_attachments()
        if not entries:
            return None

        # Match the directory itself or anything under it.
        prefix_slash = prefix if prefix.endswith("/") else prefix + "/"
        matches: list[Dict[str, Any]] = []
        for e in entries:
            h = _normalize_attachment_query(e.get("handle"))
            if not h:
                continue
            if h == prefix or h.startswith(prefix_slash):
                matches.append(dict(e))

        if not matches:
            return None

        pattern = str(arguments.get("pattern") or "*").strip() or "*"
        recursive = bool(arguments.get("recursive"))
        include_hidden = bool(arguments.get("include_hidden") or arguments.get("includeHidden"))
        head_limit = arguments.get("head_limit") or arguments.get("headLimit")
        try:
            head_n = int(head_limit) if head_limit is not None and not isinstance(head_limit, bool) else 10
        except Exception:
            head_n = 10
        head_n = max(1, head_n)

        import fnmatch

        # '|'-separated glob alternatives, matched case-insensitively.
        patterns = [p.strip() for p in str(pattern).split("|") if p.strip()] or ["*"]

        def _matches(name: str) -> bool:
            low = str(name or "").lower()
            for pat in patterns:
                if fnmatch.fnmatch(low, pat.lower()):
                    return True
            return False

        def _is_hidden(rel: str) -> bool:
            # Hidden when any path segment starts with a dot.
            parts = [p for p in str(rel or "").split("/") if p]
            return any(p.startswith(".") for p in parts)

        rows: list[tuple[str, int]] = []
        for e in matches:
            h = _normalize_attachment_query(e.get("handle"))
            if not h:
                continue
            rel = h[len(prefix_slash) :] if h.startswith(prefix_slash) else h
            rel = rel.lstrip("/")
            if not rel:
                continue
            if not recursive and "/" in rel:
                continue
            if not include_hidden and _is_hidden(rel):
                continue
            if not _matches(rel):
                continue
            try:
                size_b = int(e.get("size_bytes") or 0)
            except Exception:
                size_b = 0
            rows.append((rel, size_b))

        rows.sort(key=lambda x: x[0].lower())
        shown = rows[:head_n]

        hidden_note = "hidden entries excluded" if not include_hidden else "hidden entries included"
        lines: list[str] = [
            f"Entries in '{prefix}' matching '{pattern}' ({hidden_note}; attachments only; filesystem access blocked):"
        ]
        if not shown:
            lines.append(" (no attached entries)")
        else:
            for rel, size_b in shown:
                size_disp = f" ({size_b:,} bytes)" if size_b > 0 else ""
                lines.append(f" {rel}{size_disp}")
        if len(rows) > head_n:
            lines.append(f" ... ({len(rows) - head_n} more)")

        return _as_result(success=True, output="\n".join(lines).rstrip(), error=None)

    if tool_name == "skim_folders":
        raw_paths = arguments.get("paths") or arguments.get("path") or arguments.get("folder")
        paths_list: list[str] = []
        if isinstance(raw_paths, list):
            paths_list = [str(p).strip() for p in raw_paths if isinstance(p, str) and p.strip()]
        elif isinstance(raw_paths, str) and raw_paths.strip():
            paths_list = [raw_paths.strip()]
        if not paths_list:
            return None

        entries = _get_session_attachments()
        if not entries:
            return None

        include_hidden = bool(arguments.get("include_hidden") or arguments.get("includeHidden"))
        blocks: list[str] = []
        matched_any = False

        for folder in paths_list:
            prefix = _normalize_attachment_query(folder)
            if not prefix:
                continue
            prefix_slash = prefix if prefix.endswith("/") else prefix + "/"
            rows: list[tuple[str, int]] = []
            for e in entries:
                h = _normalize_attachment_query(e.get("handle"))
                if not h:
                    continue
                if h == prefix or h.startswith(prefix_slash):
                    rel = h[len(prefix_slash) :] if h.startswith(prefix_slash) else h
                    rel = rel.lstrip("/")
                    if not rel:
                        continue
                    if not include_hidden and any(seg.startswith(".") for seg in rel.split("/") if seg):
                        continue
                    try:
                        size_b = int(e.get("size_bytes") or 0)
                    except Exception:
                        size_b = 0
                    rows.append((rel, size_b))

            if not rows:
                continue
            matched_any = True
            rows.sort(key=lambda x: x[0].lower())
            hidden_note = "hidden entries excluded" if not include_hidden else "hidden entries included"
            lines: list[str] = [
                f"Folder map for '{prefix}' ({hidden_note}; attachments only; filesystem access blocked):"
            ]
            # Cap the per-folder listing at 200 entries.
            for rel, size_b in rows[:200]:
                size_disp = f" ({size_b:,} bytes)" if size_b > 0 else ""
                lines.append(f" {rel}{size_disp}")
            if len(rows) > 200:
                lines.append(f" ... ({len(rows) - 200} more)")
            blocks.append("\n".join(lines).rstrip())

        if not matched_any:
            return None
        return _as_result(success=True, output="\n\n".join(blocks).rstrip(), error=None)

    if tool_name in {"skim_files", "search_files"}:
        # Attachment-backed reads/searches: operate only on session attachments, never the filesystem.
        entries = _get_session_attachments()
        if not entries:
            return None

        if tool_name == "skim_files":
            raw_paths = (
                arguments.get("paths")
                or arguments.get("path")
                or arguments.get("file_path")
                or arguments.get("filename")
                or arguments.get("file")
            )
            paths_list: list[str] = []
            if isinstance(raw_paths, list):
                paths_list = [str(p).strip() for p in raw_paths if isinstance(p, str) and p.strip()]
            elif isinstance(raw_paths, str) and raw_paths.strip():
                paths_list = [raw_paths.strip()]
            if not paths_list:
                return None

            head_lines = arguments.get("head_lines") or arguments.get("headLines") or 25
            try:
                head_n = int(head_lines) if head_lines is not None and not isinstance(head_lines, bool) else 25
            except Exception:
                head_n = 25
            # Clamp to [1, 400] preview lines.
            head_n = min(max(1, head_n), 400)

            rendered_blocks: list[str] = []
            matched_any = False
            for p in paths_list:
                p_norm = _normalize_attachment_query(p)
                if not p_norm:
                    continue
                ok, opened, err = execute_open_attachment(
                    artifact_store=artifact_store,
                    session_id=sid_str,
                    artifact_id=None,
                    handle=str(p_norm),
                    expected_sha256=None,
                    start_line=1,
                    end_line=int(head_n),
                    max_chars=12000,
                )
                # Missing attachments are skipped, not errors.
                if err == "attachment not found":
                    continue
                matched_any = True
                if isinstance(opened, dict):
                    _enqueue_pending_media(opened.get("media"))
                    block = opened.get("rendered")
                    if isinstance(block, str) and block.strip():
                        rendered_blocks.append(block.strip())
                    else:
                        rendered_blocks.append(f"Error: failed to skim attachment '{p_norm}'.")
                else:
                    rendered_blocks.append(f"Error: failed to skim attachment '{p_norm}': {err or 'unknown error'}")

            if not matched_any:
                return None

            out_text = "\n\n".join(rendered_blocks).strip()
            return _as_result(success=True, output=out_text, error=None)

        # search_files
        pattern = str(arguments.get("pattern") or "").strip()
        if not pattern:
            return None
        path_raw = arguments.get("path") or arguments.get("file_path") or arguments.get("directory_path") or ""
        path_norm = _normalize_attachment_query(path_raw)
        head_limit = arguments.get("head_limit") or arguments.get("headLimit")
        max_hits = arguments.get("max_hits") or arguments.get("maxHits")
        try:
            head_n = int(head_limit) if head_limit is not None and not isinstance(head_limit, bool) else 10
        except Exception:
            head_n = 10
        try:
            max_files = int(max_hits) if max_hits is not None and not isinstance(max_hits, bool) else 8
        except Exception:
            max_files = 8
        head_n = max(1, head_n)
        max_files = max(1, max_files)

        # User-supplied regex; reject invalid patterns with a failed result.
        try:
            rx = re.compile(pattern, re.IGNORECASE)
        except Exception as e:
            return _as_result(success=False, output=None, error=f"Invalid regex pattern '{pattern}': {e}")

        # Candidate selection: exact handle match wins, else prefix (folder)
        # match, else search everything.
        prefix_slash = ""
        candidates: list[Dict[str, Any]] = []
        if path_norm:
            exact = [e for e in entries if _normalize_attachment_query(e.get("handle")) == path_norm]
            if exact:
                candidates = exact
            else:
                prefix_slash = path_norm if path_norm.endswith("/") else path_norm + "/"
                candidates = [
                    e for e in entries if _normalize_attachment_query(e.get("handle")).startswith(prefix_slash)
                ]
        else:
            candidates = list(entries)

        if not candidates:
            return None

        out_lines: list[str] = [
            f"Search results in session attachments for pattern '{pattern}' (attachments only; filesystem access blocked):"
        ]
        matched_files = 0
        for e in candidates:
            if matched_files >= max_files:
                break
            aid = str(e.get("artifact_id") or "").strip()
            handle = _normalize_attachment_query(e.get("handle"))
            if not aid or not handle:
                continue
            art = artifact_store.load(aid)
            if art is None:
                continue
            # Only text attachments are searchable; skip undecodable content.
            try:
                text = art.content.decode("utf-8")
            except Exception:
                continue
            hits: list[str] = []
            for i, ln in enumerate(text.splitlines(), start=1):
                if rx.search(ln):
                    hits.append(f"{i}: {ln}")
                    if len(hits) >= head_n:
                        break
            if not hits:
                continue
            matched_files += 1
            out_lines.append(f"\nFile: {handle}")
            out_lines.extend([" " + h for h in hits])

        if matched_files == 0:
            return _as_result(success=True, output="No matches found in session attachments.", error=None)

        if matched_files >= max_files and len(candidates) > max_files:
            out_lines.append(f"\nNote: stopped after max_hits={max_files}; more attachments may match.")

        return _as_result(success=True, output="\n".join(out_lines).rstrip(), error=None)

    return None
|
|
2136
|
+
|
|
2137
|
+
# Parse + plan tool calls (preserve order; runtime-owned tools must not run ahead of host tools).
|
|
2138
|
+
for idx, tc in enumerate(tool_calls):
|
|
2139
|
+
if not isinstance(tc, dict):
|
|
2140
|
+
blocked_by_index[idx] = {
|
|
2141
|
+
"call_id": "",
|
|
2142
|
+
"runtime_call_id": None,
|
|
2143
|
+
"name": "",
|
|
2144
|
+
"success": False,
|
|
2145
|
+
"output": None,
|
|
2146
|
+
"error": "Invalid tool call (expected an object)",
|
|
2147
|
+
}
|
|
2148
|
+
tool_calls_for_evidence.append({})
|
|
2149
|
+
continue
|
|
2150
|
+
|
|
2151
|
+
name_raw = tc.get("name")
|
|
2152
|
+
name = name_raw.strip() if isinstance(name_raw, str) else ""
|
|
2153
|
+
call_id = str(tc.get("call_id") or "")
|
|
2154
|
+
runtime_call_id = tc.get("runtime_call_id")
|
|
2155
|
+
runtime_call_id_str = str(runtime_call_id).strip() if runtime_call_id is not None else ""
|
|
2156
|
+
runtime_call_id_out = runtime_call_id_str or None
|
|
2157
|
+
|
|
2158
|
+
if allowlist_enabled:
|
|
2159
|
+
if not name:
|
|
2160
|
+
blocked_by_index[idx] = {
|
|
2161
|
+
"call_id": call_id,
|
|
2162
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2163
|
+
"name": "",
|
|
2164
|
+
"success": False,
|
|
2165
|
+
"output": None,
|
|
2166
|
+
"error": "Tool call missing a valid name",
|
|
2167
|
+
}
|
|
2168
|
+
tool_calls_for_evidence.append(
|
|
2169
|
+
{"call_id": call_id, "runtime_call_id": runtime_call_id_out, "name": "", "arguments": {}}
|
|
2170
|
+
)
|
|
2171
|
+
continue
|
|
2172
|
+
if name not in allowed_tools:
|
|
2173
|
+
blocked_by_index[idx] = {
|
|
2174
|
+
"call_id": call_id,
|
|
2175
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2176
|
+
"name": name,
|
|
2177
|
+
"success": False,
|
|
2178
|
+
"output": None,
|
|
2179
|
+
"error": f"Tool '{name}' is not allowed for this node",
|
|
2180
|
+
}
|
|
2181
|
+
# Do not leak arguments for disallowed tools into the durable wait payload.
|
|
2182
|
+
tool_calls_for_evidence.append(
|
|
2183
|
+
{"call_id": call_id, "runtime_call_id": runtime_call_id_out, "name": name, "arguments": {}}
|
|
2184
|
+
)
|
|
2185
|
+
continue
|
|
2186
|
+
|
|
2187
|
+
raw_arguments = tc.get("arguments") or {}
|
|
2188
|
+
arguments = dict(raw_arguments) if isinstance(raw_arguments, dict) else (_loads_dict_like(raw_arguments) or {})
|
|
2189
|
+
|
|
2190
|
+
if name == "open_attachment":
|
|
2191
|
+
tool_calls_for_evidence.append(
|
|
2192
|
+
{
|
|
2193
|
+
"call_id": call_id,
|
|
2194
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2195
|
+
"name": name,
|
|
2196
|
+
"arguments": dict(arguments),
|
|
2197
|
+
}
|
|
2198
|
+
)
|
|
2199
|
+
planned.append(
|
|
2200
|
+
{
|
|
2201
|
+
"idx": idx,
|
|
2202
|
+
"kind": "runtime",
|
|
2203
|
+
"name": name,
|
|
2204
|
+
"call_id": call_id,
|
|
2205
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2206
|
+
"arguments": dict(arguments),
|
|
2207
|
+
}
|
|
2208
|
+
)
|
|
2209
|
+
continue
|
|
2210
|
+
|
|
2211
|
+
# Host tools: rewrite under workspace scope (when configured) before execution.
|
|
2212
|
+
tc2 = dict(tc)
|
|
2213
|
+
if scope is not None:
|
|
2214
|
+
try:
|
|
2215
|
+
rewritten_args = rewrite_tool_arguments(tool_name=name, args=arguments, scope=scope)
|
|
2216
|
+
tc2["arguments"] = rewritten_args
|
|
2217
|
+
except Exception as e:
|
|
2218
|
+
fixed = _attachment_backed_result_for_scope_error(
|
|
2219
|
+
tool_name=name,
|
|
2220
|
+
arguments=dict(arguments),
|
|
2221
|
+
call_id=call_id,
|
|
2222
|
+
runtime_call_id_out=runtime_call_id_out,
|
|
2223
|
+
)
|
|
2224
|
+
if fixed is not None:
|
|
2225
|
+
blocked_by_index[idx] = fixed
|
|
2226
|
+
tool_calls_for_evidence.append(
|
|
2227
|
+
{
|
|
2228
|
+
"call_id": call_id,
|
|
2229
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2230
|
+
"name": name,
|
|
2231
|
+
"arguments": tc.get("arguments") or {},
|
|
2232
|
+
}
|
|
2233
|
+
)
|
|
2234
|
+
continue
|
|
2235
|
+
blocked_by_index[idx] = {
|
|
2236
|
+
"call_id": call_id,
|
|
2237
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2238
|
+
"name": name,
|
|
2239
|
+
"success": False,
|
|
2240
|
+
"output": None,
|
|
2241
|
+
"error": str(e),
|
|
2242
|
+
}
|
|
2243
|
+
tool_calls_for_evidence.append(
|
|
2244
|
+
{
|
|
2245
|
+
"call_id": call_id,
|
|
2246
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2247
|
+
"name": name,
|
|
2248
|
+
"arguments": tc.get("arguments") or {},
|
|
2249
|
+
}
|
|
2250
|
+
)
|
|
2251
|
+
continue
|
|
2252
|
+
else:
|
|
2253
|
+
tc2["arguments"] = dict(arguments)
|
|
2254
|
+
|
|
2255
|
+
tool_calls_for_evidence.append(tc2)
|
|
2256
|
+
planned.append({"idx": idx, "kind": "host", "name": name, "tc": tc2})
|
|
2257
|
+
|
|
2258
|
+
# Fast path: if nothing is planned (everything blocked), return blocked results.
|
|
2259
|
+
if not planned and blocked_by_index:
|
|
2260
|
+
merged_results: list[Any] = []
|
|
2261
|
+
for idx in range(len(tool_calls)):
|
|
2262
|
+
fixed = blocked_by_index.get(idx)
|
|
2263
|
+
merged_results.append(
|
|
2264
|
+
fixed
|
|
2265
|
+
if fixed is not None
|
|
2266
|
+
else {
|
|
2267
|
+
"call_id": "",
|
|
2268
|
+
"runtime_call_id": None,
|
|
2269
|
+
"name": "",
|
|
2270
|
+
"success": False,
|
|
2271
|
+
"output": None,
|
|
2272
|
+
"error": "Missing tool result",
|
|
2273
|
+
}
|
|
2274
|
+
)
|
|
2275
|
+
return EffectOutcome.completed(result={"mode": "executed", "results": merged_results})
|
|
2276
|
+
|
|
2277
|
+
has_host_calls = any(item.get("kind") == "host" for item in planned)
|
|
2278
|
+
if not has_host_calls:
|
|
2279
|
+
results_by_index: Dict[int, Dict[str, Any]] = dict(blocked_by_index)
|
|
2280
|
+
for item in planned:
|
|
2281
|
+
if item.get("kind") != "runtime":
|
|
2282
|
+
continue
|
|
2283
|
+
args = dict(item.get("arguments") or {})
|
|
2284
|
+
call_id = str(item.get("call_id") or "")
|
|
2285
|
+
runtime_call_id_out = item.get("runtime_call_id")
|
|
2286
|
+
aid = args.get("artifact_id") or args.get("$artifact") or args.get("id")
|
|
2287
|
+
handle = args.get("handle") or args.get("path")
|
|
2288
|
+
expected_sha256 = args.get("expected_sha256") or args.get("sha256")
|
|
2289
|
+
start_line = args.get("start_line") or args.get("startLine") or 1
|
|
2290
|
+
end_line = args.get("end_line") or args.get("endLine")
|
|
2291
|
+
max_chars = args.get("max_chars") or args.get("maxChars") or 8000
|
|
2292
|
+
|
|
2293
|
+
if artifact_store is None:
|
|
2294
|
+
results_by_index[item["idx"]] = {
|
|
2295
|
+
"call_id": call_id,
|
|
2296
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2297
|
+
"name": "open_attachment",
|
|
2298
|
+
"success": False,
|
|
2299
|
+
"output": {"rendered": "Error: ArtifactStore is not available (cannot open attachments)."},
|
|
2300
|
+
"error": "ArtifactStore is not available",
|
|
2301
|
+
}
|
|
2302
|
+
continue
|
|
2303
|
+
|
|
2304
|
+
success, output, err = execute_open_attachment(
|
|
2305
|
+
artifact_store=artifact_store,
|
|
2306
|
+
session_id=sid_str,
|
|
2307
|
+
artifact_id=str(aid).strip() if aid is not None else None,
|
|
2308
|
+
handle=str(handle).strip() if handle is not None else None,
|
|
2309
|
+
expected_sha256=str(expected_sha256).strip() if expected_sha256 is not None else None,
|
|
2310
|
+
start_line=int(start_line) if not isinstance(start_line, bool) else 1,
|
|
2311
|
+
end_line=int(end_line) if end_line is not None and not isinstance(end_line, bool) else None,
|
|
2312
|
+
max_chars=int(max_chars) if not isinstance(max_chars, bool) else 8000,
|
|
2313
|
+
)
|
|
2314
|
+
if bool(success) and isinstance(output, dict):
|
|
2315
|
+
_enqueue_pending_media(output.get("media"))
|
|
2316
|
+
results_by_index[item["idx"]] = {
|
|
2317
|
+
"call_id": call_id,
|
|
2318
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2319
|
+
"name": "open_attachment",
|
|
2320
|
+
"success": bool(success),
|
|
2321
|
+
"output": _jsonable(output),
|
|
2322
|
+
"error": str(err or "") if not success else None,
|
|
2323
|
+
}
|
|
2324
|
+
|
|
2325
|
+
merged_results: list[Any] = []
|
|
2326
|
+
for idx in range(len(tool_calls)):
|
|
2327
|
+
r = results_by_index.get(idx)
|
|
2328
|
+
merged_results.append(
|
|
2329
|
+
r
|
|
2330
|
+
if r is not None
|
|
2331
|
+
else {
|
|
2332
|
+
"call_id": "",
|
|
2333
|
+
"runtime_call_id": None,
|
|
2334
|
+
"name": "",
|
|
2335
|
+
"success": False,
|
|
2336
|
+
"output": None,
|
|
2337
|
+
"error": "Missing tool result",
|
|
2338
|
+
}
|
|
2339
|
+
)
|
|
2340
|
+
return EffectOutcome.completed(result={"mode": "executed", "results": merged_results})
|
|
2341
|
+
|
|
2342
|
+
# Detect delegating executors (best-effort): passthrough/untrusted modes cannot safely
|
|
2343
|
+
# interleave runtime-owned tools with host tools, so we fall back to the legacy wait behavior.
|
|
2344
|
+
executor_delegates = False
|
|
2345
|
+
try:
|
|
2346
|
+
probe = tools.execute(tool_calls=[])
|
|
2347
|
+
mode_probe = probe.get("mode")
|
|
2348
|
+
executor_delegates = bool(mode_probe and mode_probe != "executed")
|
|
2349
|
+
except Exception:
|
|
2350
|
+
executor_delegates = False
|
|
2351
|
+
|
|
2352
|
+
if executor_delegates:
|
|
2353
|
+
host_tool_calls: list[Dict[str, Any]] = []
|
|
2354
|
+
for item in planned:
|
|
2355
|
+
if item.get("kind") == "runtime":
|
|
2356
|
+
args = dict(item.get("arguments") or {})
|
|
2357
|
+
aid = args.get("artifact_id") or args.get("$artifact") or args.get("id")
|
|
2358
|
+
handle = args.get("handle") or args.get("path")
|
|
2359
|
+
expected_sha256 = args.get("expected_sha256") or args.get("sha256")
|
|
2360
|
+
start_line = args.get("start_line") or args.get("startLine") or 1
|
|
2361
|
+
end_line = args.get("end_line") or args.get("endLine")
|
|
2362
|
+
max_chars = args.get("max_chars") or args.get("maxChars") or 8000
|
|
2363
|
+
|
|
2364
|
+
if artifact_store is None:
|
|
2365
|
+
pre_results_by_index[item["idx"]] = {
|
|
2366
|
+
"call_id": item.get("call_id") or "",
|
|
2367
|
+
"runtime_call_id": item.get("runtime_call_id"),
|
|
2368
|
+
"name": "open_attachment",
|
|
2369
|
+
"success": False,
|
|
2370
|
+
"output": {"rendered": "Error: ArtifactStore is not available (cannot open attachments)."},
|
|
2371
|
+
"error": "ArtifactStore is not available",
|
|
2372
|
+
}
|
|
2373
|
+
continue
|
|
2374
|
+
|
|
2375
|
+
success, output, err = execute_open_attachment(
|
|
2376
|
+
artifact_store=artifact_store,
|
|
2377
|
+
session_id=sid_str,
|
|
2378
|
+
artifact_id=str(aid).strip() if aid is not None else None,
|
|
2379
|
+
handle=str(handle).strip() if handle is not None else None,
|
|
2380
|
+
expected_sha256=str(expected_sha256).strip() if expected_sha256 is not None else None,
|
|
2381
|
+
start_line=int(start_line) if not isinstance(start_line, bool) else 1,
|
|
2382
|
+
end_line=int(end_line) if end_line is not None and not isinstance(end_line, bool) else None,
|
|
2383
|
+
max_chars=int(max_chars) if not isinstance(max_chars, bool) else 8000,
|
|
2384
|
+
)
|
|
2385
|
+
if bool(success) and isinstance(output, dict):
|
|
2386
|
+
_enqueue_pending_media(output.get("media"))
|
|
2387
|
+
pre_results_by_index[item["idx"]] = {
|
|
2388
|
+
"call_id": item.get("call_id") or "",
|
|
2389
|
+
"runtime_call_id": item.get("runtime_call_id"),
|
|
2390
|
+
"name": "open_attachment",
|
|
2391
|
+
"success": bool(success),
|
|
2392
|
+
"output": _jsonable(output),
|
|
2393
|
+
"error": str(err or "") if not success else None,
|
|
2394
|
+
}
|
|
2395
|
+
continue
|
|
2396
|
+
|
|
2397
|
+
if item.get("kind") == "host":
|
|
2398
|
+
tc2 = item.get("tc")
|
|
2399
|
+
if isinstance(tc2, dict):
|
|
2400
|
+
host_tool_calls.append(tc2)
|
|
2401
|
+
|
|
2402
|
+
try:
|
|
2403
|
+
result = tools.execute(tool_calls=host_tool_calls)
|
|
2404
|
+
except Exception as e:
|
|
2405
|
+
logger.error("TOOL_CALLS execution failed", error=str(e))
|
|
2406
|
+
return EffectOutcome.failed(str(e))
|
|
2407
|
+
|
|
2408
|
+
mode = result.get("mode")
|
|
2409
|
+
if mode and mode != "executed":
|
|
2410
|
+
wait_key = payload.get("wait_key") or result.get("wait_key") or f"tool_calls:{run.run_id}:{run.current_node}"
|
|
2411
|
+
raw_wait_reason = result.get("wait_reason")
|
|
2412
|
+
wait_reason = WaitReason.EVENT
|
|
2413
|
+
if isinstance(raw_wait_reason, str) and raw_wait_reason.strip():
|
|
2414
|
+
try:
|
|
2415
|
+
wait_reason = WaitReason(raw_wait_reason.strip())
|
|
2416
|
+
except ValueError:
|
|
2417
|
+
wait_reason = WaitReason.EVENT
|
|
2418
|
+
elif str(mode).strip().lower() == "delegated":
|
|
2419
|
+
wait_reason = WaitReason.JOB
|
|
2420
|
+
|
|
2421
|
+
tool_calls_for_wait = result.get("tool_calls")
|
|
2422
|
+
if not isinstance(tool_calls_for_wait, list):
|
|
2423
|
+
tool_calls_for_wait = host_tool_calls
|
|
2424
|
+
|
|
2425
|
+
details: Dict[str, Any] = {"mode": mode, "tool_calls": _jsonable(tool_calls_for_wait)}
|
|
2426
|
+
executor_details = result.get("details")
|
|
2427
|
+
if isinstance(executor_details, dict) and executor_details:
|
|
2428
|
+
details["executor"] = _jsonable(executor_details)
|
|
2429
|
+
if blocked_by_index or pre_results_by_index:
|
|
2430
|
+
details["original_call_count"] = original_call_count
|
|
2431
|
+
if blocked_by_index:
|
|
2432
|
+
details["blocked_by_index"] = {str(k): _jsonable(v) for k, v in blocked_by_index.items()}
|
|
2433
|
+
if pre_results_by_index:
|
|
2434
|
+
details["pre_results_by_index"] = {str(k): _jsonable(v) for k, v in pre_results_by_index.items()}
|
|
2435
|
+
details["tool_calls_for_evidence"] = _jsonable(tool_calls_for_evidence)
|
|
2436
|
+
|
|
2437
|
+
wait = WaitState(
|
|
2438
|
+
reason=wait_reason,
|
|
2439
|
+
wait_key=str(wait_key),
|
|
2440
|
+
resume_to_node=payload.get("resume_to_node") or default_next_node,
|
|
2441
|
+
result_key=effect.result_key,
|
|
2442
|
+
details=details,
|
|
2443
|
+
)
|
|
2444
|
+
return EffectOutcome.waiting(wait)
|
|
2445
|
+
|
|
2446
|
+
# Defensive: if a delegating executor unexpectedly executes, merge like legacy path.
|
|
2447
|
+
existing_results = result.get("results")
|
|
2448
|
+
merged_results: list[Any] = []
|
|
2449
|
+
executed_iter = iter(existing_results if isinstance(existing_results, list) else [])
|
|
2450
|
+
for idx in range(len(tool_calls)):
|
|
2451
|
+
fixed = pre_results_by_index.get(idx) or blocked_by_index.get(idx)
|
|
2452
|
+
if fixed is not None:
|
|
2453
|
+
merged_results.append(fixed)
|
|
2454
|
+
continue
|
|
2455
|
+
try:
|
|
2456
|
+
merged_results.append(next(executed_iter))
|
|
2457
|
+
except StopIteration:
|
|
2458
|
+
merged_results.append(
|
|
2459
|
+
{
|
|
2460
|
+
"call_id": "",
|
|
2461
|
+
"runtime_call_id": None,
|
|
2462
|
+
"name": "",
|
|
2463
|
+
"success": False,
|
|
2464
|
+
"output": None,
|
|
2465
|
+
"error": "Missing tool result",
|
|
2466
|
+
}
|
|
2467
|
+
)
|
|
2468
|
+
return EffectOutcome.completed(result={"mode": "executed", "results": merged_results})
|
|
2469
|
+
|
|
2470
|
+
# Executing mode: preserve ordering by interleaving runtime-owned tools and host tools.
|
|
2471
|
+
results_by_index: Dict[int, Dict[str, Any]] = dict(blocked_by_index)
|
|
2472
|
+
|
|
2473
|
+
i = 0
|
|
2474
|
+
while i < len(planned):
|
|
2475
|
+
item = planned[i]
|
|
2476
|
+
kind = item.get("kind")
|
|
2477
|
+
if kind == "runtime":
|
|
2478
|
+
args = dict(item.get("arguments") or {})
|
|
2479
|
+
call_id = str(item.get("call_id") or "")
|
|
2480
|
+
runtime_call_id_out = item.get("runtime_call_id")
|
|
2481
|
+
aid = args.get("artifact_id") or args.get("$artifact") or args.get("id")
|
|
2482
|
+
handle = args.get("handle") or args.get("path")
|
|
2483
|
+
expected_sha256 = args.get("expected_sha256") or args.get("sha256")
|
|
2484
|
+
start_line = args.get("start_line") or args.get("startLine") or 1
|
|
2485
|
+
end_line = args.get("end_line") or args.get("endLine")
|
|
2486
|
+
max_chars = args.get("max_chars") or args.get("maxChars") or 8000
|
|
2487
|
+
|
|
2488
|
+
if artifact_store is None:
|
|
2489
|
+
results_by_index[item["idx"]] = {
|
|
2490
|
+
"call_id": call_id,
|
|
2491
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2492
|
+
"name": "open_attachment",
|
|
2493
|
+
"success": False,
|
|
2494
|
+
"output": {"rendered": "Error: ArtifactStore is not available (cannot open attachments)."},
|
|
2495
|
+
"error": "ArtifactStore is not available",
|
|
2496
|
+
}
|
|
2497
|
+
i += 1
|
|
2498
|
+
continue
|
|
2499
|
+
|
|
2500
|
+
success, output, err = execute_open_attachment(
|
|
2501
|
+
artifact_store=artifact_store,
|
|
2502
|
+
session_id=sid_str,
|
|
2503
|
+
artifact_id=str(aid).strip() if aid is not None else None,
|
|
2504
|
+
handle=str(handle).strip() if handle is not None else None,
|
|
2505
|
+
expected_sha256=str(expected_sha256).strip() if expected_sha256 is not None else None,
|
|
2506
|
+
start_line=int(start_line) if not isinstance(start_line, bool) else 1,
|
|
2507
|
+
end_line=int(end_line) if end_line is not None and not isinstance(end_line, bool) else None,
|
|
2508
|
+
max_chars=int(max_chars) if not isinstance(max_chars, bool) else 8000,
|
|
2509
|
+
)
|
|
2510
|
+
if bool(success) and isinstance(output, dict):
|
|
2511
|
+
_enqueue_pending_media(output.get("media"))
|
|
2512
|
+
results_by_index[item["idx"]] = {
|
|
2513
|
+
"call_id": call_id,
|
|
2514
|
+
"runtime_call_id": runtime_call_id_out,
|
|
2515
|
+
"name": "open_attachment",
|
|
2516
|
+
"success": bool(success),
|
|
2517
|
+
"output": _jsonable(output),
|
|
2518
|
+
"error": str(err or "") if not success else None,
|
|
2519
|
+
}
|
|
2520
|
+
i += 1
|
|
2521
|
+
continue
|
|
2522
|
+
|
|
2523
|
+
# Host tool segment.
|
|
2524
|
+
seg_items: list[Dict[str, Any]] = []
|
|
2525
|
+
seg_calls: list[Dict[str, Any]] = []
|
|
2526
|
+
while i < len(planned) and planned[i].get("kind") == "host":
|
|
2527
|
+
seg_items.append(planned[i])
|
|
2528
|
+
tc2 = planned[i].get("tc")
|
|
2529
|
+
if isinstance(tc2, dict):
|
|
2530
|
+
seg_calls.append(tc2)
|
|
2531
|
+
i += 1
|
|
2532
|
+
|
|
2533
|
+
if not seg_calls:
|
|
2534
|
+
continue
|
|
2535
|
+
|
|
2536
|
+
try:
|
|
2537
|
+
seg_result = tools.execute(tool_calls=seg_calls)
|
|
2538
|
+
except Exception as e:
|
|
2539
|
+
logger.error("TOOL_CALLS execution failed", error=str(e))
|
|
2540
|
+
return EffectOutcome.failed(str(e))
|
|
2541
|
+
|
|
2542
|
+
mode = seg_result.get("mode")
|
|
2543
|
+
if mode and mode != "executed":
|
|
2544
|
+
return EffectOutcome.failed("ToolExecutor returned delegated mode during executed TOOL_CALLS batch")
|
|
2545
|
+
|
|
2546
|
+
seg_results = seg_result.get("results")
|
|
2547
|
+
if not isinstance(seg_results, list):
|
|
2548
|
+
return EffectOutcome.failed("ToolExecutor returned invalid results")
|
|
2549
|
+
|
|
2550
|
+
# Map results back to original tool call indices and register read_file outputs as attachments.
|
|
2551
|
+
max_inline_bytes = 256 * 1024
|
|
2552
|
+
try:
|
|
2553
|
+
raw_max_inline = str(os.getenv("ABSTRACTRUNTIME_MAX_INLINE_BYTES", "") or "").strip()
|
|
2554
|
+
if raw_max_inline:
|
|
2555
|
+
max_inline_bytes = max(1, int(raw_max_inline))
|
|
2556
|
+
except Exception:
|
|
2557
|
+
max_inline_bytes = 256 * 1024
|
|
2558
|
+
|
|
2559
|
+
for seg_item, r in zip(seg_items, seg_results):
|
|
2560
|
+
idx = int(seg_item.get("idx") or 0)
|
|
2561
|
+
r_out: Any = r
|
|
2562
|
+
if isinstance(r, dict):
|
|
2563
|
+
r_out = dict(r)
|
|
2564
|
+
else:
|
|
2565
|
+
tc2 = seg_item.get("tc") if isinstance(seg_item.get("tc"), dict) else {}
|
|
2566
|
+
r_out = {
|
|
2567
|
+
"call_id": str(tc2.get("call_id") or ""),
|
|
2568
|
+
"runtime_call_id": tc2.get("runtime_call_id"),
|
|
2569
|
+
"name": str(tc2.get("name") or ""),
|
|
2570
|
+
"success": False,
|
|
2571
|
+
"output": None,
|
|
2572
|
+
"error": "Invalid tool result",
|
|
2573
|
+
}
|
|
2574
|
+
|
|
2575
|
+
if seg_item.get("name") != "read_file":
|
|
2576
|
+
results_by_index[idx] = _jsonable(r_out)
|
|
2577
|
+
continue
|
|
2578
|
+
tc2 = seg_item.get("tc")
|
|
2579
|
+
args = tc2.get("arguments") if isinstance(tc2, dict) else None
|
|
2580
|
+
if not isinstance(args, dict):
|
|
2581
|
+
args = {}
|
|
2582
|
+
fp = args.get("file_path") or args.get("path") or args.get("filename") or args.get("file")
|
|
2583
|
+
if fp is None:
|
|
2584
|
+
results_by_index[idx] = _jsonable(r_out)
|
|
2585
|
+
continue
|
|
2586
|
+
|
|
2587
|
+
# Fallback: if filesystem read_file fails, attempt to resolve from the session attachment store.
|
|
2588
|
+
#
|
|
2589
|
+
# This supports browser uploads (no server-side file path) and intentionally bypasses
|
|
2590
|
+
# workspace allow/ignore policies because the user explicitly provided the bytes.
|
|
2591
|
+
if isinstance(r, dict) and r.get("success") is not True and artifact_store is not None and sid_str:
|
|
2592
|
+
start_line = args.get("start_line") or args.get("startLine") or args.get("start_line_one_indexed") or 1
|
|
2593
|
+
end_line = (
|
|
2594
|
+
args.get("end_line")
|
|
2595
|
+
or args.get("endLine")
|
|
2596
|
+
or args.get("end_line_one_indexed_inclusive")
|
|
2597
|
+
or args.get("end_line_one_indexed")
|
|
2598
|
+
)
|
|
2599
|
+
try:
|
|
2600
|
+
start_i = int(start_line) if start_line is not None and not isinstance(start_line, bool) else 1
|
|
2601
|
+
except Exception:
|
|
2602
|
+
start_i = 1
|
|
2603
|
+
end_i: Optional[int] = None
|
|
2604
|
+
try:
|
|
2605
|
+
if end_line is not None and not isinstance(end_line, bool):
|
|
2606
|
+
end_i = int(end_line)
|
|
2607
|
+
except Exception:
|
|
2608
|
+
end_i = None
|
|
2609
|
+
|
|
2610
|
+
success2, out2, _err2 = execute_open_attachment(
|
|
2611
|
+
artifact_store=artifact_store,
|
|
2612
|
+
session_id=sid_str,
|
|
2613
|
+
artifact_id=None,
|
|
2614
|
+
handle=str(fp),
|
|
2615
|
+
expected_sha256=None,
|
|
2616
|
+
start_line=int(start_i),
|
|
2617
|
+
end_line=int(end_i) if end_i is not None else None,
|
|
2618
|
+
max_chars=8000,
|
|
2619
|
+
)
|
|
2620
|
+
if isinstance(out2, dict):
|
|
2621
|
+
_enqueue_pending_media(out2.get("media"))
|
|
2622
|
+
if success2 and isinstance(out2, dict):
|
|
2623
|
+
output_text = _read_file_output_from_open_attachment(file_path=str(fp), opened=out2)
|
|
2624
|
+
if output_text is not None and isinstance(r_out, dict):
|
|
2625
|
+
r_out["success"] = True
|
|
2626
|
+
r_out["output"] = output_text
|
|
2627
|
+
r_out["error"] = None
|
|
2628
|
+
results_by_index[idx] = _jsonable(r_out)
|
|
2629
|
+
continue
|
|
2630
|
+
|
|
2631
|
+
if not isinstance(r, dict) or r.get("success") is not True:
|
|
2632
|
+
results_by_index[idx] = _jsonable(r_out)
|
|
2633
|
+
continue
|
|
2634
|
+
out = r.get("output")
|
|
2635
|
+
if not isinstance(out, str) or not out.lstrip().startswith("File:"):
|
|
2636
|
+
results_by_index[idx] = _jsonable(r_out)
|
|
2637
|
+
continue
|
|
2638
|
+
att = _register_read_file_as_attachment(session_id=sid_str, file_path=str(fp))
|
|
2639
|
+
if att and isinstance(r_out, dict):
|
|
2640
|
+
# If the read_file output would be offloaded anyway, keep the durable ledger lean by
|
|
2641
|
+
# returning a stub and rely on the attachment + open_attachment for bounded excerpts.
|
|
2642
|
+
try:
|
|
2643
|
+
n = len(out.encode("utf-8"))
|
|
2644
|
+
except Exception:
|
|
2645
|
+
n = len(out)
|
|
2646
|
+
if n > max_inline_bytes:
|
|
2647
|
+
aid = str(att.get("artifact_id") or "").strip()
|
|
2648
|
+
handle = str(att.get("handle") or "").strip()
|
|
2649
|
+
sha = str(att.get("sha256") or "").strip()
|
|
2650
|
+
sha_disp = (sha[:8] + "…") if sha else ""
|
|
2651
|
+
display = handle.replace("\\", "/")
|
|
2652
|
+
if _is_abs_path_like(display):
|
|
2653
|
+
display = display.rsplit("/", 1)[-1] or display
|
|
2654
|
+
hint = (
|
|
2655
|
+
f"[read_file]: (stored as attachment) @{display} "
|
|
2656
|
+
f"(id={aid}{', sha=' + sha_disp if sha_disp else ''}).\n"
|
|
2657
|
+
f"Use open_attachment(artifact_id='{aid}', start_line=1, end_line=200) for bounded excerpts."
|
|
2658
|
+
)
|
|
2659
|
+
r_out["output"] = hint
|
|
2660
|
+
|
|
2661
|
+
results_by_index[idx] = _jsonable(r_out)
|
|
2662
|
+
|
|
2663
|
+
# Fill missing results when executor returned fewer entries than expected.
|
|
2664
|
+
if len(seg_results) < len(seg_items):
|
|
2665
|
+
for seg_item in seg_items[len(seg_results) :]:
|
|
2666
|
+
idx = int(seg_item.get("idx") or 0)
|
|
2667
|
+
tc2 = seg_item.get("tc") if isinstance(seg_item.get("tc"), dict) else {}
|
|
2668
|
+
results_by_index[idx] = {
|
|
2669
|
+
"call_id": str(tc2.get("call_id") or ""),
|
|
2670
|
+
"runtime_call_id": tc2.get("runtime_call_id"),
|
|
2671
|
+
"name": str(tc2.get("name") or ""),
|
|
2672
|
+
"success": False,
|
|
2673
|
+
"output": None,
|
|
2674
|
+
"error": "Missing tool result",
|
|
2675
|
+
}
|
|
2676
|
+
|
|
2677
|
+
merged_results: list[Any] = []
|
|
2678
|
+
for idx in range(len(tool_calls)):
|
|
2679
|
+
r = results_by_index.get(idx)
|
|
2680
|
+
if r is None:
|
|
2681
|
+
merged_results.append(
|
|
2682
|
+
{
|
|
2683
|
+
"call_id": "",
|
|
2684
|
+
"runtime_call_id": None,
|
|
2685
|
+
"name": "",
|
|
2686
|
+
"success": False,
|
|
2687
|
+
"output": None,
|
|
2688
|
+
"error": "Missing tool result",
|
|
2689
|
+
}
|
|
2690
|
+
)
|
|
2691
|
+
else:
|
|
2692
|
+
merged_results.append(r)
|
|
109
2693
|
|
|
110
|
-
return EffectOutcome.completed(result=
|
|
2694
|
+
return EffectOutcome.completed(result={"mode": "executed", "results": merged_results})
|
|
111
2695
|
|
|
112
2696
|
return _handler
|
|
113
2697
|
|
|
114
2698
|
|
|
115
|
-
def build_effect_handlers(
    *,
    llm: AbstractCoreLLMClient,
    tools: Optional[ToolExecutor] = None,
    artifact_store: Optional[ArtifactStore] = None,
    run_store: Optional[RunStore] = None,
) -> Dict[EffectType, Any]:
    """Build the EffectType -> handler mapping used by the runtime.

    Args:
        llm: AbstractCore-backed LLM client; serves ``LLM_CALL`` effects.
        tools: Optional host tool executor; serves ``TOOL_CALLS`` effects.
            May be ``None`` when the host provides no tool execution
            (annotation fixed from the implicit-Optional ``ToolExecutor = None``).
        artifact_store: Optional artifact/attachment store shared by both
            handlers (e.g. for session-attachment support in tool calls).
        run_store: Optional run persistence store, forwarded to the
            tool-calls handler.

    Returns:
        A dict mapping each supported ``EffectType`` to its handler callable.
    """
    return {
        EffectType.LLM_CALL: make_llm_call_handler(llm=llm, artifact_store=artifact_store),
        EffectType.TOOL_CALLS: make_tool_calls_handler(
            tools=tools, artifact_store=artifact_store, run_store=run_store
        ),
    }
|