abstractcode 0.2.0-py3-none-any.whl → 0.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcode/__init__.py +1 -1
- abstractcode/cli.py +911 -9
- abstractcode/file_mentions.py +276 -0
- abstractcode/flow_cli.py +1413 -0
- abstractcode/fullscreen_ui.py +2473 -158
- abstractcode/gateway_cli.py +715 -0
- abstractcode/py.typed +1 -0
- abstractcode/react_shell.py +8140 -546
- abstractcode/recall.py +384 -0
- abstractcode/remember.py +184 -0
- abstractcode/terminal_markdown.py +557 -0
- abstractcode/theme.py +244 -0
- abstractcode/workflow_agent.py +1412 -0
- abstractcode/workflow_cli.py +229 -0
- abstractcode-0.3.1.dist-info/METADATA +158 -0
- abstractcode-0.3.1.dist-info/RECORD +21 -0
- {abstractcode-0.2.0.dist-info → abstractcode-0.3.1.dist-info}/WHEEL +1 -1
- abstractcode-0.2.0.dist-info/METADATA +0 -160
- abstractcode-0.2.0.dist-info/RECORD +0 -11
- {abstractcode-0.2.0.dist-info → abstractcode-0.3.1.dist-info}/entry_points.txt +0 -0
- {abstractcode-0.2.0.dist-info → abstractcode-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {abstractcode-0.2.0.dist-info → abstractcode-0.3.1.dist-info}/top_level.txt +0 -0
abstractcode/workflow_agent.py (new file):

@@ -0,0 +1,1412 @@

```python
from __future__ import annotations

import json
import re
import zipfile
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple

from abstractagent.agents.base import BaseAgent
from abstractruntime import RunState, RunStatus, Runtime, WorkflowSpec


def _now_iso() -> str:
    return datetime.now(timezone.utc).isoformat()


_UI_EVENT_NAMESPACE = "abstract"

_STATUS_EVENT_NAME = f"{_UI_EVENT_NAMESPACE}.status"
_MESSAGE_EVENT_NAME = f"{_UI_EVENT_NAMESPACE}.message"
_TOOL_EXEC_EVENT_NAME = f"{_UI_EVENT_NAMESPACE}.tool_execution"
_TOOL_RESULT_EVENT_NAME = f"{_UI_EVENT_NAMESPACE}.tool_result"


def _normalize_ui_event_name(name: str) -> str:
    s = str(name or "").strip()
    if s.startswith("abstractcode."):
        return f"{_UI_EVENT_NAMESPACE}.{s[len('abstractcode.'):]}".strip(".")
    return s
```
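The normalization above folds legacy `abstractcode.*` event names into the `abstract.*` namespace used by the `_*_EVENT_NAME` constants and passes everything else through unchanged. A few illustrative calls (the assertions are added here for exposition and are not part of the package):

```python
# Illustrative only: behavior of _normalize_ui_event_name.
assert _normalize_ui_event_name("abstractcode.status") == "abstract.status"
assert _normalize_ui_event_name("abstract.tool_result") == "abstract.tool_result"  # already normalized
assert _normalize_ui_event_name("  custom.event ") == "custom.event"  # unknown namespaces pass through
```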
```python
def _new_message(*, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    from uuid import uuid4

    meta: Dict[str, Any] = dict(metadata) if isinstance(metadata, dict) else {}
    meta.setdefault("message_id", f"msg_{uuid4().hex}")
    return {
        "role": str(role or "").strip() or "user",
        "content": str(content or ""),
        "timestamp": _now_iso(),
        "metadata": meta,
    }


def _copy_messages(messages: Any) -> List[Dict[str, Any]]:
    if not isinstance(messages, list):
        return []
    out: List[Dict[str, Any]] = []
    for m in messages:
        if isinstance(m, dict):
            out.append(dict(m))
    return out


@dataclass(frozen=True)
class ResolvedVisualFlow:
    visual_flow: Dict[str, Any]
    flows: Dict[str, Dict[str, Any]]
    flows_dir: Path
    bundle_id: Optional[str] = None
    bundle_version: Optional[str] = None


def _default_flows_dir() -> Path:
    try:
        from .flow_cli import default_flows_dir

        return default_flows_dir()
    except Exception:
        return Path("flows")


def _default_bundles_dir() -> Path:
    """Best-effort location for `.flow` bundles (WorkflowBundle zips)."""
    try:
        from abstractruntime.workflow_bundle import default_workflow_bundles_dir  # type: ignore

        return default_workflow_bundles_dir()
    except Exception:
        candidate = Path("flows") / "bundles"
        if candidate.exists() and candidate.is_dir():
            return candidate
        return Path("flows")


def _is_flow_bundle(path: Path) -> bool:
    try:
        if path.suffix.lower() == ".flow":
            return True
    except Exception:
        pass
    try:
        return zipfile.is_zipfile(path)
    except Exception:
        return False


def _load_visual_flows_from_bundle(bundle_path: Path) -> Tuple[Dict[str, Dict[str, Any]], Dict[str, Any]]:
    """Load VisualFlow JSON objects from a `.flow` bundle (zip).

    Returns: (flows_by_id, manifest_dict)
    """
    try:
        from abstractruntime.workflow_bundle import open_workflow_bundle  # type: ignore
    except Exception as e:  # pragma: no cover
        raise RuntimeError(
            "AbstractRuntime workflow_bundle support is required to run `.flow` bundles.\n"
            'Install with: pip install "abstractruntime"'
        ) from e

    bundle = open_workflow_bundle(bundle_path)
    man = bundle.manifest

    entrypoints: List[Dict[str, Any]] = []
    for ep in getattr(man, "entrypoints", None) or []:
        fid = str(getattr(ep, "flow_id", "") or "").strip()
        if not fid:
            continue
        entrypoints.append(
            {
                "flow_id": fid,
                "name": str(getattr(ep, "name", "") or ""),
                "description": str(getattr(ep, "description", "") or ""),
                "interfaces": list(getattr(ep, "interfaces", []) or []),
            }
        )

    manifest: Dict[str, Any] = {
        "bundle_id": str(getattr(man, "bundle_id", "") or ""),
        "bundle_version": str(getattr(man, "bundle_version", "") or ""),
        "default_entrypoint": str(getattr(man, "default_entrypoint", "") or ""),
        "entrypoints": entrypoints,
        "flows": dict(getattr(man, "flows", None) or {}),
    }

    flows: Dict[str, Dict[str, Any]] = {}
    for fid, rel in (getattr(man, "flows", None) or {}).items():
        if not isinstance(rel, str) or not rel.strip():
            continue
        try:
            raw = bundle.read_json(rel)
        except Exception:
            continue
        if not isinstance(raw, dict):
            continue
        flow_id = str(raw.get("id") or fid or "").strip()
        if not flow_id:
            continue
        flows[flow_id] = raw
    return flows, manifest
```
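For orientation, the manifest dict returned by `_load_visual_flows_from_bundle` always carries the five keys built above; a sketch of its shape (the ids and paths are hypothetical, borrowed from the example refs in `resolve_visual_flow` below):

```python
# Hypothetical manifest shape; all keys are always present, values are invented.
example_manifest = {
    "bundle_id": "basic-llm",
    "bundle_version": "0.0.2",
    "default_entrypoint": "c4bd3db6",
    "entrypoints": [
        {
            "flow_id": "c4bd3db6",
            "name": "Basic LLM",
            "description": "",
            "interfaces": ["abstractcode.agent.v1"],
        }
    ],
    "flows": {"c4bd3db6": "flows/c4bd3db6.json"},  # flow_id -> path inside the zip
}
```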
```python
def _load_visual_flows(flows_dir: Path) -> Dict[str, Dict[str, Any]]:
    flows: Dict[str, Dict[str, Any]] = {}
    if not flows_dir.exists():
        return flows
    for path in sorted(flows_dir.glob("*.json")):
        try:
            raw = json.loads(path.read_text(encoding="utf-8"))
        except Exception:
            continue
        if not isinstance(raw, dict):
            continue
        fid = str(raw.get("id") or "").strip()
        if not fid:
            continue
        flows[fid] = raw
    return flows


def resolve_visual_flow(
    flow_ref: str,
    *,
    flows_dir: Optional[str],
    require_interface: Optional[str] = None,
) -> ResolvedVisualFlow:
    """Resolve a VisualFlow by id, name, or path to a `.json` or bundled `.flow` file.

    Also accepts bundle refs (by bundle_id), e.g.:
    - basic-agent
    - basic-llm@0.0.2
    - basic-llm.flow
    - basic-llm@0.0.2.flow
    - basic-llm:c4bd3db6 (bundle_id:flow_id)
    - basic-llm@0.0.2:c4bd3db6
    """
    ref_raw = str(flow_ref or "").strip()
    if not ref_raw:
        raise ValueError("flow reference is required (flow id, name, .json/.flow path, or bundle id)")

    def _require_flow_interface(raw: Dict[str, Any]) -> None:
        if not require_interface:
            return
        interfaces = raw.get("interfaces")
        if isinstance(interfaces, list) and require_interface in interfaces:
            return
        raise ValueError(f"Workflow does not implement '{require_interface}'")

    ref = ref_raw
    bundle_flow_id: Optional[str] = None
    # Support bundle_id:flow_id (like AbstractCode web). Avoid clobbering Windows drive letters.
    if ":" in ref and not re.match(r"^[A-Za-z]:[\\\\/]", ref):
        left, right = ref.split(":", 1)
        if left.strip() and right.strip():
            ref = left.strip()
            bundle_flow_id = right.strip()

    path = Path(ref).expanduser()
    flows_dir_path: Path
    if path.exists() and path.is_file() and _is_flow_bundle(path):
        flows, manifest = _load_visual_flows_from_bundle(path)
        bundle_id = str(manifest.get("bundle_id") or "").strip() or None
        bundle_version = str(manifest.get("bundle_version") or "").strip() or None
        default_id = str(manifest.get("default_entrypoint") or "").strip()
        selected_id = bundle_flow_id or default_id
        if not selected_id and flows:
            selected_id = next(iter(flows.keys()))
        vf = flows.get(selected_id) if selected_id else None
        if vf is None:
            available = ", ".join(sorted(flows.keys()))
            raise ValueError(f"Bundle entrypoint '{selected_id}' not found in {path} (available: {available})")
        # Prefer bundle-level interface markers when present; fall back to flow.interfaces.
        if require_interface:
            try:
                eps = list(manifest.get("entrypoints") or [])
                ep = next(
                    (
                        e
                        for e in eps
                        if isinstance(e, dict) and str(e.get("flow_id") or "").strip() == str(selected_id)
                    ),
                    None,
                )
                if ep is None:
                    raise ValueError
                if require_interface not in list(ep.get("interfaces") or []):
                    raise ValueError
            except Exception:
                _require_flow_interface(vf)
        return ResolvedVisualFlow(
            visual_flow=vf,
            flows=flows,
            flows_dir=path.resolve(),
            bundle_id=bundle_id,
            bundle_version=bundle_version,
        )

    if path.exists() and path.is_file():
        try:
            raw = json.loads(path.read_text(encoding="utf-8"))
        except Exception as e:
            raise ValueError(f"Cannot read flow file: {path}") from e
        if not isinstance(raw, dict):
            raise ValueError(f"Flow JSON must be an object: {path}")
        flows_dir_path = Path(flows_dir).expanduser().resolve() if flows_dir else path.parent.resolve()
        flows = _load_visual_flows(flows_dir_path)
        fid = str(raw.get("id") or "").strip()
        if fid:
            flows[fid] = raw
        _require_flow_interface(raw)
        return ResolvedVisualFlow(visual_flow=raw, flows=flows, flows_dir=flows_dir_path)

    # Prefer installed bundle refs when a bundle_id (or entrypoint name) matches.
    try:
        from abstractruntime.workflow_bundle import WorkflowBundleRegistry, WorkflowBundleRegistryError  # type: ignore

        reg = WorkflowBundleRegistry(_default_bundles_dir())
        ep = reg.resolve_entrypoint(ref_raw, interface=require_interface)
        b = reg.resolve_bundle(ep.bundle_ref)
        flows2, _manifest = _load_visual_flows_from_bundle(b.path)
        vf = flows2.get(ep.flow_id)
        if vf is None:
            available = ", ".join(sorted(flows2.keys()))
            raise ValueError(f"Bundle entrypoint '{ep.flow_id}' not found in {b.path} (available: {available})")
        return ResolvedVisualFlow(
            visual_flow=vf,
            flows=flows2,
            flows_dir=b.path.resolve(),
            bundle_id=str(ep.bundle_id),
            bundle_version=str(ep.bundle_version),
        )
    except WorkflowBundleRegistryError:
        pass
    except Exception:
        pass

    flows_dir_path = Path(flows_dir).expanduser().resolve() if flows_dir else _default_flows_dir().resolve()
    flows = _load_visual_flows(flows_dir_path)

    if ref in flows:
        _require_flow_interface(flows[ref])
        return ResolvedVisualFlow(visual_flow=flows[ref], flows=flows, flows_dir=flows_dir_path)

    # Fall back to exact name match (case-insensitive).
    matches: list[Dict[str, Any]] = []
    needle = ref.casefold()
    for vf in flows.values():
        name = vf.get("name")
        if isinstance(name, str) and name.strip() and name.strip().casefold() == needle:
            matches.append(vf)

    if not matches:
        raise ValueError(f"Flow '{ref_raw}' not found in {flows_dir_path}")
    if len(matches) > 1:
        options = ", ".join([f"{str(v.get('name') or '')} ({str(v.get('id') or '')})" for v in matches])
        raise ValueError(f"Multiple flows match '{ref}': {options}")

    vf = matches[0]
    _require_flow_interface(vf)
    return ResolvedVisualFlow(visual_flow=vf, flows=flows, flows_dir=flows_dir_path)
```
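Putting the resolution rules together, all of the following call shapes go through the same function. The ids and paths here are hypothetical; note that `flows_dir` is keyword-only and has no default:

```python
# Hypothetical refs showing the forms resolve_visual_flow accepts.
resolved = resolve_visual_flow("my-flow-id", flows_dir=None)                # flow id in the flows dir
resolved = resolve_visual_flow("My Flow", flows_dir="./flows")              # case-insensitive name match
resolved = resolve_visual_flow("./flows/demo.json", flows_dir=None)         # direct .json path
resolved = resolve_visual_flow("basic-llm@0.0.2:c4bd3db6", flows_dir=None)  # installed bundle ref
resolved = resolve_visual_flow(
    "basic-llm.flow",
    flows_dir=None,
    require_interface="abstractcode.agent.v1",  # raises ValueError if not implemented
)
```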
```python
def _tool_definitions_from_callables(tools: List[Callable[..., Any]]) -> List[Any]:
    from abstractcore.tools import ToolDefinition

    out: List[Any] = []
    for t in tools:
        tool_def = getattr(t, "_tool_definition", None) or ToolDefinition.from_function(t)
        out.append(tool_def)
    return out


def _workflow_registry() -> Any:
    try:
        from abstractruntime import WorkflowRegistry  # type: ignore

        return WorkflowRegistry()
    except Exception:  # pragma: no cover
        try:
            from abstractruntime.scheduler.registry import WorkflowRegistry  # type: ignore

            return WorkflowRegistry()
        except Exception:  # pragma: no cover

            class WorkflowRegistry(dict):  # type: ignore[no-redef]
                def register(self, workflow: Any) -> None:
                    self[str(getattr(workflow, "workflow_id", ""))] = workflow

            return WorkflowRegistry()


def _node_type_str(node: Any) -> str:
    if isinstance(node, dict):
        return str(node.get("type") or "")
    t = getattr(node, "type", None)
    return t.value if hasattr(t, "value") else str(t or "")


def _subflow_id(node: Any) -> Optional[str]:
    data = node.get("data") if isinstance(node, dict) else getattr(node, "data", None)
    if not isinstance(data, dict):
        return None
    sid = data.get("subflowId") or data.get("flowId") or data.get("workflowId") or data.get("workflow_id")
    if isinstance(sid, str) and sid.strip():
        return sid.strip()
    return None


def _compile_visual_flow_tree(
    *,
    root: Dict[str, Any],
    flows: Dict[str, Dict[str, Any]],
    tools: List[Callable[..., Any]],
    runtime: Runtime,
    bundle_id: Optional[str] = None,
    bundle_version: Optional[str] = None,
) -> Tuple[WorkflowSpec, Any]:
    from abstractruntime.visualflow_compiler import compile_visualflow
    from abstractruntime.visualflow_compiler.visual.agent_ids import visual_react_workflow_id

    # Collect referenced subflows (cycles are allowed; compile/register each id once).
    ordered_ids: List[str] = []
    seen: set[str] = set()
    queue: List[str] = [str(root.get("id") or "")]

    while queue:
        fid = queue.pop(0)
        if not fid or fid in seen:
            continue
        vf_raw = flows.get(fid)
        if vf_raw is None:
            raise ValueError(f"Subflow '{fid}' not found in loaded flows")
        seen.add(fid)
        ordered_ids.append(fid)

        for n in list(vf_raw.get("nodes") or []):
            if _node_type_str(n) != "subflow":
                continue
            sid = _subflow_id(n)
            if sid:
                queue.append(sid)

    bundle_ref = None
    if isinstance(bundle_id, str) and bundle_id.strip() and isinstance(bundle_version, str) and bundle_version.strip():
        bundle_ref = f"{bundle_id.strip()}@{bundle_version.strip()}"

    def _namespace(prefix: str, flow_id: str) -> str:
        return f"{prefix}:{flow_id}"

    def _namespace_visualflow_raw(*, raw: Dict[str, Any], prefix: str, flow_id: str, id_map: Dict[str, str]) -> Dict[str, Any]:
        def _rewrite(v: Any) -> Any:
            if isinstance(v, str):
                s = v.strip()
                return id_map.get(s) or v
            if isinstance(v, list):
                return [_rewrite(x) for x in v]
            if isinstance(v, dict):
                return {k: _rewrite(v2) for k, v2 in v.items()}
            return v

        out_any = _rewrite(raw)
        out: Dict[str, Any] = dict(out_any) if isinstance(out_any, dict) else dict(raw)
        out["id"] = id_map.get(flow_id) or _namespace(prefix, flow_id)

        nodes_raw = out.get("nodes")
        if isinstance(nodes_raw, list):
            new_nodes: list[Any] = []
            for n_any in nodes_raw:
                n = dict(n_any) if isinstance(n_any, dict) else n_any
                if isinstance(n, dict) and str(n.get("type") or "") == "agent":
                    node_id = str(n.get("id") or "").strip()
                    data = n.get("data")
                    data_d = dict(data) if isinstance(data, dict) else {}
                    cfg_raw = data_d.get("agentConfig")
                    cfg = dict(cfg_raw) if isinstance(cfg_raw, dict) else {}
                    if node_id:
                        cfg["_react_workflow_id"] = visual_react_workflow_id(flow_id=str(out.get("id") or ""), node_id=node_id)
                    data_d["agentConfig"] = cfg
                    n["data"] = data_d
                new_nodes.append(n)
            out["nodes"] = new_nodes

        return out

    registry = _workflow_registry()

    specs_by_id: Dict[str, WorkflowSpec] = {}
    id_map: Dict[str, str] = {}
    if bundle_ref:
        id_map = {fid: _namespace(bundle_ref, fid) for fid in ordered_ids}

    compiled_flows: list[Dict[str, Any]] = []
    for fid in ordered_ids:
        raw0 = flows.get(fid)
        if raw0 is None:
            continue
        raw = (
            _namespace_visualflow_raw(raw=raw0, prefix=bundle_ref, flow_id=fid, id_map=id_map)
            if bundle_ref
            else dict(raw0)
        )
        try:
            spec = compile_visualflow(raw)
        except Exception as e:
            raise RuntimeError(f"Failed compiling VisualFlow '{fid}': {e}") from e
        specs_by_id[str(spec.workflow_id)] = spec
        compiled_flows.append(raw)
        register = getattr(registry, "register", None)
        if callable(register):
            register(spec)
        else:
            registry[str(spec.workflow_id)] = spec

    # Register per-Agent-node ReAct subworkflows so visual Agent nodes can run.
    agent_nodes: List[Tuple[str, Dict[str, Any]]] = []
    for vf in compiled_flows:
        flow_id = str(vf.get("id") or "").strip()
        for n in list(vf.get("nodes") or []):
            if _node_type_str(n) != "agent":
                continue
            data = n.get("data") if isinstance(n, dict) else None
            cfg = data.get("agentConfig", {}) if isinstance(data, dict) else {}
            cfg = dict(cfg) if isinstance(cfg, dict) else {}
            wf_id_raw = cfg.get("_react_workflow_id")
            wf_id = (
                wf_id_raw.strip()
                if isinstance(wf_id_raw, str) and wf_id_raw.strip()
                else visual_react_workflow_id(flow_id=flow_id or "unknown", node_id=str((n.get("id") if isinstance(n, dict) else "") or ""))
            )
            agent_nodes.append((wf_id, cfg))

    if agent_nodes:
        from abstractagent.adapters.react_runtime import create_react_workflow
        from abstractagent.logic.builtins import (
            ASK_USER_TOOL,
            COMPACT_MEMORY_TOOL,
            DELEGATE_AGENT_TOOL,
            INSPECT_VARS_TOOL,
            OPEN_ATTACHMENT_TOOL,
            RECALL_MEMORY_TOOL,
            REMEMBER_NOTE_TOOL,
            REMEMBER_TOOL,
        )
        from abstractagent.logic.react import ReActLogic

        def _normalize_tool_names(raw: Any) -> List[str]:
            if not isinstance(raw, list):
                return []
            out: List[str] = []
            for t in raw:
                if isinstance(t, str) and t.strip():
                    out.append(t.strip())
            return out

        tool_defs = [
            ASK_USER_TOOL,
            OPEN_ATTACHMENT_TOOL,
            RECALL_MEMORY_TOOL,
            INSPECT_VARS_TOOL,
            REMEMBER_TOOL,
            REMEMBER_NOTE_TOOL,
            COMPACT_MEMORY_TOOL,
            DELEGATE_AGENT_TOOL,
            *_tool_definitions_from_callables(tools),
        ]

        for workflow_id, cfg in agent_nodes:
            tools_selected = _normalize_tool_names(cfg.get("tools"))
            logic = ReActLogic(tools=tool_defs, max_tokens=None)
            sub = create_react_workflow(
                logic=logic,
                workflow_id=workflow_id,
                provider=None,
                model=None,
                allowed_tools=tools_selected,
                on_step=None,
            )
            register = getattr(registry, "register", None)
            if callable(register):
                register(sub)
            else:
                registry[str(sub.workflow_id)] = sub

    if hasattr(runtime, "set_workflow_registry"):
        runtime.set_workflow_registry(registry)  # type: ignore[call-arg]
    else:  # pragma: no cover
        raise RuntimeError("Runtime does not support workflow registries (required for subflows/agent nodes).")

    root_id = str(root.get("id") or "")
    root_wid = id_map.get(root_id) if bundle_ref else root_id
    root_spec = specs_by_id.get(root_wid)
    if root_spec is None:
        # Shouldn't happen because root id was seeded into the queue.
        raise RuntimeError(f"Root workflow '{root_wid or root_id}' was not compiled/registered.")
    return root_spec, registry
```
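When flows come from a bundle, every flow id is rewritten before compilation so two installed versions of the same bundle cannot collide in the registry. A small illustration of the id mapping built by `_compile_visual_flow_tree` (the bundle and flow ids are hypothetical):

```python
# Hypothetical ids: how bundle-scoped flow ids are namespaced.
bundle_id, bundle_version = "basic-llm", "0.0.2"
bundle_ref = f"{bundle_id}@{bundle_version}"           # "basic-llm@0.0.2"
ordered_ids = ["c4bd3db6", "helper-subflow"]
id_map = {fid: f"{bundle_ref}:{fid}" for fid in ordered_ids}
# {'c4bd3db6': 'basic-llm@0.0.2:c4bd3db6', 'helper-subflow': 'basic-llm@0.0.2:helper-subflow'}
```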
```python
def _apply_abstractcode_agent_v1_scaffold(flow: Dict[str, Any], *, include_recommended: bool = True) -> None:
    """Best-effort: ensure required pins exist for `abstractcode.agent.v1` flows.

    This mirrors the VisualFlow interface scaffold in `abstractflow.visual.interfaces`,
    but operates directly on raw dict JSON so AbstractCode can run bundles without
    depending on AbstractFlow.
    """
    iid = "abstractcode.agent.v1"

    interfaces = flow.get("interfaces")
    if not isinstance(interfaces, list):
        interfaces = []
        flow["interfaces"] = interfaces
    if iid not in interfaces:
        interfaces.append(iid)

    nodes = flow.get("nodes")
    if not isinstance(nodes, list):
        return

    def _ensure_node_data(node: Dict[str, Any]) -> Dict[str, Any]:
        data = node.get("data")
        if not isinstance(data, dict):
            data = {}
            node["data"] = data
        return data

    def _ensure_pin_list(data: Dict[str, Any], key: str) -> list[dict[str, Any]]:
        pins_any = data.get(key)
        if not isinstance(pins_any, list):
            pins: list[dict[str, Any]] = []
            data[key] = pins
            return pins
        if all(isinstance(p, dict) for p in pins_any):
            return pins_any  # type: ignore[return-value]
        filtered: list[dict[str, Any]] = [p for p in pins_any if isinstance(p, dict)]
        data[key] = filtered
        return filtered

    def _ensure_pin(pins: list[dict[str, Any]], *, pin_id: str, pin_type: str, label: Optional[str] = None) -> None:
        if any(isinstance(p.get("id"), str) and p.get("id") == pin_id for p in pins):
            return
        pins.append({"id": pin_id, "label": label if isinstance(label, str) else pin_id, "type": pin_type})

    start = next((n for n in nodes if isinstance(n, dict) and str(n.get("type") or "") == "on_flow_start"), None)
    if isinstance(start, dict):
        data = _ensure_node_data(start)
        outs = _ensure_pin_list(data, "outputs")
        _ensure_pin(outs, pin_id="exec-out", pin_type="execution", label="")
        _ensure_pin(outs, pin_id="provider", pin_type="provider")
        _ensure_pin(outs, pin_id="model", pin_type="model")
        _ensure_pin(outs, pin_id="prompt", pin_type="string")
        if include_recommended:
            _ensure_pin(outs, pin_id="tools", pin_type="tools")

    for end in [n for n in nodes if isinstance(n, dict) and str(n.get("type") or "") == "on_flow_end"]:
        data = _ensure_node_data(end)
        ins = _ensure_pin_list(data, "inputs")
        _ensure_pin(ins, pin_id="exec-in", pin_type="execution", label="")
        _ensure_pin(ins, pin_id="response", pin_type="string")
        _ensure_pin(ins, pin_id="success", pin_type="boolean")
        _ensure_pin(ins, pin_id="meta", pin_type="object")
        if include_recommended:
            _ensure_pin(ins, pin_id="scratchpad", pin_type="object")


def _validate_abstractcode_agent_v1(flow: Dict[str, Any]) -> List[str]:
    """Minimal contract validation for `abstractcode.agent.v1` workflows (stdlib-only)."""
    errors: list[str] = []
    nodes = flow.get("nodes")
    if not isinstance(nodes, list):
        return ["Flow.nodes must be a list"]

    def _pins(node: Dict[str, Any], key: str) -> Optional[set[str]]:
        data = node.get("data")
        if not isinstance(data, dict):
            return None
        pins = data.get(key)
        if not isinstance(pins, list):
            return None
        out: set[str] = set()
        for p in pins:
            if not isinstance(p, dict):
                continue
            if p.get("type") == "execution":
                continue
            pid = p.get("id")
            if isinstance(pid, str) and pid.strip():
                out.add(pid.strip())
        return out

    start = next((n for n in nodes if isinstance(n, dict) and str(n.get("type") or "") == "on_flow_start"), None)
    if not isinstance(start, dict):
        errors.append("Missing On Flow Start node (type=on_flow_start)")
    else:
        outs = _pins(start, "outputs")
        required_out = {"prompt", "provider", "model", "tools"}
        if outs is not None and not required_out.issubset(outs):
            missing = ", ".join(sorted(required_out - outs))
            errors.append(f"On Flow Start outputs missing: {missing}")

    end = next((n for n in nodes if isinstance(n, dict) and str(n.get("type") or "") == "on_flow_end"), None)
    if not isinstance(end, dict):
        errors.append("Missing On Flow End node (type=on_flow_end)")
    else:
        ins = _pins(end, "inputs")
        required_in = {"response", "success", "meta"}
        if ins is not None and not required_in.issubset(ins):
            missing = ", ".join(sorted(required_in - ins))
            errors.append(f"On Flow End inputs missing: {missing}")

    return errors
```
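Taken together, the scaffold and validator mean a bare start/end flow can be made contract-compliant in place. A minimal sketch exercising the module-private helpers above (node ids and the empty edges list are invented for the example):

```python
# Minimal flow dict (hypothetical ids) run through scaffold + validation.
flow = {
    "id": "demo-flow",
    "name": "Demo",
    "nodes": [
        {"id": "n1", "type": "on_flow_start", "data": {}},
        {"id": "n2", "type": "on_flow_end", "data": {}},
    ],
    "edges": [],
}
_apply_abstractcode_agent_v1_scaffold(flow)          # adds interfaces + required pins in place
assert flow["interfaces"] == ["abstractcode.agent.v1"]
assert _validate_abstractcode_agent_v1(flow) == []   # contract now satisfied
```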
```python
class WorkflowAgent(BaseAgent):
    """Run a VisualFlow workflow as a RunnableFlow in AbstractCode.

    Contract: the workflow must declare `interfaces: ["abstractcode.agent.v1"]` and expose:
    - On Flow Start output pins (required): `provider` (provider), `model` (model), `prompt` (string)
    - On Flow End input pins (required): `response` (string), `success` (boolean), `meta` (object)
    """

    def __init__(
        self,
        *,
        runtime: Runtime,
        flow_ref: str,
        flows_dir: Optional[str] = None,
        tools: Optional[List[Callable[..., Any]]] = None,
        on_step: Optional[Callable[[str, Dict[str, Any]], None]] = None,
        max_iterations: int = 25,
        max_tokens: Optional[int] = None,
        actor_id: Optional[str] = None,
        session_id: Optional[str] = None,
    ):
        self._max_iterations = int(max_iterations) if isinstance(max_iterations, int) else 25
        if self._max_iterations < 1:
            self._max_iterations = 1
        self._max_tokens = max_tokens
        self._flow_ref = str(flow_ref or "").strip()
        if not self._flow_ref:
            raise ValueError("flow_ref is required")

        ABSTRACTCODE_AGENT_V1 = "abstractcode.agent.v1"
        resolved = resolve_visual_flow(self._flow_ref, flows_dir=flows_dir, require_interface=ABSTRACTCODE_AGENT_V1)
        self.visual_flow = resolved.visual_flow
        self.flows = resolved.flows
        self.flows_dir = resolved.flows_dir
        self._bundle_id = resolved.bundle_id
        self._bundle_version = resolved.bundle_version

        _apply_abstractcode_agent_v1_scaffold(self.visual_flow, include_recommended=True)

        errors = _validate_abstractcode_agent_v1(self.visual_flow)
        if errors:
            joined = "\n".join([f"- {e}" for e in errors])
            raise ValueError(f"Workflow does not implement '{ABSTRACTCODE_AGENT_V1}':\n{joined}")

        self._last_task: Optional[str] = None
        self._ledger_unsubscribe: Optional[Callable[[], None]] = None
        self._node_labels_by_id: Dict[str, str] = {}

        super().__init__(
            runtime=runtime,
            tools=tools,
            on_step=on_step,
            actor_id=actor_id,
            session_id=session_id,
        )

    def _create_workflow(self) -> WorkflowSpec:
        tools = list(self.tools or [])
        spec, _registry = _compile_visual_flow_tree(
            root=self.visual_flow,
            flows=self.flows,
            tools=tools,
            runtime=self.runtime,
            bundle_id=self._bundle_id,
            bundle_version=self._bundle_version,
        )
        return spec

    def start(
        self,
        task: str,
        *,
        allowed_tools: Optional[List[str]] = None,
        attachments: Optional[List[Any]] = None,
        **_: Any,
    ) -> str:
        task = str(task or "").strip()
        if not task:
            raise ValueError("task must be a non-empty string")

        self._last_task = task

        try:
            base_limits = dict(self.runtime.config.to_limits_dict())
        except Exception:
            base_limits = {}
        limits: Dict[str, Any] = dict(base_limits)
        limits.setdefault("warn_iterations_pct", 80)
        limits.setdefault("warn_tokens_pct", 80)
        limits["max_iterations"] = int(self._max_iterations)
        limits["current_iteration"] = 0
        limits.setdefault("max_history_messages", -1)
        limits.setdefault("estimated_tokens_used", 0)
        if self._max_tokens is not None:
            try:
                mt = int(self._max_tokens)
            except Exception:
                mt = None
            if isinstance(mt, int) and mt > 0:
                limits["max_tokens"] = mt

        runtime_provider = getattr(getattr(self.runtime, "config", None), "provider", None)
        runtime_model = getattr(getattr(self.runtime, "config", None), "model", None)

        vars: Dict[str, Any] = {
            "prompt": task,
            "context": {"task": task, "messages": _copy_messages(self.session_messages)},
            "_temp": {},
            "_limits": limits,
        }
        if attachments:
            items = list(attachments) if isinstance(attachments, tuple) else attachments if isinstance(attachments, list) else []
            normalized: list[Any] = []
            for a in items:
                if isinstance(a, dict):
                    normalized.append(dict(a))
                elif isinstance(a, str) and a.strip():
                    normalized.append(a.strip())
            if normalized:
                ctx = vars.get("context")
                if not isinstance(ctx, dict):
                    ctx = {"task": task, "messages": _copy_messages(self.session_messages)}
                    vars["context"] = ctx
                ctx["attachments"] = normalized

        if isinstance(runtime_provider, str) and runtime_provider.strip():
            vars["provider"] = runtime_provider.strip()
        if isinstance(runtime_model, str) and runtime_model.strip():
            vars["model"] = runtime_model.strip()

        if isinstance(allowed_tools, list):
            normalized = [str(t).strip() for t in allowed_tools if isinstance(t, str) and t.strip()]
            vars["tools"] = normalized
            vars["_runtime"] = {"allowed_tools": normalized}
        else:
            # Provide a safe default so interface-scaffolded `tools` pins resolve.
            vars["tools"] = []

        actor_id = self._ensure_actor_id()
        session_id = self._ensure_session_id()

        run_id = self.runtime.start(
            workflow=self.workflow,
            vars=vars,
            actor_id=actor_id,
            session_id=session_id,
        )
        self._current_run_id = run_id

        # Build a stable node_id -> label map for UX (used for status updates).
        try:
            labels: Dict[str, str] = {}
            for n in list(self.visual_flow.get("nodes") or []):
                if not isinstance(n, dict):
                    continue
                nid = n.get("id")
                if not isinstance(nid, str) or not nid:
                    continue
                data = n.get("data")
                label = data.get("label") if isinstance(data, dict) else None
                if isinstance(label, str) and label.strip():
                    labels[nid] = label.strip()
            self._node_labels_by_id = labels
        except Exception:
            self._node_labels_by_id = {}

        # Subscribe to ledger records so we can surface real-time status updates
        # even while a blocking effect (LLM/tool HTTP) is in-flight.
        self._ledger_unsubscribe = None
        if self.on_step:
            try:
                self._ledger_unsubscribe = self._subscribe_ui_events(actor_id=actor_id, session_id=session_id)
            except Exception:
                self._ledger_unsubscribe = None

        if self.on_step:
            try:
                self.on_step(
                    "init",
                    {
                        "flow_id": str(self.visual_flow.get("id") or ""),
                        "flow_name": str(self.visual_flow.get("name") or ""),
                        "bundle_id": self._bundle_id,
                        "bundle_version": self._bundle_version,
                    },
                )
            except Exception:
                pass

        return run_id

    def _subscribe_ui_events(self, *, actor_id: str, session_id: str) -> Optional[Callable[[], None]]:
        """Subscribe to ledger appends and translate reserved workflow UX events into on_step(...).

        This is best-effort and must never affect correctness.
        """

        def _extract_text(payload: Any) -> str:
            if isinstance(payload, str):
                return payload
            if isinstance(payload, dict):
                v0 = payload.get("value")
                if isinstance(v0, str) and v0.strip():
                    return v0.strip()
                for k in ("text", "message", "status"):
                    v = payload.get(k)
                    if isinstance(v, str) and v.strip():
                        return v.strip()
            return ""

        def _extract_duration_seconds(payload: Any) -> Optional[float]:
            if not isinstance(payload, dict):
                return None
            raw = payload.get("duration")
            if raw is None:
                raw = payload.get("duration_s")
            if raw is None:
                return None
            try:
                return float(raw)
            except Exception:
                return None

        def _extract_status(payload: Any) -> Dict[str, Any]:
            if isinstance(payload, str):
                return {"text": payload}
            if isinstance(payload, dict):
                text = _extract_text(payload)
                out: Dict[str, Any] = {"text": text}
                dur = _extract_duration_seconds(payload)
                if dur is not None:
                    out["duration"] = dur
                return out
            return {"text": str(payload or "")}

        def _extract_message(payload: Any) -> Dict[str, Any]:
            if isinstance(payload, str):
                return {"text": payload}
            if isinstance(payload, dict):
                text = _extract_text(payload)
                out: Dict[str, Any] = {"text": text}
                level = payload.get("level")
                if isinstance(level, str) and level.strip():
                    out["level"] = level.strip().lower()
                title = payload.get("title")
                if isinstance(title, str) and title.strip():
                    out["title"] = title.strip()
                meta = payload.get("meta")
                if isinstance(meta, dict):
                    out["meta"] = dict(meta)
                return out
            return {"text": str(payload or "")}

        def _extract_tool_exec(payload: Any) -> Dict[str, Any]:
            if isinstance(payload, str):
                return {"tool": payload, "args": {}}
            if isinstance(payload, dict):
                # Support both AbstractCore-normalized tool call shapes and common OpenAI-style shapes.
                #
                # Normalized (preferred):
                #   {"name": "...", "arguments": {...}, "call_id": "..."}
                #
                # OpenAI-ish:
                #   {"id": "...", "type":"function", "function":{"name":"...", "arguments":"{...json...}"}}
                tool = payload.get("tool") or payload.get("name") or payload.get("tool_name")
                args = payload.get("arguments")
                if args is None:
                    args = payload.get("args")
                call_id = payload.get("call_id") or payload.get("callId") or payload.get("id")

                fn = payload.get("function")
                if tool is None and isinstance(fn, dict):
                    tool = fn.get("name")
                if args is None and isinstance(fn, dict):
                    args = fn.get("arguments")

                parsed_args: Dict[str, Any] = {}
                if isinstance(args, dict):
                    parsed_args = dict(args)
                elif isinstance(args, str) and args.strip():
                    # Some providers send JSON arguments as a string.
                    try:
                        parsed = json.loads(args)
                        if isinstance(parsed, dict):
                            parsed_args = parsed
                    except Exception:
                        parsed_args = {}

                out: Dict[str, Any] = {"tool": str(tool or "tool"), "args": parsed_args}
                if isinstance(call_id, str) and call_id.strip():
                    out["call_id"] = call_id.strip()
                return out
            return {"tool": "tool", "args": {}}

        def _extract_tool_result(payload: Any) -> Dict[str, Any]:
            # Normalize to ReactShell's existing "observe" step contract:
            #   {tool, result (string), success?}
            tool = "tool"
            success = None
            result_str = ""
            if isinstance(payload, dict):
                tool_raw = payload.get("tool") or payload.get("name") or payload.get("tool_name")
                if isinstance(tool_raw, str) and tool_raw.strip():
                    tool = tool_raw.strip()
                if "success" in payload:
                    try:
                        success = bool(payload.get("success"))
                    except Exception:
                        success = None
                # Prefer output/result; fallback to error/value.
                raw = payload.get("output")
                if raw is None:
                    raw = payload.get("result")
                if raw is None:
                    raw = payload.get("error")
                if raw is None:
                    raw = payload.get("value")
                if raw is None:
                    raw = ""
                if isinstance(raw, str):
                    result_str = raw
                else:
                    try:
                        result_str = json.dumps(raw, ensure_ascii=False, sort_keys=True, indent=2)
                    except Exception:
                        result_str = str(raw)
            elif isinstance(payload, str):
                result_str = payload
            else:
                result_str = str(payload or "")
            out: Dict[str, Any] = {"tool": tool, "result": result_str}
            if success is not None:
                out["success"] = success
            return out

        def _on_record(rec: Dict[str, Any]) -> None:
            try:
                if rec.get("actor_id") != actor_id:
                    return
                if rec.get("session_id") != session_id:
                    return
                status = rec.get("status")
                status_str = status.value if hasattr(status, "value") else str(status or "")
                if status_str != "completed":
                    return
                eff = rec.get("effect")
                if not isinstance(eff, dict) or str(eff.get("type") or "") != "emit_event":
                    return
                payload = eff.get("payload") if isinstance(eff.get("payload"), dict) else {}
                name = str(payload.get("name") or payload.get("event_name") or "").strip()
                if not name:
                    return
                name = _normalize_ui_event_name(name)

                event_payload = payload.get("payload")
                if name == _STATUS_EVENT_NAME:
                    st = _extract_status(event_payload)
                    if callable(self.on_step) and str(st.get("text") or "").strip():
                        self.on_step("status", st)
                    return

                if name == _MESSAGE_EVENT_NAME:
                    msg = _extract_message(event_payload)
                    if callable(self.on_step) and str(msg.get("text") or "").strip():
                        self.on_step("message", msg)
                    return

                if name == _TOOL_EXEC_EVENT_NAME:
                    # Backwards-compatible: older emit_event nodes wrapped non-dict payloads under {"value": ...}.
                    raw_tc_payload = event_payload
                    if isinstance(raw_tc_payload, dict) and isinstance(raw_tc_payload.get("value"), list):
                        raw_tc_payload = raw_tc_payload.get("value")

                    if isinstance(raw_tc_payload, list):
                        for item in raw_tc_payload:
                            tc = _extract_tool_exec(item)
                            if callable(self.on_step) and str(tc.get("tool") or "").strip():
                                # Reuse AbstractCode's existing "tool call" UX.
                                self.on_step("act", tc)
                    else:
                        tc = _extract_tool_exec(raw_tc_payload)
                        if callable(self.on_step) and str(tc.get("tool") or "").strip():
                            # Reuse AbstractCode's existing "tool call" UX.
                            self.on_step("act", tc)
                    return

                if name == _TOOL_RESULT_EVENT_NAME:
                    raw_tr_payload = event_payload
                    if isinstance(raw_tr_payload, dict) and isinstance(raw_tr_payload.get("value"), list):
                        raw_tr_payload = raw_tr_payload.get("value")

                    if isinstance(raw_tr_payload, list):
                        for item in raw_tr_payload:
                            tr = _extract_tool_result(item)
                            if callable(self.on_step):
                                # Reuse AbstractCode's existing "tool result" UX.
                                self.on_step("observe", tr)
                    else:
                        tr = _extract_tool_result(raw_tr_payload)
                        if callable(self.on_step):
                            # Reuse AbstractCode's existing "tool result" UX.
                            self.on_step("observe", tr)
                    return
            except Exception:
                return

        try:
            unsub = self.runtime.subscribe_ledger(_on_record, run_id=None)
            return unsub if callable(unsub) else None
        except Exception:
            return None

    def _cleanup_ledger_subscription(self) -> None:
        unsub = self._ledger_unsubscribe
        self._ledger_unsubscribe = None
        if callable(unsub):
            try:
                unsub()
            except Exception:
                pass

    def _auto_wait_until(self, state: RunState) -> Optional[RunState]:
        """Best-effort: auto-drive short WAIT_UNTIL delays for workflow agents.

        Why:
        - Visual workflows commonly use Delay (WAIT_UNTIL) for UX pacing.
        - AbstractCode's agent run loop expects `step()` to keep making progress without
          manual `/resume` for short waits.

        Notes:
        - This is intentionally conservative: it yields back if the wait changes to a
          different reason (tool approvals, user prompts, pauses).
        - Cancellation/pause are polled so control-plane actions remain responsive.
        """
        waiting = getattr(state, "waiting", None)
        if waiting is None:
            return None

        reason = getattr(waiting, "reason", None)
        reason_value = reason.value if hasattr(reason, "value") else str(reason or "")
        if reason_value != "until":
            return None

        until_raw = getattr(waiting, "until", None)
        if not isinstance(until_raw, str) or not until_raw.strip():
            return None

        def _parse_until_iso(value: str) -> Optional[datetime]:
            s = str(value or "").strip()
            if not s:
                return None
            # Accept both "+00:00" and "Z"
            if s.endswith("Z"):
                s = s[:-1] + "+00:00"
            try:
                dt = datetime.fromisoformat(s)
            except Exception:
                return None
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo=timezone.utc)
            return dt.astimezone(timezone.utc)

        until_dt = _parse_until_iso(until_raw)
        if until_dt is None:
            return None

        import time

        # Cap auto-wait to avoid surprising "hangs" for long schedules.
        max_auto_wait_s = 30.0

        while True:
            try:
                latest = self.runtime.get_state(state.run_id)
            except Exception:
                latest = state

            # Stop if externally controlled or otherwise no longer a time wait.
            if getattr(latest, "status", None) in (RunStatus.CANCELLED, RunStatus.FAILED, RunStatus.COMPLETED):
                return latest

            latest_wait = getattr(latest, "waiting", None)
            if latest_wait is None:
                return latest
            r = getattr(latest_wait, "reason", None)
            r_val = r.value if hasattr(r, "value") else str(r or "")
            if r_val != "until":
                # Another wait type (pause/user/tool/event/subworkflow) should be handled by the host.
                return latest

            now = datetime.now(timezone.utc)
            remaining = (until_dt - now).total_seconds()
            if remaining <= 0:
                # Runtime.tick will auto-unblock on the next call.
                return None

            if remaining > max_auto_wait_s:
                # Leave it waiting; user can /resume later.
                return latest

            time.sleep(min(0.25, max(0.0, float(remaining))))

    def _auto_drive_subworkflow_wait(self, state: RunState) -> Optional[RunState]:
        """Best-effort: drive async SUBWORKFLOW waits for non-interactive hosts.

        Visual subflow nodes are compiled into async+wait subworkflow effects so
        interactive hosts (e.g. web) can stream nested runs. AbstractCode's agent
        loop expects `step()` to keep progressing without needing an external
        sub-run driver, so we tick sub-runs and bubble their completions up.
        """
        from abstractruntime.core.models import WaitReason

        waiting = getattr(state, "waiting", None)
        if waiting is None or getattr(waiting, "reason", None) != WaitReason.SUBWORKFLOW:
            return None

        top_run_id = str(getattr(state, "run_id", "") or "")
        if not top_run_id:
            return None

        def _extract_sub_run_id(wait_state: object) -> Optional[str]:
            details = getattr(wait_state, "details", None)
            if isinstance(details, dict):
                sub_run_id = details.get("sub_run_id")
                if isinstance(sub_run_id, str) and sub_run_id:
                    return sub_run_id
            wait_key = getattr(wait_state, "wait_key", None)
            if isinstance(wait_key, str) and wait_key.startswith("subworkflow:"):
                return wait_key.split("subworkflow:", 1)[1] or None
            return None

        def _workflow_for(run_state: object) -> Any:
            reg = getattr(self.runtime, "workflow_registry", None)
            getter = getattr(reg, "get", None) if reg is not None else None
            if callable(getter):
                wf = getter(getattr(run_state, "workflow_id", ""))
                if wf is not None:
                    return wf
            if getattr(self.workflow, "workflow_id", None) == getattr(run_state, "workflow_id", None):
                return self.workflow
            raise RuntimeError(f"Workflow '{getattr(run_state, 'workflow_id', '')}' not found in runtime registry")

        def _bubble_completion(child_state: object) -> Optional[str]:
            parent_id = getattr(child_state, "parent_run_id", None)
            if not isinstance(parent_id, str) or not parent_id:
                return None
            parent_state = self.runtime.get_state(parent_id)
            parent_wait = getattr(parent_state, "waiting", None)
            if parent_state.status != RunStatus.WAITING or parent_wait is None:
                return None
            if parent_wait.reason != WaitReason.SUBWORKFLOW:
                return None
            self.runtime.resume(
                workflow=_workflow_for(parent_state),
                run_id=parent_id,
                wait_key=None,
                payload={
                    "sub_run_id": getattr(child_state, "run_id", None),
                    "output": getattr(child_state, "output", None),
                    "node_traces": self.runtime.get_node_traces(getattr(child_state, "run_id", "")),
                },
                max_steps=0,
            )
            return parent_id

        # Drive subruns until we either make progress or hit a non-subworkflow wait.
        for _ in range(200):
            # Descend to the deepest sub-run referenced by SUBWORKFLOW waits.
            current_run_id = top_run_id
            for _ in range(25):
                cur_state = self.runtime.get_state(current_run_id)
                cur_wait = getattr(cur_state, "waiting", None)
                if cur_state.status != RunStatus.WAITING or cur_wait is None:
                    break
                if cur_wait.reason != WaitReason.SUBWORKFLOW:
                    break
                next_id = _extract_sub_run_id(cur_wait)
                if not next_id:
                    break
                current_run_id = next_id

            current_state = self.runtime.get_state(current_run_id)

            # Tick running subruns until they block/complete.
            if current_state.status == RunStatus.RUNNING:
                current_state = self.runtime.tick(
                    workflow=_workflow_for(current_state),
                    run_id=current_run_id,
                    max_steps=100,
                )

            if current_state.status == RunStatus.RUNNING:
                continue

            if current_state.status in (RunStatus.FAILED, RunStatus.CANCELLED):
                return current_state

            if current_state.status == RunStatus.WAITING:
                cur_wait = getattr(current_state, "waiting", None)
                if cur_wait is None:
                    break
                if cur_wait.reason == WaitReason.SUBWORKFLOW:
                    continue
                # Blocked on a real wait (USER/EVENT/UNTIL/...): stop auto-driving.
                return self.runtime.get_state(top_run_id)

            if current_state.status == RunStatus.COMPLETED:
                parent_id = _bubble_completion(current_state)
                if parent_id is None:
                    return self.runtime.get_state(top_run_id)
                continue

        return self.runtime.get_state(top_run_id)

    def step(self) -> RunState:
        if not self._current_run_id:
            raise RuntimeError("No active run. Call start() first.")

        state = self.runtime.tick(workflow=self.workflow, run_id=self._current_run_id, max_steps=1)

        # Auto-drive short time waits (Delay node) so workflow agents can use pacing
        # without requiring manual `/resume`.
        if state.status == RunStatus.WAITING:
            advanced = self._auto_wait_until(state)
            if isinstance(advanced, RunState):
                state = advanced
            elif advanced is None:
                # Time passed (or will pass within our polling loop): continue ticking once.
                state = self.runtime.tick(workflow=self.workflow, run_id=self._current_run_id, max_steps=1)

        if state.status == RunStatus.WAITING:
            driven = self._auto_drive_subworkflow_wait(state)
            if isinstance(driven, RunState):
                state = driven

        if state.status == RunStatus.COMPLETED:
            response_text = ""
            meta_out: Dict[str, Any] = {}
            scratchpad_out: Any = None
            workflow_success: Optional[bool] = None
            out = getattr(state, "output", None)
            if isinstance(out, dict):
                def _pick_textish(value: Any) -> str:
                    if isinstance(value, str):
                        return value.strip()
                    if value is None:
                        return ""
                    if isinstance(value, bool):
                        return str(value).lower()
                    if isinstance(value, (int, float)):
                        return str(value)
                    return ""

                payload = out.get("result") if isinstance(out.get("result"), dict) else out

                response_text = _pick_textish(payload.get("response"))
                if not response_text:
                    response_text = (
                        _pick_textish(payload.get("answer"))
                        or _pick_textish(payload.get("message"))
                        or _pick_textish(payload.get("text"))
                        or _pick_textish(payload.get("content"))
                    )
                if not response_text and isinstance(out.get("result"), str):
                    response_text = str(out.get("result") or "").strip()

                if isinstance(payload.get("success"), bool):
                    workflow_success = bool(payload.get("success"))

                raw_meta = payload.get("meta")
                if isinstance(raw_meta, dict):
                    meta_out = dict(raw_meta)
                scratchpad_out = payload.get("scratchpad")
                if scratchpad_out is None and isinstance(out.get("scratchpad"), (dict, list, str, int, float, bool)):
                    scratchpad_out = out.get("scratchpad")

                # Backward-compat: older runs used meta.success instead of a first-class pin.
                if workflow_success is None and isinstance(meta_out.get("success"), bool):
                    workflow_success = bool(meta_out.get("success"))

                # Fallback: if the workflow doesn't expose success, treat run completion as success.
                if workflow_success is None and isinstance(out.get("success"), bool):
                    workflow_success = bool(out.get("success"))
            if workflow_success is None:
                workflow_success = True

            task = str(self._last_task or "")
            ctx = state.vars.get("context") if isinstance(getattr(state, "vars", None), dict) else None
            if not isinstance(ctx, dict):
                ctx = {"task": task, "messages": []}
                state.vars["context"] = ctx

            msgs_raw = ctx.get("messages")
            msgs = _copy_messages(msgs_raw)
            msgs.append(_new_message(role="user", content=task))

            assistant_meta: Dict[str, Any] = {}
            if meta_out:
                assistant_meta["workflow_meta"] = meta_out
            if scratchpad_out is not None:
                assistant_meta["workflow_scratchpad"] = scratchpad_out
            if workflow_success is not None:
                assistant_meta["workflow_success"] = workflow_success

            msgs.append(_new_message(role="assistant", content=response_text, metadata=assistant_meta))
            ctx["messages"] = msgs

            # Persist best-effort so restarts can load history from run state.
            store = getattr(self.runtime, "run_store", None) or getattr(self.runtime, "_run_store", None)
            save = getattr(store, "save", None)
            if callable(save):
                try:
                    save(state)
                except Exception:
                    pass

            self.session_messages = list(msgs)

            if self.on_step:
                try:
                    self.on_step(
                        "done",
                        {
                            "answer": response_text,
                            "success": workflow_success,
                            "meta": meta_out or None,
                            "scratchpad": scratchpad_out,
                        },
                    )
                except Exception:
                    pass
            self._cleanup_ledger_subscription()

        if state.status in (RunStatus.FAILED, RunStatus.CANCELLED):
            self._sync_session_caches_from_state(state)
            self._cleanup_ledger_subscription()

        return state


def dump_visual_flow_json(flow: Any) -> str:
    """Debug helper for printing a VisualFlow as JSON (used in tests)."""
    try:
        return flow.model_dump_json(indent=2)
    except Exception:
        try:
            data = flow.model_dump()
        except Exception:
            data = {}
        return json.dumps(data, indent=2, ensure_ascii=False, default=str)
```