abstractcode 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,894 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from dataclasses import dataclass
5
+ from datetime import datetime, timezone
6
+ from pathlib import Path
7
+ from typing import Any, Callable, Dict, List, Optional, Tuple
8
+
9
+ from abstractagent.agents.base import BaseAgent
10
+ from abstractruntime import RunState, RunStatus, Runtime, WorkflowSpec
11
+
12
+
13
+ def _now_iso() -> str:
14
+ return datetime.now(timezone.utc).isoformat()
15
+
16
+ _STATUS_EVENT_NAME = "abstractcode.status"
17
+ _MESSAGE_EVENT_NAME = "abstractcode.message"
18
+ _TOOL_EXEC_EVENT_NAME = "abstractcode.tool_execution"
19
+ _TOOL_RESULT_EVENT_NAME = "abstractcode.tool_result"
20
+
21
+
22
+ def _new_message(*, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
23
+ from uuid import uuid4
24
+
25
+ meta: Dict[str, Any] = dict(metadata) if isinstance(metadata, dict) else {}
26
+ meta.setdefault("message_id", f"msg_{uuid4().hex}")
27
+ return {
28
+ "role": str(role or "").strip() or "user",
29
+ "content": str(content or ""),
30
+ "timestamp": _now_iso(),
31
+ "metadata": meta,
32
+ }
33
+
34
+
35
+ def _copy_messages(messages: Any) -> List[Dict[str, Any]]:
36
+ if not isinstance(messages, list):
37
+ return []
38
+ out: List[Dict[str, Any]] = []
39
+ for m in messages:
40
+ if isinstance(m, dict):
41
+ out.append(dict(m))
42
+ return out
43
+
44
+
45
+ @dataclass(frozen=True)
46
+ class ResolvedVisualFlow:
47
+ visual_flow: Any
48
+ flows: Dict[str, Any]
49
+ flows_dir: Path
50
+
51
+
52
+ def _default_flows_dir() -> Path:
53
+ try:
54
+ from .flow_cli import default_flows_dir
55
+
56
+ return default_flows_dir()
57
+ except Exception:
58
+ return Path("flows")
59
+
60
+
61
+ def _load_visual_flows(flows_dir: Path) -> Dict[str, Any]:
62
+ try:
63
+ from abstractflow.visual.models import VisualFlow
64
+ except Exception as e: # pragma: no cover
65
+ raise RuntimeError(
66
+ "AbstractFlow is required to run VisualFlow workflows.\n"
67
+ 'Install with: pip install "abstractcode[flow]"'
68
+ ) from e
69
+
70
+ flows: Dict[str, Any] = {}
71
+ if not flows_dir.exists():
72
+ return flows
73
+ for path in sorted(flows_dir.glob("*.json")):
74
+ try:
75
+ raw = path.read_text(encoding="utf-8")
76
+ vf = VisualFlow.model_validate_json(raw)
77
+ except Exception:
78
+ continue
79
+ flows[str(vf.id)] = vf
80
+ return flows
81
+
82
+
83
+ def resolve_visual_flow(flow_ref: str, *, flows_dir: Optional[str]) -> ResolvedVisualFlow:
84
+ """Resolve a VisualFlow by id, name, or path to a .json file."""
85
+ ref = str(flow_ref or "").strip()
86
+ if not ref:
87
+ raise ValueError("flow reference is required (flow id, name, or .json path)")
88
+
89
+ path = Path(ref).expanduser()
90
+ flows_dir_path: Path
91
+ if path.exists() and path.is_file():
92
+ try:
93
+ raw = path.read_text(encoding="utf-8")
94
+ except Exception as e:
95
+ raise ValueError(f"Cannot read flow file: {path}") from e
96
+
97
+ try:
98
+ from abstractflow.visual.models import VisualFlow
99
+ except Exception as e: # pragma: no cover
100
+ raise RuntimeError(
101
+ "AbstractFlow is required to run VisualFlow workflows.\n"
102
+ 'Install with: pip install "abstractcode[flow]"'
103
+ ) from e
104
+
105
+ vf = VisualFlow.model_validate_json(raw)
106
+ flows_dir_path = Path(flows_dir).expanduser().resolve() if flows_dir else path.parent.resolve()
107
+ flows = _load_visual_flows(flows_dir_path)
108
+ flows[str(vf.id)] = vf
109
+ return ResolvedVisualFlow(visual_flow=vf, flows=flows, flows_dir=flows_dir_path)
110
+
111
+ flows_dir_path = Path(flows_dir).expanduser().resolve() if flows_dir else _default_flows_dir().resolve()
112
+ flows = _load_visual_flows(flows_dir_path)
113
+
114
+ if ref in flows:
115
+ return ResolvedVisualFlow(visual_flow=flows[ref], flows=flows, flows_dir=flows_dir_path)
116
+
117
+ # Fall back to exact name match (case-insensitive).
118
+ matches = []
119
+ needle = ref.casefold()
120
+ for vf in flows.values():
121
+ name = getattr(vf, "name", None)
122
+ if isinstance(name, str) and name.strip() and name.strip().casefold() == needle:
123
+ matches.append(vf)
124
+
125
+ if not matches:
126
+ raise ValueError(f"Flow '{ref}' not found in {flows_dir_path}")
127
+ if len(matches) > 1:
128
+ options = ", ".join([f"{getattr(v, 'name', '')} ({getattr(v, 'id', '')})" for v in matches])
129
+ raise ValueError(f"Multiple flows match '{ref}': {options}")
130
+
131
+ vf = matches[0]
132
+ return ResolvedVisualFlow(visual_flow=vf, flows=flows, flows_dir=flows_dir_path)
133
+
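# Illustrative usage sketch (not part of the packaged module). The path
# "flows/triage.json" and the name "Triage" are hypothetical; only the call
# shape and the ResolvedVisualFlow fields come from the function above.
resolved = resolve_visual_flow("flows/triage.json", flows_dir=None)           # by explicit path
by_name = resolve_visual_flow("Triage", flows_dir=str(resolved.flows_dir))    # by name or id
print(by_name.visual_flow.id, sorted(by_name.flows))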
134
+
135
+ def _tool_definitions_from_callables(tools: List[Callable[..., Any]]) -> List[Any]:
136
+ from abstractcore.tools import ToolDefinition
137
+
138
+ out: List[Any] = []
139
+ for t in tools:
140
+ tool_def = getattr(t, "_tool_definition", None) or ToolDefinition.from_function(t)
141
+ out.append(tool_def)
142
+ return out
143
+
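# Illustrative sketch, assuming ToolDefinition.from_function can build a
# definition from an ordinary annotated callable (which is what the helper
# above relies on). `read_file` is a hypothetical tool.
def read_file(path: str) -> str:
    """Return the contents of a UTF-8 text file."""
    with open(path, "r", encoding="utf-8") as fh:
        return fh.read()

tool_defs = _tool_definitions_from_callables([read_file])  # -> one ToolDefinition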
144
+
145
+ def _workflow_registry() -> Any:
146
+ try:
147
+ from abstractruntime import WorkflowRegistry # type: ignore
148
+
149
+ return WorkflowRegistry()
150
+ except Exception: # pragma: no cover
151
+ try:
152
+ from abstractruntime.scheduler.registry import WorkflowRegistry # type: ignore
153
+
154
+ return WorkflowRegistry()
155
+ except Exception: # pragma: no cover
156
+
157
+ class WorkflowRegistry(dict): # type: ignore[no-redef]
158
+ def register(self, workflow: Any) -> None:
159
+ self[str(getattr(workflow, "workflow_id", ""))] = workflow
160
+
161
+ return WorkflowRegistry()
162
+
163
+
164
+ def _node_type_str(node: Any) -> str:
165
+ t = getattr(node, "type", None)
166
+ return t.value if hasattr(t, "value") else str(t or "")
167
+
168
+
169
+ def _subflow_id(node: Any) -> Optional[str]:
170
+ data = getattr(node, "data", None)
171
+ if not isinstance(data, dict):
172
+ return None
173
+ sid = data.get("subflowId") or data.get("flowId") or data.get("workflowId") or data.get("workflow_id")
174
+ if isinstance(sid, str) and sid.strip():
175
+ return sid.strip()
176
+ return None
177
+
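# Example `node.data` payloads accepted by _subflow_id (values illustrative);
# each resolves to "flow_b":
#   {"subflowId": "flow_b"}
#   {"flowId": "flow_b"}
#   {"workflowId": "flow_b"}   or   {"workflow_id": "flow_b"}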
178
+
179
+ def _compile_visual_flow_tree(
180
+ *,
181
+ root: Any,
182
+ flows: Dict[str, Any],
183
+ tools: List[Callable[..., Any]],
184
+ runtime: Runtime,
185
+ ) -> Tuple[WorkflowSpec, Any]:
186
+ from abstractflow.compiler import compile_flow
187
+ from abstractflow.visual.agent_ids import visual_react_workflow_id
188
+ from abstractflow.visual.executor import visual_to_flow
189
+
190
+ # Collect referenced subflows (cycles are allowed; compile/register each id once).
191
+ ordered: List[Any] = []
192
+ seen: set[str] = set()
193
+ queue: List[str] = [str(getattr(root, "id", "") or "")]
194
+
195
+ while queue:
196
+ fid = queue.pop(0)
197
+ if not fid or fid in seen:
198
+ continue
199
+ vf = flows.get(fid)
200
+ if vf is None:
201
+ raise ValueError(f"Subflow '{fid}' not found in loaded flows")
202
+ seen.add(fid)
203
+ ordered.append(vf)
204
+
205
+ for n in getattr(vf, "nodes", []) or []:
206
+ if _node_type_str(n) != "subflow":
207
+ continue
208
+ sid = _subflow_id(n)
209
+ if sid:
210
+ queue.append(sid)
211
+
212
+ registry = _workflow_registry()
213
+
214
+ specs_by_id: Dict[str, WorkflowSpec] = {}
215
+ for vf in ordered:
216
+ f = visual_to_flow(vf)
217
+ spec = compile_flow(f)
218
+ specs_by_id[str(spec.workflow_id)] = spec
219
+ register = getattr(registry, "register", None)
220
+ if callable(register):
221
+ register(spec)
222
+ else:
223
+ registry[str(spec.workflow_id)] = spec
224
+
225
+ # Register per-Agent-node ReAct subworkflows so visual Agent nodes can run.
226
+ agent_nodes: List[Tuple[str, Dict[str, Any]]] = []
227
+ for vf in ordered:
228
+ for n in getattr(vf, "nodes", []) or []:
229
+ if _node_type_str(n) != "agent":
230
+ continue
231
+ data = getattr(n, "data", None)
232
+ cfg = data.get("agentConfig", {}) if isinstance(data, dict) else {}
233
+ cfg = dict(cfg) if isinstance(cfg, dict) else {}
234
+ wf_id_raw = cfg.get("_react_workflow_id")
235
+ wf_id = (
236
+ wf_id_raw.strip()
237
+ if isinstance(wf_id_raw, str) and wf_id_raw.strip()
238
+ else visual_react_workflow_id(flow_id=vf.id, node_id=n.id)
239
+ )
240
+ agent_nodes.append((wf_id, cfg))
241
+
242
+ if agent_nodes:
243
+ from abstractagent.adapters.react_runtime import create_react_workflow
244
+ from abstractagent.logic.builtins import (
245
+ ASK_USER_TOOL,
246
+ COMPACT_MEMORY_TOOL,
247
+ INSPECT_VARS_TOOL,
248
+ RECALL_MEMORY_TOOL,
249
+ REMEMBER_NOTE_TOOL,
250
+ REMEMBER_TOOL,
251
+ )
252
+ from abstractagent.logic.react import ReActLogic
253
+
254
+ def _normalize_tool_names(raw: Any) -> List[str]:
255
+ if not isinstance(raw, list):
256
+ return []
257
+ out: List[str] = []
258
+ for t in raw:
259
+ if isinstance(t, str) and t.strip():
260
+ out.append(t.strip())
261
+ return out
262
+
263
+ tool_defs = [
264
+ ASK_USER_TOOL,
265
+ RECALL_MEMORY_TOOL,
266
+ INSPECT_VARS_TOOL,
267
+ REMEMBER_TOOL,
268
+ REMEMBER_NOTE_TOOL,
269
+ COMPACT_MEMORY_TOOL,
270
+ *_tool_definitions_from_callables(tools),
271
+ ]
272
+
273
+ for workflow_id, cfg in agent_nodes:
274
+ tools_selected = _normalize_tool_names(cfg.get("tools"))
275
+ logic = ReActLogic(tools=tool_defs, max_tokens=None)
276
+ sub = create_react_workflow(
277
+ logic=logic,
278
+ workflow_id=workflow_id,
279
+ provider=None,
280
+ model=None,
281
+ allowed_tools=tools_selected,
282
+ on_step=None,
283
+ )
284
+ register = getattr(registry, "register", None)
285
+ if callable(register):
286
+ register(sub)
287
+ else:
288
+ registry[str(sub.workflow_id)] = sub
289
+
290
+ if hasattr(runtime, "set_workflow_registry"):
291
+ runtime.set_workflow_registry(registry) # type: ignore[call-arg]
292
+ else: # pragma: no cover
293
+ raise RuntimeError("Runtime does not support workflow registries (required for subflows/agent nodes).")
294
+
295
+ root_id = str(getattr(root, "id", "") or "")
296
+ root_spec = specs_by_id.get(root_id)
297
+ if root_spec is None:
298
+ # Shouldn't happen because root id was seeded into the queue.
299
+ raise RuntimeError(f"Root workflow '{root_id}' was not compiled/registered.")
300
+ return root_spec, registry
301
+
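# Illustrative sketch of compiling a resolved flow tree. Constructing the
# Runtime is out of scope here (assumed available as `runtime`); the keyword
# shape of the call and the (spec, registry) return come from the function above.
resolved = resolve_visual_flow("Triage", flows_dir="flows")  # hypothetical flow name
spec, registry = _compile_visual_flow_tree(
    root=resolved.visual_flow,
    flows=resolved.flows,
    tools=[],            # plain callables, if any, for agent-node tool definitions
    runtime=runtime,     # abstractruntime.Runtime; must support set_workflow_registry
)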
302
+
303
+ class WorkflowAgent(BaseAgent):
304
+ """Run a VisualFlow workflow as an AbstractCode agent.
305
+
306
+ Contract: the workflow must declare `interfaces: ["abstractcode.agent.v1"]` and expose:
307
+ - On Flow Start output pin: `request` (string)
308
+ - On Flow End input pin: `response` (string)
309
+ """
310
+
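# How the interface contract maps onto run vars/output (as wired by start()
# and step() below):
#   start(task)    ->  vars["request"] = task                  # On Flow Start "request" pin
#   run completes  ->  output["response"] (or output["result"]["response"])
#                      becomes the assistant answer            # On Flow End "response" pin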
311
+ def __init__(
312
+ self,
313
+ *,
314
+ runtime: Runtime,
315
+ flow_ref: str,
316
+ flows_dir: Optional[str] = None,
317
+ tools: Optional[List[Callable[..., Any]]] = None,
318
+ on_step: Optional[Callable[[str, Dict[str, Any]], None]] = None,
319
+ max_iterations: int = 25,
320
+ max_tokens: Optional[int] = None,
321
+ actor_id: Optional[str] = None,
322
+ session_id: Optional[str] = None,
323
+ ):
324
+ self._max_iterations = int(max_iterations) if isinstance(max_iterations, int) else 25
325
+ if self._max_iterations < 1:
326
+ self._max_iterations = 1
327
+ self._max_tokens = max_tokens
328
+ self._flow_ref = str(flow_ref or "").strip()
329
+ if not self._flow_ref:
330
+ raise ValueError("flow_ref is required")
331
+
332
+ resolved = resolve_visual_flow(self._flow_ref, flows_dir=flows_dir)
333
+ self.visual_flow = resolved.visual_flow
334
+ self.flows = resolved.flows
335
+ self.flows_dir = resolved.flows_dir
336
+
337
+ # Validate interface contract before creating the workflow spec.
338
+ try:
339
+ from abstractflow.visual.interfaces import (
340
+ ABSTRACTCODE_AGENT_V1,
341
+ apply_visual_flow_interface_scaffold,
342
+ validate_visual_flow_interface,
343
+ )
344
+ except Exception as e: # pragma: no cover
345
+ raise RuntimeError(
346
+ "AbstractFlow is required to validate VisualFlow interfaces.\n"
347
+ 'Install with: pip install "abstractcode[flow]"'
348
+ ) from e
349
+
350
+ # Authoring UX: keep interface-marked flows scaffolded even if the underlying
351
+ # JSON was created before the contract expanded (or was edited manually).
352
+ try:
353
+ apply_visual_flow_interface_scaffold(self.visual_flow, ABSTRACTCODE_AGENT_V1, include_recommended=True)
354
+ except Exception:
355
+ pass
356
+
357
+ errors = validate_visual_flow_interface(self.visual_flow, ABSTRACTCODE_AGENT_V1)
358
+ if errors:
359
+ joined = "\n".join([f"- {e}" for e in errors])
360
+ raise ValueError(f"Workflow does not implement '{ABSTRACTCODE_AGENT_V1}':\n{joined}")
361
+
362
+ self._last_task: Optional[str] = None
363
+ self._ledger_unsubscribe: Optional[Callable[[], None]] = None
364
+ self._node_labels_by_id: Dict[str, str] = {}
365
+
366
+ super().__init__(
367
+ runtime=runtime,
368
+ tools=tools,
369
+ on_step=on_step,
370
+ actor_id=actor_id,
371
+ session_id=session_id,
372
+ )
373
+
374
+ def _create_workflow(self) -> WorkflowSpec:
375
+ tools = list(self.tools or [])
376
+ spec, _registry = _compile_visual_flow_tree(root=self.visual_flow, flows=self.flows, tools=tools, runtime=self.runtime)
377
+ return spec
378
+
379
+ def start(self, task: str, *, allowed_tools: Optional[List[str]] = None, **_: Any) -> str:
380
+ task = str(task or "").strip()
381
+ if not task:
382
+ raise ValueError("task must be a non-empty string")
383
+
384
+ self._last_task = task
385
+
386
+ try:
387
+ base_limits = dict(self.runtime.config.to_limits_dict())
388
+ except Exception:
389
+ base_limits = {}
390
+ limits: Dict[str, Any] = dict(base_limits)
391
+ limits.setdefault("warn_iterations_pct", 80)
392
+ limits.setdefault("warn_tokens_pct", 80)
393
+ limits["max_iterations"] = int(self._max_iterations)
394
+ limits["current_iteration"] = 0
395
+ limits.setdefault("max_history_messages", -1)
396
+ limits.setdefault("estimated_tokens_used", 0)
397
+ if self._max_tokens is not None:
398
+ try:
399
+ mt = int(self._max_tokens)
400
+ except Exception:
401
+ mt = None
402
+ if isinstance(mt, int) and mt > 0:
403
+ limits["max_tokens"] = mt
404
+
405
+ runtime_provider = getattr(getattr(self.runtime, "config", None), "provider", None)
406
+ runtime_model = getattr(getattr(self.runtime, "config", None), "model", None)
407
+
408
+ vars: Dict[str, Any] = {
409
+ "request": task,
410
+ "context": {"task": task, "messages": _copy_messages(self.session_messages)},
411
+ "_temp": {},
412
+ "_limits": limits,
413
+ }
414
+
415
+ if isinstance(runtime_provider, str) and runtime_provider.strip():
416
+ vars["provider"] = runtime_provider.strip()
417
+ if isinstance(runtime_model, str) and runtime_model.strip():
418
+ vars["model"] = runtime_model.strip()
419
+
420
+ if isinstance(allowed_tools, list):
421
+ normalized = [str(t).strip() for t in allowed_tools if isinstance(t, str) and t.strip()]
422
+ vars["tools"] = normalized
423
+ vars["_runtime"] = {"allowed_tools": normalized}
424
+ else:
425
+ # Provide a safe default so interface-scaffolded `tools` pins resolve.
426
+ vars["tools"] = []
427
+
428
+ actor_id = self._ensure_actor_id()
429
+ session_id = self._ensure_session_id()
430
+
431
+ run_id = self.runtime.start(
432
+ workflow=self.workflow,
433
+ vars=vars,
434
+ actor_id=actor_id,
435
+ session_id=session_id,
436
+ )
437
+ self._current_run_id = run_id
438
+
439
+ # Build a stable node_id -> label map for UX (used for status updates).
440
+ try:
441
+ labels: Dict[str, str] = {}
442
+ for n in getattr(self.visual_flow, "nodes", []) or []:
443
+ nid = getattr(n, "id", None)
444
+ if not isinstance(nid, str) or not nid:
445
+ continue
446
+ data = getattr(n, "data", None)
447
+ label = data.get("label") if isinstance(data, dict) else None
448
+ if isinstance(label, str) and label.strip():
449
+ labels[nid] = label.strip()
450
+ self._node_labels_by_id = labels
451
+ except Exception:
452
+ self._node_labels_by_id = {}
453
+
454
+ # Subscribe to ledger records so we can surface real-time status updates
455
+ # even while a blocking effect (LLM/tool HTTP) is in-flight.
456
+ self._ledger_unsubscribe = None
457
+ if self.on_step:
458
+ try:
459
+ self._ledger_unsubscribe = self._subscribe_ui_events(actor_id=actor_id, session_id=session_id)
460
+ except Exception:
461
+ self._ledger_unsubscribe = None
462
+
463
+ if self.on_step:
464
+ try:
465
+ self.on_step(
466
+ "init",
467
+ {
468
+ "flow_id": str(getattr(self.visual_flow, "id", "") or ""),
469
+ "flow_name": str(getattr(self.visual_flow, "name", "") or ""),
470
+ },
471
+ )
472
+ except Exception:
473
+ pass
474
+
475
+ return run_id
476
+
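# Example of the initial run vars produced by start() (values illustrative):
#   {
#     "request": "Summarize the README",
#     "context": {"task": "Summarize the README", "messages": [...]},
#     "_temp": {},
#     "_limits": {"max_iterations": 25, "current_iteration": 0,
#                 "warn_iterations_pct": 80, "warn_tokens_pct": 80,
#                 "max_history_messages": -1, "estimated_tokens_used": 0},
#     "provider": "openai", "model": "gpt-4o-mini",   # only if set on runtime.config
#     "tools": ["read_file", "web_search"],           # normalized only when a list is passed
#     "_runtime": {"allowed_tools": ["read_file", "web_search"]},
#   }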
477
+ def _subscribe_ui_events(self, *, actor_id: str, session_id: str) -> Optional[Callable[[], None]]:
478
+ """Subscribe to ledger appends and translate reserved workflow UX events into on_step(...).
479
+
480
+ This is best-effort and must never affect correctness.
481
+ """
482
+
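# Shape of the ledger records this subscriber reacts to (other fields may be
# present; values illustrative). Only completed `emit_event` effects whose
# payload name matches one of the reserved abstractcode.* names are surfaced:
#   {
#     "actor_id": "<actor>", "session_id": "<session>", "status": "completed",
#     "effect": {
#       "type": "emit_event",
#       "payload": {"name": "abstractcode.status",
#                   "payload": {"text": "Searching the web...", "duration": 1.4}},
#     },
#   }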
483
+ def _extract_text(payload: Any) -> str:
484
+ if isinstance(payload, str):
485
+ return payload
486
+ if isinstance(payload, dict):
487
+ v0 = payload.get("value")
488
+ if isinstance(v0, str) and v0.strip():
489
+ return v0.strip()
490
+ for k in ("text", "message", "status"):
491
+ v = payload.get(k)
492
+ if isinstance(v, str) and v.strip():
493
+ return v.strip()
494
+ return ""
495
+
496
+ def _extract_duration_seconds(payload: Any) -> Optional[float]:
497
+ if not isinstance(payload, dict):
498
+ return None
499
+ raw = payload.get("duration")
500
+ if raw is None:
501
+ raw = payload.get("duration_s")
502
+ if raw is None:
503
+ return None
504
+ try:
505
+ return float(raw)
506
+ except Exception:
507
+ return None
508
+
509
+ def _extract_status(payload: Any) -> Dict[str, Any]:
510
+ if isinstance(payload, str):
511
+ return {"text": payload}
512
+ if isinstance(payload, dict):
513
+ text = _extract_text(payload)
514
+ out: Dict[str, Any] = {"text": text}
515
+ dur = _extract_duration_seconds(payload)
516
+ if dur is not None:
517
+ out["duration"] = dur
518
+ return out
519
+ return {"text": str(payload or "")}
520
+
521
+ def _extract_message(payload: Any) -> Dict[str, Any]:
522
+ if isinstance(payload, str):
523
+ return {"text": payload}
524
+ if isinstance(payload, dict):
525
+ text = _extract_text(payload)
526
+ out: Dict[str, Any] = {"text": text}
527
+ level = payload.get("level")
528
+ if isinstance(level, str) and level.strip():
529
+ out["level"] = level.strip().lower()
530
+ title = payload.get("title")
531
+ if isinstance(title, str) and title.strip():
532
+ out["title"] = title.strip()
533
+ meta = payload.get("meta")
534
+ if isinstance(meta, dict):
535
+ out["meta"] = dict(meta)
536
+ return out
537
+ return {"text": str(payload or "")}
538
+
539
+ def _extract_tool_exec(payload: Any) -> Dict[str, Any]:
540
+ if isinstance(payload, str):
541
+ return {"tool": payload, "args": {}}
542
+ if isinstance(payload, dict):
543
+ # Support both AbstractCore-normalized tool call shapes and common OpenAI-style shapes.
544
+ #
545
+ # Normalized (preferred):
546
+ # {"name": "...", "arguments": {...}, "call_id": "..."}
547
+ #
548
+ # OpenAI-ish:
549
+ # {"id": "...", "type":"function", "function":{"name":"...", "arguments":"{...json...}"}}
550
+ tool = payload.get("tool") or payload.get("name") or payload.get("tool_name")
551
+ args = payload.get("arguments")
552
+ if args is None:
553
+ args = payload.get("args")
554
+ call_id = payload.get("call_id") or payload.get("callId") or payload.get("id")
555
+
556
+ fn = payload.get("function")
557
+ if tool is None and isinstance(fn, dict):
558
+ tool = fn.get("name")
559
+ if args is None and isinstance(fn, dict):
560
+ args = fn.get("arguments")
561
+
562
+ parsed_args: Dict[str, Any] = {}
563
+ if isinstance(args, dict):
564
+ parsed_args = dict(args)
565
+ elif isinstance(args, str) and args.strip():
566
+ # Some providers send JSON arguments as a string.
567
+ try:
568
+ parsed = json.loads(args)
569
+ if isinstance(parsed, dict):
570
+ parsed_args = parsed
571
+ except Exception:
572
+ parsed_args = {}
573
+
574
+ out: Dict[str, Any] = {"tool": str(tool or "tool"), "args": parsed_args}
575
+ if isinstance(call_id, str) and call_id.strip():
576
+ out["call_id"] = call_id.strip()
577
+ return out
578
+ return {"tool": "tool", "args": {}}
579
+
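# Both payload shapes below normalize to the same step data (values illustrative):
#   {"name": "web_search", "arguments": {"q": "abstractcode"}, "call_id": "call_1"}
#   {"id": "call_1", "type": "function",
#    "function": {"name": "web_search", "arguments": "{\"q\": \"abstractcode\"}"}}
# -> {"tool": "web_search", "args": {"q": "abstractcode"}, "call_id": "call_1"}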
580
+ def _extract_tool_result(payload: Any) -> Dict[str, Any]:
581
+ # Normalize to ReactShell's existing "observe" step contract:
582
+ # {tool, result (string), success?}
583
+ tool = "tool"
584
+ success = None
585
+ result_str = ""
586
+ if isinstance(payload, dict):
587
+ tool_raw = payload.get("tool") or payload.get("name") or payload.get("tool_name")
588
+ if isinstance(tool_raw, str) and tool_raw.strip():
589
+ tool = tool_raw.strip()
590
+ if "success" in payload:
591
+ try:
592
+ success = bool(payload.get("success"))
593
+ except Exception:
594
+ success = None
595
+ # Prefer output/result; fallback to error/value.
596
+ raw = payload.get("output")
597
+ if raw is None:
598
+ raw = payload.get("result")
599
+ if raw is None:
600
+ raw = payload.get("error")
601
+ if raw is None:
602
+ raw = payload.get("value")
603
+ if raw is None:
604
+ raw = ""
605
+ if isinstance(raw, str):
606
+ result_str = raw
607
+ else:
608
+ try:
609
+ result_str = json.dumps(raw, ensure_ascii=False, sort_keys=True, indent=2)
610
+ except Exception:
611
+ result_str = str(raw)
612
+ elif isinstance(payload, str):
613
+ result_str = payload
614
+ else:
615
+ result_str = str(payload or "")
616
+ out: Dict[str, Any] = {"tool": tool, "result": result_str}
617
+ if success is not None:
618
+ out["success"] = success
619
+ return out
620
+
621
+ def _on_record(rec: Dict[str, Any]) -> None:
622
+ try:
623
+ if rec.get("actor_id") != actor_id:
624
+ return
625
+ if rec.get("session_id") != session_id:
626
+ return
627
+ status = rec.get("status")
628
+ status_str = status.value if hasattr(status, "value") else str(status or "")
629
+ if status_str != "completed":
630
+ return
631
+ eff = rec.get("effect")
632
+ if not isinstance(eff, dict) or str(eff.get("type") or "") != "emit_event":
633
+ return
634
+ payload = eff.get("payload") if isinstance(eff.get("payload"), dict) else {}
635
+ name = str(payload.get("name") or payload.get("event_name") or "").strip()
636
+ if not name:
637
+ return
638
+
639
+ event_payload = payload.get("payload")
640
+ if name == _STATUS_EVENT_NAME:
641
+ st = _extract_status(event_payload)
642
+ if callable(self.on_step) and str(st.get("text") or "").strip():
643
+ self.on_step("status", st)
644
+ return
645
+
646
+ if name == _MESSAGE_EVENT_NAME:
647
+ msg = _extract_message(event_payload)
648
+ if callable(self.on_step) and str(msg.get("text") or "").strip():
649
+ self.on_step("message", msg)
650
+ return
651
+
652
+ if name == _TOOL_EXEC_EVENT_NAME:
653
+ # Backwards-compatible: older emit_event nodes wrapped non-dict payloads under {"value": ...}.
654
+ raw_tc_payload = event_payload
655
+ if isinstance(raw_tc_payload, dict) and isinstance(raw_tc_payload.get("value"), list):
656
+ raw_tc_payload = raw_tc_payload.get("value")
657
+
658
+ if isinstance(raw_tc_payload, list):
659
+ for item in raw_tc_payload:
660
+ tc = _extract_tool_exec(item)
661
+ if callable(self.on_step) and str(tc.get("tool") or "").strip():
662
+ # Reuse AbstractCode's existing "tool call" UX.
663
+ self.on_step("act", tc)
664
+ else:
665
+ tc = _extract_tool_exec(raw_tc_payload)
666
+ if callable(self.on_step) and str(tc.get("tool") or "").strip():
667
+ # Reuse AbstractCode's existing "tool call" UX.
668
+ self.on_step("act", tc)
669
+ return
670
+
671
+ if name == _TOOL_RESULT_EVENT_NAME:
672
+ raw_tr_payload = event_payload
673
+ if isinstance(raw_tr_payload, dict) and isinstance(raw_tr_payload.get("value"), list):
674
+ raw_tr_payload = raw_tr_payload.get("value")
675
+
676
+ if isinstance(raw_tr_payload, list):
677
+ for item in raw_tr_payload:
678
+ tr = _extract_tool_result(item)
679
+ if callable(self.on_step):
680
+ # Reuse AbstractCode's existing "tool result" UX.
681
+ self.on_step("observe", tr)
682
+ else:
683
+ tr = _extract_tool_result(raw_tr_payload)
684
+ if callable(self.on_step):
685
+ # Reuse AbstractCode's existing "tool result" UX.
686
+ self.on_step("observe", tr)
687
+ return
688
+ except Exception:
689
+ return
690
+
691
+ try:
692
+ unsub = self.runtime.subscribe_ledger(_on_record, run_id=None)
693
+ return unsub if callable(unsub) else None
694
+ except Exception:
695
+ return None
696
+
697
+ def _cleanup_ledger_subscription(self) -> None:
698
+ unsub = self._ledger_unsubscribe
699
+ self._ledger_unsubscribe = None
700
+ if callable(unsub):
701
+ try:
702
+ unsub()
703
+ except Exception:
704
+ pass
705
+
706
+ def _auto_wait_until(self, state: RunState) -> Optional[RunState]:
707
+ """Best-effort: auto-drive short WAIT_UNTIL delays for workflow agents.
708
+
709
+ Why:
710
+ - Visual workflows commonly use Delay (WAIT_UNTIL) for UX pacing.
711
+ - AbstractCode's agent run loop expects `step()` to keep making progress without
712
+ manual `/resume` for short waits.
713
+
714
+ Notes:
715
+ - This is intentionally conservative: it yields back if the wait changes to a
716
+ different reason (tool approvals, user prompts, pauses).
717
+ - Cancellation/pause are polled so control-plane actions remain responsive.
718
+ """
719
+ waiting = getattr(state, "waiting", None)
720
+ if waiting is None:
721
+ return None
722
+
723
+ reason = getattr(waiting, "reason", None)
724
+ reason_value = reason.value if hasattr(reason, "value") else str(reason or "")
725
+ if reason_value != "until":
726
+ return None
727
+
728
+ until_raw = getattr(waiting, "until", None)
729
+ if not isinstance(until_raw, str) or not until_raw.strip():
730
+ return None
731
+
732
+ def _parse_until_iso(value: str) -> Optional[datetime]:
733
+ s = str(value or "").strip()
734
+ if not s:
735
+ return None
736
+ # Accept both "+00:00" and "Z"
737
+ if s.endswith("Z"):
738
+ s = s[:-1] + "+00:00"
739
+ try:
740
+ dt = datetime.fromisoformat(s)
741
+ except Exception:
742
+ return None
743
+ if dt.tzinfo is None:
744
+ dt = dt.replace(tzinfo=timezone.utc)
745
+ return dt.astimezone(timezone.utc)
746
+
747
+ until_dt = _parse_until_iso(until_raw)
748
+ if until_dt is None:
749
+ return None
750
+
751
+ import time
752
+
753
+ # Cap auto-wait to avoid surprising "hangs" for long schedules.
754
+ max_auto_wait_s = 30.0
755
+
756
+ while True:
757
+ try:
758
+ latest = self.runtime.get_state(state.run_id)
759
+ except Exception:
760
+ latest = state
761
+
762
+ # Stop if externally controlled or otherwise no longer a time wait.
763
+ if getattr(latest, "status", None) in (RunStatus.CANCELLED, RunStatus.FAILED, RunStatus.COMPLETED):
764
+ return latest
765
+
766
+ latest_wait = getattr(latest, "waiting", None)
767
+ if latest_wait is None:
768
+ return latest
769
+ r = getattr(latest_wait, "reason", None)
770
+ r_val = r.value if hasattr(r, "value") else str(r or "")
771
+ if r_val != "until":
772
+ # Another wait type (pause/user/tool/event/subworkflow) should be handled by the host.
773
+ return latest
774
+
775
+ now = datetime.now(timezone.utc)
776
+ remaining = (until_dt - now).total_seconds()
777
+ if remaining <= 0:
778
+ # Runtime.tick will auto-unblock on the next call.
779
+ return None
780
+
781
+ if remaining > max_auto_wait_s:
782
+ # Leave it waiting; user can /resume later.
783
+ return latest
784
+
785
+ time.sleep(min(0.25, max(0.0, float(remaining))))
786
+
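# Both timestamp spellings are accepted for `waiting.until` (values illustrative):
#   "2030-01-01T00:00:05Z"        -> parsed as UTC
#   "2030-01-01T00:00:05+00:00"   -> parsed as UTC
# Waits longer than ~30s are left for the host to /resume.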
787
+ def step(self) -> RunState:
788
+ if not self._current_run_id:
789
+ raise RuntimeError("No active run. Call start() first.")
790
+
791
+ state = self.runtime.tick(workflow=self.workflow, run_id=self._current_run_id, max_steps=1)
792
+
793
+ # Auto-drive short time waits (Delay node) so workflow agents can use pacing
794
+ # without requiring manual `/resume`.
795
+ if state.status == RunStatus.WAITING:
796
+ advanced = self._auto_wait_until(state)
797
+ if isinstance(advanced, RunState):
798
+ state = advanced
799
+ elif advanced is None:
800
+ # Time passed (or will pass within our polling loop): continue ticking once.
801
+ state = self.runtime.tick(workflow=self.workflow, run_id=self._current_run_id, max_steps=1)
802
+
803
+ if state.status == RunStatus.COMPLETED:
804
+ response_text = ""
805
+ meta_out: Dict[str, Any] = {}
806
+ scratchpad_out: Any = None
807
+ raw_result_out: Any = None
808
+ out = getattr(state, "output", None)
809
+ if isinstance(out, dict):
810
+ result_payload = out.get("result") if isinstance(out.get("result"), dict) else None
811
+ if isinstance(out.get("response"), str):
812
+ response_text = str(out.get("response") or "")
813
+ else:
814
+ result = out.get("result")
815
+ if isinstance(result, dict) and "response" in result:
816
+ response_text = str(result.get("response") or "")
817
+ elif isinstance(result, str):
818
+ response_text = str(result or "")
819
+ raw_meta = out.get("meta")
820
+ if isinstance(raw_meta, dict):
821
+ meta_out = dict(raw_meta)
822
+ elif isinstance(result_payload, dict) and isinstance(result_payload.get("meta"), dict):
823
+ meta_out = dict(result_payload.get("meta") or {})
824
+ scratchpad_out = out.get("scratchpad")
825
+ if scratchpad_out is None and isinstance(result_payload, dict) and "scratchpad" in result_payload:
826
+ scratchpad_out = result_payload.get("scratchpad")
827
+ raw_result_out = out.get("raw_result")
828
+ if raw_result_out is None and isinstance(result_payload, dict) and "raw_result" in result_payload:
829
+ raw_result_out = result_payload.get("raw_result")
830
+
831
+ task = str(self._last_task or "")
832
+ ctx = state.vars.get("context") if isinstance(getattr(state, "vars", None), dict) else None
833
+ if not isinstance(ctx, dict):
834
+ ctx = {"task": task, "messages": []}
835
+ state.vars["context"] = ctx
836
+
837
+ msgs_raw = ctx.get("messages")
838
+ msgs = _copy_messages(msgs_raw)
839
+ msgs.append(_new_message(role="user", content=task))
840
+
841
+ assistant_meta: Dict[str, Any] = {}
842
+ if meta_out:
843
+ assistant_meta["workflow_meta"] = meta_out
844
+ if scratchpad_out is not None:
845
+ assistant_meta["workflow_scratchpad"] = scratchpad_out
846
+ if raw_result_out is not None:
847
+ assistant_meta["workflow_raw_result"] = raw_result_out
848
+
849
+ msgs.append(_new_message(role="assistant", content=response_text, metadata=assistant_meta))
850
+ ctx["messages"] = msgs
851
+
852
+ # Persist best-effort so restarts can load history from run state.
853
+ store = getattr(self.runtime, "run_store", None) or getattr(self.runtime, "_run_store", None)
854
+ save = getattr(store, "save", None)
855
+ if callable(save):
856
+ try:
857
+ save(state)
858
+ except Exception:
859
+ pass
860
+
861
+ self.session_messages = list(msgs)
862
+
863
+ if self.on_step:
864
+ try:
865
+ self.on_step(
866
+ "done",
867
+ {
868
+ "answer": response_text,
869
+ "meta": meta_out or None,
870
+ "scratchpad": scratchpad_out,
871
+ "raw_result": raw_result_out,
872
+ },
873
+ )
874
+ except Exception:
875
+ pass
876
+ self._cleanup_ledger_subscription()
877
+
878
+ if state.status in (RunStatus.FAILED, RunStatus.CANCELLED):
879
+ self._sync_session_caches_from_state(state)
880
+ self._cleanup_ledger_subscription()
881
+
882
+ return state
883
+
884
+
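# Illustrative end-to-end sketch (not part of the packaged module). How the
# Runtime is constructed is assumed and left out; the WorkflowAgent keyword
# arguments and the start()/step() loop come from the class above.
agent = WorkflowAgent(
    runtime=runtime,              # an abstractruntime.Runtime instance
    flow_ref="Triage",            # hypothetical flow name, id, or .json path
    flows_dir="flows",
    tools=[],                     # optional plain callables exposed to agent nodes
    on_step=lambda step, data: print(step, data),
)
run_id = agent.start("Summarize the README", allowed_tools=["read_file"])
state = agent.step()
while state.status not in (RunStatus.COMPLETED, RunStatus.FAILED, RunStatus.CANCELLED):
    # A WAITING state for user input or tool approval would need host handling;
    # omitted here for brevity.
    state = agent.step()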
885
+ def dump_visual_flow_json(flow: Any) -> str:
886
+ """Debug helper for printing a VisualFlow as JSON (used in tests)."""
887
+ try:
888
+ return flow.model_dump_json(indent=2)
889
+ except Exception:
890
+ try:
891
+ data = flow.model_dump()
892
+ except Exception:
893
+ data = {}
894
+ return json.dumps(data, indent=2, ensure_ascii=False, default=str)