abstractcode 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcode/__init__.py +6 -37
- abstractcode/cli.py +401 -0
- abstractcode/flow_cli.py +1413 -0
- abstractcode/fullscreen_ui.py +1453 -0
- abstractcode/input_handler.py +81 -0
- abstractcode/py.typed +1 -0
- abstractcode/react_shell.py +6440 -0
- abstractcode/recall.py +384 -0
- abstractcode/remember.py +184 -0
- abstractcode/terminal_markdown.py +168 -0
- abstractcode/workflow_agent.py +894 -0
- abstractcode-0.3.0.dist-info/METADATA +270 -0
- abstractcode-0.3.0.dist-info/RECORD +17 -0
- abstractcode-0.1.0.dist-info/METADATA +0 -114
- abstractcode-0.1.0.dist-info/RECORD +0 -7
- {abstractcode-0.1.0.dist-info → abstractcode-0.3.0.dist-info}/WHEEL +0 -0
- {abstractcode-0.1.0.dist-info → abstractcode-0.3.0.dist-info}/entry_points.txt +0 -0
- {abstractcode-0.1.0.dist-info → abstractcode-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {abstractcode-0.1.0.dist-info → abstractcode-0.3.0.dist-info}/top_level.txt +0 -0
abstractcode/flow_cli.py
ADDED
|
@@ -0,0 +1,1413 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import time
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any, Dict, Iterable, List, Optional, Tuple, Literal
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
FlowVerbosity = Literal["none", "default", "full"]
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class FlowRunResult:
    """Summary of a flow execution for host-side UX and REPL context injection."""

    # Identifier of the VisualFlow definition that was executed.
    flow_id: str
    # Human-readable flow name, for display purposes.
    flow_name: str
    # Runtime run identifier for this execution.
    run_id: str
    # Final run status string as reported by the runtime.
    status: str
    # Directory of the persistent run store, when one was used; otherwise None.
    store_dir: Optional[str]
    # One entry per executed tool call (name, arguments, success, error, output size).
    tool_calls: List[Dict[str, Any]]
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass(frozen=True)
class FlowRunRef:
    """Durable reference to a visual-flow run (the full state lives in the RunStore)."""

    # Identifier of the flow definition that was started.
    flow_id: str
    # Directory containing the flow .json definitions (string for JSON round-tripping).
    flows_dir: str
    # Runtime run identifier used to resume/inspect the run later.
    run_id: str
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def default_flow_state_file() -> str:
    """Return the flow-state file path.

    The ABSTRACTCODE_FLOW_STATE_FILE environment variable wins; otherwise the
    file lives under the user's home directory.
    """
    override = os.getenv("ABSTRACTCODE_FLOW_STATE_FILE")
    if override:
        return override
    return str(Path.home() / ".abstractcode" / "flow_state.json")
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def default_flows_dir() -> Path:
    """Return the directory holding VisualFlow .json definitions.

    Resolution order: ABSTRACTFLOW_FLOWS_DIR env var, then the monorepo-friendly
    default (abstractflow/web/flows) when it exists, then ./flows.
    """
    override = os.getenv("ABSTRACTFLOW_FLOWS_DIR")
    if override:
        return Path(override)
    # Monorepo-friendly default.
    monorepo = Path("abstractflow/web/flows")
    if monorepo.is_dir():
        return monorepo
    return Path("flows")
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def _read_json(path: Path) -> Any:
|
|
54
|
+
return json.loads(path.read_text(encoding="utf-8"))
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _write_json(path: Path, data: Any) -> None:
|
|
58
|
+
path.parent.mkdir(parents=True, exist_ok=True)
|
|
59
|
+
path.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8")
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _load_flow_ref(path: Path) -> Optional[FlowRunRef]:
|
|
63
|
+
if not path.exists():
|
|
64
|
+
return None
|
|
65
|
+
try:
|
|
66
|
+
raw = _read_json(path)
|
|
67
|
+
except Exception:
|
|
68
|
+
return None
|
|
69
|
+
if not isinstance(raw, dict):
|
|
70
|
+
return None
|
|
71
|
+
if raw.get("kind") and raw.get("kind") != "flow":
|
|
72
|
+
return None
|
|
73
|
+
flow_id = raw.get("flow_id")
|
|
74
|
+
flows_dir = raw.get("flows_dir")
|
|
75
|
+
run_id = raw.get("run_id")
|
|
76
|
+
if not isinstance(flow_id, str) or not flow_id.strip():
|
|
77
|
+
return None
|
|
78
|
+
if not isinstance(flows_dir, str) or not flows_dir.strip():
|
|
79
|
+
return None
|
|
80
|
+
if not isinstance(run_id, str) or not run_id.strip():
|
|
81
|
+
return None
|
|
82
|
+
return FlowRunRef(flow_id=flow_id.strip(), flows_dir=flows_dir.strip(), run_id=run_id.strip())
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def _save_flow_ref(path: Path, ref: FlowRunRef) -> None:
    """Persist *ref* to *path* as a JSON document tagged with kind="flow"."""
    payload = {
        "kind": "flow",
        "flow_id": ref.flow_id,
        "flows_dir": ref.flows_dir,
        "run_id": ref.run_id,
    }
    _write_json(path, payload)
|
|
95
|
+
|
|
96
|
+
def _flow_store_dir(state_path: Path) -> Path:
|
|
97
|
+
return state_path.with_name(state_path.stem + ".d")
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _parse_input_json(*, raw_json: Optional[str], json_path: Optional[str]) -> Dict[str, Any]:
|
|
101
|
+
if raw_json and json_path:
|
|
102
|
+
raise ValueError("Provide either --input-json or --input-file, not both.")
|
|
103
|
+
|
|
104
|
+
if json_path:
|
|
105
|
+
payload = _read_json(Path(json_path).expanduser().resolve())
|
|
106
|
+
if not isinstance(payload, dict):
|
|
107
|
+
raise ValueError("--input-file must contain a JSON object")
|
|
108
|
+
return dict(payload)
|
|
109
|
+
|
|
110
|
+
if raw_json:
|
|
111
|
+
payload = json.loads(raw_json)
|
|
112
|
+
if not isinstance(payload, dict):
|
|
113
|
+
raise ValueError("--input-json must be a JSON object")
|
|
114
|
+
return dict(payload)
|
|
115
|
+
|
|
116
|
+
return {}
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def _coerce_value(raw: str) -> Any:
|
|
120
|
+
v = str(raw)
|
|
121
|
+
lower = v.strip().lower()
|
|
122
|
+
if lower in ("true", "yes", "y"):
|
|
123
|
+
return True
|
|
124
|
+
if lower in ("false", "no", "n"):
|
|
125
|
+
return False
|
|
126
|
+
if lower in ("null", "none"):
|
|
127
|
+
return None
|
|
128
|
+
|
|
129
|
+
# JSON objects/arrays (useful for payload-like params).
|
|
130
|
+
if v and v[0] in ("{", "["):
|
|
131
|
+
try:
|
|
132
|
+
return json.loads(v)
|
|
133
|
+
except Exception:
|
|
134
|
+
pass
|
|
135
|
+
|
|
136
|
+
# Integers
|
|
137
|
+
try:
|
|
138
|
+
if lower.startswith(("+", "-")):
|
|
139
|
+
int_candidate = lower[1:]
|
|
140
|
+
else:
|
|
141
|
+
int_candidate = lower
|
|
142
|
+
if int_candidate.isdigit():
|
|
143
|
+
return int(lower, 10)
|
|
144
|
+
except Exception:
|
|
145
|
+
pass
|
|
146
|
+
|
|
147
|
+
# Floats
|
|
148
|
+
try:
|
|
149
|
+
if any(c in lower for c in (".", "e")):
|
|
150
|
+
return float(lower)
|
|
151
|
+
except Exception:
|
|
152
|
+
pass
|
|
153
|
+
|
|
154
|
+
return v
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
def _parse_kv_list(items: List[str]) -> Dict[str, Any]:
|
|
158
|
+
out: Dict[str, Any] = {}
|
|
159
|
+
for item in items:
|
|
160
|
+
raw = str(item or "").strip()
|
|
161
|
+
if not raw:
|
|
162
|
+
continue
|
|
163
|
+
if "=" not in raw:
|
|
164
|
+
raise ValueError(f"Invalid --param value (expected key=value): {raw}")
|
|
165
|
+
k, v = raw.split("=", 1)
|
|
166
|
+
key = k.strip()
|
|
167
|
+
if not key:
|
|
168
|
+
raise ValueError(f"Invalid --param key: {raw}")
|
|
169
|
+
out[key] = _coerce_value(v)
|
|
170
|
+
return out
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def _parse_unknown_params(argv: List[str]) -> Dict[str, Any]:
    """Parse unknown CLI args as input params.

    Supports:
    - --key=value
    - --key value
    - key=value
    - --flag (sets flag=true)
    """
    out: Dict[str, Any] = {}
    i = 0
    while i < len(argv):
        token = str(argv[i] or "")
        if not token:
            # Skip empty tokens entirely.
            i += 1
            continue

        if token.startswith("--"):
            keyval = token[2:]
            if not keyval:
                raise ValueError("Invalid parameter flag '--'")
            if "=" in keyval:
                # --key=value form: split on the first "=" only.
                k, v = keyval.split("=", 1)
                key = k.strip()
                if not key:
                    raise ValueError(f"Invalid parameter flag: {token}")
                out[key] = _coerce_value(v)
                i += 1
                continue

            key = keyval.strip()
            if not key:
                raise ValueError(f"Invalid parameter flag: {token}")
            # --key value form: consume the next token as the value, unless it
            # looks like another flag.
            if i + 1 < len(argv) and not str(argv[i + 1]).startswith("--"):
                nxt = str(argv[i + 1])
                # Heuristic: if the next token looks like a standalone `key=value`,
                # treat this flag as boolean and let the next token be parsed normally.
                if "=" in nxt:
                    out[key] = True
                    i += 1
                    continue
                out[key] = _coerce_value(nxt)
                i += 2
                continue
            # Bare --flag at end of argv (or followed by another flag): boolean true.
            out[key] = True
            i += 1
            continue

        if "=" in token:
            # Bare key=value form (no leading dashes).
            k, v = token.split("=", 1)
            key = k.strip()
            if not key:
                raise ValueError(f"Invalid parameter: {token}")
            out[key] = _coerce_value(v)
            i += 1
            continue

        # Anything else (positional word with no "=") is rejected.
        raise ValueError(f"Unexpected argument: {token}")

    return out
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def _required_entry_inputs(vf: Any) -> List[str]:
    """Return required entry-node input keys (entry outputs excluding execution).

    AbstractFlow's web UI uses the entry node's output pins as run inputs. For the
    CLI, we fail fast if any of these are missing instead of prompting.
    """

    try:
        nodes = getattr(vf, "nodes", None)
    except Exception:
        nodes = None
    if not isinstance(nodes, list) or not nodes:
        return []

    # Explicitly declared entry node id, when the flow provides one.
    try:
        entry_id = getattr(vf, "entryNode", None)
    except Exception:
        entry_id = None

    # Node types recognized as flow entry points.
    ENTRY_TYPES = {"on_flow_start", "on_user_request", "on_agent_message", "on_schedule", "on_event"}

    def _node_type(node: Any) -> str:
        # Normalize enum-or-string node types to a plain string.
        t = getattr(node, "type", None)
        return t.value if hasattr(t, "value") else str(t or "")

    # Locate the entry node: by declared id, then by entry type, then first node.
    entry = None
    if isinstance(entry_id, str) and entry_id:
        for n in nodes:
            if str(getattr(n, "id", "") or "") == entry_id:
                entry = n
                break
    if entry is None:
        for n in nodes:
            if _node_type(n) in ENTRY_TYPES:
                entry = n
                break
    if entry is None:
        entry = nodes[0]

    # Output pins may live on node.data["outputs"] (dict form) or node.outputs.
    data = getattr(entry, "data", None)
    raw_pins = None
    if isinstance(data, dict):
        raw_pins = data.get("outputs")
    if raw_pins is None:
        raw_pins = getattr(entry, "outputs", None)

    required: List[str] = []
    if isinstance(raw_pins, list):
        for p in raw_pins:
            # Pins may be plain dicts or model objects with id/type attributes.
            if isinstance(p, dict):
                pid = str(p.get("id") or "").strip()
                ptype = str(p.get("type") or "").strip()
            else:
                pid = str(getattr(p, "id", "") or "").strip()
                ptype = str(getattr(getattr(p, "type", None), "value", None) or getattr(p, "type", "") or "").strip()
            # Execution pins carry control flow, not data inputs.
            if not pid or ptype == "execution":
                continue
            required.append(pid)

    return required
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def _render_text(text: str) -> str:
|
|
298
|
+
"""Render UI-facing text without showing escaped newlines."""
|
|
299
|
+
s = str(text)
|
|
300
|
+
if "\\n" in s or "\\t" in s:
|
|
301
|
+
s = s.replace("\\n", "\n").replace("\\t", "\t")
|
|
302
|
+
return s
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
def _load_visual_flows(flows_dir: Path) -> Dict[str, Any]:
    """Load every parseable VisualFlow .json in *flows_dir*, keyed by flow id.

    Unreadable or invalid files are skipped silently; an absent directory
    yields an empty map.

    Raises:
        RuntimeError: when the optional AbstractFlow dependency is missing.
    """
    # Local import: AbstractFlow is an optional extra, so fail with an install hint.
    try:
        from abstractflow.visual.models import VisualFlow
    except Exception as e:
        raise RuntimeError(
            "AbstractFlow is required to run VisualFlow workflows.\n"
            "Install with: pip install \"abstractcode[flow]\""
        ) from e

    flows: Dict[str, Any] = {}
    if not flows_dir.exists():
        return flows
    for path in sorted(flows_dir.glob("*.json")):
        try:
            raw = path.read_text(encoding="utf-8")
        except Exception:
            # Skip unreadable files (permissions, races) without failing the load.
            continue
        try:
            vf = VisualFlow.model_validate_json(raw)
        except Exception:
            # Skip files that are not valid VisualFlow documents.
            continue
        flows[str(vf.id)] = vf
    return flows
|
|
328
|
+
|
|
329
|
+
|
|
330
|
+
def _resolve_flow(
    flow_ref: str,
    *,
    flows_dir: Optional[str],
) -> Tuple[Any, Dict[str, Any], Path]:
    """Resolve a VisualFlow either by id (in flows_dir) or by a .json path."""
    ref = str(flow_ref or "").strip()
    if not ref:
        raise ValueError("flow reference is required (flow id or .json path)")

    path = Path(ref).expanduser()
    flows_dir_path: Path
    if path.exists() and path.is_file():
        # Direct .json file: validate it, then merge it into the flows map so
        # lookups by id (e.g. for subflows) still work.
        try:
            raw = path.read_text(encoding="utf-8")
        except Exception as e:
            raise ValueError(f"Cannot read flow file: {path}") from e

        # Local import: AbstractFlow is an optional extra.
        try:
            from abstractflow.visual.models import VisualFlow
        except Exception as e:
            raise RuntimeError(
                "AbstractFlow is required to run VisualFlow workflows.\n"
                "Install with: pip install \"abstractcode[flow]\""
            ) from e

        vf = VisualFlow.model_validate_json(raw)
        # Default the flows dir to the file's own directory unless overridden.
        flows_dir_path = Path(flows_dir).expanduser().resolve() if flows_dir else path.parent.resolve()
        flows = _load_visual_flows(flows_dir_path)
        flows[str(vf.id)] = vf
        return vf, flows, flows_dir_path

    # Otherwise treat the reference as a flow id within the flows directory.
    flows_dir_path = Path(flows_dir).expanduser().resolve() if flows_dir else default_flows_dir().resolve()
    flows = _load_visual_flows(flows_dir_path)
    if ref not in flows:
        raise ValueError(f"Flow '{ref}' not found in {flows_dir_path}")
    return flows[ref], flows, flows_dir_path
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
def _is_pause_wait(wait: Any, *, run_id: str) -> bool:
|
|
370
|
+
wait_key = getattr(wait, "wait_key", None)
|
|
371
|
+
if isinstance(wait_key, str) and wait_key == f"pause:{run_id}":
|
|
372
|
+
return True
|
|
373
|
+
details = getattr(wait, "details", None)
|
|
374
|
+
if isinstance(details, dict) and details.get("kind") == "pause":
|
|
375
|
+
return True
|
|
376
|
+
return False
|
|
377
|
+
|
|
378
|
+
|
|
379
|
+
def _extract_sub_run_id(wait: Any) -> Optional[str]:
|
|
380
|
+
details = getattr(wait, "details", None)
|
|
381
|
+
if isinstance(details, dict):
|
|
382
|
+
sub_run_id = details.get("sub_run_id")
|
|
383
|
+
if isinstance(sub_run_id, str) and sub_run_id:
|
|
384
|
+
return sub_run_id
|
|
385
|
+
wait_key = getattr(wait, "wait_key", None)
|
|
386
|
+
if isinstance(wait_key, str) and wait_key.startswith("subworkflow:"):
|
|
387
|
+
return wait_key.split("subworkflow:", 1)[1] or None
|
|
388
|
+
return None
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
def _iter_descendants(runtime: Any, root_run_id: str) -> List[str]:
|
|
392
|
+
"""Return [root] + descendants (best-effort) using QueryableRunStore.list_children."""
|
|
393
|
+
out: List[str] = [root_run_id]
|
|
394
|
+
seen = {root_run_id}
|
|
395
|
+
queue = [root_run_id]
|
|
396
|
+
|
|
397
|
+
run_store = getattr(runtime, "run_store", None)
|
|
398
|
+
list_children = getattr(run_store, "list_children", None)
|
|
399
|
+
if not callable(list_children):
|
|
400
|
+
return out
|
|
401
|
+
|
|
402
|
+
while queue:
|
|
403
|
+
current = queue.pop(0)
|
|
404
|
+
try:
|
|
405
|
+
children = list_children(parent_run_id=current) # type: ignore[misc]
|
|
406
|
+
except Exception:
|
|
407
|
+
continue
|
|
408
|
+
if not isinstance(children, list):
|
|
409
|
+
continue
|
|
410
|
+
for child in children:
|
|
411
|
+
cid = getattr(child, "run_id", None)
|
|
412
|
+
if not isinstance(cid, str) or not cid or cid in seen:
|
|
413
|
+
continue
|
|
414
|
+
seen.add(cid)
|
|
415
|
+
out.append(cid)
|
|
416
|
+
queue.append(cid)
|
|
417
|
+
return out
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
def _workflow_for(runtime: Any, runner_workflow: Any, workflow_id: str) -> Any:
|
|
421
|
+
reg = getattr(runtime, "workflow_registry", None)
|
|
422
|
+
if reg is not None:
|
|
423
|
+
getter = getattr(reg, "get", None)
|
|
424
|
+
if callable(getter):
|
|
425
|
+
wf = getter(workflow_id)
|
|
426
|
+
if wf is not None:
|
|
427
|
+
return wf
|
|
428
|
+
if getattr(runner_workflow, "workflow_id", None) == workflow_id:
|
|
429
|
+
return runner_workflow
|
|
430
|
+
raise RuntimeError(f"Workflow '{workflow_id}' not found in runtime registry")
|
|
431
|
+
|
|
432
|
+
|
|
433
|
+
def _print_answer_user_records(
    *,
    runtime: Any,
    run_ids: Iterable[str],
    offsets: Dict[str, int],
    emit: Any,
) -> None:
    """Emit the message of each newly completed `answer_user` effect.

    `offsets` tracks, per run id, how far into each ledger we have already
    printed; it is advanced in place so repeated polling never re-prints.
    """
    for rid in run_ids:
        ledger = runtime.get_ledger(rid)
        if not isinstance(ledger, list):
            continue
        # Clamp a bad stored offset to the start of the ledger.
        start = int(offsets.get(rid, 0) or 0)
        if start < 0:
            start = 0
        for rec in ledger[start:]:
            if not isinstance(rec, dict):
                continue
            if rec.get("status") != "completed":
                continue
            eff = rec.get("effect")
            if not isinstance(eff, dict):
                continue
            if eff.get("type") != "answer_user":
                continue
            result = rec.get("result")
            # Only string messages are rendered; anything else is ignored.
            if isinstance(result, dict) and isinstance(result.get("message"), str):
                emit(_render_text(result["message"]))
        offsets[rid] = len(ledger)
|
|
461
|
+
|
|
462
|
+
|
|
463
|
+
@dataclass
class _ApprovalState:
    """Mutable per-session approval switch shared across tool-approval prompts."""

    # Once True (user chose "a"/"all"), later tool calls execute without prompting.
    approve_all: bool = False
|
|
466
|
+
|
|
467
|
+
|
|
468
|
+
def _approve_and_execute(
    *,
    tool_calls: List[Dict[str, Any]],
    tool_runner: Any,
    auto_approve: bool,
    approval_state: _ApprovalState,
    prompt_fn: Any,
    print_fn: Any,
    trace: Optional[FlowRunResult] = None,
) -> Optional[Dict[str, Any]]:
    """Prompt for per-tool approval (unless auto-approved) and execute approved calls.

    Returns the tool runner's payload (auto path) or an "executed" payload of
    per-call results; returns None when the user quits the approval prompt.
    """
    if auto_approve or approval_state.approve_all:
        # No interaction: run everything and optionally record it in the trace.
        payload = tool_runner.execute(tool_calls=tool_calls)
        if trace is not None and isinstance(payload, dict):
            _capture_tool_results(trace=trace, tool_calls=tool_calls, payload=payload)
        return payload

    print_fn("\nTool approval required")
    print_fn("-" * 60)
    approve_all = False
    approved: List[Dict[str, Any]] = []
    results: List[Dict[str, Any]] = []

    for tc in tool_calls:
        name = str(tc.get("name", "") or "")
        args = dict(tc.get("arguments") or {})
        call_id = str(tc.get("call_id") or "")

        print_fn(f"\n{name}")
        print_fn("args: " + json.dumps(args, indent=2, ensure_ascii=False))

        if not approve_all:
            while True:
                choice = str(prompt_fn("Approve? [y]es/[n]o/[a]ll/[q]uit: ")).strip().lower()
                if choice in ("y", "yes"):
                    break
                if choice in ("a", "all"):
                    # Remember "all" both locally and in the shared session state.
                    approve_all = True
                    approval_state.approve_all = True
                    break
                if choice in ("n", "no"):
                    results.append(
                        {"call_id": call_id, "name": name, "success": False, "output": None, "error": "Rejected by user"}
                    )
                    # Blank name marks this call as rejected for the check below.
                    name = ""
                    break
                if choice in ("q", "quit"):
                    # Quit aborts the whole batch; caller sees None.
                    return None
                print_fn("Invalid choice.")

        if not name:
            continue
        approved.append({"name": name, "arguments": args, "call_id": call_id})

    if approved:
        payload = tool_runner.execute(tool_calls=approved)
        if trace is not None and isinstance(payload, dict):
            _capture_tool_results(trace=trace, tool_calls=approved, payload=payload)
        if isinstance(payload, dict):
            exec_results = payload.get("results")
            if isinstance(exec_results, list):
                results.extend(exec_results)
            else:
                results.append({"call_id": "", "name": "tools", "success": False, "output": None, "error": "Invalid tool runner output"})

    return {"mode": "executed", "results": results}
|
|
533
|
+
|
|
534
|
+
|
|
535
|
+
def _capture_tool_results(*, trace: FlowRunResult, tool_calls: List[Dict[str, Any]], payload: Dict[str, Any]) -> None:
|
|
536
|
+
"""Capture executed tool call metadata into the flow result (no truncation)."""
|
|
537
|
+
results = payload.get("results")
|
|
538
|
+
results_by_id: Dict[str, Dict[str, Any]] = {}
|
|
539
|
+
if isinstance(results, list):
|
|
540
|
+
for r in results:
|
|
541
|
+
if not isinstance(r, dict):
|
|
542
|
+
continue
|
|
543
|
+
cid = str(r.get("call_id") or "")
|
|
544
|
+
if cid:
|
|
545
|
+
results_by_id[cid] = r
|
|
546
|
+
|
|
547
|
+
for tc in tool_calls:
|
|
548
|
+
if not isinstance(tc, dict):
|
|
549
|
+
continue
|
|
550
|
+
call_id = str(tc.get("call_id") or "")
|
|
551
|
+
name = str(tc.get("name") or "")
|
|
552
|
+
args = tc.get("arguments")
|
|
553
|
+
args_dict = dict(args) if isinstance(args, dict) else {}
|
|
554
|
+
r = results_by_id.get(call_id, {})
|
|
555
|
+
trace.tool_calls.append(
|
|
556
|
+
{
|
|
557
|
+
"name": name,
|
|
558
|
+
"arguments": args_dict,
|
|
559
|
+
"success": bool(r.get("success")) if isinstance(r, dict) and "success" in r else None,
|
|
560
|
+
"error": r.get("error") if isinstance(r, dict) else None,
|
|
561
|
+
"output_chars": len(str(r.get("output") or "")) if isinstance(r, dict) else None,
|
|
562
|
+
}
|
|
563
|
+
)
|
|
564
|
+
|
|
565
|
+
|
|
566
|
+
def _node_meta(vf: Any) -> Dict[str, Dict[str, str]]:
|
|
567
|
+
meta: Dict[str, Dict[str, str]] = {}
|
|
568
|
+
nodes = getattr(vf, "nodes", None)
|
|
569
|
+
if not isinstance(nodes, list):
|
|
570
|
+
return meta
|
|
571
|
+
for n in nodes:
|
|
572
|
+
try:
|
|
573
|
+
node_id = str(getattr(n, "id", "") or "")
|
|
574
|
+
if not node_id:
|
|
575
|
+
continue
|
|
576
|
+
node_type = getattr(n, "type", None)
|
|
577
|
+
node_type_val = getattr(node_type, "value", None)
|
|
578
|
+
ntype = str(node_type_val if node_type_val is not None else node_type or "")
|
|
579
|
+
label = ""
|
|
580
|
+
data = getattr(n, "data", None)
|
|
581
|
+
if isinstance(data, dict):
|
|
582
|
+
label = str(data.get("label") or "")
|
|
583
|
+
if not label:
|
|
584
|
+
label = str(getattr(n, "label", "") or "")
|
|
585
|
+
if not label:
|
|
586
|
+
label = ntype or node_id
|
|
587
|
+
meta[node_id] = {"label": label, "type": ntype}
|
|
588
|
+
except Exception:
|
|
589
|
+
continue
|
|
590
|
+
return meta
|
|
591
|
+
|
|
592
|
+
|
|
593
|
+
def _duration_ms(rec: Dict[str, Any]) -> Optional[float]:
|
|
594
|
+
started = rec.get("started_at")
|
|
595
|
+
ended = rec.get("ended_at")
|
|
596
|
+
if not isinstance(started, str) or not isinstance(ended, str) or not started or not ended:
|
|
597
|
+
return None
|
|
598
|
+
try:
|
|
599
|
+
from datetime import datetime
|
|
600
|
+
|
|
601
|
+
s = datetime.fromisoformat(started.replace("Z", "+00:00"))
|
|
602
|
+
e = datetime.fromisoformat(ended.replace("Z", "+00:00"))
|
|
603
|
+
return float((e - s).total_seconds() * 1000.0)
|
|
604
|
+
except Exception:
|
|
605
|
+
return None
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
def _print_step_records(
    *,
    runtime: Any,
    run_ids: Iterable[str],
    offsets: Dict[str, int],
    emit: Any,
    verbosity: FlowVerbosity,
    node_meta: Dict[str, Dict[str, str]],
) -> None:
    """Print new ledger records for observability (like AbstractFlow left panel)."""
    if verbosity == "none":
        return

    for rid in run_ids:
        ledger = runtime.get_ledger(rid)
        if not isinstance(ledger, list):
            continue
        # Clamp a bad stored offset to the start of the ledger.
        start = int(offsets.get(rid, 0) or 0)
        if start < 0:
            start = 0

        for rec in ledger[start:]:
            if not isinstance(rec, dict):
                continue
            status = rec.get("status")
            eff = rec.get("effect")
            eff_type = None
            if isinstance(eff, dict):
                eff_type = eff.get("type")
            if verbosity != "full" and eff_type == "answer_user":
                # answer_user is printed separately (as the user-visible output).
                continue

            # Resolve a display label/type for the record's node.
            nid = str(rec.get("node_id") or "")
            meta = node_meta.get(nid, {})
            label = meta.get("label") or nid
            ntype = meta.get("type") or ""

            if verbosity == "full":
                # Full verbosity dumps the raw record as pretty JSON.
                emit(json.dumps(rec, indent=2, ensure_ascii=False))
                continue

            # Default verbosity: one compact line per record.
            dur = _duration_ms(rec)
            dur_txt = f"{dur/1000.0:.2f}s" if isinstance(dur, (int, float)) else ""
            parts = [f"{label}"]
            if ntype:
                parts.append(f"({ntype})")
            if isinstance(eff_type, str) and eff_type:
                parts.append(str(eff_type))
            if isinstance(status, str) and status:
                parts.append(str(status).upper())
            if dur_txt:
                parts.append(dur_txt)
            emit(" ".join([p for p in parts if p]))

        offsets[rid] = len(ledger)
|
|
664
|
+
|
|
665
|
+
|
|
666
|
+
def _resume_and_bubble(
    *,
    runtime: Any,
    runner_workflow: Any,
    top_run_id: str,
    target_run_id: str,
    payload: Dict[str, Any],
    wait_key: Optional[str],
) -> None:
    """Resume `target_run_id` and bubble subworkflow completions up to `top_run_id`."""
    from abstractruntime.core.models import RunStatus, WaitReason

    def _spec_for(state: Any) -> Any:
        # Resolve the workflow spec for a run state via the runtime registry.
        return _workflow_for(runtime, runner_workflow, getattr(state, "workflow_id", ""))

    target_state = runtime.get_state(target_run_id)
    # Deliver the payload; max_steps=0 defers actual stepping to the loop below.
    runtime.resume(
        workflow=_spec_for(target_state),
        run_id=target_run_id,
        wait_key=wait_key,
        payload=payload,
        max_steps=0,
    )

    current_run_id = target_run_id
    # Bounded loop guards against pathological/cyclic parent chains.
    for _ in range(50):
        st = runtime.get_state(current_run_id)
        if st.status == RunStatus.RUNNING:
            st = runtime.tick(workflow=_spec_for(st), run_id=current_run_id, max_steps=100)

        if st.status == RunStatus.WAITING:
            # Blocked on something external; nothing more to bubble now.
            return
        if st.status == RunStatus.FAILED:
            raise RuntimeError(st.error or "Subworkflow failed")
        if st.status != RunStatus.COMPLETED:
            return

        parent_id = getattr(st, "parent_run_id", None)
        if not isinstance(parent_id, str) or not parent_id:
            # Reached a root run; nothing above to notify.
            return

        parent = runtime.get_state(parent_id)
        if parent.status != RunStatus.WAITING or parent.waiting is None:
            return
        if parent.waiting.reason != WaitReason.SUBWORKFLOW:
            return

        # Hand the completed child's output (and traces) to the waiting parent.
        runtime.resume(
            workflow=_spec_for(parent),
            run_id=parent_id,
            wait_key=None,
            payload={
                "sub_run_id": st.run_id,
                "output": st.output,
                "node_traces": runtime.get_node_traces(st.run_id),
            },
            max_steps=0,
        )

        if parent_id == top_run_id:
            return
        current_run_id = parent_id
|
|
728
|
+
|
|
729
|
+
|
|
730
|
+
def _drive_until_blocked(
    *,
    runner: Any,
    tool_runner: Any,
    auto_approve: bool,
    wait_until: bool,
    verbosity: FlowVerbosity = "default",
    node_meta: Optional[Dict[str, Dict[str, str]]] = None,
    trace: Optional[FlowRunResult] = None,
    prompt_fn: Any = None,
    ask_user_fn: Any = None,
    print_fn: Any = None,
    approval_state: Optional[_ApprovalState] = None,
    on_answer_user: Any = None,
) -> None:
    """Drive a visual-flow session until completion or an external wait.

    Repeatedly ticks the top run and all of its descendants, streams step/answer
    records to the host, and services waits:

    - ``USER`` waits are answered interactively via ``ask_user_fn``.
    - ``EVENT`` waits carrying ``tool_calls`` are approved/executed via
      ``tool_runner``; event waits carrying a prompt are answered like USER waits;
      other event waits leave the run parked and return.
    - ``UNTIL`` waits either return immediately or (with ``wait_until=True``)
      sleep in <=60s increments until due.
    - ``SUBWORKFLOW`` waits return (the deepest waiting child is resolved first).

    Args:
        runner: Visual-flow runner exposing ``runtime``, ``workflow`` and ``run_id``.
        tool_runner: Executor used to run approved tool calls.
        auto_approve: Skip interactive approval of tool calls.
        wait_until: Block (sleep) through UNTIL waits instead of returning.
        verbosity: Step-record verbosity forwarded to ``_print_step_records``.
        node_meta: Optional node-id -> metadata mapping for step rendering.
        trace: Optional result object that collects executed tool calls.
        prompt_fn / ask_user_fn / print_fn / on_answer_user: Host I/O hooks;
            each falls back to stdin/stdout when None.
        approval_state: Shared approval memory (e.g. "always allow") across calls.

    Raises:
        RuntimeError: If the runner has no run_id, or the top run FAILED.
    """
    from abstractruntime.core.models import RunStatus, WaitReason

    runtime = runner.runtime
    top_run_id = runner.run_id
    if not isinstance(top_run_id, str) or not top_run_id:
        raise RuntimeError("Runner has no run_id")

    # Per-run offsets so already-streamed ledger records are not re-emitted.
    answer_offsets: Dict[str, int] = {}
    step_offsets: Dict[str, int] = {}
    approval = approval_state or _ApprovalState()
    meta = node_meta or {}

    _print = print_fn or print
    _prompt = prompt_fn or (lambda msg: input(msg))

    def _default_ask_user(prompt: str, choices: Optional[List[str]]) -> Optional[str]:
        # Fallback asker: list numbered choices (if any), then read one line.
        if isinstance(choices, list) and choices:
            for i, c in enumerate(choices):
                _print(f"[{i+1}] {c}")
        return input(prompt + " ").strip()

    _ask_user = ask_user_fn or _default_ask_user
    _emit_answer = on_answer_user or _print

    def _tick_ready_runs(run_ids: List[str]) -> None:
        # Advance every run that can make progress without external input:
        # RUNNING runs, and UNTIL-waiters (the runtime re-checks their deadline).
        for rid in run_ids:
            st = runtime.get_state(rid)
            if st.status == RunStatus.RUNNING:
                wf = _workflow_for(runtime, runner.workflow, st.workflow_id)
                runtime.tick(workflow=wf, run_id=rid, max_steps=10)
                continue
            if st.status == RunStatus.WAITING and st.waiting and st.waiting.reason == WaitReason.UNTIL:
                wf = _workflow_for(runtime, runner.workflow, st.workflow_id)
                runtime.tick(workflow=wf, run_id=rid, max_steps=10)

    while True:
        run_ids = _iter_descendants(runtime, top_run_id)
        _tick_ready_runs(run_ids)
        _print_step_records(runtime=runtime, run_ids=run_ids, offsets=step_offsets, emit=_print, verbosity=verbosity, node_meta=meta)
        _print_answer_user_records(runtime=runtime, run_ids=run_ids, offsets=answer_offsets, emit=_emit_answer)

        top = runtime.get_state(top_run_id)
        if top.status == RunStatus.COMPLETED:
            # Mirror VisualSessionRunner semantics: finish when children are idle listeners or terminal.
            all_idle_or_done = True
            for rid in run_ids:
                if rid == top_run_id:
                    continue
                st = runtime.get_state(rid)
                if st.status in (RunStatus.COMPLETED, RunStatus.FAILED, RunStatus.CANCELLED):
                    continue
                if st.status == RunStatus.WAITING and st.waiting and st.waiting.reason == WaitReason.EVENT:
                    continue
                all_idle_or_done = False
            if all_idle_or_done:
                # Cancel idle listeners so the session ends cleanly.
                for rid in run_ids:
                    if rid == top_run_id:
                        continue
                    st = runtime.get_state(rid)
                    if st.status == RunStatus.WAITING and st.waiting and st.waiting.reason == WaitReason.EVENT:
                        try:
                            runtime.cancel_run(rid, reason="Session completed")
                        except Exception:
                            pass
                return
            continue

        if top.status == RunStatus.FAILED:
            raise RuntimeError(top.error or "Flow failed")
        if top.status == RunStatus.CANCELLED:
            # FIX: route through _print so hosts supplying print_fn see this message
            # (previously used the bare builtin print, bypassing the host channel).
            _print("Run cancelled.")
            return

        if top.status != RunStatus.WAITING or top.waiting is None:
            continue

        # Resolve deepest waiting run for subworkflow chains.
        target_run_id = top_run_id
        while True:
            st = runtime.get_state(target_run_id)
            if st.status != RunStatus.WAITING or st.waiting is None:
                break
            if st.waiting.reason != WaitReason.SUBWORKFLOW:
                break
            nxt = _extract_sub_run_id(st.waiting)
            if not nxt:
                break
            target_run_id = nxt

        target = runtime.get_state(target_run_id)
        wait = target.waiting
        if wait is None:
            continue

        # Paused runs should be resumed via Runtime.resume_run().
        if wait.reason == WaitReason.USER and _is_pause_wait(wait, run_id=target_run_id):
            # FIX: route through _print (was bare builtin print).
            _print(f"Run is paused ({target_run_id}). Use `abstractcode flow resume-run` to continue.")
            return

        if wait.reason == WaitReason.USER:
            prompt = _render_text(getattr(wait, "prompt", None) or "Please respond:")
            choices = getattr(wait, "choices", None)
            if not isinstance(choices, list):
                choices = None
            response = _ask_user(prompt, choices)
            if response is None:
                # None from the asker means "abandon" — leave the run parked.
                _print("Left run waiting (not resumed).")
                return
            response = str(response).strip()
            _resume_and_bubble(
                runtime=runtime,
                runner_workflow=runner.workflow,
                top_run_id=top_run_id,
                target_run_id=target_run_id,
                payload={"response": response},
                wait_key=getattr(wait, "wait_key", None),
            )
            continue

        if wait.reason == WaitReason.EVENT:
            details = getattr(wait, "details", None)
            tool_calls = details.get("tool_calls") if isinstance(details, dict) else None
            if isinstance(tool_calls, list):
                # Tool-call event: approve (unless auto_approve) and execute.
                payload = _approve_and_execute(
                    tool_calls=tool_calls,
                    tool_runner=tool_runner,
                    auto_approve=auto_approve,
                    approval_state=approval,
                    prompt_fn=_prompt,
                    print_fn=_print,
                    trace=trace,
                )
                if payload is None:
                    _print("Left run waiting (not resumed).")
                    return
                _resume_and_bubble(
                    runtime=runtime,
                    runner_workflow=runner.workflow,
                    top_run_id=top_run_id,
                    target_run_id=target_run_id,
                    payload=payload,
                    wait_key=getattr(wait, "wait_key", None),
                )
                continue

            # Event waits can carry prompt/choices for durable human input.
            prompt = getattr(wait, "prompt", None)
            if isinstance(prompt, str) and prompt.strip():
                choices = getattr(wait, "choices", None)
                if not isinstance(choices, list):
                    choices = None
                response = _ask_user(_render_text(prompt), choices)
                if response is None:
                    _print("Left run waiting (not resumed).")
                    return
                response = str(response).strip()
                _resume_and_bubble(
                    runtime=runtime,
                    runner_workflow=runner.workflow,
                    top_run_id=top_run_id,
                    target_run_id=target_run_id,
                    payload={"response": response},
                    wait_key=getattr(wait, "wait_key", None),
                )
                continue

            # Opaque external event: nothing to do locally; leave the run parked.
            _print(f"Waiting for event: {getattr(wait, 'wait_key', None)}")
            return

        if wait.reason == WaitReason.UNTIL:
            until = getattr(wait, "until", None)
            _print(f"Waiting until: {until}")
            if not wait_until or not isinstance(until, str) or not until:
                return

            # Sleep in coarse increments to keep the CLI responsive.
            try:
                import datetime as _dt

                u = until
                # fromisoformat() on older Pythons rejects the 'Z' suffix.
                if u.endswith("Z"):
                    u = u[:-1] + "+00:00"
                due = _dt.datetime.fromisoformat(u)
                now = _dt.datetime.now(_dt.timezone.utc)
                delta_s = max(0.0, (due - now).total_seconds())
            except Exception:
                delta_s = 1.0
            time.sleep(min(delta_s, 60.0))
            continue

        if wait.reason == WaitReason.SUBWORKFLOW:
            # Sub-run id could not be resolved above; nothing to drive here.
            _print("Waiting for subworkflow…")
            return

        _print(f"Waiting: {wait.reason.value} ({getattr(wait, 'wait_key', None)})")
        return
|
|
942
|
+
|
|
943
|
+
|
|
944
|
+
def run_flow_command(
    *,
    flow_ref: str,
    flows_dir: Optional[str],
    input_json: Optional[str],
    input_file: Optional[str],
    params: List[str],
    extra_args: List[str],
    flow_state_file: Optional[str],
    no_state: bool,
    auto_approve: bool,
    wait_until: bool,
    verbosity: FlowVerbosity = "default",
    prompt_fn: Any = None,
    ask_user_fn: Any = None,
    print_fn: Any = None,
    on_answer_user: Any = None,
) -> FlowRunResult:
    """Start a VisualFlow run and drive it until it completes or blocks.

    Resolves ``flow_ref`` against ``flows_dir``, merges inputs from
    ``--input-json`` / ``--input-file`` / ``--param`` / unknown flags, validates
    required entry inputs, wires file-backed (or, with ``no_state``, in-memory)
    stores, starts the run, persists a run reference (unless ``no_state``), and
    drives it via :func:`_drive_until_blocked`.

    Returns:
        FlowRunResult: run identity, final observed status and executed tool calls.

    Raises:
        RuntimeError: If AbstractFlow is not installed, or the flow FAILED.
        ValueError: If required flow inputs are missing.
    """
    try:
        import abstractflow  # noqa: F401
    except Exception as e:
        raise RuntimeError(
            "AbstractFlow is required to run VisualFlow workflows.\n"
            "Install with: pip install \"abstractcode[flow]\""
        ) from e

    from abstractruntime.integrations.abstractcore import MappingToolExecutor, PassthroughToolExecutor
    from abstractruntime.integrations.abstractcore.default_tools import get_default_tools
    from abstractruntime.storage.artifacts import FileArtifactStore, InMemoryArtifactStore
    from abstractruntime.storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
    from abstractruntime.storage.json_files import JsonFileRunStore, JsonlLedgerStore

    vf, flows, flows_dir_path = _resolve_flow(flow_ref, flows_dir=flows_dir)
    input_data = _parse_input_json(raw_json=input_json, json_path=input_file)
    input_data.update(_parse_kv_list(params))
    input_data.update(_parse_unknown_params(extra_args))

    # Fail fast if the flow declares entry inputs and the user didn't provide them.
    required = _required_entry_inputs(vf)
    missing = [k for k in required if k not in input_data]
    if missing:
        missing_txt = ", ".join(missing)
        raise ValueError(
            f"Missing required flow inputs: {missing_txt}. "
            f"Provide them as flags (e.g. --{missing[0]} ...) or via --input-json/--input-file/--param."
        )

    # Stores: file-backed only when state is enabled.
    # FIX: compute state_path / store dir exactly once (previously state_path was
    # recomputed after runner.start() and _flow_store_dir() was called twice, with
    # the name `store_dir` reused for both a Path and an Optional[str]).
    state_path = Path(flow_state_file or default_flow_state_file()).expanduser().resolve()
    store_dir_path: Optional[Path] = None
    if no_state:
        run_store = InMemoryRunStore()
        ledger_store = InMemoryLedgerStore()
        artifact_store = InMemoryArtifactStore()
    else:
        state_path.parent.mkdir(parents=True, exist_ok=True)
        store_dir_path = _flow_store_dir(state_path)
        run_store = JsonFileRunStore(store_dir_path)
        ledger_store = JsonlLedgerStore(store_dir_path)
        artifact_store = FileArtifactStore(store_dir_path)

    tool_executor = PassthroughToolExecutor(mode="approval_required")
    tool_runner = MappingToolExecutor.from_tools(get_default_tools())

    from abstractflow.visual.executor import create_visual_runner

    runner = create_visual_runner(
        vf,
        flows=flows,
        run_store=run_store,
        ledger_store=ledger_store,
        artifact_store=artifact_store,
        tool_executor=tool_executor,
    )

    run_id = runner.start(input_data)
    trace = FlowRunResult(
        flow_id=str(vf.id),
        flow_name=str(getattr(vf, "name", "") or str(vf.id)),
        run_id=str(run_id),
        status="running",
        store_dir=str(store_dir_path) if store_dir_path is not None else None,
        tool_calls=[],
    )

    if not no_state:
        # Persist the current-run reference so resume/control commands can find it.
        _save_flow_ref(state_path, FlowRunRef(flow_id=str(vf.id), flows_dir=str(flows_dir_path), run_id=run_id))

    try:
        _drive_until_blocked(
            runner=runner,
            tool_runner=tool_runner,
            auto_approve=auto_approve,
            wait_until=wait_until,
            verbosity=verbosity,
            node_meta=_node_meta(vf),
            trace=trace,
            prompt_fn=prompt_fn,
            ask_user_fn=ask_user_fn,
            print_fn=print_fn,
            on_answer_user=on_answer_user,
        )
        try:
            trace.status = str(runner.runtime.get_state(run_id).status.value)
        except Exception:
            trace.status = "unknown"
        return trace
    except KeyboardInterrupt:
        # Best-effort: pause the whole run tree so schedulers/event emitters won't advance it.
        try:
            for rid in _iter_descendants(runner.runtime, run_id):
                runner.runtime.pause_run(rid, reason="Paused via AbstractCode (KeyboardInterrupt)")
        except Exception:
            pass
        # NOTE(review): deliberately the builtin print here — Ctrl-C feedback goes
        # to the terminal even when a host supplies print_fn; confirm if a host
        # channel is preferred.
        print("\nInterrupted. Run paused (best-effort).")
        try:
            trace.status = str(runner.runtime.get_state(run_id).status.value)
        except Exception:
            trace.status = "unknown"
        return trace
|
|
1067
|
+
|
|
1068
|
+
|
|
1069
|
+
def resume_flow_command(
    *,
    flow_state_file: Optional[str],
    no_state: bool,
    auto_approve: bool,
    wait_until: bool,
    verbosity: FlowVerbosity = "default",
    prompt_fn: Any = None,
    ask_user_fn: Any = None,
    print_fn: Any = None,
    on_answer_user: Any = None,
) -> FlowRunResult:
    """Re-attach to the saved flow run and drive it until it completes or blocks.

    Loads the run reference persisted by ``run_flow_command`` from
    ``flow_state_file``, reloads the flow definitions from the recorded flows
    directory, rebuilds a runner over the file-backed stores, best-effort
    unpauses the run tree, and continues via ``_drive_until_blocked``.

    Raises:
        RuntimeError: If AbstractFlow is not installed.
        ValueError: If ``no_state`` is set, no saved run reference exists,
            or the referenced flow is no longer present in the flows directory.
    """
    try:
        import abstractflow  # noqa: F401
    except Exception as e:
        raise RuntimeError(
            "AbstractFlow is required to run VisualFlow workflows.\n"
            "Install with: pip install \"abstractcode[flow]\""
        ) from e

    # Resuming requires the file-backed stores; in-memory runs cannot be reloaded.
    if no_state:
        raise ValueError("Cannot resume flows with --no-state (in-memory only).")

    state_path = Path(flow_state_file or default_flow_state_file()).expanduser().resolve()
    ref = _load_flow_ref(state_path)
    if ref is None:
        raise ValueError(f"No saved flow run found at {state_path}")

    flows_dir_path = Path(ref.flows_dir).expanduser().resolve()
    flows = _load_visual_flows(flows_dir_path)
    if ref.flow_id not in flows:
        raise ValueError(f"Flow '{ref.flow_id}' not found in {flows_dir_path}")
    vf = flows[ref.flow_id]

    from abstractruntime.integrations.abstractcore import MappingToolExecutor, PassthroughToolExecutor
    from abstractruntime.integrations.abstractcore.default_tools import get_default_tools
    from abstractruntime.storage.artifacts import FileArtifactStore
    from abstractruntime.storage.json_files import JsonFileRunStore, JsonlLedgerStore

    # Same file-backed store layout as run_flow_command, derived from state_path.
    store_dir = _flow_store_dir(state_path)
    run_store = JsonFileRunStore(store_dir)
    ledger_store = JsonlLedgerStore(store_dir)
    artifact_store = FileArtifactStore(store_dir)

    tool_executor = PassthroughToolExecutor(mode="approval_required")
    tool_runner = MappingToolExecutor.from_tools(get_default_tools())

    from abstractflow.visual.executor import create_visual_runner

    runner = create_visual_runner(
        vf,
        flows=flows,
        run_store=run_store,
        ledger_store=ledger_store,
        artifact_store=artifact_store,
        tool_executor=tool_executor,
    )

    # Attach to existing run id.
    # NOTE(review): reaches into a private attribute of the runner — there may be
    # no public attach API; confirm against abstractflow's VisualRunner.
    runner._current_run_id = ref.run_id  # type: ignore[attr-defined]
    trace = FlowRunResult(
        flow_id=str(vf.id),
        flow_name=str(getattr(vf, "name", "") or str(vf.id)),
        run_id=str(ref.run_id),
        status="running",
        store_dir=str(store_dir),
        tool_calls=[],
    )

    # Best-effort: if the run was paused, unpause it before continuing.
    try:
        for rid in _iter_descendants(runner.runtime, ref.run_id):
            runner.runtime.resume_run(rid)
    except Exception:
        pass

    _drive_until_blocked(
        runner=runner,
        tool_runner=tool_runner,
        auto_approve=auto_approve,
        wait_until=wait_until,
        verbosity=verbosity,
        node_meta=_node_meta(vf),
        trace=trace,
        prompt_fn=prompt_fn,
        ask_user_fn=ask_user_fn,
        print_fn=print_fn,
        on_answer_user=on_answer_user,
    )
    # Report the final status; fall back to "unknown" if the state can't be read.
    try:
        trace.status = str(runner.runtime.get_state(ref.run_id).status.value)
    except Exception:
        trace.status = "unknown"
    return trace
|
|
1163
|
+
|
|
1164
|
+
|
|
1165
|
+
def control_flow_command(
    *,
    action: str,
    flow_state_file: Optional[str],
) -> None:
    """Pause/resume-run/cancel the current flow run (best-effort includes descendants)."""
    state_path = Path(flow_state_file or default_flow_state_file()).expanduser().resolve()
    ref = _load_flow_ref(state_path)
    if ref is None:
        raise ValueError(f"No saved flow run found at {state_path}")

    store_dir = _flow_store_dir(state_path)
    from abstractruntime.storage.json_files import JsonFileRunStore, JsonlLedgerStore
    from abstractruntime.storage.artifacts import FileArtifactStore
    from abstractruntime import Runtime

    # Rebuild a runtime over the same file-backed stores the run was started with.
    runtime = Runtime(
        run_store=JsonFileRunStore(store_dir),
        ledger_store=JsonlLedgerStore(store_dir),
        artifact_store=FileArtifactStore(store_dir),
    )
    run_ids = _iter_descendants(runtime, ref.run_id)

    # Normalize the requested action before dispatching.
    action2 = str(action or "").strip().lower()
    if action2 == "pause":
        for rid in run_ids:
            runtime.pause_run(rid, reason="Paused via AbstractCode")
        print(f"Paused {len(run_ids)} run(s).")
    elif action2 == "resume":
        for rid in run_ids:
            runtime.resume_run(rid)
        print(f"Resumed {len(run_ids)} run(s).")
    elif action2 == "cancel":
        for rid in run_ids:
            runtime.cancel_run(rid, reason="Cancelled via AbstractCode")
        print(f"Cancelled {len(run_ids)} run(s).")
    else:
        raise ValueError(f"Unknown control action: {action2}")
|
|
1206
|
+
|
|
1207
|
+
|
|
1208
|
+
def list_flow_runs_command(
    *,
    flow_state_file: Optional[str],
    limit: int = 20,
) -> None:
    """List recent flow runs from the configured flow store directory."""
    state_path = Path(flow_state_file or default_flow_state_file()).expanduser().resolve()
    store_dir = _flow_store_dir(state_path)

    if not store_dir.exists():
        print(f"No flow run store found at {store_dir}")
        return

    from abstractruntime.core.models import RunStatus, WaitReason
    from abstractruntime.storage.json_files import JsonFileRunStore

    # Remember which run is the "current" one so it can be starred in the listing.
    ref = _load_flow_ref(state_path)
    active_run_id = ref.run_id if ref else None

    records = JsonFileRunStore(store_dir).list_runs(limit=int(limit or 20))
    if not records:
        print("No runs found.")
        return

    print(f"Store: {store_dir}")
    print("Most recent runs:")
    for rec in records:
        marker = "*" if active_run_id and rec.run_id == active_run_id else " "
        wait_txt = ""
        w = rec.waiting
        if rec.status == RunStatus.WAITING and w is not None:
            reason = w.reason.value if isinstance(w.reason, WaitReason) else str(w.reason)
            wait_txt = f" waiting={reason} key={w.wait_key}"
        updated = rec.updated_at or rec.created_at or ""
        print(f"{marker} {rec.run_id} wf={rec.workflow_id} {rec.status.value} {updated}{wait_txt}")
|
|
1244
|
+
|
|
1245
|
+
|
|
1246
|
+
def attach_flow_run_command(
    *,
    run_id: str,
    flows_dir: Optional[str],
    flow_state_file: Optional[str],
) -> None:
    """Set the current flow run reference to an existing run_id."""
    target_id = str(run_id or "").strip()
    if not target_id:
        raise ValueError("run_id is required")

    state_path = Path(flow_state_file or default_flow_state_file()).expanduser().resolve()
    store_dir = _flow_store_dir(state_path)
    if not store_dir.exists():
        raise ValueError(f"No flow run store found at {store_dir}")

    from abstractruntime.storage.json_files import JsonFileRunStore

    run = JsonFileRunStore(store_dir).load(target_id)
    if run is None:
        raise ValueError(f"Run not found: {target_id}")

    # Resolve the flows directory: explicit flag > previously saved ref > default.
    current = _load_flow_ref(state_path)
    if flows_dir:
        flows_dir_path = Path(flows_dir).expanduser().resolve()
    elif current and current.flows_dir:
        flows_dir_path = Path(current.flows_dir).expanduser().resolve()
    else:
        flows_dir_path = default_flows_dir().resolve()

    flows = _load_visual_flows(flows_dir_path)

    # Prefer the run's own workflow_id; fall back to the previously saved flow id.
    flow_id = None
    if isinstance(run.workflow_id, str) and run.workflow_id in flows:
        flow_id = run.workflow_id
    elif current and current.flow_id in flows:
        flow_id = current.flow_id

    if not flow_id:
        raise ValueError(
            f"Cannot infer flow id for run '{target_id}' (workflow_id='{run.workflow_id}'). "
            "Provide --flows-dir pointing at the VisualFlow JSON directory."
        )

    _save_flow_ref(state_path, FlowRunRef(flow_id=str(flow_id), flows_dir=str(flows_dir_path), run_id=target_id))
    print(f"Attached flow run: {target_id} (flow={flow_id})")
|
|
1293
|
+
|
|
1294
|
+
|
|
1295
|
+
def emit_flow_event_command(
    *,
    name: Optional[str],
    wait_key: Optional[str],
    scope: str,
    payload_json: Optional[str],
    payload_file: Optional[str],
    session_id: Optional[str],
    max_steps: int,
    flows_dir: Optional[str],
    flow_state_file: Optional[str],
    auto_approve: bool,
) -> None:
    """Emit a custom event (name/scope) or resume a raw wait_key."""
    # Exactly one addressing mode: a named scoped event, or a raw wait key.
    if bool(name) == bool(wait_key):
        raise ValueError("Provide exactly one of --name or --wait-key")

    state_path = Path(flow_state_file or default_flow_state_file()).expanduser().resolve()
    ref = _load_flow_ref(state_path)
    if ref is None:
        raise ValueError(f"No saved flow run found at {state_path}")

    # Flows directory: explicit flag overrides the directory recorded in the ref.
    flows_dir_path = Path(flows_dir).expanduser().resolve() if flows_dir else Path(ref.flows_dir).expanduser().resolve()
    flows = _load_visual_flows(flows_dir_path)
    if ref.flow_id not in flows:
        raise ValueError(f"Flow '{ref.flow_id}' not found in {flows_dir_path}")
    vf = flows[ref.flow_id]

    def _load_payload() -> Dict[str, Any]:
        # Load the event payload from --payload-file or --payload-json (mutually
        # exclusive); non-dict JSON is wrapped as {"value": ...}.
        if payload_json and payload_file:
            raise ValueError("Provide either --payload-json or --payload-file, not both.")
        if payload_file:
            raw = _read_json(Path(payload_file).expanduser().resolve())
            if isinstance(raw, dict):
                return dict(raw)
            return {"value": raw}
        if payload_json:
            raw = json.loads(payload_json)
            if isinstance(raw, dict):
                return dict(raw)
            return {"value": raw}
        return {}

    payload = _load_payload()

    from abstractruntime.integrations.abstractcore import MappingToolExecutor, PassthroughToolExecutor
    from abstractruntime.integrations.abstractcore.default_tools import get_default_tools
    from abstractruntime.storage.artifacts import FileArtifactStore
    from abstractruntime.storage.json_files import JsonFileRunStore, JsonlLedgerStore

    # Same file-backed store layout as run/resume, derived from state_path.
    store_dir = _flow_store_dir(state_path)
    run_store = JsonFileRunStore(store_dir)
    ledger_store = JsonlLedgerStore(store_dir)
    artifact_store = FileArtifactStore(store_dir)

    tool_executor = PassthroughToolExecutor(mode="approval_required")
    tool_runner = MappingToolExecutor.from_tools(get_default_tools())

    from abstractflow.visual.executor import create_visual_runner

    runner = create_visual_runner(
        vf,
        flows=flows,
        run_store=run_store,
        ledger_store=ledger_store,
        artifact_store=artifact_store,
        tool_executor=tool_executor,
    )
    # Attach the runner to the saved run (private attribute — no public attach API here).
    runner._current_run_id = ref.run_id  # type: ignore[attr-defined]

    runtime = runner.runtime
    # Ensure the runtime has a workflow registry (needed by the Scheduler below);
    # best-effort: only when the runtime supports it and the import succeeds.
    reg = getattr(runtime, "workflow_registry", None)
    if reg is None and hasattr(runtime, "set_workflow_registry"):
        try:
            from abstractruntime.scheduler.registry import WorkflowRegistry
        except Exception:
            WorkflowRegistry = None  # type: ignore[assignment]
        if WorkflowRegistry is not None:
            registry = WorkflowRegistry()
            registry.register(runner.workflow)
            runtime.set_workflow_registry(registry)

    if name:
        # Named-event path: emit via the scheduler; session scope defaults to
        # the saved run id when no session was given.
        from abstractruntime.scheduler.scheduler import Scheduler

        scope2 = str(scope or "session").strip().lower() or "session"
        sess = session_id
        if sess is None and scope2 == "session":
            sess = ref.run_id

        scheduler = Scheduler(runtime=runtime, registry=runtime.workflow_registry)  # type: ignore[arg-type]
        resumed = scheduler.emit_event(
            name=str(name),
            payload=payload,
            scope=scope2,
            session_id=sess,
            max_steps=int(max_steps or 0),
        )
        print(f"Emitted event '{name}' scope={scope2} resumed={len(resumed)}")
    else:
        # Raw wait_key resumption: resume all WAITING EVENT runs that match this key.
        wk = str(wait_key or "").strip()
        if not wk:
            raise ValueError("--wait-key must be non-empty")

        from abstractruntime.core.models import RunStatus, WaitReason

        candidates = runtime.run_store.list_runs(status=RunStatus.WAITING, wait_reason=WaitReason.EVENT, limit=10_000)
        resumed_count = 0
        for r in candidates:
            if r.waiting is None or r.waiting.wait_key != wk:
                continue
            wf = _workflow_for(runtime, runner.workflow, r.workflow_id)
            runtime.resume(workflow=wf, run_id=r.run_id, wait_key=wk, payload=payload, max_steps=int(max_steps or 0))
            resumed_count += 1
        print(f"Resumed wait_key '{wk}' runs={resumed_count}")

    # Drive the session until it blocks again.
    _drive_until_blocked(runner=runner, tool_runner=tool_runner, auto_approve=bool(auto_approve), wait_until=False)
|