abstractflow 0.1.0__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. abstractflow/__init__.py +74 -94
  2. abstractflow/__main__.py +2 -0
  3. abstractflow/adapters/__init__.py +11 -0
  4. abstractflow/adapters/agent_adapter.py +5 -0
  5. abstractflow/adapters/control_adapter.py +5 -0
  6. abstractflow/adapters/effect_adapter.py +5 -0
  7. abstractflow/adapters/event_adapter.py +5 -0
  8. abstractflow/adapters/function_adapter.py +5 -0
  9. abstractflow/adapters/subflow_adapter.py +5 -0
  10. abstractflow/adapters/variable_adapter.py +5 -0
  11. abstractflow/cli.py +75 -28
  12. abstractflow/compiler.py +23 -0
  13. abstractflow/core/__init__.py +5 -0
  14. abstractflow/core/flow.py +11 -0
  15. abstractflow/py.typed +2 -0
  16. abstractflow/runner.py +402 -0
  17. abstractflow/visual/__init__.py +43 -0
  18. abstractflow/visual/agent_ids.py +5 -0
  19. abstractflow/visual/builtins.py +5 -0
  20. abstractflow/visual/code_executor.py +5 -0
  21. abstractflow/visual/event_ids.py +33 -0
  22. abstractflow/visual/executor.py +968 -0
  23. abstractflow/visual/interfaces.py +440 -0
  24. abstractflow/visual/models.py +277 -0
  25. abstractflow/visual/session_runner.py +182 -0
  26. abstractflow/visual/workspace_scoped_tools.py +29 -0
  27. abstractflow/workflow_bundle.py +290 -0
  28. abstractflow-0.3.1.dist-info/METADATA +186 -0
  29. abstractflow-0.3.1.dist-info/RECORD +33 -0
  30. {abstractflow-0.1.0.dist-info → abstractflow-0.3.1.dist-info}/WHEEL +1 -1
  31. {abstractflow-0.1.0.dist-info → abstractflow-0.3.1.dist-info}/licenses/LICENSE +2 -0
  32. abstractflow-0.1.0.dist-info/METADATA +0 -238
  33. abstractflow-0.1.0.dist-info/RECORD +0 -10
  34. {abstractflow-0.1.0.dist-info → abstractflow-0.3.1.dist-info}/entry_points.txt +0 -0
  35. {abstractflow-0.1.0.dist-info → abstractflow-0.3.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,968 @@
1
+ """Portable visual-flow execution utilities.
2
+
3
+ This module wires the VisualFlow authoring DSL (JSON) to AbstractRuntime for
4
+ durable execution. Compilation semantics (VisualFlow → Flow → WorkflowSpec) are
5
+ delegated to `abstractruntime.visualflow_compiler` so there is a single
6
+ semantics engine across the framework.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import os
12
+ import hashlib
13
+ import threading
14
+ from typing import Any, Dict, Optional, cast
15
+
16
+ from ..core.flow import Flow
17
+ from ..runner import FlowRunner
18
+
19
+ from .agent_ids import visual_react_workflow_id
20
+ from .models import VisualFlow
21
+
22
+
23
# Guards reads/writes of _MEMORY_KG_STORE_CACHE when runners are created concurrently.
_MEMORY_KG_STORE_CACHE_LOCK = threading.Lock()
# Process-wide cache of LanceDB-backed KG stores.
#
# Keyed by (store_base_dir, gateway_url, token_fingerprint).
#
# Why include the token fingerprint:
# - The embedder captures the auth token at construction time.
# - The UI can set/update the token at runtime (without restarting the backend).
# - If we didn't key by token, we'd keep using a cached store with a stale token and get 401s.
_MEMORY_KG_STORE_CACHE: dict[tuple[str, str, str], Any] = {}
31
+
32
+ def _resolve_gateway_auth_token() -> str | None:
33
+ """Resolve the gateway auth token for host-to-gateway calls.
34
+
35
+ Canonical env vars:
36
+ - ABSTRACTGATEWAY_AUTH_TOKEN
37
+ - ABSTRACTFLOW_GATEWAY_AUTH_TOKEN (legacy compatibility)
38
+
39
+ Additional host fallbacks:
40
+ - ABSTRACTCODE_GATEWAY_TOKEN (AbstractCode CLI convention)
41
+ - ABSTRACTGATEWAY_AUTH_TOKENS / ABSTRACTFLOW_GATEWAY_AUTH_TOKENS (first token)
42
+ """
43
+ candidates = [
44
+ "ABSTRACTGATEWAY_AUTH_TOKEN",
45
+ "ABSTRACTFLOW_GATEWAY_AUTH_TOKEN",
46
+ "ABSTRACTCODE_GATEWAY_TOKEN",
47
+ ]
48
+ for name in candidates:
49
+ raw = os.getenv(name)
50
+ token = str(raw or "").strip()
51
+ if token:
52
+ return token
53
+
54
+ token_lists = [
55
+ "ABSTRACTGATEWAY_AUTH_TOKENS",
56
+ "ABSTRACTFLOW_GATEWAY_AUTH_TOKENS",
57
+ ]
58
+ for name in token_lists:
59
+ raw = os.getenv(name)
60
+ if not isinstance(raw, str) or not raw.strip():
61
+ continue
62
+ first = raw.split(",", 1)[0].strip()
63
+ if first:
64
+ return first
65
+
66
+ return None
67
+
68
+
69
+ def create_visual_runner(
70
+ visual_flow: VisualFlow,
71
+ *,
72
+ flows: Dict[str, VisualFlow],
73
+ run_store: Optional[Any] = None,
74
+ ledger_store: Optional[Any] = None,
75
+ artifact_store: Optional[Any] = None,
76
+ tool_executor: Optional[Any] = None,
77
+ input_data: Optional[Dict[str, Any]] = None,
78
+ ) -> FlowRunner:
79
+ """Create a FlowRunner for a visual run with a correctly wired runtime.
80
+
81
+ Responsibilities:
82
+ - Build a WorkflowRegistry containing the root flow and any referenced subflows.
83
+ - Create a runtime with an ArtifactStore (required for MEMORY_* effects).
84
+ - If any LLM_CALL / Agent nodes exist in the flow tree, wire AbstractCore-backed
85
+ effect handlers (via AbstractRuntime's integration module).
86
+
87
+ Notes:
88
+ - When LLM nodes rely on *connected* provider/model pins (e.g. from ON_FLOW_START),
89
+ this runner still needs a default provider/model to initialize runtime capabilities.
90
+ We use `input_data["provider"]`/`input_data["model"]` when provided, otherwise fall
91
+ back to static pin defaults (best-effort).
92
+ """
93
+ # Be resilient to different AbstractRuntime install layouts: not all exports
94
+ # are guaranteed to be re-exported from `abstractruntime.__init__`.
95
+ try:
96
+ from abstractruntime import Runtime # type: ignore
97
+ except Exception: # pragma: no cover
98
+ from abstractruntime.core.runtime import Runtime # type: ignore
99
+
100
+ try:
101
+ from abstractruntime import InMemoryRunStore, InMemoryLedgerStore # type: ignore
102
+ except Exception: # pragma: no cover
103
+ from abstractruntime.storage.in_memory import InMemoryRunStore, InMemoryLedgerStore # type: ignore
104
+
105
+ # Workflow registry is used for START_SUBWORKFLOW composition (subflows + Agent nodes).
106
+ #
107
+ # This project supports different AbstractRuntime distributions; some older installs
108
+ # may not expose WorkflowRegistry. In that case, fall back to a tiny in-process
109
+ # dict-based registry with the same `.register()` + `.get()` surface.
110
+ try:
111
+ from abstractruntime import WorkflowRegistry # type: ignore
112
+ except Exception: # pragma: no cover
113
+ try:
114
+ from abstractruntime.scheduler.registry import WorkflowRegistry # type: ignore
115
+ except Exception: # pragma: no cover
116
+ from abstractruntime.core.spec import WorkflowSpec # type: ignore
117
+
118
+ class WorkflowRegistry(dict): # type: ignore[no-redef]
119
+ def register(self, workflow: "WorkflowSpec") -> None:
120
+ self[str(workflow.workflow_id)] = workflow
121
+
122
+ from ..compiler import compile_flow
123
+ from .event_ids import visual_event_listener_workflow_id
124
+ from .session_runner import VisualSessionRunner
125
+
126
+ def _node_type(node: Any) -> str:
127
+ t = getattr(node, "type", None)
128
+ return t.value if hasattr(t, "value") else str(t)
129
+
130
def _reachable_exec_node_ids(vf: VisualFlow) -> set[str]:
    """Return execution-reachable node ids (within this VisualFlow only).

    We consider only the *execution graph* (exec edges: targetHandle=exec-in).
    Disconnected/isolated execution nodes are ignored (Blueprint-style).
    """
    # Closed set of node types that participate in the execution graph.
    # Pure-data nodes (literals, provider_models, ...) are intentionally absent.
    EXEC_TYPES: set[str] = {
        # Triggers / core exec
        "on_flow_start",
        "on_user_request",
        "on_agent_message",
        "on_schedule",
        "on_event",
        "on_flow_end",
        "agent",
        "function",
        "code",
        "subflow",
        # Workflow variables (execution setter)
        "set_var",
        "set_vars",
        "set_var_property",
        # Control exec
        "if",
        "switch",
        "loop",
        "while",
        "for",
        "sequence",
        "parallel",
        # Effects
        "ask_user",
        "answer_user",
        "llm_call",
        "tool_calls",
        "wait_until",
        "wait_event",
        "emit_event",
        "read_file",
        "write_file",
        "memory_note",
        "memory_query",
        "memory_tag",
        "memory_compact",
        "memory_rehydrate",
        "memory_kg_assert",
        "memory_kg_query",
        "memact_compose",
    }

    # Restrict the search to execution-capable nodes.
    node_types: Dict[str, str] = {n.id: _node_type(n) for n in vf.nodes}
    exec_ids = {nid for nid, t in node_types.items() if t in EXEC_TYPES}
    if not exec_ids:
        return set()

    # Targets of any exec edge — used only for the "no declared root" fallback below.
    incoming_exec = {e.target for e in vf.edges if getattr(e, "targetHandle", None) == "exec-in"}

    roots: list[str] = []
    if isinstance(vf.entryNode, str) and vf.entryNode in exec_ids:
        roots.append(vf.entryNode)
    # Custom events are independent entrypoints; include them as roots for "executable" reachability.
    for n in vf.nodes:
        if n.id in exec_ids and node_types.get(n.id) == "on_event":
            roots.append(n.id)

    if not roots:
        # Fallback: infer a single root as "exec node with no incoming edge".
        for n in vf.nodes:
            if n.id in exec_ids and n.id not in incoming_exec:
                roots.append(n.id)
                break
        if not roots:
            # Last resort: pick an arbitrary exec node (set iteration order).
            roots.append(next(iter(exec_ids)))

    # Adjacency over exec edges only, keeping both endpoints inside exec_ids.
    adj: Dict[str, list[str]] = {}
    for e in vf.edges:
        if getattr(e, "targetHandle", None) != "exec-in":
            continue
        if e.source not in exec_ids or e.target not in exec_ids:
            continue
        adj.setdefault(e.source, []).append(e.target)

    # Iterative DFS from all roots; dict.fromkeys dedupes while preserving root order.
    reachable: set[str] = set()
    stack2 = list(dict.fromkeys([r for r in roots if isinstance(r, str) and r]))
    while stack2:
        cur = stack2.pop()
        if cur in reachable:
            continue
        reachable.add(cur)
        for nxt in adj.get(cur, []):
            if nxt not in reachable:
                stack2.append(nxt)
    return reachable
223
+
224
+ # Collect all reachable flows (root + transitive subflows).
225
+ #
226
+ # Important: subflows are executed via runtime `START_SUBWORKFLOW` by workflow id.
227
+ # This means subflow cycles (including self-recursion) are valid and should not be
228
+ # rejected at runner-wiring time; we only need to register each workflow id once.
229
+ ordered: list[VisualFlow] = []
230
+ visited: set[str] = set()
231
+
232
def _dfs(vf: VisualFlow) -> None:
    """Depth-first collect `vf` and every reachable subflow into `ordered`.

    `visited` (flow ids) prevents re-registration and makes subflow cycles —
    including self-recursion — terminate instead of recursing forever.
    """
    if vf.id in visited:
        return
    visited.add(vf.id)
    ordered.append(vf)

    # Only follow subflow nodes that are execution-reachable in this flow.
    reachable = _reachable_exec_node_ids(vf)
    for n in vf.nodes:
        node_type = _node_type(n)
        if node_type != "subflow":
            continue
        if reachable and n.id not in reachable:
            continue
        subflow_id = n.data.get("subflowId") or n.data.get("flowId")  # legacy
        if not isinstance(subflow_id, str) or not subflow_id.strip():
            raise ValueError(f"Subflow node '{n.id}' missing subflowId")
        subflow_id = subflow_id.strip()
        child = flows.get(subflow_id)
        # Self-recursion should work even if `flows` does not redundantly include this vf.
        if child is None and subflow_id == vf.id:
            child = vf
        if child is None:
            raise ValueError(f"Referenced subflow '{subflow_id}' not found")
        _dfs(child)
256
+
257
+ _dfs(visual_flow)
258
+
259
+ # Detect optional runtime features needed by this flow tree.
260
+ # These flags keep `create_visual_runner()` resilient to older AbstractRuntime installs.
261
+ needs_registry = False
262
+ needs_artifacts = False
263
+ needs_memory_kg = False
264
+ for vf in ordered:
265
+ reachable = _reachable_exec_node_ids(vf)
266
+ for n in vf.nodes:
267
+ if reachable and n.id not in reachable:
268
+ continue
269
+ t = _node_type(n)
270
+ if t in {"subflow", "agent"}:
271
+ needs_registry = True
272
+ if t in {"on_event", "emit_event"}:
273
+ needs_registry = True
274
+ if t in {"memory_note", "memory_query", "memory_rehydrate", "memory_compact"}:
275
+ needs_artifacts = True
276
+ if t in {"memory_kg_assert", "memory_kg_query"}:
277
+ needs_memory_kg = True
278
+
279
+ # Detect whether this flow tree needs AbstractCore LLM integration.
280
+ # Provider/model can be supplied either via node config *or* via connected input pins.
281
+ has_llm_nodes = False
282
+ llm_configs: set[tuple[str, str]] = set()
283
+ default_llm: tuple[str, str] | None = None
284
+ provider_hints: list[str] = []
285
+
286
+ def _pin_connected(vf: VisualFlow, *, node_id: str, pin_id: str) -> bool:
287
+ for e in vf.edges:
288
+ try:
289
+ if e.target == node_id and e.targetHandle == pin_id:
290
+ return True
291
+ except Exception:
292
+ continue
293
+ return False
294
+
295
def _infer_connected_pin_default(vf: VisualFlow, *, node_id: str, pin_id: str) -> Optional[str]:
    """Best-effort static inference for a connected pin's default value.

    This is used only to pick a reasonable *default* provider/model for the runtime
    (capabilities, limits, etc). Per-node/provider routing still happens at execution
    time via effect payloads.

    Returns the stripped string default when one can be read from the source
    node's ``pinDefaults`` or ``literalValue``, else ``None``.
    """
    try:
        for e in vf.edges:
            if e.target != node_id or e.targetHandle != pin_id:
                continue
            source_id = getattr(e, "source", None)
            if not isinstance(source_id, str) or not source_id:
                continue
            # Fall back to the target pin id when the edge carries no source handle.
            source_handle = getattr(e, "sourceHandle", None)
            if not isinstance(source_handle, str) or not source_handle:
                source_handle = pin_id

            # NOTE(review): only the first well-formed matching edge is examined —
            # every branch below returns, so later edges to the same pin are ignored.
            src = next((n for n in vf.nodes if getattr(n, "id", None) == source_id), None)
            if src is None:
                return None
            data = getattr(src, "data", None)
            if not isinstance(data, dict):
                return None

            # Preferred: explicit per-pin defaults on the source node.
            pin_defaults = data.get("pinDefaults")
            if isinstance(pin_defaults, dict) and source_handle in pin_defaults:
                v = pin_defaults.get(source_handle)
                if isinstance(v, str) and v.strip():
                    return v.strip()

            # Fallback: literal-value nodes (plain string or {default|<handle>: str}).
            literal_value = data.get("literalValue")
            if isinstance(literal_value, str) and literal_value.strip():
                return literal_value.strip()
            if isinstance(literal_value, dict):
                dv = literal_value.get("default")
                if isinstance(dv, str) and dv.strip():
                    return dv.strip()
                vv = literal_value.get(source_handle)
                if isinstance(vv, str) and vv.strip():
                    return vv.strip()
            return None
    except Exception:
        # Best-effort: any unexpected shape yields "no default" rather than failing.
        return None

    return None
341
+
342
def _add_pair(provider_raw: Any, model_raw: Any) -> None:
    """Record a (provider, model) pair when both are non-empty strings.

    Normalizes to (lowercased provider, stripped model), accumulates into
    `llm_configs`, and latches the first pair seen as `default_llm`.
    """
    nonlocal default_llm
    if not isinstance(provider_raw, str) or not provider_raw.strip():
        return
    if not isinstance(model_raw, str) or not model_raw.strip():
        return
    pair = (provider_raw.strip().lower(), model_raw.strip())
    llm_configs.add(pair)
    # First valid pair wins as the runtime default (call order matters).
    if default_llm is None:
        default_llm = pair
352
+
353
+ # Prefer run inputs for the runtime default provider/model when available.
354
+ # This avoids expensive provider probing and makes model capability detection match
355
+ # what the user selected in the Run Flow modal.
356
+ if isinstance(input_data, dict):
357
+ _add_pair(input_data.get("provider"), input_data.get("model"))
358
+
359
+ for vf in ordered:
360
+ reachable = _reachable_exec_node_ids(vf)
361
+ for n in vf.nodes:
362
+ node_type = _node_type(n)
363
+ if reachable and n.id not in reachable:
364
+ continue
365
+ if node_type in {"llm_call", "agent", "tool_calls", "memory_compact"}:
366
+ has_llm_nodes = True
367
+
368
+ if node_type == "llm_call":
369
+ cfg = n.data.get("effectConfig", {}) if isinstance(n.data, dict) else {}
370
+ cfg = cfg if isinstance(cfg, dict) else {}
371
+ provider = cfg.get("provider")
372
+ model = cfg.get("model")
373
+
374
+ provider_ok = isinstance(provider, str) and provider.strip()
375
+ model_ok = isinstance(model, str) and model.strip()
376
+ provider_connected = _pin_connected(vf, node_id=n.id, pin_id="provider")
377
+ model_connected = _pin_connected(vf, node_id=n.id, pin_id="model")
378
+
379
+ if not provider_ok and not provider_connected:
380
+ raise ValueError(
381
+ f"LLM_CALL node '{n.id}' in flow '{vf.id}' missing provider "
382
+ "(set effectConfig.provider or connect the provider input pin)"
383
+ )
384
+ if not model_ok and not model_connected:
385
+ raise ValueError(
386
+ f"LLM_CALL node '{n.id}' in flow '{vf.id}' missing model "
387
+ "(set effectConfig.model or connect the model input pin)"
388
+ )
389
+ provider_default = (
390
+ provider
391
+ if provider_ok
392
+ else _infer_connected_pin_default(vf, node_id=n.id, pin_id="provider")
393
+ if provider_connected
394
+ else None
395
+ )
396
+ model_default = (
397
+ model
398
+ if model_ok
399
+ else _infer_connected_pin_default(vf, node_id=n.id, pin_id="model")
400
+ if model_connected
401
+ else None
402
+ )
403
+ _add_pair(provider_default, model_default)
404
+
405
+ elif node_type == "memory_compact":
406
+ cfg = n.data.get("effectConfig", {}) if isinstance(n.data, dict) else {}
407
+ cfg = cfg if isinstance(cfg, dict) else {}
408
+ provider = cfg.get("provider")
409
+ model = cfg.get("model")
410
+
411
+ provider_ok = isinstance(provider, str) and provider.strip()
412
+ model_ok = isinstance(model, str) and model.strip()
413
+ provider_connected = _pin_connected(vf, node_id=n.id, pin_id="provider")
414
+ model_connected = _pin_connected(vf, node_id=n.id, pin_id="model")
415
+
416
+ provider_default = (
417
+ provider
418
+ if provider_ok
419
+ else _infer_connected_pin_default(vf, node_id=n.id, pin_id="provider")
420
+ if provider_connected
421
+ else None
422
+ )
423
+ model_default = (
424
+ model
425
+ if model_ok
426
+ else _infer_connected_pin_default(vf, node_id=n.id, pin_id="model")
427
+ if model_connected
428
+ else None
429
+ )
430
+ _add_pair(provider_default, model_default)
431
+
432
+ elif node_type == "agent":
433
+ cfg = n.data.get("agentConfig", {}) if isinstance(n.data, dict) else {}
434
+ cfg = cfg if isinstance(cfg, dict) else {}
435
+ provider = cfg.get("provider")
436
+ model = cfg.get("model")
437
+
438
+ provider_ok = isinstance(provider, str) and provider.strip()
439
+ model_ok = isinstance(model, str) and model.strip()
440
+ provider_connected = _pin_connected(vf, node_id=n.id, pin_id="provider")
441
+ model_connected = _pin_connected(vf, node_id=n.id, pin_id="model")
442
+
443
+ if not provider_ok and not provider_connected:
444
+ raise ValueError(
445
+ f"Agent node '{n.id}' in flow '{vf.id}' missing provider "
446
+ "(set agentConfig.provider or connect the provider input pin)"
447
+ )
448
+ if not model_ok and not model_connected:
449
+ raise ValueError(
450
+ f"Agent node '{n.id}' in flow '{vf.id}' missing model "
451
+ "(set agentConfig.model or connect the model input pin)"
452
+ )
453
+ provider_default = (
454
+ provider
455
+ if provider_ok
456
+ else _infer_connected_pin_default(vf, node_id=n.id, pin_id="provider")
457
+ if provider_connected
458
+ else None
459
+ )
460
+ model_default = (
461
+ model
462
+ if model_ok
463
+ else _infer_connected_pin_default(vf, node_id=n.id, pin_id="model")
464
+ if model_connected
465
+ else None
466
+ )
467
+ _add_pair(provider_default, model_default)
468
+
469
+ elif node_type == "provider_models":
470
+ cfg = n.data.get("providerModelsConfig", {}) if isinstance(n.data, dict) else {}
471
+ cfg = cfg if isinstance(cfg, dict) else {}
472
+ provider = cfg.get("provider")
473
+ if isinstance(provider, str) and provider.strip():
474
+ provider_hints.append(provider.strip().lower())
475
+ allowed = cfg.get("allowedModels")
476
+ if not isinstance(allowed, list):
477
+ allowed = cfg.get("allowed_models")
478
+ if isinstance(allowed, list):
479
+ for m in allowed:
480
+ _add_pair(provider, m)
481
+
482
+ extra_effect_handlers: Dict[Any, Any] = {}
483
+ if needs_memory_kg:
484
+ try:
485
+ # Dev convenience (monorepo):
486
+ #
487
+ # When running from source (without installing each package), `import abstractmemory`
488
+ # can resolve to the *project directory* (namespace package, no exports) instead of
489
+ # the src-layout package at `abstractmemory/src/abstractmemory`.
490
+ #
491
+ # Add the src-layout path when it exists so VisualFlows with `memory_kg_*` nodes
492
+ # work out-of-the-box in local dev environments.
493
+ import sys
494
+ from pathlib import Path
495
+
496
+ repo_root = Path(__file__).resolve().parents[3] # .../abstractframework
497
+ mem_src = repo_root / "abstractmemory" / "src"
498
+ if mem_src.is_dir():
499
+ mem_src_str = str(mem_src)
500
+ try:
501
+ sys.path.remove(mem_src_str)
502
+ except ValueError:
503
+ pass
504
+ sys.path.insert(0, mem_src_str)
505
+
506
+ from abstractmemory import LanceDBTripleStore
507
+ from abstractruntime.integrations.abstractmemory.effect_handlers import build_memory_kg_effect_handlers
508
+ from abstractruntime.storage.artifacts import utc_now_iso
509
+ except Exception as e:
510
+ raise RuntimeError(
511
+ "This flow uses memory_kg_* nodes, but AbstractMemory integration is not available. "
512
+ "Install `abstractmemory` (and optionally `abstractmemory[lancedb]`)."
513
+ ) from e
514
+
515
+ # Ensure stores exist so KG handlers can resolve run-tree scope fallbacks.
516
+ if run_store is None:
517
+ run_store = InMemoryRunStore()
518
+ if ledger_store is None:
519
+ ledger_store = InMemoryLedgerStore()
520
+
521
+ base_dir = None
522
+ mem_dir_raw = os.getenv("ABSTRACTMEMORY_DIR") or os.getenv("ABSTRACTFLOW_MEMORY_DIR")
523
+ if isinstance(mem_dir_raw, str) and mem_dir_raw.strip():
524
+ try:
525
+ base_dir = Path(mem_dir_raw).expanduser().resolve()
526
+ except Exception:
527
+ base_dir = None
528
+ if base_dir is None and artifact_store is not None:
529
+ base_attr = getattr(artifact_store, "_base", None)
530
+ if base_attr is not None:
531
+ try:
532
+ base_dir = Path(base_attr).expanduser().resolve() / "abstractmemory"
533
+ except Exception:
534
+ base_dir = None
535
+
536
+ # Embeddings are a gateway/runtime capability (singleton embedding space per gateway instance).
537
+ try:
538
+ from abstractmemory.embeddings import AbstractGatewayTextEmbedder
539
+ except Exception as e:
540
+ raise RuntimeError(
541
+ "This flow uses memory_kg_* nodes, but AbstractMemory gateway embeddings integration is not available. "
542
+ "Install `abstractmemory` (src layout) and ensure it is importable."
543
+ ) from e
544
+
545
+ gateway_url = str(os.getenv("ABSTRACTFLOW_GATEWAY_URL") or os.getenv("ABSTRACTGATEWAY_URL") or "").strip()
546
+ if not gateway_url:
547
+ gateway_url = "http://127.0.0.1:8081"
548
+ auth_token = _resolve_gateway_auth_token()
549
+ # Deterministic/offline mode:
550
+ # - When embeddings are explicitly disabled, allow LanceDB to operate in pattern-only mode.
551
+ # - Vector search (query_text) will raise in the store when no embedder is configured.
552
+ embedder = None
553
+ embed_provider = (
554
+ os.getenv("ABSTRACTFLOW_EMBEDDING_PROVIDER")
555
+ or os.getenv("ABSTRACTMEMORY_EMBEDDING_PROVIDER")
556
+ or os.getenv("ABSTRACTGATEWAY_EMBEDDING_PROVIDER")
557
+ )
558
+ if str(embed_provider or "").strip().lower() not in {"__disabled__", "disabled", "none", "off"}:
559
+ embedder = AbstractGatewayTextEmbedder(base_url=gateway_url, auth_token=auth_token)
560
+
561
+ if base_dir is None:
562
+ raise RuntimeError(
563
+ "This flow uses memory_kg_* nodes, but no durable memory directory could be resolved. "
564
+ "Set `ABSTRACTFLOW_MEMORY_DIR` (or `ABSTRACTMEMORY_DIR`), or run with a file-backed ArtifactStore."
565
+ )
566
+
567
+ base_dir.mkdir(parents=True, exist_ok=True)
568
+ token_fingerprint = "embeddings_disabled"
569
+ if embedder is not None:
570
+ if auth_token:
571
+ token_fingerprint = hashlib.sha256(auth_token.encode("utf-8")).hexdigest()[:12]
572
+ else:
573
+ token_fingerprint = "missing_token"
574
+ cache_key = (str(base_dir), gateway_url if embedder is not None else "__embeddings_disabled__", token_fingerprint)
575
+ with _MEMORY_KG_STORE_CACHE_LOCK:
576
+ store_obj = _MEMORY_KG_STORE_CACHE.get(cache_key)
577
+ if store_obj is None:
578
+ try:
579
+ store_obj = LanceDBTripleStore(base_dir / "kg", embedder=embedder)
580
+ except Exception as e:
581
+ raise RuntimeError(
582
+ "This flow uses memory_kg_* nodes, which require a LanceDB-backed store. "
583
+ "Install `lancedb` and ensure the host runs under the same environment."
584
+ ) from e
585
+ with _MEMORY_KG_STORE_CACHE_LOCK:
586
+ _MEMORY_KG_STORE_CACHE[cache_key] = store_obj
587
+
588
+ extra_effect_handlers = build_memory_kg_effect_handlers(store=store_obj, run_store=run_store, now_iso=utc_now_iso)
589
+
590
+ if has_llm_nodes:
591
+ provider_model = default_llm
592
+
593
+ # Strict behavior: do not probe unrelated providers/models to "guess" a default.
594
+ #
595
+ # A VisualFlow run must provide a deterministic provider+model for the runtime:
596
+ # - via run inputs (e.g. ON_FLOW_START pinDefaults / user-provided input_data), OR
597
+ # - via static node configs (effectConfig/agentConfig), OR
598
+ # - via connected pin defaults (best-effort).
599
+ #
600
+ # If we can't determine that, fail loudly with a clear error message.
601
+ if provider_model is None:
602
+ raise RuntimeError(
603
+ "This flow uses LLM nodes (llm_call/agent/memory_compact), but no default provider/model could be determined. "
604
+ "Set provider+model on a node, or connect provider/model pins to a node with pinDefaults "
605
+ "(e.g. ON_FLOW_START), or pass `input_data={'provider': ..., 'model': ...}` when creating the runner."
606
+ )
607
+
608
+ provider, model = provider_model
609
+ try:
610
+ from abstractruntime.integrations.abstractcore.factory import create_local_runtime
611
+ # Older/newer AbstractRuntime distributions expose tool executors differently.
612
+ # Tool execution is not required for plain LLM_CALL-only flows, so we make
613
+ # this optional and fall back to the factory defaults.
614
+ try:
615
+ from abstractruntime.integrations.abstractcore import MappingToolExecutor # type: ignore
616
+ except Exception: # pragma: no cover
617
+ try:
618
+ from abstractruntime.integrations.abstractcore.tool_executor import MappingToolExecutor # type: ignore
619
+ except Exception: # pragma: no cover
620
+ MappingToolExecutor = None # type: ignore[assignment]
621
+ try:
622
+ from abstractruntime.integrations.abstractcore.default_tools import get_default_tools # type: ignore
623
+ except Exception: # pragma: no cover
624
+ get_default_tools = None # type: ignore[assignment]
625
+ except Exception as e: # pragma: no cover
626
+ raise RuntimeError(
627
+ "This flow uses LLM nodes (llm_call/agent), but the installed AbstractRuntime "
628
+ "does not provide the AbstractCore integration. Install/enable the integration "
629
+ "or remove LLM nodes from the flow."
630
+ ) from e
631
+
632
+ effective_tool_executor = tool_executor
633
+ if effective_tool_executor is None and MappingToolExecutor is not None and callable(get_default_tools):
634
+ try:
635
+ effective_tool_executor = MappingToolExecutor.from_tools(get_default_tools()) # type: ignore[attr-defined]
636
+ except Exception:
637
+ effective_tool_executor = None
638
+
639
+ # LLM timeout policy (web-hosted workflow execution).
640
+ #
641
+ # Contract:
642
+ # - AbstractRuntime (the orchestrator) is the authority for execution policy such as timeouts.
643
+ # - This host can *override* that policy via env for deployments that want a different SLO.
644
+ #
645
+ # Env overrides:
646
+ # - ABSTRACTFLOW_LLM_TIMEOUT_S (float seconds)
647
+ # - ABSTRACTFLOW_LLM_TIMEOUT (alias)
648
+ #
649
+ # Set to 0 or a negative value to opt into "unlimited".
650
+ llm_kwargs: Dict[str, Any] = {}
651
+ timeout_raw = os.getenv("ABSTRACTFLOW_LLM_TIMEOUT_S") or os.getenv("ABSTRACTFLOW_LLM_TIMEOUT")
652
+ if timeout_raw is None or not str(timeout_raw).strip():
653
+ # No override: let the orchestrator (AbstractRuntime) apply its default.
654
+ pass
655
+ else:
656
+ raw = str(timeout_raw).strip().lower()
657
+ if raw in {"none", "null", "inf", "infinite", "unlimited"}:
658
+ # Explicit override: opt back into unlimited HTTP requests.
659
+ llm_kwargs["timeout"] = None
660
+ else:
661
+ try:
662
+ timeout_s = float(raw)
663
+ except Exception:
664
+ timeout_s = None
665
+ # Only override when parsing succeeded; otherwise fall back to AbstractCore config default.
666
+ if timeout_s is None:
667
+ pass
668
+ elif isinstance(timeout_s, (int, float)) and timeout_s <= 0:
669
+ # Consistent with the documented behavior: <=0 => unlimited.
670
+ llm_kwargs["timeout"] = None
671
+ else:
672
+ llm_kwargs["timeout"] = timeout_s
673
+
674
+ # Output token budget for web-hosted runs.
675
+ #
676
+ # Contract: do not impose an arbitrary default cap here. When unset, the runtime/provider
677
+ # uses the model's declared capabilities (`model_capabilities.json`) for its defaults.
678
+ #
679
+ # Operators can still override via env (including disabling by setting <=0 / "unlimited").
680
+ max_out_raw = os.getenv("ABSTRACTFLOW_LLM_MAX_OUTPUT_TOKENS") or os.getenv("ABSTRACTFLOW_MAX_OUTPUT_TOKENS")
681
+ max_out: Optional[int] = None
682
+ if max_out_raw is None or not str(max_out_raw).strip():
683
+ max_out = None
684
+ else:
685
+ try:
686
+ max_out = int(str(max_out_raw).strip())
687
+ except Exception:
688
+ max_out = None
689
+ if isinstance(max_out, int) and max_out <= 0:
690
+ max_out = None
691
+
692
+ # Pass runtime config to initialize `_limits.max_output_tokens`.
693
+ try:
694
+ from abstractruntime.core.config import RuntimeConfig
695
+ runtime_config = RuntimeConfig(max_output_tokens=max_out)
696
+ except Exception: # pragma: no cover
697
+ runtime_config = None
698
+
699
+ runtime = create_local_runtime(
700
+ provider=provider,
701
+ model=model,
702
+ llm_kwargs=llm_kwargs,
703
+ tool_executor=effective_tool_executor,
704
+ run_store=run_store,
705
+ ledger_store=ledger_store,
706
+ artifact_store=artifact_store,
707
+ config=runtime_config,
708
+ extra_effect_handlers=extra_effect_handlers,
709
+ )
710
+ else:
711
+ runtime_kwargs: Dict[str, Any] = {
712
+ "run_store": run_store or InMemoryRunStore(),
713
+ "ledger_store": ledger_store or InMemoryLedgerStore(),
714
+ }
715
+ if extra_effect_handlers:
716
+ runtime_kwargs["effect_handlers"] = extra_effect_handlers
717
+
718
+ if needs_artifacts:
719
+ # MEMORY_* effects require an ArtifactStore. Only configure it when needed.
720
+ artifact_store_obj: Any = artifact_store
721
+ if artifact_store_obj is None:
722
+ try:
723
+ from abstractruntime import InMemoryArtifactStore # type: ignore
724
+ artifact_store_obj = InMemoryArtifactStore()
725
+ except Exception: # pragma: no cover
726
+ try:
727
+ from abstractruntime.storage.artifacts import InMemoryArtifactStore # type: ignore
728
+ artifact_store_obj = InMemoryArtifactStore()
729
+ except Exception as e: # pragma: no cover
730
+ raise RuntimeError(
731
+ "This flow uses MEMORY_* nodes, but the installed AbstractRuntime "
732
+ "does not provide an ArtifactStore implementation."
733
+ ) from e
734
+
735
+ # Only pass artifact_store if the runtime supports it (older runtimes may not).
736
+ try:
737
+ from inspect import signature
738
+
739
+ if "artifact_store" in signature(Runtime).parameters:
740
+ runtime_kwargs["artifact_store"] = artifact_store_obj
741
+ except Exception: # pragma: no cover
742
+ # Best-effort: attempt to set via method if present.
743
+ pass
744
+
745
+ runtime = Runtime(**runtime_kwargs)
746
+
747
+ # Best-effort: configure artifact store via setter if supported.
748
+ if needs_artifacts and "artifact_store" not in runtime_kwargs and hasattr(runtime, "set_artifact_store"):
749
+ try:
750
+ runtime.set_artifact_store(artifact_store_obj) # type: ignore[name-defined]
751
+ except Exception:
752
+ pass
753
+
754
+ flow = visual_to_flow(visual_flow)
755
+ # Build and register custom event listener workflows (On Event nodes).
756
+ event_listener_specs: list[Any] = []
757
+ if needs_registry:
758
+ try:
759
+ from .agent_ids import visual_react_workflow_id
760
+ except Exception: # pragma: no cover
761
+ visual_react_workflow_id = None # type: ignore[assignment]
762
+
763
+ for vf in ordered:
764
+ reachable = _reachable_exec_node_ids(vf)
765
+ for n in vf.nodes:
766
+ if _node_type(n) != "on_event":
767
+ continue
768
+ # On Event nodes are roots by definition (even if disconnected from the main entry).
769
+ if reachable and n.id not in reachable:
770
+ continue
771
+
772
+ workflow_id = visual_event_listener_workflow_id(flow_id=vf.id, node_id=n.id)
773
+
774
+ # Create a derived VisualFlow for this listener workflow:
775
+ # - workflow id is unique (so it can be registered)
776
+ # - entryNode is the on_event node
777
+ derived = vf.model_copy(deep=True)
778
+ derived.id = workflow_id
779
+ derived.entryNode = n.id
780
+
781
+ # Ensure Agent nodes inside this derived workflow reference the canonical
782
+ # ReAct workflow IDs based on the *source* flow id, not the derived id.
783
+ if callable(visual_react_workflow_id):
784
+ for dn in derived.nodes:
785
+ if _node_type(dn) != "agent":
786
+ continue
787
+ raw_cfg = dn.data.get("agentConfig", {}) if isinstance(dn.data, dict) else {}
788
+ cfg = dict(raw_cfg) if isinstance(raw_cfg, dict) else {}
789
+ cfg.setdefault(
790
+ "_react_workflow_id",
791
+ visual_react_workflow_id(flow_id=vf.id, node_id=dn.id),
792
+ )
793
+ dn.data["agentConfig"] = cfg
794
+
795
+ listener_flow = visual_to_flow(derived)
796
+ listener_spec = compile_flow(listener_flow)
797
+ event_listener_specs.append(listener_spec)
798
+ runner: FlowRunner
799
+ if event_listener_specs:
800
+ runner = VisualSessionRunner(flow, runtime=runtime, event_listener_specs=event_listener_specs)
801
+ else:
802
+ runner = FlowRunner(flow, runtime=runtime)
803
+
804
+ if needs_registry:
805
+ registry = WorkflowRegistry()
806
+ registry.register(runner.workflow)
807
+ for vf in ordered[1:]:
808
+ child_flow = visual_to_flow(vf)
809
+ child_spec = compile_flow(child_flow)
810
+ registry.register(child_spec)
811
+ for spec in event_listener_specs:
812
+ registry.register(spec)
813
+
814
+ # Register per-Agent-node subworkflows (canonical AbstractAgent ReAct).
815
+ #
816
+ # Visual Agent nodes compile into START_SUBWORKFLOW effects that reference a
817
+ # deterministic workflow_id. The registry must contain those WorkflowSpecs.
818
+ #
819
+ # This keeps VisualFlow JSON portable across hosts: any host can run a
820
+ # VisualFlow document by registering these derived specs alongside the flow.
821
+ agent_nodes: list[tuple[str, Dict[str, Any]]] = []
822
+ for vf in ordered:
823
+ for n in vf.nodes:
824
+ node_type = _node_type(n)
825
+ if node_type != "agent":
826
+ continue
827
+ cfg = n.data.get("agentConfig", {})
828
+ agent_nodes.append((visual_react_workflow_id(flow_id=vf.id, node_id=n.id), cfg if isinstance(cfg, dict) else {}))
829
+
830
+ if agent_nodes:
831
+ try:
832
+ from abstractagent.adapters.react_runtime import create_react_workflow
833
+ from abstractagent.logic.react import ReActLogic
834
+ except Exception as e: # pragma: no cover
835
+ raise RuntimeError(
836
+ "Visual Agent nodes require AbstractAgent to be installed/importable."
837
+ ) from e
838
+
839
+ from abstractcore.tools import ToolDefinition
840
+ from abstractruntime.integrations.abstractcore.default_tools import list_default_tool_specs
841
+
842
+ def _tool_defs_from_specs(specs: list[dict[str, Any]]) -> list[ToolDefinition]:
843
+ out: list[ToolDefinition] = []
844
+ for s in specs:
845
+ if not isinstance(s, dict):
846
+ continue
847
+ name = s.get("name")
848
+ if not isinstance(name, str) or not name.strip():
849
+ continue
850
+ desc = s.get("description")
851
+ params = s.get("parameters")
852
+ out.append(
853
+ ToolDefinition(
854
+ name=name.strip(),
855
+ description=str(desc or ""),
856
+ parameters=dict(params) if isinstance(params, dict) else {},
857
+ )
858
+ )
859
+ return out
860
+
861
+ def _normalize_tool_names(raw: Any) -> list[str]:
862
+ if not isinstance(raw, list):
863
+ return []
864
+ out: list[str] = []
865
+ for t in raw:
866
+ if isinstance(t, str) and t.strip():
867
+ out.append(t.strip())
868
+ return out
869
+
870
+ all_tool_defs = _tool_defs_from_specs(list_default_tool_specs())
871
+ # Add schema-only runtime tools (executed as runtime effects by AbstractAgent adapters).
872
+ try:
873
+ from abstractagent.logic.builtins import ( # type: ignore
874
+ ASK_USER_TOOL,
875
+ COMPACT_MEMORY_TOOL,
876
+ DELEGATE_AGENT_TOOL,
877
+ INSPECT_VARS_TOOL,
878
+ RECALL_MEMORY_TOOL,
879
+ REMEMBER_TOOL,
880
+ )
881
+
882
+ builtin_defs = [
883
+ ASK_USER_TOOL,
884
+ RECALL_MEMORY_TOOL,
885
+ INSPECT_VARS_TOOL,
886
+ REMEMBER_TOOL,
887
+ COMPACT_MEMORY_TOOL,
888
+ DELEGATE_AGENT_TOOL,
889
+ ]
890
+ seen_names = {t.name for t in all_tool_defs if getattr(t, "name", None)}
891
+ for t in builtin_defs:
892
+ if getattr(t, "name", None) and t.name not in seen_names:
893
+ all_tool_defs.append(t)
894
+ seen_names.add(t.name)
895
+ except Exception:
896
+ pass
897
+
898
+ for workflow_id, cfg in agent_nodes:
899
+ provider_raw = cfg.get("provider")
900
+ model_raw = cfg.get("model")
901
+ # NOTE: Provider/model are injected durably through the Agent node's
902
+ # START_SUBWORKFLOW vars (see compiler `_build_sub_vars`). We keep the
903
+ # registered workflow spec provider/model-agnostic so Agent pins can
904
+ # override without breaking persistence/resume.
905
+ provider = None
906
+ model = None
907
+
908
+ tools_selected = _normalize_tool_names(cfg.get("tools"))
909
+ logic = ReActLogic(tools=all_tool_defs)
910
+ registry.register(
911
+ create_react_workflow(
912
+ logic=logic,
913
+ workflow_id=workflow_id,
914
+ provider=provider,
915
+ model=model,
916
+ allowed_tools=tools_selected,
917
+ )
918
+ )
919
+
920
+ if hasattr(runtime, "set_workflow_registry"):
921
+ runtime.set_workflow_registry(registry) # type: ignore[name-defined]
922
+ else: # pragma: no cover
923
+ raise RuntimeError(
924
+ "This flow requires subworkflows (agent/subflow nodes), but the installed "
925
+ "AbstractRuntime does not support workflow registries."
926
+ )
927
+
928
+ return runner
929
+
930
+
931
def visual_to_flow(visual: VisualFlow) -> Flow:
    """Convert a VisualFlow definition to a runtime Flow IR.

    Thin delegation wrapper: normalizes the visual document via the runtime's
    VisualFlow loader, then hands it to the runtime compiler. The ``cast`` only
    narrows the static type; no runtime conversion happens here.
    """
    # Imported lazily so the module stays importable without abstractruntime.
    from abstractruntime.visualflow_compiler import (
        load_visualflow_json as _load_visualflow_json,
        visual_to_flow as _runtime_visual_to_flow,
    )

    normalized = _load_visualflow_json(visual)
    compiled = _runtime_visual_to_flow(normalized)
    return cast(Flow, compiled)
938
+
939
+
940
def execute_visual_flow(visual_flow: VisualFlow, input_data: Dict[str, Any], *, flows: Dict[str, VisualFlow]) -> Dict[str, Any]:
    """Execute a visual flow with a correctly wired runtime (LLM/MEMORY/SUBFLOW).

    Builds a runner via ``create_visual_runner`` and normalizes its result into
    a plain dict with ``success``/``waiting``/``result``/``error``/``run_id``
    keys. A run that pauses on a wait node is reported as a non-success,
    ``waiting=True`` payload describing the pending prompt; resuming is the
    host's responsibility.
    """
    runner = create_visual_runner(visual_flow, flows=flows)
    outcome = runner.run(input_data)

    # Non-dict results are treated as an unconditional success payload.
    if not isinstance(outcome, dict):
        return {"success": True, "waiting": False, "result": outcome, "run_id": runner.run_id}

    if outcome.get("waiting"):
        # Pull the wait descriptor (prompt/choices) off the persisted run state,
        # if the runner exposes one; every field degrades gracefully to None/[].
        state = runner.get_state()
        wait = state.waiting if state else None
        has_choice_list = bool(wait) and isinstance(wait.choices, list)
        return {
            "success": False,
            "waiting": True,
            "error": "Flow is waiting for input. Use a host resume mechanism to continue.",
            "run_id": runner.run_id,
            "wait_key": wait.wait_key if wait else None,
            "prompt": wait.prompt if wait else None,
            "choices": list(wait.choices) if has_choice_list else [],
            "allow_free_text": bool(wait.allow_free_text) if wait else None,
        }

    # Completed (or failed) dict result: pass fields through, defaulting
    # success to True when the runner did not report it explicitly.
    return {
        "success": bool(outcome.get("success", True)),
        "waiting": False,
        "result": outcome.get("result"),
        "error": outcome.get("error"),
        "run_id": runner.run_id,
    }