abstractflow 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractflow/__init__.py +2 -2
- abstractflow/adapters/agent_adapter.py +2 -121
- abstractflow/adapters/control_adapter.py +2 -612
- abstractflow/adapters/effect_adapter.py +2 -642
- abstractflow/adapters/event_adapter.py +2 -304
- abstractflow/adapters/function_adapter.py +2 -94
- abstractflow/adapters/subflow_adapter.py +2 -71
- abstractflow/adapters/variable_adapter.py +2 -314
- abstractflow/cli.py +73 -28
- abstractflow/compiler.py +18 -2022
- abstractflow/core/flow.py +4 -240
- abstractflow/runner.py +59 -5
- abstractflow/visual/agent_ids.py +2 -26
- abstractflow/visual/builtins.py +2 -786
- abstractflow/visual/code_executor.py +2 -211
- abstractflow/visual/executor.py +319 -2140
- abstractflow/visual/interfaces.py +103 -10
- abstractflow/visual/models.py +26 -1
- abstractflow/visual/session_runner.py +23 -9
- abstractflow/visual/workspace_scoped_tools.py +11 -243
- abstractflow/workflow_bundle.py +290 -0
- abstractflow-0.3.1.dist-info/METADATA +186 -0
- abstractflow-0.3.1.dist-info/RECORD +33 -0
- {abstractflow-0.3.0.dist-info → abstractflow-0.3.1.dist-info}/WHEEL +1 -1
- abstractflow-0.3.0.dist-info/METADATA +0 -413
- abstractflow-0.3.0.dist-info/RECORD +0 -32
- {abstractflow-0.3.0.dist-info → abstractflow-0.3.1.dist-info}/entry_points.txt +0 -0
- {abstractflow-0.3.0.dist-info → abstractflow-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {abstractflow-0.3.0.dist-info → abstractflow-0.3.1.dist-info}/top_level.txt +0 -0
abstractflow/visual/executor.py
CHANGED
@@ -1,30 +1,69 @@
 """Portable visual-flow execution utilities.
 
-This module
-
-
-
-The goal is host portability: the same visual flow should run from non-web
-hosts (AbstractCode, CLI) without importing the web backend implementation.
+This module wires the VisualFlow authoring DSL (JSON) to AbstractRuntime for
+durable execution. Compilation semantics (VisualFlow → Flow → WorkflowSpec) are
+delegated to `abstractruntime.visualflow_compiler` so there is a single
+semantics engine across the framework.
 """
 
 from __future__ import annotations
 
 import os
-
+import hashlib
+import threading
+from typing import Any, Dict, Optional, cast
 
 from ..core.flow import Flow
 from ..runner import FlowRunner
 
-from .builtins import get_builtin_handler
-from .code_executor import create_code_handler
 from .agent_ids import visual_react_workflow_id
-from .models import
+from .models import VisualFlow
+
+
+_MEMORY_KG_STORE_CACHE_LOCK = threading.Lock()
+# Keyed by (store_base_dir, gateway_url, token_fingerprint).
+#
+# Why include the token fingerprint:
+# - The embedder captures the auth token at construction time.
+# - The UI can set/update the token at runtime (without restarting the backend).
+# - If we didn't key by token, we'd keep using a cached store with a stale token and get 401s.
+_MEMORY_KG_STORE_CACHE: dict[tuple[str, str, str], Any] = {}
+
+
+def _resolve_gateway_auth_token() -> str | None:
+    """Resolve the gateway auth token for host-to-gateway calls.
 
+    Canonical env vars:
+    - ABSTRACTGATEWAY_AUTH_TOKEN
+    - ABSTRACTFLOW_GATEWAY_AUTH_TOKEN (legacy compatibility)
+
+    Additional host fallbacks:
+    - ABSTRACTCODE_GATEWAY_TOKEN (AbstractCode CLI convention)
+    - ABSTRACTGATEWAY_AUTH_TOKENS / ABSTRACTFLOW_GATEWAY_AUTH_TOKENS (first token)
+    """
+    candidates = [
+        "ABSTRACTGATEWAY_AUTH_TOKEN",
+        "ABSTRACTFLOW_GATEWAY_AUTH_TOKEN",
+        "ABSTRACTCODE_GATEWAY_TOKEN",
+    ]
+    for name in candidates:
+        raw = os.getenv(name)
+        token = str(raw or "").strip()
+        if token:
+            return token
+
+    token_lists = [
+        "ABSTRACTGATEWAY_AUTH_TOKENS",
+        "ABSTRACTFLOW_GATEWAY_AUTH_TOKENS",
+    ]
+    for name in token_lists:
+        raw = os.getenv(name)
+        if not isinstance(raw, str) or not raw.strip():
+            continue
+        first = raw.split(",", 1)[0].strip()
+        if first:
+            return first
 
-
-# Maps target_node_id -> { target_pin -> (source_node_id, source_pin) }
-DataEdgeMap = Dict[str, Dict[str, tuple[str, str]]]
+    return None
 
 
 def create_visual_runner(
@@ -35,6 +74,7 @@ def create_visual_runner(
     ledger_store: Optional[Any] = None,
     artifact_store: Optional[Any] = None,
     tool_executor: Optional[Any] = None,
+    input_data: Optional[Dict[str, Any]] = None,
 ) -> FlowRunner:
     """Create a FlowRunner for a visual run with a correctly wired runtime.
 
@@ -43,6 +83,12 @@
     - Create a runtime with an ArtifactStore (required for MEMORY_* effects).
     - If any LLM_CALL / Agent nodes exist in the flow tree, wire AbstractCore-backed
       effect handlers (via AbstractRuntime's integration module).
+
+    Notes:
+    - When LLM nodes rely on *connected* provider/model pins (e.g. from ON_FLOW_START),
+      this runner still needs a default provider/model to initialize runtime capabilities.
+      We use `input_data["provider"]`/`input_data["model"]` when provided, otherwise fall
+      back to static pin defaults (best-effort).
     """
     # Be resilient to different AbstractRuntime install layouts: not all exports
     # are guaranteed to be re-exported from `abstractruntime.__init__`.
@@ -123,7 +169,12 @@
         "write_file",
         "memory_note",
         "memory_query",
+        "memory_tag",
+        "memory_compact",
         "memory_rehydrate",
+        "memory_kg_assert",
+        "memory_kg_query",
+        "memact_compose",
     }
 
     node_types: Dict[str, str] = {n.id: _node_type(n) for n in vf.nodes}
@@ -209,6 +260,7 @@
     # These flags keep `create_visual_runner()` resilient to older AbstractRuntime installs.
     needs_registry = False
     needs_artifacts = False
+    needs_memory_kg = False
     for vf in ordered:
         reachable = _reachable_exec_node_ids(vf)
         for n in vf.nodes:
@@ -219,8 +271,10 @@
                 needs_registry = True
             if t in {"on_event", "emit_event"}:
                 needs_registry = True
-            if t in {"memory_note", "memory_query", "memory_rehydrate"}:
+            if t in {"memory_note", "memory_query", "memory_rehydrate", "memory_compact"}:
                 needs_artifacts = True
+            if t in {"memory_kg_assert", "memory_kg_query"}:
+                needs_memory_kg = True
 
     # Detect whether this flow tree needs AbstractCore LLM integration.
     # Provider/model can be supplied either via node config *or* via connected input pins.
@@ -238,6 +292,53 @@
             continue
         return False
 
+    def _infer_connected_pin_default(vf: VisualFlow, *, node_id: str, pin_id: str) -> Optional[str]:
+        """Best-effort static inference for a connected pin's default value.
+
+        This is used only to pick a reasonable *default* provider/model for the runtime
+        (capabilities, limits, etc). Per-node/provider routing still happens at execution
+        time via effect payloads.
+        """
+        try:
+            for e in vf.edges:
+                if e.target != node_id or e.targetHandle != pin_id:
+                    continue
+                source_id = getattr(e, "source", None)
+                if not isinstance(source_id, str) or not source_id:
+                    continue
+                source_handle = getattr(e, "sourceHandle", None)
+                if not isinstance(source_handle, str) or not source_handle:
+                    source_handle = pin_id
+
+                src = next((n for n in vf.nodes if getattr(n, "id", None) == source_id), None)
+                if src is None:
+                    return None
+                data = getattr(src, "data", None)
+                if not isinstance(data, dict):
+                    return None
+
+                pin_defaults = data.get("pinDefaults")
+                if isinstance(pin_defaults, dict) and source_handle in pin_defaults:
+                    v = pin_defaults.get(source_handle)
+                    if isinstance(v, str) and v.strip():
+                        return v.strip()
+
+                literal_value = data.get("literalValue")
+                if isinstance(literal_value, str) and literal_value.strip():
+                    return literal_value.strip()
+                if isinstance(literal_value, dict):
+                    dv = literal_value.get("default")
+                    if isinstance(dv, str) and dv.strip():
+                        return dv.strip()
+                    vv = literal_value.get(source_handle)
+                    if isinstance(vv, str) and vv.strip():
+                        return vv.strip()
+                return None
+        except Exception:
+            return None
+
+        return None
+
     def _add_pair(provider_raw: Any, model_raw: Any) -> None:
         nonlocal default_llm
         if not isinstance(provider_raw, str) or not provider_raw.strip():
@@ -249,13 +350,19 @@
         if default_llm is None:
             default_llm = pair
 
+    # Prefer run inputs for the runtime default provider/model when available.
+    # This avoids expensive provider probing and makes model capability detection match
+    # what the user selected in the Run Flow modal.
+    if isinstance(input_data, dict):
+        _add_pair(input_data.get("provider"), input_data.get("model"))
+
     for vf in ordered:
         reachable = _reachable_exec_node_ids(vf)
         for n in vf.nodes:
            node_type = _node_type(n)
            if reachable and n.id not in reachable:
                continue
-            if node_type in {"llm_call", "agent", "tool_calls"}:
+            if node_type in {"llm_call", "agent", "tool_calls", "memory_compact"}:
                has_llm_nodes = True
 
            if node_type == "llm_call":
@@ -279,7 +386,48 @@
                         f"LLM_CALL node '{n.id}' in flow '{vf.id}' missing model "
                         "(set effectConfig.model or connect the model input pin)"
                     )
-
+                provider_default = (
+                    provider
+                    if provider_ok
+                    else _infer_connected_pin_default(vf, node_id=n.id, pin_id="provider")
+                    if provider_connected
+                    else None
+                )
+                model_default = (
+                    model
+                    if model_ok
+                    else _infer_connected_pin_default(vf, node_id=n.id, pin_id="model")
+                    if model_connected
+                    else None
+                )
+                _add_pair(provider_default, model_default)
+
+            elif node_type == "memory_compact":
+                cfg = n.data.get("effectConfig", {}) if isinstance(n.data, dict) else {}
+                cfg = cfg if isinstance(cfg, dict) else {}
+                provider = cfg.get("provider")
+                model = cfg.get("model")
+
+                provider_ok = isinstance(provider, str) and provider.strip()
+                model_ok = isinstance(model, str) and model.strip()
+                provider_connected = _pin_connected(vf, node_id=n.id, pin_id="provider")
+                model_connected = _pin_connected(vf, node_id=n.id, pin_id="model")
+
+                provider_default = (
+                    provider
+                    if provider_ok
+                    else _infer_connected_pin_default(vf, node_id=n.id, pin_id="provider")
+                    if provider_connected
+                    else None
+                )
+                model_default = (
+                    model
+                    if model_ok
+                    else _infer_connected_pin_default(vf, node_id=n.id, pin_id="model")
+                    if model_connected
+                    else None
+                )
+                _add_pair(provider_default, model_default)
 
             elif node_type == "agent":
                 cfg = n.data.get("agentConfig", {}) if isinstance(n.data, dict) else {}
@@ -302,7 +450,21 @@
                         f"Agent node '{n.id}' in flow '{vf.id}' missing model "
                         "(set agentConfig.model or connect the model input pin)"
                     )
-
+                provider_default = (
+                    provider
+                    if provider_ok
+                    else _infer_connected_pin_default(vf, node_id=n.id, pin_id="provider")
+                    if provider_connected
+                    else None
+                )
+                model_default = (
+                    model
+                    if model_ok
+                    else _infer_connected_pin_default(vf, node_id=n.id, pin_id="model")
+                    if model_connected
+                    else None
+                )
+                _add_pair(provider_default, model_default)
 
             elif node_type == "provider_models":
                 cfg = n.data.get("providerModelsConfig", {}) if isinstance(n.data, dict) else {}
@@ -317,55 +479,130 @@
                 for m in allowed:
                     _add_pair(provider, m)
 
-
-
-
-    #
+    extra_effect_handlers: Dict[Any, Any] = {}
+    if needs_memory_kg:
+        try:
+            # Dev convenience (monorepo):
+            #
+            # When running from source (without installing each package), `import abstractmemory`
+            # can resolve to the *project directory* (namespace package, no exports) instead of
+            # the src-layout package at `abstractmemory/src/abstractmemory`.
+            #
+            # Add the src-layout path when it exists so VisualFlows with `memory_kg_*` nodes
+            # work out-of-the-box in local dev environments.
+            import sys
+            from pathlib import Path
+
+            repo_root = Path(__file__).resolve().parents[3]  # .../abstractframework
+            mem_src = repo_root / "abstractmemory" / "src"
+            if mem_src.is_dir():
+                mem_src_str = str(mem_src)
+                try:
+                    sys.path.remove(mem_src_str)
+                except ValueError:
+                    pass
+                sys.path.insert(0, mem_src_str)
+
+            from abstractmemory import LanceDBTripleStore
+            from abstractruntime.integrations.abstractmemory.effect_handlers import build_memory_kg_effect_handlers
+            from abstractruntime.storage.artifacts import utc_now_iso
+        except Exception as e:
+            raise RuntimeError(
+                "This flow uses memory_kg_* nodes, but AbstractMemory integration is not available. "
+                "Install `abstractmemory` (and optionally `abstractmemory[lancedb]`)."
+            ) from e
+
+        # Ensure stores exist so KG handlers can resolve run-tree scope fallbacks.
+        if run_store is None:
+            run_store = InMemoryRunStore()
+        if ledger_store is None:
+            ledger_store = InMemoryLedgerStore()
+
+        base_dir = None
+        mem_dir_raw = os.getenv("ABSTRACTMEMORY_DIR") or os.getenv("ABSTRACTFLOW_MEMORY_DIR")
+        if isinstance(mem_dir_raw, str) and mem_dir_raw.strip():
            try:
-
+                base_dir = Path(mem_dir_raw).expanduser().resolve()
            except Exception:
-
-
-
-
-
-
-
-
-                first = next((m for m in models if isinstance(m, str) and m.strip()), None)
-                if first:
-                    provider_model = (p, first.strip())
-                    break
+                base_dir = None
+        if base_dir is None and artifact_store is not None:
+            base_attr = getattr(artifact_store, "_base", None)
+            if base_attr is not None:
+                try:
+                    base_dir = Path(base_attr).expanduser().resolve() / "abstractmemory"
+                except Exception:
+                    base_dir = None
 
-
-
+        # Embeddings are a gateway/runtime capability (singleton embedding space per gateway instance).
+        try:
+            from abstractmemory.embeddings import AbstractGatewayTextEmbedder
+        except Exception as e:
+            raise RuntimeError(
+                "This flow uses memory_kg_* nodes, but AbstractMemory gateway embeddings integration is not available. "
+                "Install `abstractmemory` (src layout) and ensure it is importable."
+            ) from e
+
+        gateway_url = str(os.getenv("ABSTRACTFLOW_GATEWAY_URL") or os.getenv("ABSTRACTGATEWAY_URL") or "").strip()
+        if not gateway_url:
+            gateway_url = "http://127.0.0.1:8081"
+        auth_token = _resolve_gateway_auth_token()
+        # Deterministic/offline mode:
+        # - When embeddings are explicitly disabled, allow LanceDB to operate in pattern-only mode.
+        # - Vector search (query_text) will raise in the store when no embedder is configured.
+        embedder = None
+        embed_provider = (
+            os.getenv("ABSTRACTFLOW_EMBEDDING_PROVIDER")
+            or os.getenv("ABSTRACTMEMORY_EMBEDDING_PROVIDER")
+            or os.getenv("ABSTRACTGATEWAY_EMBEDDING_PROVIDER")
+        )
+        if str(embed_provider or "").strip().lower() not in {"__disabled__", "disabled", "none", "off"}:
+            embedder = AbstractGatewayTextEmbedder(base_url=gateway_url, auth_token=auth_token)
+
+        if base_dir is None:
+            raise RuntimeError(
+                "This flow uses memory_kg_* nodes, but no durable memory directory could be resolved. "
+                "Set `ABSTRACTFLOW_MEMORY_DIR` (or `ABSTRACTMEMORY_DIR`), or run with a file-backed ArtifactStore."
+            )
+
+        base_dir.mkdir(parents=True, exist_ok=True)
+        token_fingerprint = "embeddings_disabled"
+        if embedder is not None:
+            if auth_token:
+                token_fingerprint = hashlib.sha256(auth_token.encode("utf-8")).hexdigest()[:12]
+            else:
+                token_fingerprint = "missing_token"
+        cache_key = (str(base_dir), gateway_url if embedder is not None else "__embeddings_disabled__", token_fingerprint)
+        with _MEMORY_KG_STORE_CACHE_LOCK:
+            store_obj = _MEMORY_KG_STORE_CACHE.get(cache_key)
+        if store_obj is None:
            try:
-
+                store_obj = LanceDBTripleStore(base_dir / "kg", embedder=embedder)
+            except Exception as e:
+                raise RuntimeError(
+                    "This flow uses memory_kg_* nodes, which require a LanceDB-backed store. "
+                    "Install `lancedb` and ensure the host runs under the same environment."
+                ) from e
+            with _MEMORY_KG_STORE_CACHE_LOCK:
+                _MEMORY_KG_STORE_CACHE[cache_key] = store_obj
 
-
-        for p in providers_meta:
-            if not isinstance(p, dict):
-                continue
-            if p.get("status") != "available":
-                continue
-            name = p.get("name")
-            models = p.get("models")
-            if not isinstance(name, str) or not name.strip():
-                continue
-            if not isinstance(models, list):
-                continue
-            first = next((m for m in models if isinstance(m, str) and m.strip()), None)
-            if first:
-                provider_model = (name.strip().lower(), first.strip())
-                break
-    except Exception:
-        provider_model = None
+        extra_effect_handlers = build_memory_kg_effect_handlers(store=store_obj, run_store=run_store, now_iso=utc_now_iso)
 
+    if has_llm_nodes:
+        provider_model = default_llm
+
+        # Strict behavior: do not probe unrelated providers/models to "guess" a default.
+        #
+        # A VisualFlow run must provide a deterministic provider+model for the runtime:
+        # - via run inputs (e.g. ON_FLOW_START pinDefaults / user-provided input_data), OR
+        # - via static node configs (effectConfig/agentConfig), OR
+        # - via connected pin defaults (best-effort).
+        #
+        # If we can't determine that, fail loudly with a clear error message.
        if provider_model is None:
            raise RuntimeError(
-                "This flow uses LLM nodes (llm_call/agent), but no provider/model could be determined. "
-                "
-                "
+                "This flow uses LLM nodes (llm_call/agent/memory_compact), but no default provider/model could be determined. "
+                "Set provider+model on a node, or connect provider/model pins to a node with pinDefaults "
+                "(e.g. ON_FLOW_START), or pass `input_data={'provider': ..., 'model': ...}` when creating the runner."
            )
 
        provider, model = provider_model
@@ -434,20 +671,21 @@
         else:
             llm_kwargs["timeout"] = timeout_s
 
-    #
+    # Output token budget for web-hosted runs.
+    #
+    # Contract: do not impose an arbitrary default cap here. When unset, the runtime/provider
+    # uses the model's declared capabilities (`model_capabilities.json`) for its defaults.
     #
-    #
-    # responses that are both slow (local inference) and unhelpful for a visual UI
-    # (tools should write files; the model should not dump huge blobs into chat).
+    # Operators can still override via env (including disabling by setting <=0 / "unlimited").
     max_out_raw = os.getenv("ABSTRACTFLOW_LLM_MAX_OUTPUT_TOKENS") or os.getenv("ABSTRACTFLOW_MAX_OUTPUT_TOKENS")
     max_out: Optional[int] = None
     if max_out_raw is None or not str(max_out_raw).strip():
-        max_out =
+        max_out = None
     else:
         try:
             max_out = int(str(max_out_raw).strip())
         except Exception:
-            max_out =
+            max_out = None
     if isinstance(max_out, int) and max_out <= 0:
         max_out = None
 
@@ -467,12 +705,15 @@
             ledger_store=ledger_store,
             artifact_store=artifact_store,
             config=runtime_config,
+            extra_effect_handlers=extra_effect_handlers,
         )
     else:
         runtime_kwargs: Dict[str, Any] = {
             "run_store": run_store or InMemoryRunStore(),
             "ledger_store": ledger_store or InMemoryLedgerStore(),
         }
+        if extra_effect_handlers:
+            runtime_kwargs["effect_handlers"] = extra_effect_handlers
 
         if needs_artifacts:
             # MEMORY_* effects require an ArtifactStore. Only configure it when needed.
@@ -632,12 +873,20 @@
             from abstractagent.logic.builtins import (  # type: ignore
                 ASK_USER_TOOL,
                 COMPACT_MEMORY_TOOL,
+                DELEGATE_AGENT_TOOL,
                 INSPECT_VARS_TOOL,
                 RECALL_MEMORY_TOOL,
                 REMEMBER_TOOL,
             )
 
-            builtin_defs = [
+            builtin_defs = [
+                ASK_USER_TOOL,
+                RECALL_MEMORY_TOOL,
+                INSPECT_VARS_TOOL,
+                REMEMBER_TOOL,
+                COMPACT_MEMORY_TOOL,
+                DELEGATE_AGENT_TOOL,
+            ]
             seen_names = {t.name for t in all_tool_defs if getattr(t, "name", None)}
             for t in builtin_defs:
                 if getattr(t, "name", None) and t.name not in seen_names:
@@ -679,2083 +928,13 @@ def create_visual_runner(
|
|
|
679
928
|
return runner
|
|
680
929
|
|
|
681
930
|
|
|
682
|
-
def _build_data_edge_map(edges: List[VisualEdge]) -> DataEdgeMap:
|
|
683
|
-
"""Build a mapping of data edges for input resolution."""
|
|
684
|
-
data_edges: DataEdgeMap = {}
|
|
685
|
-
|
|
686
|
-
for edge in edges:
|
|
687
|
-
# Skip execution edges
|
|
688
|
-
if edge.sourceHandle == "exec-out" or edge.targetHandle == "exec-in":
|
|
689
|
-
continue
|
|
690
|
-
|
|
691
|
-
if edge.target not in data_edges:
|
|
692
|
-
data_edges[edge.target] = {}
|
|
693
|
-
|
|
694
|
-
data_edges[edge.target][edge.targetHandle] = (edge.source, edge.sourceHandle)
|
|
695
|
-
|
|
696
|
-
return data_edges
|
|
697
|
-
|
|
698
|
-
|
|
699
931
|
def visual_to_flow(visual: VisualFlow) -> Flow:
|
|
700
|
-
"""Convert a
|
|
701
|
-
import
|
|
702
|
-
|
|
703
|
-
flow = Flow(visual.id)
|
|
704
|
-
|
|
705
|
-
data_edge_map = _build_data_edge_map(visual.edges)
|
|
706
|
-
|
|
707
|
-
# Store node outputs during execution (visual data-edge evaluation cache)
|
|
708
|
-
flow._node_outputs = {} # type: ignore[attr-defined]
|
|
709
|
-
flow._data_edge_map = data_edge_map # type: ignore[attr-defined]
|
|
710
|
-
flow._pure_node_ids = set() # type: ignore[attr-defined]
|
|
711
|
-
flow._volatile_pure_node_ids = set() # type: ignore[attr-defined]
|
|
712
|
-
# Snapshot of "static" node outputs (literals, schemas, etc.). This is used to
|
|
713
|
-
# reset the in-memory cache when the same compiled VisualFlow is executed by
|
|
714
|
-
# multiple runs (e.g. recursive/mutual subflows). See compiler._sync_effect_results_to_node_outputs.
|
|
715
|
-
flow._static_node_outputs = {} # type: ignore[attr-defined]
|
|
716
|
-
flow._active_run_id = None # type: ignore[attr-defined]
|
|
717
|
-
|
|
718
|
-
def _normalize_pin_defaults(raw: Any) -> Dict[str, Any]:
|
|
719
|
-
if not isinstance(raw, dict):
|
|
720
|
-
return {}
|
|
721
|
-
out: Dict[str, Any] = {}
|
|
722
|
-
for k, v in raw.items():
|
|
723
|
-
if not isinstance(k, str) or not k:
|
|
724
|
-
continue
|
|
725
|
-
# Allow JSON-serializable values (including arrays/objects) for defaults.
|
|
726
|
-
# These are cloned at use-sites to avoid cross-run mutation.
|
|
727
|
-
if v is None or isinstance(v, (str, int, float, bool, dict, list)):
|
|
728
|
-
out[k] = v
|
|
729
|
-
return out
|
|
730
|
-
|
|
731
|
-
def _clone_default(value: Any) -> Any:
|
|
732
|
-
# Prevent accidental shared-mutation of dict/list defaults across runs.
|
|
733
|
-
if isinstance(value, (dict, list)):
|
|
734
|
-
try:
|
|
735
|
-
import copy
|
|
736
|
-
|
|
737
|
-
return copy.deepcopy(value)
|
|
738
|
-
except Exception:
|
|
739
|
-
return value
|
|
740
|
-
return value
|
|
741
|
-
|
|
742
|
-
pin_defaults_by_node_id: Dict[str, Dict[str, Any]] = {}
|
|
743
|
-
for node in visual.nodes:
|
|
744
|
-
raw_defaults = node.data.get("pinDefaults") if isinstance(node.data, dict) else None
|
|
745
|
-
normalized = _normalize_pin_defaults(raw_defaults)
|
|
746
|
-
if normalized:
|
|
747
|
-
pin_defaults_by_node_id[node.id] = normalized
|
|
748
|
-
|
|
749
|
-
LITERAL_NODE_TYPES = {
|
|
750
|
-
"literal_string",
|
|
751
|
-
"literal_number",
|
|
752
|
-
"literal_boolean",
|
|
753
|
-
"literal_json",
|
|
754
|
-
"json_schema",
|
|
755
|
-
"literal_array",
|
|
756
|
-
}
|
|
757
|
-
|
|
758
|
-
pure_base_handlers: Dict[str, Any] = {}
|
|
759
|
-
pure_node_ids: set[str] = set()
|
|
760
|
-
|
|
761
|
-
def _has_execution_pins(type_str: str, node_data: Dict[str, Any]) -> bool:
|
|
762
|
-
pins: list[Any] = []
|
|
763
|
-
inputs = node_data.get("inputs")
|
|
764
|
-
outputs = node_data.get("outputs")
|
|
765
|
-
if isinstance(inputs, list):
|
|
766
|
-
pins.extend(inputs)
|
|
767
|
-
if isinstance(outputs, list):
|
|
768
|
-
pins.extend(outputs)
|
|
769
|
-
|
|
770
|
-
if pins:
|
|
771
|
-
for p in pins:
|
|
772
|
-
if isinstance(p, dict) and p.get("type") == "execution":
|
|
773
|
-
return True
|
|
774
|
-
return False
|
|
775
|
-
|
|
776
|
-
if type_str in LITERAL_NODE_TYPES:
|
|
777
|
-
return False
|
|
778
|
-
# These nodes are pure (data-only) even if the JSON document omitted template pins.
|
|
779
|
-
# This keeps programmatic tests and host-built VisualFlows portable.
|
|
780
|
-
if type_str in {"get_var", "bool_var", "var_decl"}:
|
|
781
|
-
return False
|
|
782
|
-
if type_str == "break_object":
|
|
783
|
-
return False
|
|
784
|
-
if get_builtin_handler(type_str) is not None:
|
|
785
|
-
return False
|
|
786
|
-
return True
|
|
787
|
-
|
|
788
|
-
evaluating: set[str] = set()
|
|
789
|
-
volatile_pure_node_ids: set[str] = getattr(flow, "_volatile_pure_node_ids", set()) # type: ignore[attr-defined]
|
|
790
|
-
|
|
791
|
-
def _ensure_node_output(node_id: str) -> None:
|
|
792
|
-
if node_id in flow._node_outputs and node_id not in volatile_pure_node_ids: # type: ignore[attr-defined]
|
|
793
|
-
return
|
|
794
|
-
|
|
795
|
-
handler = pure_base_handlers.get(node_id)
|
|
796
|
-
if handler is None:
|
|
797
|
-
return
|
|
932
|
+
"""Convert a VisualFlow definition to a runtime Flow IR."""
|
|
933
|
+
from abstractruntime.visualflow_compiler import load_visualflow_json as _load_visualflow_json
|
|
934
|
+
from abstractruntime.visualflow_compiler import visual_to_flow as _runtime_visual_to_flow
|
|
798
935
|
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
evaluating.add(node_id)
|
|
803
|
-
try:
|
|
804
|
-
resolved_input: Dict[str, Any] = {}
|
|
805
|
-
|
|
806
|
-
for target_pin, (source_node, source_pin) in data_edge_map.get(node_id, {}).items():
|
|
807
|
-
_ensure_node_output(source_node)
|
|
808
|
-
if source_node not in flow._node_outputs: # type: ignore[attr-defined]
|
|
809
|
-
continue
|
|
810
|
-
source_output = flow._node_outputs[source_node] # type: ignore[attr-defined]
|
|
811
|
-
if isinstance(source_output, dict) and source_pin in source_output:
|
|
812
|
-
resolved_input[target_pin] = source_output[source_pin]
|
|
813
|
-
elif source_pin in ("result", "output"):
|
|
814
|
-
resolved_input[target_pin] = source_output
|
|
815
|
-
|
|
816
|
-
defaults = pin_defaults_by_node_id.get(node_id)
|
|
817
|
-
if defaults:
|
|
818
|
-
for pin_id, value in defaults.items():
|
|
819
|
-
if pin_id in data_edge_map.get(node_id, {}):
|
|
820
|
-
continue
|
|
821
|
-
if pin_id not in resolved_input:
|
|
822
|
-
resolved_input[pin_id] = _clone_default(value)
|
|
823
|
-
|
|
824
|
-
result = handler(resolved_input if resolved_input else {})
|
|
825
|
-
flow._node_outputs[node_id] = result # type: ignore[attr-defined]
|
|
826
|
-
finally:
|
|
827
|
-
# IMPORTANT: even if an upstream pure node raises (bad input / parse_json failure),
|
|
828
|
-
# we must not leave `node_id` in `evaluating`, otherwise later evaluations can
|
|
829
|
-
# surface as a misleading "data edge cycle" at this node.
|
|
830
|
-
try:
|
|
831
|
-
evaluating.remove(node_id)
|
|
832
|
-
except KeyError:
|
|
833
|
-
pass
|
|
834
|
-
|
|
835
|
-
EFFECT_NODE_TYPES = {
|
|
836
|
-
"ask_user",
|
|
837
|
-
"answer_user",
|
|
838
|
-
"llm_call",
|
|
839
|
-
"tool_calls",
|
|
840
|
-
"wait_until",
|
|
841
|
-
"wait_event",
|
|
842
|
-
"emit_event",
|
|
843
|
-
"memory_note",
|
|
844
|
-
"memory_query",
|
|
845
|
-
"memory_rehydrate",
|
|
846
|
-
}
|
|
847
|
-
|
|
848
|
-
literal_node_ids: set[str] = set()
|
|
849
|
-
# Pre-evaluate literal nodes and store their values
|
|
850
|
-
for node in visual.nodes:
|
|
851
|
-
type_str = node.type.value if hasattr(node.type, "value") else str(node.type)
|
|
852
|
-
if type_str in LITERAL_NODE_TYPES:
|
|
853
|
-
literal_value = node.data.get("literalValue")
|
|
854
|
-
flow._node_outputs[node.id] = {"value": literal_value} # type: ignore[attr-defined]
|
|
855
|
-
literal_node_ids.add(node.id)
|
|
856
|
-
# Capture baseline outputs (typically only literal nodes). This baseline must
|
|
857
|
-
# remain stable across runs so we can safely reset `_node_outputs` when switching
|
|
858
|
-
# between different `RunState.run_id` contexts (self-recursive subflows).
|
|
859
|
-
try:
|
|
860
|
-
flow._static_node_outputs = dict(flow._node_outputs) # type: ignore[attr-defined]
|
|
861
|
-
except Exception:
|
|
862
|
-
flow._static_node_outputs = {} # type: ignore[attr-defined]
|
|
863
|
-
|
|
864
|
-
# Compute execution reachability and ignore disconnected execution nodes.
|
|
865
|
-
#
|
|
866
|
-
# Visual editors often contain experimentation / orphan nodes. These should not
|
|
867
|
-
# prevent execution of the reachable pipeline.
|
|
868
|
-
exec_node_ids: set[str] = set()
|
|
869
|
-
for node in visual.nodes:
|
|
870
|
-
type_str = node.type.value if hasattr(node.type, "value") else str(node.type)
|
|
871
|
-
if type_str in LITERAL_NODE_TYPES:
|
|
872
|
-
continue
|
|
873
|
-
if _has_execution_pins(type_str, node.data):
|
|
874
|
-
exec_node_ids.add(node.id)
|
|
875
|
-
|
|
876
|
-
def _pick_entry() -> Optional[str]:
|
|
877
|
-
# Prefer explicit entryNode if it is an execution node.
|
|
878
|
-
if isinstance(getattr(visual, "entryNode", None), str) and visual.entryNode in exec_node_ids:
|
|
879
|
-
return visual.entryNode
|
|
880
|
-
# Otherwise, infer entry as a node with no incoming execution edges.
|
|
881
|
-
targets = {e.target for e in visual.edges if getattr(e, "targetHandle", None) == "exec-in"}
|
|
882
|
-
for node in visual.nodes:
|
|
883
|
-
if node.id in exec_node_ids and node.id not in targets:
|
|
884
|
-
return node.id
|
|
885
|
-
# Fallback: first exec node in document order
|
|
886
|
-
for node in visual.nodes:
|
|
887
|
-
if node.id in exec_node_ids:
|
|
888
|
-
return node.id
|
|
889
|
-
return None
|
|
890
|
-
|
|
891
|
-
entry_exec = _pick_entry()
|
|
892
|
-
reachable_exec: set[str] = set()
|
|
893
|
-
if entry_exec:
|
|
894
|
-
adj: Dict[str, list[str]] = {}
|
|
895
|
-
for e in visual.edges:
|
|
896
|
-
if getattr(e, "targetHandle", None) != "exec-in":
|
|
897
|
-
continue
|
|
898
|
-
if e.source not in exec_node_ids or e.target not in exec_node_ids:
|
|
899
|
-
continue
|
|
900
|
-
adj.setdefault(e.source, []).append(e.target)
|
|
901
|
-
stack = [entry_exec]
|
|
902
|
-
while stack:
|
|
903
|
-
cur = stack.pop()
|
|
904
|
-
if cur in reachable_exec:
|
|
905
|
-
continue
|
|
906
|
-
reachable_exec.add(cur)
|
|
907
|
-
for nxt in adj.get(cur, []):
|
|
908
|
-
if nxt not in reachable_exec:
|
|
909
|
-
stack.append(nxt)
|
|
910
|
-
|
|
911
|
-
ignored_exec = sorted([nid for nid in exec_node_ids if nid not in reachable_exec])
|
|
912
|
-
if ignored_exec:
|
|
913
|
-
# Runtime-local metadata for hosts/UIs that want to show warnings.
|
|
914
|
-
flow._ignored_exec_nodes = ignored_exec # type: ignore[attr-defined]
|
|
915
|
-
|
|
916
|
-
def _decode_separator(value: str) -> str:
|
|
917
|
-
return value.replace("\\n", "\n").replace("\\t", "\t").replace("\\r", "\r")
|
|
918
|
-
|
|
919
|
-
def _create_read_file_handler(_data: Dict[str, Any]):
|
|
920
|
-
import json
|
|
921
|
-
from pathlib import Path
|
|
922
|
-
|
|
923
|
-
def handler(input_data: Any) -> Dict[str, Any]:
|
|
924
|
-
payload = input_data if isinstance(input_data, dict) else {}
|
|
925
|
-
raw_path = payload.get("file_path")
|
|
926
|
-
if not isinstance(raw_path, str) or not raw_path.strip():
|
|
927
|
-
raise ValueError("read_file requires a non-empty 'file_path' input.")
|
|
928
|
-
|
|
929
|
-
file_path = raw_path.strip()
|
|
930
|
-
path = Path(file_path).expanduser()
|
|
931
|
-
if not path.is_absolute():
|
|
932
|
-
path = Path.cwd() / path
|
|
933
|
-
|
|
934
|
-
if not path.exists():
|
|
935
|
-
raise FileNotFoundError(f"File not found: {file_path}")
|
|
936
|
-
if not path.is_file():
|
|
937
|
-
raise ValueError(f"Not a file: {file_path}")
|
|
938
|
-
|
|
939
|
-
try:
|
|
940
|
-
text = path.read_text(encoding="utf-8")
|
|
941
|
-
except UnicodeDecodeError as e:
|
|
942
|
-
raise ValueError(f"Cannot read '{file_path}' as UTF-8: {e}") from e
|
|
943
|
-
|
|
944
|
-
# Detect JSON primarily from file extension; also opportunistically parse
|
|
945
|
-
# when the content looks like JSON. Markdown and text are returned as-is.
|
|
946
|
-
lower_name = path.name.lower()
|
|
947
|
-
content_stripped = text.lstrip()
|
|
948
|
-
looks_like_json = bool(content_stripped) and content_stripped[0] in "{["
|
|
949
|
-
|
|
950
|
-
if lower_name.endswith(".json"):
|
|
951
|
-
try:
|
|
952
|
-
return {"content": json.loads(text)}
|
|
953
|
-
except Exception as e:
|
|
954
|
-
raise ValueError(f"Invalid JSON in '{file_path}': {e}") from e
|
|
955
|
-
|
|
956
|
-
if looks_like_json:
|
|
957
|
-
try:
|
|
958
|
-
return {"content": json.loads(text)}
|
|
959
|
-
except Exception:
|
|
960
|
-
pass
|
|
961
|
-
|
|
962
|
-
return {"content": text}
|
|
963
|
-
|
|
964
|
-
return handler
|
|
965
|
-
|
|
966
|
-
def _create_write_file_handler(_data: Dict[str, Any]):
|
|
967
|
-
import json
|
|
968
|
-
from pathlib import Path
|
|
969
|
-
|
|
970
|
-
def handler(input_data: Any) -> Dict[str, Any]:
|
|
971
|
-
payload = input_data if isinstance(input_data, dict) else {}
|
|
972
|
-
raw_path = payload.get("file_path")
|
|
973
|
-
if not isinstance(raw_path, str) or not raw_path.strip():
|
|
974
|
-
raise ValueError("write_file requires a non-empty 'file_path' input.")
|
|
975
|
-
|
|
976
|
-
file_path = raw_path.strip()
|
|
977
|
-
path = Path(file_path).expanduser()
|
|
978
|
-
if not path.is_absolute():
|
|
979
|
-
path = Path.cwd() / path
|
|
980
|
-
|
|
981
|
-
raw_content = payload.get("content")
|
|
982
|
-
|
|
983
|
-
if path.name.lower().endswith(".json"):
|
|
984
|
-
if isinstance(raw_content, str):
|
|
985
|
-
try:
|
|
986
|
-
raw_content = json.loads(raw_content)
|
|
987
|
-
except Exception as e:
|
|
988
|
-
raise ValueError(f"write_file JSON content must be valid JSON: {e}") from e
|
|
989
|
-
text = json.dumps(raw_content, indent=2, ensure_ascii=False)
|
|
990
|
-
if not text.endswith("\n"):
|
|
991
|
-
text += "\n"
|
|
992
|
-
else:
|
|
993
|
-
if raw_content is None:
|
|
994
|
-
text = ""
|
|
995
|
-
elif isinstance(raw_content, str):
|
|
996
|
-
text = raw_content
|
|
997
|
-
elif isinstance(raw_content, (dict, list)):
|
|
998
|
-
text = json.dumps(raw_content, indent=2, ensure_ascii=False)
|
|
999
|
-
else:
|
|
1000
|
-
text = str(raw_content)
|
|
1001
|
-
|
|
1002
|
-
path.parent.mkdir(parents=True, exist_ok=True)
|
|
1003
|
-
path.write_text(text, encoding="utf-8")
|
|
1004
|
-
|
|
1005
|
-
return {"bytes": len(text.encode("utf-8")), "file_path": str(path)}
|
|
1006
|
-
|
|
1007
|
-
return handler
|
|
1008
|
-
|
|
1009
|
-
def _create_concat_handler(data: Dict[str, Any]):
|
|
1010
|
-
config = data.get("concatConfig", {}) if isinstance(data, dict) else {}
|
|
1011
|
-
separator = " "
|
|
1012
|
-
if isinstance(config, dict):
|
|
1013
|
-
sep_raw = config.get("separator")
|
|
1014
|
-
if isinstance(sep_raw, str):
|
|
1015
|
-
separator = sep_raw
|
|
1016
|
-
separator = _decode_separator(separator)
|
|
1017
|
-
|
|
1018
|
-
pin_order: list[str] = []
|
|
1019
|
-
pins = data.get("inputs") if isinstance(data, dict) else None
|
|
1020
|
-
if isinstance(pins, list):
|
|
1021
|
-
for p in pins:
|
|
1022
|
-
if not isinstance(p, dict):
|
|
1023
|
-
continue
|
|
1024
|
-
if p.get("type") == "execution":
|
|
1025
|
-
continue
|
|
1026
|
-
pid = p.get("id")
|
|
1027
|
-
if isinstance(pid, str) and pid:
|
|
1028
|
-
pin_order.append(pid)
|
|
1029
|
-
|
|
1030
|
-
if not pin_order:
|
|
1031
|
-
# Backward-compat: programmatic/test-created VisualNodes may omit template pins.
|
|
1032
|
-
# In that case, infer a stable pin order from the provided input keys at runtime
|
|
1033
|
-
# (prefer a..z single-letter pins), so `a`, `b`, ... behave as expected.
|
|
1034
|
-
pin_order = []
|
|
1035
|
-
|
|
1036
|
-
def handler(input_data: Any) -> str:
|
|
1037
|
-
if not isinstance(input_data, dict):
|
|
1038
|
-
return str(input_data or "")
|
|
1039
|
-
|
|
1040
|
-
parts: list[str] = []
|
|
1041
|
-
if pin_order:
|
|
1042
|
-
order = pin_order
|
|
1043
|
-
else:
|
|
1044
|
-
# Stable inference for missing pin metadata.
|
|
1045
|
-
keys = [k for k in input_data.keys() if isinstance(k, str)]
|
|
1046
|
-
letter = sorted([k for k in keys if len(k) == 1 and "a" <= k <= "z"])
|
|
1047
|
-
other = sorted([k for k in keys if k not in set(letter)])
|
|
1048
|
-
order = letter + other
|
|
1049
|
-
|
|
1050
|
-
for pid in order:
|
|
1051
|
-
if pid in input_data:
|
|
1052
|
-
v = input_data.get(pid)
|
|
1053
|
-
parts.append("" if v is None else str(v))
|
|
1054
|
-
return separator.join(parts)
|
|
1055
|
-
|
|
1056
|
-
return handler
|
|
1057
|
-
|
|
1058
|
-
def _create_make_array_handler(data: Dict[str, Any]):
|
|
1059
|
-
"""Build an array from 1+ inputs in pin order.
|
|
1060
|
-
|
|
1061
|
-
Design:
|
|
1062
|
-
- We treat missing/unset pins as absent (skip None) to avoid surprising `null`
|
|
1063
|
-
elements when a pin is present but unconnected.
|
|
1064
|
-
- We do NOT flatten arrays/tuples; if you want flattening/concatenation,
|
|
1065
|
-
use `array_concat`.
|
|
1066
|
-
"""
|
|
1067
|
-
pin_order: list[str] = []
|
|
1068
|
-
pins = data.get("inputs") if isinstance(data, dict) else None
|
|
1069
|
-
if isinstance(pins, list):
|
|
1070
|
-
for p in pins:
|
|
1071
|
-
if not isinstance(p, dict):
|
|
1072
|
-
continue
|
|
1073
|
-
if p.get("type") == "execution":
|
|
1074
|
-
continue
|
|
1075
|
-
pid = p.get("id")
|
|
1076
|
-
if isinstance(pid, str) and pid:
|
|
1077
|
-
pin_order.append(pid)
|
|
1078
|
-
|
|
1079
|
-
if not pin_order:
|
|
1080
|
-
pin_order = ["a", "b"]
|
|
1081
|
-
|
|
1082
|
-
def handler(input_data: Any) -> list[Any]:
|
|
1083
|
-
if not isinstance(input_data, dict):
|
|
1084
|
-
if input_data is None:
|
|
1085
|
-
return []
|
|
1086
|
-
if isinstance(input_data, list):
|
|
1087
|
-
return list(input_data)
|
|
1088
|
-
if isinstance(input_data, tuple):
|
|
1089
|
-
return list(input_data)
|
|
1090
|
-
return [input_data]
|
|
1091
|
-
|
|
1092
|
-
out: list[Any] = []
|
|
1093
|
-
for pid in pin_order:
|
|
1094
|
-
if pid not in input_data:
|
|
1095
|
-
continue
|
|
1096
|
-
v = input_data.get(pid)
|
|
1097
|
-
if v is None:
|
|
1098
|
-
continue
|
|
1099
|
-
out.append(v)
|
|
1100
|
-
return out
|
|
1101
|
-
|
|
1102
|
-
return handler
|
|
1103
|
-
|
|
1104
|
-
def _create_array_concat_handler(data: Dict[str, Any]):
|
|
1105
|
-
pin_order: list[str] = []
|
|
1106
|
-
pins = data.get("inputs") if isinstance(data, dict) else None
|
|
1107
|
-
if isinstance(pins, list):
|
|
1108
|
-
for p in pins:
|
|
1109
|
-
if not isinstance(p, dict):
|
|
1110
|
-
continue
|
|
1111
|
-
if p.get("type") == "execution":
|
|
1112
|
-
continue
|
|
1113
|
-
pid = p.get("id")
|
|
1114
|
-
if isinstance(pid, str) and pid:
|
|
1115
|
-
pin_order.append(pid)
|
|
1116
|
-
|
|
1117
|
-
if not pin_order:
|
|
1118
|
-
pin_order = ["a", "b"]
|
|
1119
|
-
|
|
1120
|
-
def handler(input_data: Any) -> list[Any]:
|
|
1121
|
-
if not isinstance(input_data, dict):
|
|
1122
|
-
if input_data is None:
|
|
1123
|
-
return []
|
|
1124
|
-
if isinstance(input_data, list):
|
|
1125
|
-
return list(input_data)
|
|
1126
|
-
if isinstance(input_data, tuple):
|
|
1127
|
-
return list(input_data)
|
|
1128
|
-
return [input_data]
|
|
1129
|
-
|
|
1130
|
-
out: list[Any] = []
|
|
1131
|
-
for pid in pin_order:
|
|
1132
|
-
if pid not in input_data:
|
|
1133
|
-
continue
|
|
1134
|
-
v = input_data.get(pid)
|
|
1135
|
-
if v is None:
|
|
1136
|
-
continue
|
|
1137
|
-
if isinstance(v, list):
|
|
1138
|
-
out.extend(v)
|
|
1139
|
-
continue
|
|
1140
|
-
if isinstance(v, tuple):
|
|
1141
|
-
out.extend(list(v))
|
|
1142
|
-
continue
|
|
1143
|
-
out.append(v)
|
|
1144
|
-
return out
|
|
1145
|
-
|
|
1146
|
-
return handler
|
|
1147
|
-
|
|
1148
|
-
def _create_break_object_handler(data: Dict[str, Any]):
|
|
1149
|
-
config = data.get("breakConfig", {}) if isinstance(data, dict) else {}
|
|
1150
|
-
selected = config.get("selectedPaths", []) if isinstance(config, dict) else []
|
|
1151
|
-
selected_paths = [p.strip() for p in selected if isinstance(p, str) and p.strip()]
|
|
1152
|
-
|
|
1153
|
-
def _get_path(value: Any, path: str) -> Any:
|
|
1154
|
-
current = value
|
|
1155
|
-
for part in path.split("."):
|
|
1156
|
-
if current is None:
|
|
1157
|
-
return None
|
|
1158
|
-
if isinstance(current, dict):
|
|
1159
|
-
current = current.get(part)
|
|
1160
|
-
continue
|
|
1161
|
-
if isinstance(current, list) and part.isdigit():
|
|
1162
|
-
idx = int(part)
|
|
1163
|
-
if idx < 0 or idx >= len(current):
|
|
1164
|
-
return None
|
|
1165
|
-
current = current[idx]
|
|
1166
|
-
continue
|
|
1167
|
-
return None
|
|
1168
|
-
return current
|
|
1169
|
-
|
|
1170
|
-
def handler(input_data):
|
|
1171
|
-
src_obj = None
|
|
1172
|
-
if isinstance(input_data, dict):
|
|
1173
|
-
src_obj = input_data.get("object")
|
|
1174
|
-
|
|
1175
|
-
# Best-effort: tolerate JSON-ish strings (common when breaking LLM outputs).
|
|
1176
|
-
if isinstance(src_obj, str) and src_obj.strip():
|
|
1177
|
-
try:
|
|
1178
|
-
parser = get_builtin_handler("parse_json")
|
|
1179
|
-
if parser is not None:
|
|
1180
|
-
src_obj = parser({"text": src_obj, "wrap_scalar": True})
|
|
1181
|
-
except Exception:
|
|
1182
|
-
pass
|
|
1183
|
-
|
|
1184
|
-
out: Dict[str, Any] = {}
|
|
1185
|
-
for path in selected_paths:
|
|
1186
|
-
out[path] = _get_path(src_obj, path)
|
|
1187
|
-
return out
|
|
1188
|
-
|
|
1189
|
-
return handler
|
|
1190
|
-
|
|
1191
|
-
def _get_by_path(value: Any, path: str) -> Any:
|
|
1192
|
-
"""Best-effort dotted-path lookup supporting dicts and numeric list indices."""
|
|
1193
|
-
current = value
|
|
1194
|
-
for part in path.split("."):
|
|
1195
|
-
if current is None:
|
|
1196
|
-
return None
|
|
1197
|
-
if isinstance(current, dict):
|
|
1198
|
-
current = current.get(part)
|
|
1199
|
-
continue
|
|
1200
|
-
if isinstance(current, list) and part.isdigit():
|
|
1201
|
-
idx = int(part)
|
|
1202
|
-
if idx < 0 or idx >= len(current):
|
|
1203
|
-
return None
|
|
1204
|
-
current = current[idx]
|
|
1205
|
-
continue
|
|
1206
|
-
return None
|
|
1207
|
-
return current
|
|
1208
|
-
|
|
1209
|
-
def _create_get_var_handler(_data: Dict[str, Any]):
|
|
1210
|
-
# Pure node: reads from the current run vars (attached onto the Flow by the compiler).
|
|
1211
|
-
# Mark as volatile so it is recomputed whenever requested (avoids stale cached reads).
|
|
1212
|
-
def handler(input_data: Any) -> Dict[str, Any]:
|
|
1213
|
-
payload = input_data if isinstance(input_data, dict) else {}
|
|
1214
|
-
raw_name = payload.get("name")
|
|
1215
|
-
name = (raw_name if isinstance(raw_name, str) else str(raw_name or "")).strip()
|
|
1216
|
-
run_vars = getattr(flow, "_run_vars", None) # type: ignore[attr-defined]
|
|
1217
|
-
if not isinstance(run_vars, dict) or not name:
|
|
1218
|
-
return {"value": None}
|
|
1219
|
-
return {"value": _get_by_path(run_vars, name)}
|
|
1220
|
-
|
|
1221
|
-
return handler
|
|
1222
|
-
|
|
1223
|
-
def _create_bool_var_handler(data: Dict[str, Any]):
|
|
1224
|
-
"""Pure node: reads a workflow-level boolean variable from run.vars with a default.
|
|
1225
|
-
|
|
1226
|
-
Config is stored in the visual node's `literalValue` as either:
|
|
1227
|
-
- a string: variable name
|
|
1228
|
-
- an object: { "name": "...", "default": true|false }
|
|
1229
|
-
"""
|
|
1230
|
-
raw_cfg = data.get("literalValue")
|
|
1231
|
-
name_cfg = ""
|
|
1232
|
-
default_cfg = False
|
|
1233
|
-
if isinstance(raw_cfg, str):
|
|
1234
|
-
name_cfg = raw_cfg.strip()
|
|
1235
|
-
elif isinstance(raw_cfg, dict):
|
|
1236
|
-
n = raw_cfg.get("name")
|
|
1237
|
-
if isinstance(n, str):
|
|
1238
|
-
name_cfg = n.strip()
|
|
1239
|
-
d = raw_cfg.get("default")
|
|
1240
|
-
if isinstance(d, bool):
|
|
1241
|
-
default_cfg = d
|
|
1242
|
-
|
|
1243
|
-
def handler(input_data: Any) -> Dict[str, Any]:
|
|
1244
|
-
del input_data
|
|
1245
|
-
run_vars = getattr(flow, "_run_vars", None) # type: ignore[attr-defined]
|
|
1246
|
-
if not isinstance(run_vars, dict) or not name_cfg:
|
|
1247
|
-
return {"name": name_cfg, "value": bool(default_cfg)}
|
|
1248
|
-
|
|
1249
|
-
raw = _get_by_path(run_vars, name_cfg)
|
|
1250
|
-
if isinstance(raw, bool):
|
|
1251
|
-
return {"name": name_cfg, "value": raw}
|
|
1252
|
-
return {"name": name_cfg, "value": bool(default_cfg)}
|
|
1253
|
-
|
|
1254
|
-
return handler
|
|
1255
|
-
|
|
1256
|
-
def _create_var_decl_handler(data: Dict[str, Any]):
|
|
1257
|
-
"""Pure node: typed workflow variable declaration (name + type + default).
|
|
1258
|
-
|
|
1259
|
-
Config is stored in `literalValue`:
|
|
1260
|
-
{ "name": "...", "type": "boolean|number|string|object|array|any", "default": ... }
|
|
1261
|
-
|
|
1262
|
-
Runtime semantics:
|
|
1263
|
-
- Read `run.vars[name]` (via `flow._run_vars`), and return it if it matches the declared type.
|
|
1264
|
-
- Otherwise fall back to the declared default.
|
|
1265
|
-
"""
|
|
1266
|
-
raw_cfg = data.get("literalValue")
|
|
1267
|
-
name_cfg = ""
|
|
1268
|
-
type_cfg = "any"
|
|
1269
|
-
default_cfg: Any = None
|
|
1270
|
-
if isinstance(raw_cfg, dict):
|
|
1271
|
-
n = raw_cfg.get("name")
|
|
1272
|
-
if isinstance(n, str):
|
|
1273
|
-
name_cfg = n.strip()
|
|
1274
|
-
t = raw_cfg.get("type")
|
|
1275
|
-
if isinstance(t, str) and t.strip():
|
|
1276
|
-
type_cfg = t.strip()
|
|
1277
|
-
default_cfg = raw_cfg.get("default")
|
|
1278
|
-
|
|
1279
|
-
allowed_types = {"boolean", "number", "string", "object", "array", "any"}
|
|
1280
|
-
if type_cfg not in allowed_types:
|
|
1281
|
-
type_cfg = "any"
|
|
1282
|
-
|
|
1283
|
-
def _matches(v: Any) -> bool:
|
|
1284
|
-
if type_cfg == "any":
|
|
1285
|
-
return True
|
|
1286
|
-
if type_cfg == "boolean":
|
|
1287
|
-
return isinstance(v, bool)
|
|
1288
|
-
if type_cfg == "number":
|
|
1289
|
-
return isinstance(v, (int, float)) and not isinstance(v, bool)
|
|
1290
|
-
if type_cfg == "string":
|
|
1291
|
-
return isinstance(v, str)
|
|
1292
|
-
if type_cfg == "array":
|
|
1293
|
-
return isinstance(v, list)
|
|
1294
|
-
if type_cfg == "object":
|
|
1295
|
-
return isinstance(v, dict)
|
|
1296
|
-
return True
|
|
1297
|
-
|
|
1298
|
-
def _default_for_type() -> Any:
|
|
1299
|
-
if type_cfg == "boolean":
|
|
1300
|
-
return False
|
|
1301
|
-
if type_cfg == "number":
|
|
1302
|
-
return 0
|
|
1303
|
-
if type_cfg == "string":
|
|
1304
|
-
return ""
|
|
1305
|
-
if type_cfg == "array":
|
|
1306
|
-
return []
|
|
1307
|
-
if type_cfg == "object":
|
|
1308
|
-
return {}
|
|
1309
|
-
return None
|
|
1310
|
-
|
|
1311
|
-
def handler(input_data: Any) -> Dict[str, Any]:
|
|
1312
|
-
del input_data
|
|
1313
|
-
run_vars = getattr(flow, "_run_vars", None) # type: ignore[attr-defined]
|
|
1314
|
-
if not isinstance(run_vars, dict) or not name_cfg:
|
|
1315
|
-
v = default_cfg if _matches(default_cfg) else _default_for_type()
|
|
1316
|
-
return {"name": name_cfg, "value": v}
|
|
1317
|
-
|
|
1318
|
-
raw = _get_by_path(run_vars, name_cfg)
|
|
1319
|
-
if _matches(raw):
|
|
1320
|
-
return {"name": name_cfg, "value": raw}
|
|
1321
|
-
|
|
1322
|
-
v = default_cfg if _matches(default_cfg) else _default_for_type()
|
|
1323
|
-
return {"name": name_cfg, "value": v}
|
|
1324
|
-
|
|
1325
|
-
return handler
|
|
1326
|
-
|
|
1327
|
-
def _create_set_var_handler(_data: Dict[str, Any]):
|
|
1328
|
-
# Execution node: does not mutate run.vars here (handled by compiler adapter).
|
|
1329
|
-
# This handler exists to participate in data-edge resolution and expose outputs.
|
|
1330
|
-
#
|
|
1331
|
-
# Important UX contract:
|
|
1332
|
-
# - In the visual editor, primitive pins (boolean/number/string) show default UI controls
|
|
1333
|
-
# even when the user hasn't explicitly edited them.
|
|
1334
|
-
# - If we treat "missing" as None here, `Set Variable` would write None and this can
|
|
1335
|
-
# cause typed `Variable` (`var_decl`) to fall back to its default (e.g. staying True).
|
|
1336
|
-
# - Therefore we default missing primitive values to their natural defaults.
|
|
1337
|
-
pins = _data.get("inputs") if isinstance(_data, dict) else None
|
|
1338
|
-
value_pin_type: Optional[str] = None
|
|
1339
|
-
if isinstance(pins, list):
|
|
1340
|
-
for p in pins:
|
|
1341
|
-
if not isinstance(p, dict):
|
|
1342
|
-
continue
|
|
1343
|
-
if p.get("id") != "value":
|
|
1344
|
-
continue
|
|
1345
|
-
t = p.get("type")
|
|
1346
|
-
if isinstance(t, str) and t:
|
|
1347
|
-
value_pin_type = t
|
|
1348
|
-
break
|
|
1349
|
-
|
|
1350
|
-
def handler(input_data: Any) -> Dict[str, Any]:
|
|
1351
|
-
payload = input_data if isinstance(input_data, dict) else {}
|
|
1352
|
-
value_specified = isinstance(payload, dict) and "value" in payload
|
|
1353
|
-
value = payload.get("value")
|
|
1354
|
-
|
|
1355
|
-
if not value_specified:
|
|
1356
|
-
if value_pin_type == "boolean":
|
|
1357
|
-
value = False
|
|
1358
|
-
elif value_pin_type == "number":
|
|
1359
|
-
value = 0
|
|
1360
|
-
elif value_pin_type == "string":
|
|
1361
|
-
value = ""
|
|
1362
|
-
|
|
1363
|
-
return {"name": payload.get("name"), "value": value}
|
|
1364
|
-
|
|
1365
|
-
return handler
|
|
1366
|
-
|
|
1367
|
-
def _wrap_builtin(handler, data: Dict[str, Any]):
|
|
1368
|
-
literal_value = data.get("literalValue")
|
|
1369
|
-
# Preserve pin order for builtins that need deterministic input selection (e.g. coalesce).
|
|
1370
|
-
pin_order: list[str] = []
|
|
1371
|
-
pins = data.get("inputs") if isinstance(data, dict) else None
|
|
1372
|
-
if isinstance(pins, list):
|
|
1373
|
-
for p in pins:
|
|
1374
|
-
if not isinstance(p, dict):
|
|
1375
|
-
continue
|
|
1376
|
-
if p.get("type") == "execution":
|
|
1377
|
-
continue
|
|
1378
|
-
pid = p.get("id")
|
|
1379
|
-
if isinstance(pid, str) and pid:
|
|
1380
|
-
pin_order.append(pid)
|
|
1381
|
-
|
|
1382
|
-
def wrapped(input_data):
|
|
1383
|
-
if isinstance(input_data, dict):
|
|
1384
|
-
inputs = input_data.copy()
|
|
1385
|
-
else:
|
|
1386
|
-
inputs = {"value": input_data, "a": input_data, "text": input_data}
|
|
1387
|
-
|
|
1388
|
-
if literal_value is not None:
|
|
1389
|
-
inputs["_literalValue"] = literal_value
|
|
1390
|
-
if pin_order:
|
|
1391
|
-
inputs["_pin_order"] = list(pin_order)
|
|
1392
|
-
|
|
1393
|
-
return handler(inputs)
|
|
1394
|
-
|
|
1395
|
-
return wrapped
|
|
1396
|
-
-def _create_agent_input_handler(data: Dict[str, Any]):
-    cfg = data.get("agentConfig", {}) if isinstance(data, dict) else {}
-    cfg = cfg if isinstance(cfg, dict) else {}
-
-    def _normalize_response_schema(raw: Any) -> Optional[Dict[str, Any]]:
-        """Normalize a structured-output schema input into a JSON Schema dict.
-
-        Supported inputs (best-effort):
-        - JSON Schema dict: {"type":"object","properties":{...}, ...}
-        - LMStudio/OpenAI-style wrapper: {"type":"json_schema","json_schema": {"schema": {...}}}
-        """
-        if raw is None:
-            return None
-        if isinstance(raw, dict):
-            if raw.get("type") == "json_schema" and isinstance(raw.get("json_schema"), dict):
-                inner = raw.get("json_schema")
-                if isinstance(inner, dict) and isinstance(inner.get("schema"), dict):
-                    return dict(inner.get("schema") or {})
-            return dict(raw)
-        return None
-
-    def _normalize_tool_names(raw: Any) -> list[str]:
-        if raw is None:
-            return []
-        items: list[Any]
-        if isinstance(raw, list):
-            items = raw
-        elif isinstance(raw, tuple):
-            items = list(raw)
-        else:
-            items = [raw]
-        out: list[str] = []
-        for t in items:
-            if isinstance(t, str) and t.strip():
-                out.append(t.strip())
-        # preserve order, remove duplicates
-        seen: set[str] = set()
-        uniq: list[str] = []
-        for t in out:
-            if t in seen:
-                continue
-            seen.add(t)
-            uniq.append(t)
-        return uniq
-
-    def handler(input_data):
-        task = ""
-        if isinstance(input_data, dict):
-            raw_task = input_data.get("task")
-            if raw_task is None:
-                raw_task = input_data.get("prompt")
-            task = "" if raw_task is None else str(raw_task)
-        else:
-            task = str(input_data)
-
-        context_raw = input_data.get("context", {}) if isinstance(input_data, dict) else {}
-        context = context_raw if isinstance(context_raw, dict) else {}
-        provider = input_data.get("provider") if isinstance(input_data, dict) else None
-        model = input_data.get("model") if isinstance(input_data, dict) else None
-
-        system_raw = input_data.get("system") if isinstance(input_data, dict) else ""
-        system = system_raw if isinstance(system_raw, str) else str(system_raw or "")
-
-        tools_specified = isinstance(input_data, dict) and "tools" in input_data
-        tools_raw = input_data.get("tools") if isinstance(input_data, dict) else None
-        tools = _normalize_tool_names(tools_raw) if tools_specified else []
-        if not tools_specified:
-            tools = _normalize_tool_names(cfg.get("tools"))
-
-        out: Dict[str, Any] = {
-            "task": task,
-            "context": context,
-            "provider": provider if isinstance(provider, str) else None,
-            "model": model if isinstance(model, str) else None,
-            "system": system,
-            "tools": tools,
-        }
-
-        # Optional pin overrides (passed through for compiler/runtime consumption).
-        if isinstance(input_data, dict) and "max_iterations" in input_data:
-            out["max_iterations"] = input_data.get("max_iterations")
-
-        if isinstance(input_data, dict) and "response_schema" in input_data:
-            schema = _normalize_response_schema(input_data.get("response_schema"))
-            if isinstance(schema, dict) and schema:
-                out["response_schema"] = schema
-
-        include_context_specified = isinstance(input_data, dict) and (
-            "include_context" in input_data or "use_context" in input_data
-        )
-        if include_context_specified:
-            raw_inc = (
-                input_data.get("include_context")
-                if isinstance(input_data, dict) and "include_context" in input_data
-                else input_data.get("use_context") if isinstance(input_data, dict) else None
-            )
-            out["include_context"] = _coerce_bool(raw_inc)
-
-        return out
-
-    return handler
-
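Note: behaviorally, the handler above normalized heterogeneous pin inputs into one agent-request dict. A hedged sketch of that mapping, with invented example values:

```python
# Input as it might arrive from pins (illustrative values only):
payload = {
    "prompt": "Summarize the report",           # used because no "task" key is present
    "tools": ("web_search", "web_search", ""),  # tuple, duplicate, blank entry
    "use_context": "yes",                       # legacy alias for include_context
}
# Per the removed code above, handler(payload) would yield:
# {
#     "task": "Summarize the report",
#     "context": {}, "provider": None, "model": None, "system": "",
#     "tools": ["web_search"],     # order-preserving dedupe, blanks dropped
#     "include_context": True,     # _coerce_bool("yes")
# }
```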
-def _create_subflow_effect_builder(data: Dict[str, Any]):
-    input_pin_ids: list[str] = []
-    pins = data.get("inputs") if isinstance(data, dict) else None
-    if isinstance(pins, list):
-        for p in pins:
-            if not isinstance(p, dict):
-                continue
-            if p.get("type") == "execution":
-                continue
-            pid = p.get("id")
-            if isinstance(pid, str) and pid:
-                # Control pin (not forwarded into child vars).
-                if pid in {"inherit_context", "inheritContext"}:
-                    continue
-                input_pin_ids.append(pid)
-
-    inherit_cfg = None
-    if isinstance(data, dict):
-        cfg = data.get("effectConfig")
-        if isinstance(cfg, dict):
-            inherit_cfg = cfg.get("inherit_context")
-            if inherit_cfg is None:
-                inherit_cfg = cfg.get("inheritContext")
-    inherit_context_default = bool(inherit_cfg) if inherit_cfg is not None else False
-
-    def handler(input_data):
-        subflow_id = (
-            data.get("subflowId")
-            or data.get("flowId")  # legacy
-            or data.get("workflowId")
-            or data.get("workflow_id")
-        )
-
-        sub_vars_dict: Dict[str, Any] = {}
-        if isinstance(input_data, dict):
-            base: Dict[str, Any] = {}
-            if isinstance(input_data.get("vars"), dict):
-                base.update(dict(input_data["vars"]))
-            elif isinstance(input_data.get("input"), dict):
-                base.update(dict(input_data["input"]))
-
-            if input_pin_ids:
-                for pid in input_pin_ids:
-                    if pid in ("vars", "input") and isinstance(input_data.get(pid), dict):
-                        continue
-                    if pid in input_data:
-                        base[pid] = input_data.get(pid)
-                sub_vars_dict = base
-            else:
-                if base:
-                    sub_vars_dict = base
-                else:
-                    sub_vars_dict = dict(input_data)
-        else:
-            if input_pin_ids and len(input_pin_ids) == 1:
-                sub_vars_dict = {input_pin_ids[0]: input_data}
-            else:
-                sub_vars_dict = {"input": input_data}
-
-        # Never forward control pins into the child run vars.
-        sub_vars_dict.pop("inherit_context", None)
-        sub_vars_dict.pop("inheritContext", None)
-
-        inherit_context_specified = isinstance(input_data, dict) and (
-            "inherit_context" in input_data or "inheritContext" in input_data
-        )
-        if inherit_context_specified:
-            raw_inherit = (
-                input_data.get("inherit_context")
-                if isinstance(input_data, dict) and "inherit_context" in input_data
-                else input_data.get("inheritContext") if isinstance(input_data, dict) else None
-            )
-            inherit_context_value = _coerce_bool(raw_inherit)
-        else:
-            inherit_context_value = inherit_context_default
-
-        return {
-            "output": None,
-            "_pending_effect": (
-                {
-                    "type": "start_subworkflow",
-                    "workflow_id": subflow_id,
-                    "vars": sub_vars_dict,
-                    # Start subworkflows in async+wait mode so hosts (notably AbstractFlow Web)
-                    # can tick child runs incrementally and stream their node_start/node_complete
-                    # events for better observability (nested/recursive subflows).
-                    #
-                    # Non-interactive hosts (tests/CLI) still complete synchronously because
-                    # FlowRunner.run() auto-drives WAITING(SUBWORKFLOW) children and resumes
-                    # parents until completion.
-                    "async": True,
-                    "wait": True,
-                    **({"inherit_context": True} if inherit_context_value else {}),
-                }
-            ),
-        }
-
-    return handler
-
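Note: a sketch of the pending-effect payload the removed builder emitted. The pin name and value are illustrative; the key layout follows the code above:

```python
# Given a subflow node with one data pin "query" and inherit_context enabled
# via effectConfig, handler({"query": "..."}) returned:
# {
#     "output": None,
#     "_pending_effect": {
#         "type": "start_subworkflow",
#         "workflow_id": "<subflowId from node data>",
#         "vars": {"query": "..."},
#         "async": True,
#         "wait": True,
#         "inherit_context": True,   # omitted entirely when False
#     },
# }
```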
-def _create_event_handler(event_type: str, data: Dict[str, Any]):
-    # Event nodes are special: they bridge external inputs / runtime vars into the graph.
-    #
-    # Critical constraint: RunState.vars must remain JSON-serializable for durable execution.
-    # The runtime persists per-node outputs in `vars["_temp"]["node_outputs"]`. If an event node
-    # returns the full `run.vars` dict (which contains `_temp`), we create a self-referential
-    # cycle: `_temp -> node_outputs -> <start_output>['_temp'] -> _temp`, which explodes during
-    # persistence (e.g. JsonFileRunStore uses dataclasses.asdict()).
-    #
-    # Therefore, `on_flow_start` must *not* leak internal namespaces like `_temp` into outputs.
-    start_pin_ids: list[str] = []
-    pins = data.get("outputs") if isinstance(data, dict) else None
-    if isinstance(pins, list):
-        for p in pins:
-            if not isinstance(p, dict):
-                continue
-            if p.get("type") == "execution":
-                continue
-            pid = p.get("id")
-            if isinstance(pid, str) and pid:
-                start_pin_ids.append(pid)
-
-    def handler(input_data):
-        if event_type == "on_flow_start":
-            # Prefer explicit pins: the visual editor treats non-exec output pins as
-            # "Flow Start Parameters" (initial vars). Only expose those by default.
-            if isinstance(input_data, dict):
-                defaults_raw = data.get("pinDefaults") if isinstance(data, dict) else None
-                defaults = defaults_raw if isinstance(defaults_raw, dict) else {}
-                if start_pin_ids:
-                    out: Dict[str, Any] = {}
-                    for pid in start_pin_ids:
-                        if pid in input_data:
-                            out[pid] = input_data.get(pid)
-                            continue
-                        if isinstance(pid, str) and pid in defaults:
-                            dv = defaults.get(pid)
-                            out[pid] = _clone_default(dv)
-                            # Also seed run.vars for downstream Get Variable / debugging.
-                            if not pid.startswith("_") and pid not in input_data:
-                                input_data[pid] = _clone_default(dv)
-                            continue
-                        out[pid] = None
-                    return out
-                # Backward-compat: older/test-created flows may omit pin metadata.
-                # In that case, expose non-internal keys only (avoid `_temp`, `_limits`, ...).
-                out2 = {k: v for k, v in input_data.items() if isinstance(k, str) and not k.startswith("_")}
-                # If pinDefaults exist, apply them for missing non-internal keys.
-                for k, dv in defaults.items():
-                    if not isinstance(k, str) or not k or k.startswith("_"):
-                        continue
-                    if k in out2 or k in input_data:
-                        continue
-                    out2[k] = _clone_default(dv)
-                    input_data[k] = _clone_default(dv)
-                return out2
-
-            # Non-dict input: if there is a single declared pin, map into it; otherwise
-            # keep a generic `input` key.
-            if start_pin_ids and len(start_pin_ids) == 1:
-                return {start_pin_ids[0]: input_data}
-            return {"input": input_data}
-        if event_type == "on_user_request":
-            message = input_data.get("message", "") if isinstance(input_data, dict) else str(input_data)
-            context = input_data.get("context", {}) if isinstance(input_data, dict) else {}
-            return {"message": message, "context": context}
-        if event_type == "on_agent_message":
-            sender = input_data.get("sender", "unknown") if isinstance(input_data, dict) else "unknown"
-            message = input_data.get("message", "") if isinstance(input_data, dict) else str(input_data)
-            channel = data.get("eventConfig", {}).get("channel", "")
-            return {"sender": sender, "message": message, "channel": channel}
-        return input_data
-
-    return handler
-
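Note: a small illustrative sketch (values invented) of the self-reference hazard the comment at the top of the removed handler describes:

```python
# Why on_flow_start must not echo run.vars verbatim:
run_vars = {"question": "hi", "_temp": {"node_outputs": {}}}
run_vars["_temp"]["node_outputs"]["start"] = run_vars  # echoing run.vars does this
# run_vars is now self-referential: dataclasses.asdict() hits RecursionError and
# json serializers report a circular reference, which is exactly the cycle the
# pin filtering above prevents.
```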
-def _create_flow_end_handler(data: Dict[str, Any]):
-    pin_ids: list[str] = []
-    pins = data.get("inputs") if isinstance(data, dict) else None
-    if isinstance(pins, list):
-        for p in pins:
-            if not isinstance(p, dict):
-                continue
-            if p.get("type") == "execution":
-                continue
-            pid = p.get("id")
-            if isinstance(pid, str) and pid:
-                pin_ids.append(pid)
-
-    def handler(input_data: Any):
-        if not pin_ids:
-            if isinstance(input_data, dict):
-                return dict(input_data)
-            return {"result": input_data}
-
-        if not isinstance(input_data, dict):
-            if len(pin_ids) == 1:
-                return {pin_ids[0]: input_data}
-            return {"result": input_data}
-
-        return {pid: input_data.get(pid) for pid in pin_ids}
-
-    return handler
-
-def _create_expression_handler(expression: str):
-    def handler(input_data):
-        namespace = {"x": input_data, "input": input_data}
-        if isinstance(input_data, dict):
-            namespace.update(input_data)
-        try:
-            return eval(expression, {"__builtins__": {}}, namespace)
-        except Exception as e:
-            return {"error": str(e)}
-
-    return handler
-
-def _create_if_handler(data: Dict[str, Any]):
-    def handler(input_data):
-        condition = input_data.get("condition") if isinstance(input_data, dict) else bool(input_data)
-        return {"branch": "true" if condition else "false", "condition": condition}
-
-    return handler
-
-def _create_switch_handler(data: Dict[str, Any]):
-    def handler(input_data):
-        value = input_data.get("value") if isinstance(input_data, dict) else input_data
-
-        config = data.get("switchConfig", {}) if isinstance(data, dict) else {}
-        raw_cases = config.get("cases", []) if isinstance(config, dict) else []
-
-        value_str = "" if value is None else str(value)
-        if isinstance(raw_cases, list):
-            for case in raw_cases:
-                if not isinstance(case, dict):
-                    continue
-                case_id = case.get("id")
-                case_value = case.get("value")
-                if not isinstance(case_id, str) or not case_id:
-                    continue
-                if case_value is None:
-                    continue
-                if value_str == str(case_value):
-                    return {"branch": f"case:{case_id}", "value": value, "matched": str(case_value)}
-
-        return {"branch": "default", "value": value}
-
-    return handler
-
-def _create_while_handler(data: Dict[str, Any]):
-    def handler(input_data):
-        condition = input_data.get("condition") if isinstance(input_data, dict) else bool(input_data)
-        return {"condition": bool(condition)}
-
-    return handler
-
-def _create_for_handler(data: Dict[str, Any]):
-    def handler(input_data):
-        payload = input_data if isinstance(input_data, dict) else {}
-        start = payload.get("start")
-        end = payload.get("end")
-        step = payload.get("step")
-        return {"start": start, "end": end, "step": step}
-
-    return handler
-
-def _create_loop_handler(data: Dict[str, Any]):
-    def handler(input_data):
-        items = input_data.get("items") if isinstance(input_data, dict) else input_data
-        if items is None:
-            items = []
-        if not isinstance(items, (list, tuple)):
-            items = [items]
-        items_list = list(items) if isinstance(items, tuple) else list(items)  # type: ignore[arg-type]
-        return {"items": items_list, "count": len(items_list)}
-
-    return handler
-
-def _coerce_bool(value: Any) -> bool:
-    """Best-effort boolean parsing (handles common string forms)."""
-    if value is None:
-        return False
-    if isinstance(value, bool):
-        return value
-    if isinstance(value, (int, float)):
-        try:
-            return float(value) != 0.0
-        except Exception:
-            return False
-    if isinstance(value, str):
-        s = value.strip().lower()
-        if not s:
-            return False
-        if s in {"false", "0", "no", "off"}:
-            return False
-        if s in {"true", "1", "yes", "on"}:
-            return True
-    return False
-
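Note: the removed `_coerce_bool` by example, derived directly from the code above:

```python
assert _coerce_bool("Yes") is True      # case-insensitive, whitespace stripped
assert _coerce_bool("off") is False
assert _coerce_bool(0.0) is False       # numeric zero is falsy
assert _coerce_bool(2) is True          # any nonzero number is truthy
assert _coerce_bool("maybe") is False   # unknown strings fall through to False
assert _coerce_bool(None) is False
```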
-def _create_effect_handler(effect_type: str, data: Dict[str, Any]):
-    effect_config = data.get("effectConfig", {})
-
-    if effect_type == "ask_user":
-        return _create_ask_user_handler(data, effect_config)
-    if effect_type == "answer_user":
-        return _create_answer_user_handler(data, effect_config)
-    if effect_type == "llm_call":
-        return _create_llm_call_handler(data, effect_config)
-    if effect_type == "tool_calls":
-        return _create_tool_calls_handler(data, effect_config)
-    if effect_type == "wait_until":
-        return _create_wait_until_handler(data, effect_config)
-    if effect_type == "wait_event":
-        return _create_wait_event_handler(data, effect_config)
-    if effect_type == "memory_note":
-        return _create_memory_note_handler(data, effect_config)
-    if effect_type == "memory_query":
-        return _create_memory_query_handler(data, effect_config)
-    if effect_type == "memory_rehydrate":
-        return _create_memory_rehydrate_handler(data, effect_config)
-
-    return lambda x: x
-
-def _create_tool_calls_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    import json
-
-    allowed_default = None
-    if isinstance(config, dict):
-        raw = config.get("allowed_tools")
-        if raw is None:
-            raw = config.get("allowedTools")
-        allowed_default = raw
-
-    def _normalize_str_list(raw: Any) -> list[str]:
-        if not isinstance(raw, list):
-            return []
-        out: list[str] = []
-        for x in raw:
-            if isinstance(x, str) and x.strip():
-                out.append(x.strip())
-        return out
-
-    def _normalize_tool_calls(raw: Any) -> list[Dict[str, Any]]:
-        if raw is None:
-            return []
-        if isinstance(raw, dict):
-            return [dict(raw)]
-        if isinstance(raw, list):
-            out: list[Dict[str, Any]] = []
-            for x in raw:
-                if isinstance(x, dict):
-                    out.append(dict(x))
-            return out
-        if isinstance(raw, str) and raw.strip():
-            # Best-effort: tolerate JSON strings coming from parse_json/text nodes.
-            try:
-                parsed = json.loads(raw)
-            except Exception:
-                return []
-            return _normalize_tool_calls(parsed)
-        return []
-
-    def handler(input_data: Any):
-        payload = input_data if isinstance(input_data, dict) else {}
-
-        tool_calls_raw = payload.get("tool_calls")
-        tool_calls = _normalize_tool_calls(tool_calls_raw)
-
-        allow_specified = "allowed_tools" in payload or "allowedTools" in payload
-        allowed_raw = payload.get("allowed_tools")
-        if allowed_raw is None:
-            allowed_raw = payload.get("allowedTools")
-        allowed_tools = _normalize_str_list(allowed_raw) if allow_specified else []
-        if not allow_specified:
-            allowed_tools = _normalize_str_list(allowed_default)
-
-        pending: Dict[str, Any] = {"type": "tool_calls", "tool_calls": tool_calls}
-        # Only include allowlist when explicitly provided (empty list means "allow none").
-        if allow_specified or isinstance(allowed_default, list):
-            pending["allowed_tools"] = allowed_tools
-
-        return {
-            "results": None,
-            "success": None,
-            "_pending_effect": pending,
-        }
-
-    return handler
-
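Note: a sketch of the allowlist semantics implied by the removed handler (tool name and arguments are invented for illustration):

```python
# - pin provided (even as []) -> allowlist included; [] means "allow none"
# - pin absent, config list present -> config allowlist included
# - neither -> no "allowed_tools" key; the host's default policy applies
h = _create_tool_calls_handler({}, {"allowed_tools": ["read_file"]})
out = h({"tool_calls": '[{"name": "read_file", "arguments": {}}]'})  # JSON string tolerated
# out["_pending_effect"] == {
#     "type": "tool_calls",
#     "tool_calls": [{"name": "read_file", "arguments": {}}],
#     "allowed_tools": ["read_file"],
# }
```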
-def _create_ask_user_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    def handler(input_data):
-        prompt = input_data.get("prompt", "Please respond:") if isinstance(input_data, dict) else str(input_data)
-        choices = input_data.get("choices", []) if isinstance(input_data, dict) else []
-        allow_free_text = config.get("allowFreeText", True)
-
-        return {
-            "response": f"[User prompt: {prompt}]",
-            "prompt": prompt,
-            "choices": choices,
-            "allow_free_text": allow_free_text,
-            "_pending_effect": {
-                "type": "ask_user",
-                "prompt": prompt,
-                "choices": choices,
-                "allow_free_text": allow_free_text,
-            },
-        }
-
-    return handler
-
-def _create_answer_user_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    def handler(input_data):
-        message = input_data.get("message", "") if isinstance(input_data, dict) else str(input_data or "")
-        return {"message": message, "_pending_effect": {"type": "answer_user", "message": message}}
-
-    return handler
-
-def _create_llm_call_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    provider_default = config.get("provider", "")
-    model_default = config.get("model", "")
-    temperature = config.get("temperature", 0.7)
-    tools_default_raw = config.get("tools")
-    include_context_cfg = config.get("include_context")
-    if include_context_cfg is None:
-        include_context_cfg = config.get("use_context")
-    include_context_default = _coerce_bool(include_context_cfg) if include_context_cfg is not None else False
-
-    # Tool definitions (ToolSpecs) are required for tool calling. In the visual editor we
-    # store tools as a portable `string[]` allowlist; at execution time we translate to
-    # strict ToolSpecs `{name, description, parameters}` expected by AbstractCore.
-    def _strip_tool_spec(raw: Any) -> Optional[Dict[str, Any]]:
-        if not isinstance(raw, dict):
-            return None
-        name = raw.get("name")
-        if not isinstance(name, str) or not name.strip():
-            return None
-        desc = raw.get("description")
-        params = raw.get("parameters")
-        out: Dict[str, Any] = {
-            "name": name.strip(),
-            "description": str(desc or ""),
-            "parameters": dict(params) if isinstance(params, dict) else {},
-        }
-        return out
-
-    def _normalize_tool_names(raw: Any) -> list[str]:
-        if not isinstance(raw, list):
-            return []
-        out: list[str] = []
-        for t in raw:
-            if isinstance(t, str) and t.strip():
-                out.append(t.strip())
-        return out
-
-    # Precompute a best-effort "available ToolSpecs by name" map so we can turn tool names
-    # into ToolSpecs without going through the web backend.
-    tool_specs_by_name: Dict[str, Dict[str, Any]] = {}
-    try:
-        from abstractruntime.integrations.abstractcore.default_tools import list_default_tool_specs
-
-        base_specs = list_default_tool_specs()
-        if not isinstance(base_specs, list):
-            base_specs = []
-        for s in base_specs:
-            stripped = _strip_tool_spec(s)
-            if stripped is not None:
-                tool_specs_by_name[stripped["name"]] = stripped
-    except Exception:
-        pass
-
-    # Optional schema-only runtime tools (used by AbstractAgent). These are useful for
-    # "state machine" autonomy where the graph can route tool-like requests to effect nodes.
-    try:
-        from abstractagent.logic.builtins import (  # type: ignore
-            ASK_USER_TOOL,
-            COMPACT_MEMORY_TOOL,
-            INSPECT_VARS_TOOL,
-            RECALL_MEMORY_TOOL,
-            REMEMBER_TOOL,
-        )
-
-        builtin_defs = [ASK_USER_TOOL, RECALL_MEMORY_TOOL, INSPECT_VARS_TOOL, REMEMBER_TOOL, COMPACT_MEMORY_TOOL]
-        for tool_def in builtin_defs:
-            try:
-                d = tool_def.to_dict()
-            except Exception:
-                d = None
-            stripped = _strip_tool_spec(d)
-            if stripped is not None and stripped["name"] not in tool_specs_by_name:
-                tool_specs_by_name[stripped["name"]] = stripped
-    except Exception:
-        pass
-
-    def _normalize_tools(raw: Any) -> list[Dict[str, Any]]:
-        # Already ToolSpecs (from pins): accept and strip UI-only fields.
-        if isinstance(raw, list) and raw and all(isinstance(x, dict) for x in raw):
-            out: list[Dict[str, Any]] = []
-            for x in raw:
-                stripped = _strip_tool_spec(x)
-                if stripped is not None:
-                    out.append(stripped)
-            return out
-
-        # Tool names (portable representation): resolve against known tool specs.
-        names = _normalize_tool_names(raw)
-        out: list[Dict[str, Any]] = []
-        for name in names:
-            spec = tool_specs_by_name.get(name)
-            if spec is not None:
-                out.append(spec)
-        return out
-
-    def _normalize_response_schema(raw: Any) -> Optional[Dict[str, Any]]:
-        """Normalize a structured-output schema input into a JSON Schema dict.
-
-        Supported inputs (best-effort):
-        - JSON Schema dict: {"type":"object","properties":{...}, ...}
-        - LMStudio/OpenAI-style wrapper: {"type":"json_schema","json_schema": {"schema": {...}}}
-        """
-        if raw is None:
-            return None
-        if isinstance(raw, dict):
-            # Wrapper form (OpenAI "response_format": {type:"json_schema", json_schema:{schema:{...}}})
-            if raw.get("type") == "json_schema" and isinstance(raw.get("json_schema"), dict):
-                inner = raw.get("json_schema")
-                if isinstance(inner, dict) and isinstance(inner.get("schema"), dict):
-                    return dict(inner.get("schema") or {})
-            # Plain JSON Schema dict
-            return dict(raw)
-        return None
-
-    def handler(input_data):
-        prompt = input_data.get("prompt", "") if isinstance(input_data, dict) else str(input_data)
-        system = input_data.get("system", "") if isinstance(input_data, dict) else ""
-
-        tools_specified = isinstance(input_data, dict) and "tools" in input_data
-        tools_raw = input_data.get("tools") if isinstance(input_data, dict) else None
-        tools = _normalize_tools(tools_raw) if tools_specified else []
-        if not tools_specified:
-            tools = _normalize_tools(tools_default_raw)
-
-        include_context_specified = isinstance(input_data, dict) and (
-            "include_context" in input_data or "use_context" in input_data
-        )
-        if include_context_specified:
-            raw_inc = (
-                input_data.get("include_context")
-                if isinstance(input_data, dict) and "include_context" in input_data
-                else input_data.get("use_context") if isinstance(input_data, dict) else None
-            )
-            include_context_value = _coerce_bool(raw_inc)
-        else:
-            include_context_value = include_context_default
-
-        provider = (
-            input_data.get("provider")
-            if isinstance(input_data, dict) and isinstance(input_data.get("provider"), str)
-            else provider_default
-        )
-        model = (
-            input_data.get("model")
-            if isinstance(input_data, dict) and isinstance(input_data.get("model"), str)
-            else model_default
-        )
-
-        if not provider or not model:
-            return {
-                "response": "[LLM Call: missing provider/model]",
-                "_pending_effect": {
-                    "type": "llm_call",
-                    "prompt": prompt,
-                    "system_prompt": system,
-                    "tools": tools,
-                    "params": {"temperature": temperature},
-                    "include_context": include_context_value,
-                },
-                "error": "Missing provider or model configuration",
-            }
-
-        response_schema = (
-            _normalize_response_schema(input_data.get("response_schema"))
-            if isinstance(input_data, dict) and "response_schema" in input_data
-            else None
-        )
-
-        pending: Dict[str, Any] = {
-            "type": "llm_call",
-            "prompt": prompt,
-            "system_prompt": system,
-            "tools": tools,
-            "params": {"temperature": temperature},
-            "provider": provider,
-            "model": model,
-            "include_context": include_context_value,
-        }
-        if isinstance(response_schema, dict) and response_schema:
-            pending["response_schema"] = response_schema
-            # Name is optional; AbstractRuntime will fall back to a safe default.
-            pending["response_schema_name"] = "LLM_StructuredOutput"
-
-        return {
-            "response": None,
-            "_pending_effect": pending,
-        }
-
-    return handler
-
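Note: both schema shapes accepted by `_normalize_response_schema` above reduce to the same JSON Schema dict. A small illustration (field names invented):

```python
wrapped = {
    "type": "json_schema",
    "json_schema": {"schema": {"type": "object", "properties": {"answer": {"type": "string"}}}},
}
plain = {"type": "object", "properties": {"answer": {"type": "string"}}}
# Per the code above:
# _normalize_response_schema(wrapped) == _normalize_response_schema(plain) == plain
```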
-def _create_model_catalog_handler(data: Dict[str, Any]):
-    cfg = data.get("modelCatalogConfig", {}) if isinstance(data, dict) else {}
-    cfg = dict(cfg) if isinstance(cfg, dict) else {}
-
-    allowed_providers_default = cfg.get("allowedProviders")
-    allowed_models_default = cfg.get("allowedModels")
-    index_default = cfg.get("index", 0)
-
-    def _as_str_list(raw: Any) -> list[str]:
-        if not isinstance(raw, list):
-            return []
-        out: list[str] = []
-        for x in raw:
-            if isinstance(x, str) and x.strip():
-                out.append(x.strip())
-        return out
-
-    def handler(input_data: Any):
-        # Allow pin-based overrides (data edges) while keeping node config as defaults.
-        allowed_providers = _as_str_list(
-            input_data.get("allowed_providers") if isinstance(input_data, dict) else None
-        ) or _as_str_list(allowed_providers_default)
-        allowed_models = _as_str_list(
-            input_data.get("allowed_models") if isinstance(input_data, dict) else None
-        ) or _as_str_list(allowed_models_default)
-
-        idx_raw = input_data.get("index") if isinstance(input_data, dict) else None
-        try:
-            idx = int(idx_raw) if idx_raw is not None else int(index_default or 0)
-        except Exception:
-            idx = 0
-        if idx < 0:
-            idx = 0
-
-        try:
-            from abstractcore.providers.registry import get_all_providers_with_models, get_available_models_for_provider
-        except Exception:
-            return {"providers": [], "models": [], "pair": None, "provider": "", "model": ""}
-
-        providers_meta = get_all_providers_with_models(include_models=False)
-        available_providers: list[str] = []
-        for p in providers_meta:
-            if not isinstance(p, dict):
-                continue
-            if p.get("status") != "available":
-                continue
-            name = p.get("name")
-            if isinstance(name, str) and name.strip():
-                available_providers.append(name.strip())
-
-        if allowed_providers:
-            allow = {x.lower(): x for x in allowed_providers}
-            available_providers = [p for p in available_providers if p.lower() in allow]
-
-        pairs: list[dict[str, str]] = []
-        model_ids: list[str] = []
-
-        allow_models_norm = {m.strip() for m in allowed_models if isinstance(m, str) and m.strip()}
-
-        for provider in available_providers:
-            try:
-                models = get_available_models_for_provider(provider)
-            except Exception:
-                models = []
-            if not isinstance(models, list):
-                models = []
-            for m in models:
-                if not isinstance(m, str) or not m.strip():
-                    continue
-                model = m.strip()
-                mid = f"{provider}/{model}"
-                if allow_models_norm:
-                    # Accept either full ids or raw model names.
-                    if mid not in allow_models_norm and model not in allow_models_norm:
-                        continue
-                pairs.append({"provider": provider, "model": model, "id": mid})
-                model_ids.append(mid)
-
-        selected = pairs[idx] if pairs and idx < len(pairs) else (pairs[0] if pairs else None)
-        return {
-            "providers": available_providers,
-            "models": model_ids,
-            "pair": selected,
-            "provider": selected.get("provider", "") if isinstance(selected, dict) else "",
-            "model": selected.get("model", "") if isinstance(selected, dict) else "",
-        }
-
-    return handler
-
-def _create_provider_catalog_handler(data: Dict[str, Any]):
-    def _as_str_list(raw: Any) -> list[str]:
-        if not isinstance(raw, list):
-            return []
-        out: list[str] = []
-        for x in raw:
-            if isinstance(x, str) and x.strip():
-                out.append(x.strip())
-        return out
-
-    def handler(input_data: Any):
-        allowed_providers = _as_str_list(
-            input_data.get("allowed_providers") if isinstance(input_data, dict) else None
-        )
-
-        try:
-            from abstractcore.providers.registry import get_all_providers_with_models
-        except Exception:
-            return {"providers": []}
-
-        providers_meta = get_all_providers_with_models(include_models=False)
-        available: list[str] = []
-        for p in providers_meta:
-            if not isinstance(p, dict):
-                continue
-            if p.get("status") != "available":
-                continue
-            name = p.get("name")
-            if isinstance(name, str) and name.strip():
-                available.append(name.strip())
-
-        if allowed_providers:
-            allow = {x.lower() for x in allowed_providers}
-            available = [p for p in available if p.lower() in allow]
-
-        return {"providers": available}
-
-    return handler
-
-def _create_provider_models_handler(data: Dict[str, Any]):
-    cfg = data.get("providerModelsConfig", {}) if isinstance(data, dict) else {}
-    cfg = dict(cfg) if isinstance(cfg, dict) else {}
-
-    def _as_str_list(raw: Any) -> list[str]:
-        if not isinstance(raw, list):
-            return []
-        out: list[str] = []
-        for x in raw:
-            if isinstance(x, str) and x.strip():
-                out.append(x.strip())
-        return out
-
-    def handler(input_data: Any):
-        provider = None
-        if isinstance(input_data, dict) and isinstance(input_data.get("provider"), str):
-            provider = input_data.get("provider")
-        if not provider and isinstance(cfg.get("provider"), str):
-            provider = cfg.get("provider")
-
-        provider = str(provider or "").strip()
-        if not provider:
-            return {"provider": "", "models": []}
-
-        allowed_models = _as_str_list(
-            input_data.get("allowed_models") if isinstance(input_data, dict) else None
-        )
-        if not allowed_models:
-            # Optional allowlist from node config when the pin isn't connected.
-            allowed_models = _as_str_list(cfg.get("allowedModels")) or _as_str_list(cfg.get("allowed_models"))
-        allow = {m for m in allowed_models if m}
-
-        try:
-            from abstractcore.providers.registry import get_available_models_for_provider
-        except Exception:
-            return {"provider": provider, "models": []}
-
-        try:
-            models = get_available_models_for_provider(provider)
-        except Exception:
-            models = []
-        if not isinstance(models, list):
-            models = []
-
-        out: list[str] = []
-        for m in models:
-            if not isinstance(m, str) or not m.strip():
-                continue
-            name = m.strip()
-            mid = f"{provider}/{name}"
-            if allow and (name not in allow and mid not in allow):
-                continue
-            out.append(name)
-
-        return {"provider": provider, "models": out}
-
-    return handler
-
-def _create_wait_until_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    from datetime import datetime as _dt, timedelta, timezone
-
-    duration_type = config.get("durationType", "seconds")
-
-    def handler(input_data):
-        duration = input_data.get("duration", 0) if isinstance(input_data, dict) else 0
-
-        try:
-            amount = float(duration)
-        except (TypeError, ValueError):
-            amount = 0
-
-        now = _dt.now(timezone.utc)
-        if duration_type == "timestamp":
-            until = str(duration or "")
-        elif duration_type == "minutes":
-            until = (now + timedelta(minutes=amount)).isoformat()
-        elif duration_type == "hours":
-            until = (now + timedelta(hours=amount)).isoformat()
-        else:
-            until = (now + timedelta(seconds=amount)).isoformat()
-
-        return {"_pending_effect": {"type": "wait_until", "until": until}}
-
-    return handler
-
-def _create_wait_event_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    def handler(input_data):
-        # `wait_event` is a durable pause that waits for an external signal.
-        #
-        # Input shape (best-effort):
-        # - event_key: str (required; defaults to "default" for backward-compat)
-        # - prompt: str (optional; enables human-in-the-loop UX for EVENT waits)
-        # - choices: list[str] (optional)
-        # - allow_free_text: bool (optional; default True)
-        #
-        # NOTE: The compiler will wrap `_pending_effect` into an AbstractRuntime Effect payload.
-        event_key = input_data.get("event_key", "default") if isinstance(input_data, dict) else str(input_data)
-        prompt = None
-        choices = None
-        allow_free_text = True
-        if isinstance(input_data, dict):
-            p = input_data.get("prompt")
-            if isinstance(p, str) and p.strip():
-                prompt = p
-            ch = input_data.get("choices")
-            if isinstance(ch, list):
-                # Keep choices JSON-safe and predictable.
-                choices = [str(c) for c in ch if isinstance(c, str) and str(c).strip()]
-            aft = input_data.get("allow_free_text")
-            if aft is None:
-                aft = input_data.get("allowFreeText")
-            if aft is not None:
-                allow_free_text = bool(aft)
-
-        pending: Dict[str, Any] = {"type": "wait_event", "wait_key": event_key}
-        if prompt is not None:
-            pending["prompt"] = prompt
-        if isinstance(choices, list):
-            pending["choices"] = choices
-        # Always include allow_free_text so hosts can render consistent UX.
-        pending["allow_free_text"] = allow_free_text
-        return {
-            "event_data": {},
-            "event_key": event_key,
-            "_pending_effect": pending,
-        }
-
-    return handler
-
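Note: an illustrative sketch of the removed `wait_until` behavior (the timestamp value is invented):

```python
# With durationType "minutes" and a duration pin of 5, the handler produced
# an absolute UTC deadline relative to "now":
# {"_pending_effect": {"type": "wait_until",
#                      "until": "2025-01-01T12:05:00+00:00"}}  # now + 5 minutes
# With durationType "timestamp", the pin value was passed through as a string.
```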
-def _create_memory_note_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    def handler(input_data):
-        content = input_data.get("content", "") if isinstance(input_data, dict) else str(input_data)
-        tags = input_data.get("tags") if isinstance(input_data, dict) else None
-        sources = input_data.get("sources") if isinstance(input_data, dict) else None
-        location = input_data.get("location") if isinstance(input_data, dict) else None
-        scope = input_data.get("scope") if isinstance(input_data, dict) else None
-
-        pending: Dict[str, Any] = {"type": "memory_note", "note": content, "tags": tags if isinstance(tags, dict) else {}}
-        if isinstance(sources, dict):
-            pending["sources"] = sources
-        if isinstance(location, str) and location.strip():
-            pending["location"] = location.strip()
-        if isinstance(scope, str) and scope.strip():
-            pending["scope"] = scope.strip()
-
-        keep_in_context_specified = isinstance(input_data, dict) and (
-            "keep_in_context" in input_data or "keepInContext" in input_data
-        )
-        if keep_in_context_specified:
-            raw_keep = (
-                input_data.get("keep_in_context")
-                if isinstance(input_data, dict) and "keep_in_context" in input_data
-                else input_data.get("keepInContext") if isinstance(input_data, dict) else None
-            )
-            keep_in_context = _coerce_bool(raw_keep)
-        else:
-            # Visual-editor config (checkbox) default.
-            keep_cfg = None
-            if isinstance(config, dict):
-                keep_cfg = config.get("keep_in_context")
-                if keep_cfg is None:
-                    keep_cfg = config.get("keepInContext")
-            keep_in_context = _coerce_bool(keep_cfg)
-        if keep_in_context:
-            pending["keep_in_context"] = True
-
-        return {"note_id": None, "_pending_effect": pending}
-
-    return handler
-
-def _create_memory_query_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    def handler(input_data):
-        query = input_data.get("query", "") if isinstance(input_data, dict) else str(input_data)
-        limit = input_data.get("limit", 10) if isinstance(input_data, dict) else 10
-        tags = input_data.get("tags") if isinstance(input_data, dict) else None
-        tags_mode = input_data.get("tags_mode") if isinstance(input_data, dict) else None
-        usernames = input_data.get("usernames") if isinstance(input_data, dict) else None
-        locations = input_data.get("locations") if isinstance(input_data, dict) else None
-        since = input_data.get("since") if isinstance(input_data, dict) else None
-        until = input_data.get("until") if isinstance(input_data, dict) else None
-        scope = input_data.get("scope") if isinstance(input_data, dict) else None
-        try:
-            limit_int = int(limit) if limit is not None else 10
-        except Exception:
-            limit_int = 10
-
-        pending: Dict[str, Any] = {"type": "memory_query", "query": query, "limit_spans": limit_int, "return": "both"}
-        if isinstance(tags, dict):
-            pending["tags"] = tags
-        if isinstance(tags_mode, str) and tags_mode.strip():
-            pending["tags_mode"] = tags_mode.strip()
-        if isinstance(usernames, list):
-            pending["usernames"] = [str(x).strip() for x in usernames if isinstance(x, str) and str(x).strip()]
-        if isinstance(locations, list):
-            pending["locations"] = [str(x).strip() for x in locations if isinstance(x, str) and str(x).strip()]
-        if since is not None:
-            pending["since"] = since
-        if until is not None:
-            pending["until"] = until
-        if isinstance(scope, str) and scope.strip():
-            pending["scope"] = scope.strip()
-
-        return {"results": [], "rendered": "", "_pending_effect": pending}
-
-    return handler
-
-def _create_memory_rehydrate_handler(data: Dict[str, Any], config: Dict[str, Any]):
-    def handler(input_data):
-        raw = input_data.get("span_ids") if isinstance(input_data, dict) else None
-        if raw is None and isinstance(input_data, dict):
-            raw = input_data.get("span_id")
-        span_ids: list[Any] = []
-        if isinstance(raw, list):
-            span_ids = list(raw)
-        elif raw is not None:
-            span_ids = [raw]
-
-        placement = input_data.get("placement") if isinstance(input_data, dict) else None
-        placement_str = str(placement).strip() if isinstance(placement, str) else "after_summary"
-        if placement_str not in {"after_summary", "after_system", "end"}:
-            placement_str = "after_summary"
-
-        max_messages = input_data.get("max_messages") if isinstance(input_data, dict) else None
-
-        pending: Dict[str, Any] = {"type": "memory_rehydrate", "span_ids": span_ids, "placement": placement_str}
-        if max_messages is not None:
-            pending["max_messages"] = max_messages
-        return {"inserted": 0, "skipped": 0, "_pending_effect": pending}
-
-    return handler
-
-def _create_handler(node_type: NodeType, data: Dict[str, Any]) -> Any:
-    type_str = node_type.value if isinstance(node_type, NodeType) else str(node_type)
-
-    if type_str == "get_var":
-        return _create_get_var_handler(data)
-
-    if type_str == "bool_var":
-        return _create_bool_var_handler(data)
-
-    if type_str == "var_decl":
-        return _create_var_decl_handler(data)
-
-    if type_str == "set_var":
-        return _create_set_var_handler(data)
-
-    if type_str == "concat":
-        return _create_concat_handler(data)
-
-    if type_str == "make_array":
-        return _create_make_array_handler(data)
-
-    if type_str == "array_concat":
-        return _create_array_concat_handler(data)
-
-    if type_str == "read_file":
-        return _create_read_file_handler(data)
-
-    if type_str == "write_file":
-        return _create_write_file_handler(data)
-
-    # Sequence / Parallel are scheduler nodes compiled specially by `compile_flow`.
-    # Their runtime semantics are handled in `abstractflow.adapters.control_adapter`.
-    if type_str in ("sequence", "parallel"):
-        return lambda x: x
-
-    builtin = get_builtin_handler(type_str)
-    if builtin:
-        return _wrap_builtin(builtin, data)
-
-    if type_str == "code":
-        code = data.get("code", "def transform(input):\n    return input")
-        function_name = data.get("functionName", "transform")
-        return create_code_handler(code, function_name)
-
-    if type_str == "agent":
-        return _create_agent_input_handler(data)
-
-    if type_str == "model_catalog":
-        return _create_model_catalog_handler(data)
-
-    if type_str == "provider_catalog":
-        return _create_provider_catalog_handler(data)
-
-    if type_str == "provider_models":
-        return _create_provider_models_handler(data)
-
-    if type_str == "subflow":
-        return _create_subflow_effect_builder(data)
-
-    if type_str == "break_object":
-        return _create_break_object_handler(data)
-
-    if type_str == "function":
-        if "code" in data:
-            return create_code_handler(data["code"], data.get("functionName", "transform"))
-        if "expression" in data:
-            return _create_expression_handler(data["expression"])
-        return lambda x: x
-
-    if type_str == "on_flow_end":
-        return _create_flow_end_handler(data)
-
-    if type_str in ("on_flow_start", "on_user_request", "on_agent_message"):
-        return _create_event_handler(type_str, data)
-
-    if type_str == "if":
-        return _create_if_handler(data)
-    if type_str == "switch":
-        return _create_switch_handler(data)
-    if type_str == "while":
-        return _create_while_handler(data)
-    if type_str == "for":
-        return _create_for_handler(data)
-    if type_str == "loop":
-        return _create_loop_handler(data)
-
-    if type_str in EFFECT_NODE_TYPES:
-        return _create_effect_handler(type_str, data)
-
-    return lambda x: x
-
-    for node in visual.nodes:
-        type_str = node.type.value if hasattr(node.type, "value") else str(node.type)
-
-        if type_str in LITERAL_NODE_TYPES:
-            continue
-
-        base_handler = _create_handler(node.type, node.data)
-
-        if not _has_execution_pins(type_str, node.data):
-            pure_base_handlers[node.id] = base_handler
-            pure_node_ids.add(node.id)
-            if type_str in {"get_var", "bool_var", "var_decl"}:
-                volatile_pure_node_ids.add(node.id)
-            continue
-
-        # Ignore disconnected/unreachable execution nodes.
-        if reachable_exec and node.id not in reachable_exec:
-            continue
-
-        wrapped_handler = _create_data_aware_handler(
-            node_id=node.id,
-            base_handler=base_handler,
-            data_edges=data_edge_map.get(node.id, {}),
-            pin_defaults=pin_defaults_by_node_id.get(node.id),
-            node_outputs=flow._node_outputs,  # type: ignore[attr-defined]
-            ensure_node_output=_ensure_node_output,
-            volatile_node_ids=volatile_pure_node_ids,
-        )
-
-        input_key = node.data.get("inputKey")
-        output_key = node.data.get("outputKey")
-
-        effect_type: Optional[str] = None
-        effect_config: Optional[Dict[str, Any]] = None
-        if type_str in EFFECT_NODE_TYPES:
-            effect_type = type_str
-            effect_config = node.data.get("effectConfig", {})
-        elif type_str == "on_schedule":
-            # Schedule trigger: compiles into WAIT_UNTIL under the hood.
-            effect_type = "on_schedule"
-            effect_config = node.data.get("eventConfig", {})
-        elif type_str == "on_event":
-            # Custom event listener (Blueprint-style "Custom Event").
-            # Compiles into WAIT_EVENT under the hood.
-            effect_type = "on_event"
-            effect_config = node.data.get("eventConfig", {})
-        elif type_str == "agent":
-            effect_type = "agent"
-            raw_cfg = node.data.get("agentConfig", {})
-            cfg = dict(raw_cfg) if isinstance(raw_cfg, dict) else {}
-            cfg.setdefault(
-                "_react_workflow_id",
-                visual_react_workflow_id(flow_id=visual.id, node_id=node.id),
-            )
-            effect_config = cfg
-        elif type_str in ("sequence", "parallel"):
-            # Control-flow scheduler nodes. Store pin order so compilation can
-            # execute branches deterministically (Blueprint-style).
-            effect_type = type_str
-
-            pins = node.data.get("outputs") if isinstance(node.data, dict) else None
-            exec_ids: list[str] = []
-            if isinstance(pins, list):
-                for p in pins:
-                    if not isinstance(p, dict):
-                        continue
-                    if p.get("type") != "execution":
-                        continue
-                    pid = p.get("id")
-                    if isinstance(pid, str) and pid:
-                        exec_ids.append(pid)
-
-            def _then_key(h: str) -> int:
-                try:
-                    if h.startswith("then:"):
-                        return int(h.split(":", 1)[1])
-                except Exception:
-                    pass
-                return 10**9
-
-            then_handles = sorted([h for h in exec_ids if h.startswith("then:")], key=_then_key)
-            cfg = {"then_handles": then_handles}
-            if type_str == "parallel":
-                cfg["completed_handle"] = "completed"
-            effect_config = cfg
-        elif type_str == "loop":
-            # Control-flow scheduler node (Blueprint-style foreach).
-            # Runtime semantics are handled in `abstractflow.adapters.control_adapter`.
-            effect_type = type_str
-            effect_config = {}
-        elif type_str == "while":
-            # Control-flow scheduler node (Blueprint-style while).
-            # Runtime semantics are handled in `abstractflow.adapters.control_adapter`.
-            effect_type = type_str
-            effect_config = {}
-        elif type_str == "for":
-            # Control-flow scheduler node (Blueprint-style numeric for).
-            # Runtime semantics are handled in `abstractflow.adapters.control_adapter`.
-            effect_type = type_str
-            effect_config = {}
-        elif type_str == "subflow":
-            effect_type = "start_subworkflow"
-            subflow_id = node.data.get("subflowId") or node.data.get("flowId")
-            output_pin_ids: list[str] = []
-            outs = node.data.get("outputs")
-            if isinstance(outs, list):
-                for p in outs:
-                    if not isinstance(p, dict):
-                        continue
-                    if p.get("type") == "execution":
-                        continue
-                    pid = p.get("id")
-                    if isinstance(pid, str) and pid and pid != "output":
-                        output_pin_ids.append(pid)
-            effect_config = {"workflow_id": subflow_id, "output_pins": output_pin_ids}
-
-        # Always attach minimal visual metadata for downstream compilation/wrapping.
-        meta_cfg: Dict[str, Any] = {"_visual_type": type_str}
-        if isinstance(effect_config, dict):
-            meta_cfg.update(effect_config)
-        effect_config = meta_cfg
-
-        flow.add_node(
-            node_id=node.id,
-            handler=wrapped_handler,
-            input_key=input_key,
-            output_key=output_key,
-            effect_type=effect_type,
-            effect_config=effect_config,
-        )
-
-    for edge in visual.edges:
-        if edge.targetHandle == "exec-in":
-            if edge.source in flow.nodes and edge.target in flow.nodes:
-                flow.add_edge(edge.source, edge.target, source_handle=edge.sourceHandle)
-
-    if visual.entryNode and visual.entryNode in flow.nodes:
-        flow.set_entry(visual.entryNode)
-    else:
-        targets = {e.target for e in visual.edges if e.targetHandle == "exec-in"}
-        for node_id in flow.nodes:
-            if node_id not in targets:
-                flow.set_entry(node_id)
-                break
-    if not flow.entry_node and flow.nodes:
-        flow.set_entry(next(iter(flow.nodes)))
-
-    # Pure (no-exec) nodes are cached in `flow._node_outputs` for data-edge resolution.
-    # Some schedulers (While, On Event, On Schedule) must invalidate these caches between iterations.
-    flow._pure_node_ids = pure_node_ids  # type: ignore[attr-defined]
-
-    return flow
-
-
-def _create_data_aware_handler(
-    node_id: str,
-    base_handler,
-    data_edges: Dict[str, tuple[str, str]],
-    pin_defaults: Optional[Dict[str, Any]],
-    node_outputs: Dict[str, Dict[str, Any]],
-    *,
-    ensure_node_output=None,
-    volatile_node_ids: Optional[set[str]] = None,
-):
-    """Wrap a handler to resolve data edge inputs before execution."""
-
-    volatile: set[str] = volatile_node_ids if isinstance(volatile_node_ids, set) else set()
-
-    def wrapped_handler(input_data):
-        resolved_input: Dict[str, Any] = {}
-
-        if isinstance(input_data, dict):
-            resolved_input.update(input_data)
-
-        for target_pin, (source_node, source_pin) in data_edges.items():
-            if ensure_node_output is not None and (source_node not in node_outputs or source_node in volatile):
-                ensure_node_output(source_node)
-            if source_node in node_outputs:
-                source_output = node_outputs[source_node]
-                if isinstance(source_output, dict) and source_pin in source_output:
-                    resolved_input[target_pin] = source_output[source_pin]
-                elif source_pin in ("result", "output"):
-                    resolved_input[target_pin] = source_output
-
-        if pin_defaults:
-            for pin_id, value in pin_defaults.items():
-                # Connected pins always win (even if the upstream value is None).
-                if pin_id in data_edges:
-                    continue
-                if pin_id not in resolved_input:
-                    # Clone object/array defaults so handlers can't mutate the shared default.
-                    if isinstance(value, (dict, list)):
-                        try:
-                            import copy
-
-                            resolved_input[pin_id] = copy.deepcopy(value)
-                        except Exception:
-                            resolved_input[pin_id] = value
-                    else:
-                        resolved_input[pin_id] = value
-
-        result = base_handler(resolved_input if resolved_input else input_data)
-        node_outputs[node_id] = result
-        return result
-
-    return wrapped_handler
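Note: the precedence implied by the removed wrapper, summarized for reference (the `{"retries": 3}` default is an invented example):

```python
# Resolution order in wrapped_handler, highest precedence first:
# 1. values resolved over data edges (these overwrite incoming keys, even with None)
# 2. keys already present on the incoming input_data dict
# 3. pin defaults, applied only to unconnected, still-missing pins
# Dict/list defaults are deep-copied, so a disconnected pin with default
# {"retries": 3} cannot be mutated across invocations: each call gets a copy.
```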
+    vf = _load_visualflow_json(visual)
+    return cast(Flow, _runtime_visual_to_flow(vf))


 def execute_visual_flow(visual_flow: VisualFlow, input_data: Dict[str, Any], *, flows: Dict[str, VisualFlow]) -> Dict[str, Any]: