abstractruntime-0.4.0-py3-none-any.whl → abstractruntime-0.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. abstractruntime/__init__.py +76 -1
  2. abstractruntime/core/config.py +68 -1
  3. abstractruntime/core/models.py +5 -0
  4. abstractruntime/core/policy.py +74 -3
  5. abstractruntime/core/runtime.py +1002 -126
  6. abstractruntime/core/vars.py +8 -2
  7. abstractruntime/evidence/recorder.py +1 -1
  8. abstractruntime/history_bundle.py +772 -0
  9. abstractruntime/integrations/abstractcore/__init__.py +3 -0
  10. abstractruntime/integrations/abstractcore/default_tools.py +127 -3
  11. abstractruntime/integrations/abstractcore/effect_handlers.py +2440 -99
  12. abstractruntime/integrations/abstractcore/embeddings_client.py +69 -0
  13. abstractruntime/integrations/abstractcore/factory.py +68 -20
  14. abstractruntime/integrations/abstractcore/llm_client.py +447 -15
  15. abstractruntime/integrations/abstractcore/mcp_worker.py +1 -0
  16. abstractruntime/integrations/abstractcore/session_attachments.py +946 -0
  17. abstractruntime/integrations/abstractcore/tool_executor.py +31 -10
  18. abstractruntime/integrations/abstractcore/workspace_scoped_tools.py +561 -0
  19. abstractruntime/integrations/abstractmemory/__init__.py +3 -0
  20. abstractruntime/integrations/abstractmemory/effect_handlers.py +946 -0
  21. abstractruntime/memory/active_context.py +6 -1
  22. abstractruntime/memory/kg_packets.py +164 -0
  23. abstractruntime/memory/memact_composer.py +175 -0
  24. abstractruntime/memory/recall_levels.py +163 -0
  25. abstractruntime/memory/token_budget.py +86 -0
  26. abstractruntime/storage/__init__.py +4 -1
  27. abstractruntime/storage/artifacts.py +158 -30
  28. abstractruntime/storage/base.py +17 -1
  29. abstractruntime/storage/commands.py +339 -0
  30. abstractruntime/storage/in_memory.py +41 -1
  31. abstractruntime/storage/json_files.py +195 -12
  32. abstractruntime/storage/observable.py +38 -1
  33. abstractruntime/storage/offloading.py +433 -0
  34. abstractruntime/storage/sqlite.py +836 -0
  35. abstractruntime/visualflow_compiler/__init__.py +29 -0
  36. abstractruntime/visualflow_compiler/adapters/__init__.py +11 -0
  37. abstractruntime/visualflow_compiler/adapters/agent_adapter.py +126 -0
  38. abstractruntime/visualflow_compiler/adapters/context_adapter.py +109 -0
  39. abstractruntime/visualflow_compiler/adapters/control_adapter.py +615 -0
  40. abstractruntime/visualflow_compiler/adapters/effect_adapter.py +1051 -0
  41. abstractruntime/visualflow_compiler/adapters/event_adapter.py +307 -0
  42. abstractruntime/visualflow_compiler/adapters/function_adapter.py +97 -0
  43. abstractruntime/visualflow_compiler/adapters/memact_adapter.py +114 -0
  44. abstractruntime/visualflow_compiler/adapters/subflow_adapter.py +74 -0
  45. abstractruntime/visualflow_compiler/adapters/variable_adapter.py +316 -0
  46. abstractruntime/visualflow_compiler/compiler.py +3832 -0
  47. abstractruntime/visualflow_compiler/flow.py +247 -0
  48. abstractruntime/visualflow_compiler/visual/__init__.py +13 -0
  49. abstractruntime/visualflow_compiler/visual/agent_ids.py +29 -0
  50. abstractruntime/visualflow_compiler/visual/builtins.py +1376 -0
  51. abstractruntime/visualflow_compiler/visual/code_executor.py +214 -0
  52. abstractruntime/visualflow_compiler/visual/executor.py +2804 -0
  53. abstractruntime/visualflow_compiler/visual/models.py +211 -0
  54. abstractruntime/workflow_bundle/__init__.py +52 -0
  55. abstractruntime/workflow_bundle/models.py +236 -0
  56. abstractruntime/workflow_bundle/packer.py +317 -0
  57. abstractruntime/workflow_bundle/reader.py +87 -0
  58. abstractruntime/workflow_bundle/registry.py +587 -0
  59. abstractruntime-0.4.1.dist-info/METADATA +177 -0
  60. abstractruntime-0.4.1.dist-info/RECORD +86 -0
  61. abstractruntime-0.4.0.dist-info/METADATA +0 -167
  62. abstractruntime-0.4.0.dist-info/RECORD +0 -49
  63. {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/WHEEL +0 -0
  64. {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/entry_points.txt +0 -0
  65. {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/licenses/LICENSE +0 -0
abstractruntime/visualflow_compiler/visual/executor.py
@@ -0,0 +1,2804 @@
+"""VisualFlow lowering utilities (VisualFlow JSON → Flow IR).
+
+This is a stdlib-only subset of AbstractFlow's visual executor, extracted into
+AbstractRuntime so the VisualFlow compiler can run without importing AbstractFlow.
+
+Scope:
+- VisualFlow → Flow lowering (no runtime wiring / no web/editor host concerns)
+"""
+
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from ..flow import Flow
+
+from .agent_ids import visual_react_workflow_id
+from .builtins import get_builtin_handler
+from .code_executor import create_code_handler
+from .models import NodeType, VisualEdge, VisualFlow
+
+
+# Type alias for data edge mapping
+# Maps target_node_id -> { target_pin -> (source_node_id, source_pin) }
+DataEdgeMap = Dict[str, Dict[str, tuple[str, str]]]
+
+def _build_data_edge_map(edges: List[VisualEdge]) -> DataEdgeMap:
+    """Build a mapping of data edges for input resolution."""
+    data_edges: DataEdgeMap = {}
+
+    for edge in edges:
+        # Skip execution edges
+        if edge.sourceHandle == "exec-out" or edge.targetHandle == "exec-in":
+            continue
+
+        if edge.target not in data_edges:
+            data_edges[edge.target] = {}
+
+        data_edges[edge.target][edge.targetHandle] = (edge.source, edge.sourceHandle)
+
+    return data_edges
+
+
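For orientation, an illustrative sketch (not lines from the package) of the structure `_build_data_edge_map` returns: a data edge from pin `result` of node `A` into pin `text` of node `B`, with execution edges skipped, yields

    # target_node_id -> { target_pin -> (source_node_id, source_pin) }
    {"B": {"text": ("A", "result")}}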
+def visual_to_flow(visual: VisualFlow) -> Flow:
+    """Convert a visual flow definition to an AbstractFlow `Flow`."""
+    import datetime
+
+    flow = Flow(visual.id)
+
+    data_edge_map = _build_data_edge_map(visual.edges)
+
+    # Store node outputs during execution (visual data-edge evaluation cache)
+    flow._node_outputs = {}  # type: ignore[attr-defined]
+    flow._data_edge_map = data_edge_map  # type: ignore[attr-defined]
+    flow._pure_node_ids = set()  # type: ignore[attr-defined]
+    flow._volatile_pure_node_ids = set()  # type: ignore[attr-defined]
+    # Snapshot of "static" node outputs (literals, schemas, etc.). This is used to
+    # reset the in-memory cache when the same compiled VisualFlow is executed by
+    # multiple runs (e.g. recursive/mutual subflows). See compiler._sync_effect_results_to_node_outputs.
+    flow._static_node_outputs = {}  # type: ignore[attr-defined]
+    flow._active_run_id = None  # type: ignore[attr-defined]
+
+    def _normalize_pin_defaults(raw: Any) -> Dict[str, Any]:
+        if not isinstance(raw, dict):
+            return {}
+        out: Dict[str, Any] = {}
+        for k, v in raw.items():
+            if not isinstance(k, str) or not k:
+                continue
+            # Allow JSON-serializable values (including arrays/objects) for defaults.
+            # These are cloned at use-sites to avoid cross-run mutation.
+            if v is None or isinstance(v, (str, int, float, bool, dict, list)):
+                out[k] = v
+        return out
+
+    def _clone_default(value: Any) -> Any:
+        # Prevent accidental shared-mutation of dict/list defaults across runs.
+        if isinstance(value, (dict, list)):
+            try:
+                import copy
+
+                return copy.deepcopy(value)
+            except Exception:
+                return value
+        return value
+
+    pin_defaults_by_node_id: Dict[str, Dict[str, Any]] = {}
+    for node in visual.nodes:
+        raw_defaults = node.data.get("pinDefaults") if isinstance(node.data, dict) else None
+        normalized = _normalize_pin_defaults(raw_defaults)
+        if normalized:
+            pin_defaults_by_node_id[node.id] = normalized
+
+    LITERAL_NODE_TYPES = {
+        "literal_string",
+        "literal_number",
+        "literal_boolean",
+        "literal_json",
+        "json_schema",
+        "literal_array",
+    }
+
+    pure_base_handlers: Dict[str, Any] = {}
+    pure_node_ids: set[str] = set()
+
+    def _has_execution_pins(type_str: str, node_data: Dict[str, Any]) -> bool:
+        pins: list[Any] = []
+        inputs = node_data.get("inputs")
+        outputs = node_data.get("outputs")
+        if isinstance(inputs, list):
+            pins.extend(inputs)
+        if isinstance(outputs, list):
+            pins.extend(outputs)
+
+        if pins:
+            for p in pins:
+                if isinstance(p, dict) and p.get("type") == "execution":
+                    return True
+            return False
+
+        if type_str in LITERAL_NODE_TYPES:
+            return False
+        # These nodes are pure (data-only) even if the JSON document omitted template pins.
+        # This keeps programmatic tests and host-built VisualFlows portable.
+        if type_str in {"get_var", "get_context", "bool_var", "var_decl"}:
+            return False
+        if type_str == "break_object":
+            return False
+        if type_str == "tool_parameters":
+            return False
+        if get_builtin_handler(type_str) is not None:
+            return False
+        return True
+
+    evaluating: set[str] = set()
+    volatile_pure_node_ids: set[str] = getattr(flow, "_volatile_pure_node_ids", set())  # type: ignore[attr-defined]
+
+    def _ensure_node_output(node_id: str) -> None:
+        if node_id in flow._node_outputs and node_id not in volatile_pure_node_ids:  # type: ignore[attr-defined]
+            return
+
+        handler = pure_base_handlers.get(node_id)
+        if handler is None:
+            return
+
+        if node_id in evaluating:
+            raise ValueError(f"Data edge cycle detected at '{node_id}'")
+
+        evaluating.add(node_id)
+        try:
+            resolved_input: Dict[str, Any] = {}
+
+            for target_pin, (source_node, source_pin) in data_edge_map.get(node_id, {}).items():
+                _ensure_node_output(source_node)
+                if source_node not in flow._node_outputs:  # type: ignore[attr-defined]
+                    continue
+                source_output = flow._node_outputs[source_node]  # type: ignore[attr-defined]
+                if isinstance(source_output, dict) and source_pin in source_output:
+                    resolved_input[target_pin] = source_output[source_pin]
+                elif source_pin in ("result", "output"):
+                    resolved_input[target_pin] = source_output
+
+            defaults = pin_defaults_by_node_id.get(node_id)
+            if defaults:
+                for pin_id, value in defaults.items():
+                    if pin_id in data_edge_map.get(node_id, {}):
+                        continue
+                    if pin_id not in resolved_input:
+                        resolved_input[pin_id] = _clone_default(value)
+
+            result = handler(resolved_input if resolved_input else {})
+            flow._node_outputs[node_id] = result  # type: ignore[attr-defined]
+        finally:
+            # IMPORTANT: even if an upstream pure node raises (bad input / parse_json failure),
+            # we must not leave `node_id` in `evaluating`, otherwise later evaluations can
+            # surface as a misleading "data edge cycle" at this node.
+            try:
+                evaluating.remove(node_id)
+            except KeyError:
+                pass
+
+    EFFECT_NODE_TYPES = {
+        "ask_user",
+        "answer_user",
+        "llm_call",
+        "tool_calls",
+        "call_tool",
+        "wait_until",
+        "wait_event",
+        "emit_event",
+        "memory_note",
+        "memory_query",
+        "memory_tag",
+        "memory_compact",
+        "memory_rehydrate",
+        "memory_kg_assert",
+        "memory_kg_query",
+        "memory_kg_resolve",
+    }
+
+    literal_node_ids: set[str] = set()
+    # Pre-evaluate literal nodes and store their values
+    for node in visual.nodes:
+        type_str = node.type.value if hasattr(node.type, "value") else str(node.type)
+        if type_str in LITERAL_NODE_TYPES:
+            literal_value = node.data.get("literalValue")
+            flow._node_outputs[node.id] = {"value": literal_value}  # type: ignore[attr-defined]
+            literal_node_ids.add(node.id)
+    # Capture baseline outputs (typically only literal nodes). This baseline must
+    # remain stable across runs so we can safely reset `_node_outputs` when switching
+    # between different `RunState.run_id` contexts (self-recursive subflows).
+    try:
+        flow._static_node_outputs = dict(flow._node_outputs)  # type: ignore[attr-defined]
+    except Exception:
+        flow._static_node_outputs = {}  # type: ignore[attr-defined]
+
+    # Compute execution reachability and ignore disconnected execution nodes.
+    #
+    # Visual editors often contain experimentation / orphan nodes. These should not
+    # prevent execution of the reachable pipeline.
+    exec_node_ids: set[str] = set()
+    for node in visual.nodes:
+        type_str = node.type.value if hasattr(node.type, "value") else str(node.type)
+        if type_str in LITERAL_NODE_TYPES:
+            continue
+        if _has_execution_pins(type_str, node.data):
+            exec_node_ids.add(node.id)
+
+    def _pick_entry() -> Optional[str]:
+        # Prefer explicit entryNode if it is an execution node.
+        if isinstance(getattr(visual, "entryNode", None), str) and visual.entryNode in exec_node_ids:
+            return visual.entryNode
+        # Otherwise, infer entry as a node with no incoming execution edges.
+        targets = {e.target for e in visual.edges if getattr(e, "targetHandle", None) == "exec-in"}
+        for node in visual.nodes:
+            if node.id in exec_node_ids and node.id not in targets:
+                return node.id
+        # Fallback: first exec node in document order
+        for node in visual.nodes:
+            if node.id in exec_node_ids:
+                return node.id
+        return None
+
+    entry_exec = _pick_entry()
+    reachable_exec: set[str] = set()
+    if entry_exec:
+        adj: Dict[str, list[str]] = {}
+        for e in visual.edges:
+            if getattr(e, "targetHandle", None) != "exec-in":
+                continue
+            if e.source not in exec_node_ids or e.target not in exec_node_ids:
+                continue
+            adj.setdefault(e.source, []).append(e.target)
+        stack = [entry_exec]
+        while stack:
+            cur = stack.pop()
+            if cur in reachable_exec:
+                continue
+            reachable_exec.add(cur)
+            for nxt in adj.get(cur, []):
+                if nxt not in reachable_exec:
+                    stack.append(nxt)
+
+    ignored_exec = sorted([nid for nid in exec_node_ids if nid not in reachable_exec])
+    if ignored_exec:
+        # Runtime-local metadata for hosts/UIs that want to show warnings.
+        flow._ignored_exec_nodes = ignored_exec  # type: ignore[attr-defined]
+
+    def _decode_separator(value: str) -> str:
+        return value.replace("\\n", "\n").replace("\\t", "\t").replace("\\r", "\r")
+
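Illustrative behavior (not lines from the diff): separators arrive from the visual JSON with escaped backslashes, so

    _decode_separator(r"\n")    # -> "\n" (a real newline)
    _decode_separator(r"a\tb")  # -> "a<TAB>b" (escaped tab decoded)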
+    def _create_read_file_handler(_data: Dict[str, Any]):
+        import json
+        from pathlib import Path
+
+        def handler(input_data: Any) -> Dict[str, Any]:
+            payload = input_data if isinstance(input_data, dict) else {}
+            raw_path = payload.get("file_path")
+            if not isinstance(raw_path, str) or not raw_path.strip():
+                raise ValueError("read_file requires a non-empty 'file_path' input.")
+
+            file_path = raw_path.strip()
+            path = Path(file_path).expanduser()
+            if not path.is_absolute():
+                path = Path.cwd() / path
+
+            if not path.exists():
+                raise FileNotFoundError(f"File not found: {file_path}")
+            if not path.is_file():
+                raise ValueError(f"Not a file: {file_path}")
+
+            try:
+                text = path.read_text(encoding="utf-8")
+            except UnicodeDecodeError as e:
+                raise ValueError(f"Cannot read '{file_path}' as UTF-8: {e}") from e
+
+            # Detect JSON primarily from file extension; also opportunistically parse
+            # when the content looks like JSON. Markdown and text are returned as-is.
+            lower_name = path.name.lower()
+            content_stripped = text.lstrip()
+            looks_like_json = bool(content_stripped) and content_stripped[0] in "{["
+
+            if lower_name.endswith(".json"):
+                try:
+                    return {"content": json.loads(text)}
+                except Exception as e:
+                    raise ValueError(f"Invalid JSON in '{file_path}': {e}") from e
+
+            if looks_like_json:
+                try:
+                    return {"content": json.loads(text)}
+                except Exception:
+                    pass
+
+            return {"content": text}
+
+        return handler
+
+    def _create_write_file_handler(_data: Dict[str, Any]):
+        import json
+        from pathlib import Path
+
+        def handler(input_data: Any) -> Dict[str, Any]:
+            payload = input_data if isinstance(input_data, dict) else {}
+            raw_path = payload.get("file_path")
+            if not isinstance(raw_path, str) or not raw_path.strip():
+                raise ValueError("write_file requires a non-empty 'file_path' input.")
+
+            file_path = raw_path.strip()
+            path = Path(file_path).expanduser()
+            if not path.is_absolute():
+                path = Path.cwd() / path
+
+            raw_content = payload.get("content")
+
+            if path.name.lower().endswith(".json"):
+                if isinstance(raw_content, str):
+                    try:
+                        raw_content = json.loads(raw_content)
+                    except Exception as e:
+                        raise ValueError(f"write_file JSON content must be valid JSON: {e}") from e
+                text = json.dumps(raw_content, indent=2, ensure_ascii=False)
+                if not text.endswith("\n"):
+                    text += "\n"
+            else:
+                if raw_content is None:
+                    text = ""
+                elif isinstance(raw_content, str):
+                    text = raw_content
+                elif isinstance(raw_content, (dict, list)):
+                    text = json.dumps(raw_content, indent=2, ensure_ascii=False)
+                else:
+                    text = str(raw_content)
+
+            path.parent.mkdir(parents=True, exist_ok=True)
+            path.write_text(text, encoding="utf-8")
+
+            return {"bytes": len(text.encode("utf-8")), "file_path": str(path)}
+
+        return handler
+
+    def _create_concat_handler(data: Dict[str, Any]):
+        config = data.get("concatConfig", {}) if isinstance(data, dict) else {}
+        separator = " "
+        if isinstance(config, dict):
+            sep_raw = config.get("separator")
+            if isinstance(sep_raw, str):
+                separator = sep_raw
+        separator = _decode_separator(separator)
+
+        pin_order: list[str] = []
+        pins = data.get("inputs") if isinstance(data, dict) else None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("type") == "execution":
+                    continue
+                pid = p.get("id")
+                if isinstance(pid, str) and pid:
+                    pin_order.append(pid)
+
+        if not pin_order:
+            # Backward-compat: programmatic/test-created VisualNodes may omit template pins.
+            # In that case, infer a stable pin order from the provided input keys at runtime
+            # (prefer a..z single-letter pins), so `a`, `b`, ... behave as expected.
+            pin_order = []
+
+        def handler(input_data: Any) -> str:
+            if not isinstance(input_data, dict):
+                return str(input_data or "")
+
+            parts: list[str] = []
+            if pin_order:
+                order = pin_order
+            else:
+                # Stable inference for missing pin metadata.
+                keys = [k for k in input_data.keys() if isinstance(k, str)]
+                letter = sorted([k for k in keys if len(k) == 1 and "a" <= k <= "z"])
+                other = sorted([k for k in keys if k not in set(letter)])
+                order = letter + other
+
+            for pid in order:
+                if pid in input_data:
+                    v = input_data.get(pid)
+                    parts.append("" if v is None else str(v))
+            return separator.join(parts)
+
+        return handler
+
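A usage sketch of the backward-compat path above (illustrative, assuming no pin metadata): single-letter keys are ordered a..z before other keys, so conceptually

    handler = _create_concat_handler({})   # default separator " "
    handler({"b": "world", "a": "hello"})  # -> "hello world"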
+    def _create_make_array_handler(data: Dict[str, Any]):
+        """Build an array from 1+ inputs in pin order.
+
+        Design:
+        - We treat missing/unset pins as absent (skip None) to avoid surprising `null`
+          elements when a pin is present but unconnected.
+        - We do NOT flatten arrays/tuples; if you want flattening/concatenation,
+          use `array_concat`.
+        """
+        pin_order: list[str] = []
+        pins = data.get("inputs") if isinstance(data, dict) else None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("type") == "execution":
+                    continue
+                pid = p.get("id")
+                if isinstance(pid, str) and pid:
+                    pin_order.append(pid)
+
+        if not pin_order:
+            pin_order = ["a", "b"]
+
+        def handler(input_data: Any) -> list[Any]:
+            if not isinstance(input_data, dict):
+                if input_data is None:
+                    return []
+                if isinstance(input_data, list):
+                    return list(input_data)
+                if isinstance(input_data, tuple):
+                    return list(input_data)
+                return [input_data]
+
+            out: list[Any] = []
+            for pid in pin_order:
+                if pid not in input_data:
+                    continue
+                v = input_data.get(pid)
+                if v is None:
+                    continue
+                out.append(v)
+            return out
+
+        return handler
+
+    def _create_array_concat_handler(data: Dict[str, Any]):
+        pin_order: list[str] = []
+        pins = data.get("inputs") if isinstance(data, dict) else None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("type") == "execution":
+                    continue
+                pid = p.get("id")
+                if isinstance(pid, str) and pid:
+                    pin_order.append(pid)
+
+        if not pin_order:
+            pin_order = ["a", "b"]
+
+        def handler(input_data: Any) -> list[Any]:
+            if not isinstance(input_data, dict):
+                if input_data is None:
+                    return []
+                if isinstance(input_data, list):
+                    return list(input_data)
+                if isinstance(input_data, tuple):
+                    return list(input_data)
+                return [input_data]
+
+            out: list[Any] = []
+            for pid in pin_order:
+                if pid not in input_data:
+                    continue
+                v = input_data.get(pid)
+                if v is None:
+                    continue
+                if isinstance(v, list):
+                    out.extend(v)
+                    continue
+                if isinstance(v, tuple):
+                    out.extend(list(v))
+                    continue
+                out.append(v)
+            return out
+
+        return handler
+
+    def _create_tool_parameters_handler(data: Dict[str, Any]):
+        cfg = data.get("toolParametersConfig", {}) if isinstance(data, dict) else {}
+        tool_name = ""
+        if isinstance(cfg, dict):
+            raw = cfg.get("tool") or cfg.get("name")
+            if isinstance(raw, str):
+                tool_name = raw.strip()
+
+        param_ids: list[str] = []
+        pins = data.get("inputs") if isinstance(data, dict) else None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("type") == "execution":
+                    continue
+                pid = p.get("id")
+                if isinstance(pid, str) and pid.strip():
+                    param_ids.append(pid.strip())
+
+        def handler(input_data: Any) -> Dict[str, Any]:
+            payload = input_data if isinstance(input_data, dict) else {}
+
+            args: Dict[str, Any] = {}
+            out: Dict[str, Any] = {}
+
+            for pid in param_ids:
+                value = payload.get(pid) if isinstance(payload, dict) else None
+                out[pid] = value
+                if value is not None:
+                    args[pid] = value
+
+            out["tool_call"] = {"name": tool_name, "arguments": args}
+            return out
+
+        return handler
+
+    def _create_break_object_handler(data: Dict[str, Any]):
+        config = data.get("breakConfig", {}) if isinstance(data, dict) else {}
+        selected = config.get("selectedPaths", []) if isinstance(config, dict) else []
+        selected_paths = [p.strip() for p in selected if isinstance(p, str) and p.strip()]
+
+        def _get_path(value: Any, path: str) -> Any:
+            current = value
+            for part in path.split("."):
+                if current is None:
+                    return None
+                if isinstance(current, dict):
+                    current = current.get(part)
+                    continue
+                if isinstance(current, list) and part.isdigit():
+                    idx = int(part)
+                    if idx < 0 or idx >= len(current):
+                        return None
+                    current = current[idx]
+                    continue
+                return None
+            return current
+
+        def handler(input_data):
+            src_obj = None
+            if isinstance(input_data, dict):
+                src_obj = input_data.get("object")
+
+            # Best-effort: tolerate JSON-ish strings (common when breaking LLM outputs).
+            if isinstance(src_obj, str) and src_obj.strip():
+                try:
+                    parser = get_builtin_handler("parse_json")
+                    if parser is not None:
+                        src_obj = parser({"text": src_obj, "wrap_scalar": True})
+                except Exception:
+                    pass
+
+            out: Dict[str, Any] = {}
+            for path in selected_paths:
+                out[path] = _get_path(src_obj, path)
+            return out
+
+        return handler
+
+    def _get_by_path(value: Any, path: str) -> Any:
+        """Best-effort dotted-path lookup supporting dicts and numeric list indices."""
+        current = value
+        for part in path.split("."):
+            if current is None:
+                return None
+            if isinstance(current, dict):
+                current = current.get(part)
+                continue
+            if isinstance(current, list) and part.isdigit():
+                idx = int(part)
+                if idx < 0 or idx >= len(current):
+                    return None
+                current = current[idx]
+                continue
+            return None
+        return current
+
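Illustrative lookups (not lines from the diff):

    _get_by_path({"a": {"b": [10, 20]}}, "a.b.1")  # -> 20 (digit parts index lists)
    _get_by_path({"a": {"b": [10, 20]}}, "a.x.y")  # -> None (missing segments yield None)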
+    def _create_get_var_handler(_data: Dict[str, Any]):
+        # Pure node: reads from the current run vars (attached onto the Flow by the compiler).
+        # Mark as volatile so it is recomputed whenever requested (avoids stale cached reads).
+        def handler(input_data: Any) -> Dict[str, Any]:
+            payload = input_data if isinstance(input_data, dict) else {}
+            raw_name = payload.get("name")
+            name = (raw_name if isinstance(raw_name, str) else str(raw_name or "")).strip()
+            run_vars = getattr(flow, "_run_vars", None)  # type: ignore[attr-defined]
+            if not isinstance(run_vars, dict) or not name:
+                return {"value": None}
+            return {"value": _get_by_path(run_vars, name)}
+
+        return handler
+
+    def _create_get_context_handler(_data: Dict[str, Any]):
+        # Pure node: reads from the current run vars (attached onto the Flow by the compiler).
+        # Mark as volatile so it is recomputed whenever requested (avoids stale cached reads).
+        def handler(_input_data: Any) -> Dict[str, Any]:
+            del _input_data
+            run_vars = getattr(flow, "_run_vars", None)  # type: ignore[attr-defined]
+            if not isinstance(run_vars, dict):
+                return {"context": {}, "task": "", "messages": []}
+
+            ctx = run_vars.get("context")
+            ctx_dict = ctx if isinstance(ctx, dict) else {}
+
+            task = ctx_dict.get("task")
+            task_str = task if isinstance(task, str) else str(task or "")
+
+            msgs = ctx_dict.get("messages")
+            if isinstance(msgs, list):
+                messages = msgs
+            elif isinstance(msgs, tuple):
+                messages = list(msgs)
+            elif msgs is None:
+                messages = []
+            else:
+                messages = [msgs]
+
+            return {"context": ctx_dict, "task": task_str, "messages": messages}
+
+        return handler
+
+    def _create_bool_var_handler(data: Dict[str, Any]):
+        """Pure node: reads a workflow-level boolean variable from run.vars with a default.
+
+        Config is stored in the visual node's `literalValue` as either:
+        - a string: variable name
+        - an object: { "name": "...", "default": true|false }
+        """
+        raw_cfg = data.get("literalValue")
+        name_cfg = ""
+        default_cfg = False
+        if isinstance(raw_cfg, str):
+            name_cfg = raw_cfg.strip()
+        elif isinstance(raw_cfg, dict):
+            n = raw_cfg.get("name")
+            if isinstance(n, str):
+                name_cfg = n.strip()
+            d = raw_cfg.get("default")
+            if isinstance(d, bool):
+                default_cfg = d
+
+        def handler(input_data: Any) -> Dict[str, Any]:
+            del input_data
+            run_vars = getattr(flow, "_run_vars", None)  # type: ignore[attr-defined]
+            if not isinstance(run_vars, dict) or not name_cfg:
+                return {"name": name_cfg, "value": bool(default_cfg)}
+
+            raw = _get_by_path(run_vars, name_cfg)
+            if isinstance(raw, bool):
+                return {"name": name_cfg, "value": raw}
+            return {"name": name_cfg, "value": bool(default_cfg)}
+
+        return handler
+
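Illustrative `literalValue` shapes for `bool_var` (hypothetical variable name):

    "use_cache"                             # bare name; default stays False
    {"name": "use_cache", "default": True}  # explicit default
    # With run.vars == {"use_cache": "yes"} (not a bool), the handler returns
    # {"name": "use_cache", "value": True}: non-boolean values fall back to the default.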
+    def _create_var_decl_handler(data: Dict[str, Any]):
+        """Pure node: typed workflow variable declaration (name + type + default).
+
+        Config is stored in `literalValue`:
+        { "name": "...", "type": "boolean|number|string|object|array|any", "default": ... }
+
+        Runtime semantics:
+        - Read `run.vars[name]` (via `flow._run_vars`), and return it if it matches the declared type.
+        - Otherwise fall back to the declared default.
+        """
+        raw_cfg = data.get("literalValue")
+        name_cfg = ""
+        type_cfg = "any"
+        default_cfg: Any = None
+        if isinstance(raw_cfg, dict):
+            n = raw_cfg.get("name")
+            if isinstance(n, str):
+                name_cfg = n.strip()
+            t = raw_cfg.get("type")
+            if isinstance(t, str) and t.strip():
+                type_cfg = t.strip()
+            default_cfg = raw_cfg.get("default")
+
+        allowed_types = {"boolean", "number", "string", "object", "array", "any"}
+        if type_cfg not in allowed_types:
+            type_cfg = "any"
+
+        def _matches(v: Any) -> bool:
+            if type_cfg == "any":
+                return True
+            if type_cfg == "boolean":
+                return isinstance(v, bool)
+            if type_cfg == "number":
+                return isinstance(v, (int, float)) and not isinstance(v, bool)
+            if type_cfg == "string":
+                return isinstance(v, str)
+            if type_cfg == "array":
+                return isinstance(v, list)
+            if type_cfg == "object":
+                return isinstance(v, dict)
+            return True
+
+        def _default_for_type() -> Any:
+            if type_cfg == "boolean":
+                return False
+            if type_cfg == "number":
+                return 0
+            if type_cfg == "string":
+                return ""
+            if type_cfg == "array":
+                return []
+            if type_cfg == "object":
+                return {}
+            return None
+
+        def handler(input_data: Any) -> Dict[str, Any]:
+            del input_data
+            run_vars = getattr(flow, "_run_vars", None)  # type: ignore[attr-defined]
+            if not isinstance(run_vars, dict) or not name_cfg:
+                v = default_cfg if _matches(default_cfg) else _default_for_type()
+                return {"name": name_cfg, "value": v}
+
+            raw = _get_by_path(run_vars, name_cfg)
+            if _matches(raw):
+                return {"name": name_cfg, "value": raw}
+
+            v = default_cfg if _matches(default_cfg) else _default_for_type()
+            return {"name": name_cfg, "value": v}
+
+        return handler
+
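A sketch of the typed fallback (illustrative names): `_matches` excludes bool from "number", so

    # literalValue == {"name": "retries", "type": "number", "default": 3}
    # run.vars == {"retries": 5}    -> {"name": "retries", "value": 5}
    # run.vars == {"retries": True} -> {"name": "retries", "value": 3} (bool is not a number)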
+    def _create_set_var_handler(_data: Dict[str, Any]):
+        # Execution node: does not mutate run.vars here (handled by compiler adapter).
+        # This handler exists to participate in data-edge resolution and expose outputs.
+        #
+        # Important UX contract:
+        # - In the visual editor, primitive pins (boolean/number/string) show default UI controls
+        #   even when the user hasn't explicitly edited them.
+        # - If we treat "missing" as None here, `Set Variable` would write None and this can
+        #   cause typed `Variable` (`var_decl`) to fall back to its default (e.g. staying True).
+        # - Therefore we default missing primitive values to their natural defaults.
+        pins = _data.get("inputs") if isinstance(_data, dict) else None
+        value_pin_type: Optional[str] = None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("id") != "value":
+                    continue
+                t = p.get("type")
+                if isinstance(t, str) and t:
+                    value_pin_type = t
+                break
+
+        def handler(input_data: Any) -> Dict[str, Any]:
+            payload = input_data if isinstance(input_data, dict) else {}
+            value_specified = isinstance(payload, dict) and "value" in payload
+            value = payload.get("value")
+
+            if not value_specified:
+                if value_pin_type == "boolean":
+                    value = False
+                elif value_pin_type == "number":
+                    value = 0
+                elif value_pin_type == "string":
+                    value = ""
+
+            return {"name": payload.get("name"), "value": value}
+
+        return handler
+
+    def _wrap_builtin(handler, data: Dict[str, Any]):
+        literal_value = data.get("literalValue")
+        # Preserve pin order for builtins that need deterministic input selection (e.g. coalesce).
+        pin_order: list[str] = []
+        pins = data.get("inputs") if isinstance(data, dict) else None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("type") == "execution":
+                    continue
+                pid = p.get("id")
+                if isinstance(pid, str) and pid:
+                    pin_order.append(pid)
+
+        def wrapped(input_data):
+            if isinstance(input_data, dict):
+                inputs = input_data.copy()
+            else:
+                inputs = {"value": input_data, "a": input_data, "text": input_data}
+
+            if literal_value is not None:
+                inputs["_literalValue"] = literal_value
+            if pin_order:
+                inputs["_pin_order"] = list(pin_order)
+
+            return handler(inputs)
+
+        return wrapped
+
+    def _create_agent_input_handler(data: Dict[str, Any]):
+        cfg = data.get("agentConfig", {}) if isinstance(data, dict) else {}
+        cfg = cfg if isinstance(cfg, dict) else {}
+
+        def _normalize_response_schema(raw: Any) -> Optional[Dict[str, Any]]:
+            """Normalize a structured-output schema input into a JSON Schema dict.
+
+            Supported inputs (best-effort):
+            - JSON Schema dict: {"type":"object","properties":{...}, ...}
+            - LMStudio/OpenAI-style wrapper: {"type":"json_schema","json_schema": {"schema": {...}}}
+            """
+            if raw is None:
+                return None
+
+            schema: Optional[Dict[str, Any]] = None
+            if isinstance(raw, dict):
+                if raw.get("type") == "json_schema" and isinstance(raw.get("json_schema"), dict):
+                    inner = raw.get("json_schema")
+                    if isinstance(inner, dict) and isinstance(inner.get("schema"), dict):
+                        schema = dict(inner.get("schema") or {})
+                else:
+                    schema = dict(raw)
+
+            if not isinstance(schema, dict) or not schema:
+                return None
+
+            # Resolve stable schema refs (no silent fallback).
+            ref = schema.get("$ref")
+            if isinstance(ref, str) and ref.strip().startswith("abstractsemantics:"):
+                try:
+                    from abstractsemantics import resolve_schema_ref  # type: ignore
+                except Exception as e:
+                    import sys
+
+                    raise RuntimeError(
+                        "Structured-output schema ref requires `abstractsemantics` to be installed in the same "
+                        f"Python environment as the runtime/gateway (cannot resolve $ref={ref!r}). "
+                        f"Current python: {sys.executable}. Install with: `pip install abstractsemantics`."
+                    ) from e
+                resolved = resolve_schema_ref(schema)
+                if isinstance(resolved, dict) and resolved:
+                    return resolved
+                raise RuntimeError(f"Unknown structured-output schema ref: {ref}")
+
+            return schema
+
+        def _normalize_tool_names(raw: Any) -> list[str]:
+            if raw is None:
+                return []
+            items: list[Any]
+            if isinstance(raw, list):
+                items = raw
+            elif isinstance(raw, tuple):
+                items = list(raw)
+            else:
+                items = [raw]
+            out: list[str] = []
+            for t in items:
+                if isinstance(t, str) and t.strip():
+                    out.append(t.strip())
+            # preserve order, remove duplicates
+            seen: set[str] = set()
+            uniq: list[str] = []
+            for t in out:
+                if t in seen:
+                    continue
+                seen.add(t)
+                uniq.append(t)
+            return uniq
+
+        def handler(input_data):
+            task = ""
+            if isinstance(input_data, dict):
+                raw_task = input_data.get("prompt")
+                task = "" if raw_task is None else str(raw_task)
+            else:
+                task = str(input_data)
+
+            context_raw = input_data.get("context", {}) if isinstance(input_data, dict) else {}
+            context = context_raw if isinstance(context_raw, dict) else {}
+            provider = input_data.get("provider") if isinstance(input_data, dict) else None
+            model = input_data.get("model") if isinstance(input_data, dict) else None
+
+            system_raw = input_data.get("system") if isinstance(input_data, dict) else ""
+            system = system_raw if isinstance(system_raw, str) else str(system_raw or "")
+
+            tools_specified = isinstance(input_data, dict) and "tools" in input_data
+            tools_raw = input_data.get("tools") if isinstance(input_data, dict) else None
+            tools = _normalize_tool_names(tools_raw) if tools_specified else []
+            if not tools_specified:
+                tools = _normalize_tool_names(cfg.get("tools"))
+
+            out: Dict[str, Any] = {
+                "task": task,
+                "context": context,
+                "provider": provider if isinstance(provider, str) else None,
+                "model": model if isinstance(model, str) else None,
+                "system": system,
+                "tools": tools,
+            }
+
+            # Optional pin overrides (passed through for compiler/runtime consumption).
+            if isinstance(input_data, dict) and "max_iterations" in input_data:
+                out["max_iterations"] = input_data.get("max_iterations")
+            if isinstance(input_data, dict) and (
+                "max_in_tokens" in input_data or "max_input_tokens" in input_data or "maxInputTokens" in input_data
+            ):
+                if "max_in_tokens" in input_data:
+                    out["max_input_tokens"] = input_data.get("max_in_tokens")
+                elif "max_input_tokens" in input_data:
+                    out["max_input_tokens"] = input_data.get("max_input_tokens")
+                else:
+                    out["max_input_tokens"] = input_data.get("maxInputTokens")
+            if isinstance(input_data, dict) and (
+                "max_out_tokens" in input_data or "max_output_tokens" in input_data or "maxOutputTokens" in input_data
+            ):
+                if "max_out_tokens" in input_data:
+                    out["max_output_tokens"] = input_data.get("max_out_tokens")
+                elif "max_output_tokens" in input_data:
+                    out["max_output_tokens"] = input_data.get("max_output_tokens")
+                else:
+                    out["max_output_tokens"] = input_data.get("maxOutputTokens")
+            if isinstance(input_data, dict) and "temperature" in input_data:
+                out["temperature"] = input_data.get("temperature")
+            if isinstance(input_data, dict) and "seed" in input_data:
+                out["seed"] = input_data.get("seed")
+
+            if isinstance(input_data, dict) and ("resp_schema" in input_data or "response_schema" in input_data):
+                schema = _normalize_response_schema(
+                    input_data.get("resp_schema")
+                    if "resp_schema" in input_data
+                    else input_data.get("response_schema")
+                )
+                if isinstance(schema, dict) and schema:
+                    out["response_schema"] = schema
+
+            include_context_specified = isinstance(input_data, dict) and (
+                "include_context" in input_data or "use_context" in input_data
+            )
+            if include_context_specified:
+                raw_inc = (
+                    input_data.get("include_context")
+                    if isinstance(input_data, dict) and "include_context" in input_data
+                    else input_data.get("use_context") if isinstance(input_data, dict) else None
+                )
+                out["include_context"] = _coerce_bool(raw_inc)
+
+            # Memory-source access pins (467): pass through for compiler/runtime pre-call scheduling.
+            if isinstance(input_data, dict):
+                # v0: memory object pin (preferred). Expand into legacy per-pin keys so
+                # the compiler/runtime logic remains backward compatible.
+                mem_obj = input_data.get("memory")
+                if isinstance(mem_obj, dict):
+                    for bool_key in (
+                        "use_session_attachments",
+                        "use_span_memory",
+                        "use_semantic_search",
+                        "use_kg_memory",
+                    ):
+                        if bool_key in input_data:
+                            continue
+                        if bool_key in mem_obj and mem_obj.get(bool_key) is not None:
+                            out[bool_key] = _coerce_bool(mem_obj.get(bool_key))
+
+                    for raw_key in (
+                        "memory_query",
+                        "memory_scope",
+                        "recall_level",
+                        "max_span_messages",
+                        "kg_max_input_tokens",
+                        "kg_limit",
+                        "kg_min_score",
+                    ):
+                        if raw_key in input_data:
+                            continue
+                        if raw_key in mem_obj and mem_obj.get(raw_key) is not None:
+                            out[raw_key] = mem_obj.get(raw_key)
+
+                for bool_key in (
+                    "use_session_attachments",
+                    "use_span_memory",
+                    "use_semantic_search",
+                    "use_kg_memory",
+                ):
+                    if bool_key in input_data:
+                        out[bool_key] = _coerce_bool(input_data.get(bool_key))
+
+                for raw_key in (
+                    "memory_query",
+                    "memory_scope",
+                    "recall_level",
+                    "max_span_messages",
+                    "kg_max_input_tokens",
+                    "kg_limit",
+                    "kg_min_score",
+                ):
+                    if raw_key in input_data:
+                        out[raw_key] = input_data.get(raw_key)
+
+            return out
+
+        return handler
+
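Both schema input shapes accepted by `_normalize_response_schema` above reduce to the same plain JSON Schema dict (illustrative example):

    {"type": "object", "properties": {"city": {"type": "string"}}}
    {"type": "json_schema", "json_schema": {"schema": {"type": "object", "properties": {"city": {"type": "string"}}}}}
    # both normalize to: {"type": "object", "properties": {"city": {"type": "string"}}}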
+    def _create_subflow_effect_builder(data: Dict[str, Any]):
+        input_pin_ids: list[str] = []
+        pins = data.get("inputs") if isinstance(data, dict) else None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("type") == "execution":
+                    continue
+                pid = p.get("id")
+                if isinstance(pid, str) and pid:
+                    # Control pin (not forwarded into child vars).
+                    if pid in {"inherit_context", "inheritContext"}:
+                        continue
+                    input_pin_ids.append(pid)
+
+        inherit_cfg = None
+        if isinstance(data, dict):
+            cfg = data.get("effectConfig")
+            if isinstance(cfg, dict):
+                inherit_cfg = cfg.get("inherit_context")
+                if inherit_cfg is None:
+                    inherit_cfg = cfg.get("inheritContext")
+        inherit_context_default = bool(inherit_cfg) if inherit_cfg is not None else False
+
+        def handler(input_data):
+            subflow_id = (
+                data.get("subflowId")
+                or data.get("flowId")  # legacy
+                or data.get("workflowId")
+                or data.get("workflow_id")
+            )
+
+            sub_vars_dict: Dict[str, Any] = {}
+            if isinstance(input_data, dict):
+                base: Dict[str, Any] = {}
+                if isinstance(input_data.get("vars"), dict):
+                    base.update(dict(input_data["vars"]))
+                elif isinstance(input_data.get("input"), dict):
+                    base.update(dict(input_data["input"]))
+
+                if input_pin_ids:
+                    for pid in input_pin_ids:
+                        if pid in ("vars", "input") and isinstance(input_data.get(pid), dict):
+                            continue
+                        if pid in input_data:
+                            base[pid] = input_data.get(pid)
+                    sub_vars_dict = base
+                else:
+                    if base:
+                        sub_vars_dict = base
+                    else:
+                        sub_vars_dict = dict(input_data)
+            else:
+                if input_pin_ids and len(input_pin_ids) == 1:
+                    sub_vars_dict = {input_pin_ids[0]: input_data}
+                else:
+                    sub_vars_dict = {"input": input_data}
+
+            # Never forward control pins into the child run vars.
+            sub_vars_dict.pop("inherit_context", None)
+            sub_vars_dict.pop("inheritContext", None)
+            sub_vars_dict.pop("child_session_id", None)
+            sub_vars_dict.pop("childSessionId", None)
+            sub_vars_dict.pop("session_id", None)
+            sub_vars_dict.pop("sessionId", None)
+
+            inherit_context_specified = isinstance(input_data, dict) and (
+                "inherit_context" in input_data or "inheritContext" in input_data
+            )
+            if inherit_context_specified:
+                raw_inherit = (
+                    input_data.get("inherit_context")
+                    if isinstance(input_data, dict) and "inherit_context" in input_data
+                    else input_data.get("inheritContext") if isinstance(input_data, dict) else None
+                )
+                inherit_context_value = _coerce_bool(raw_inherit)
+            else:
+                inherit_context_value = inherit_context_default
+
+            pending: Dict[str, Any] = {
+                "output": None,
+                "_pending_effect": (
+                    {
+                        "type": "start_subworkflow",
+                        "workflow_id": subflow_id,
+                        "vars": sub_vars_dict,
+                        # Start subworkflows in async+wait mode so hosts (notably AbstractFlow Web)
+                        # can tick child runs incrementally and stream their node_start/node_complete
+                        # events for better observability (nested/recursive subflows).
+                        #
+                        # Non-interactive hosts (tests/CLI) still complete synchronously because
+                        # FlowRunner.run() auto-drives WAITING(SUBWORKFLOW) children and resumes
+                        # parents until completion.
+                        "async": True,
+                        "wait": True,
+                        **({"inherit_context": True} if inherit_context_value else {}),
+                    }
+                ),
+            }
+
+            # Optional: allow overriding the child session_id (useful for scheduling runs where
+            # each execution should be isolated from others). This is a control field and is
+            # not forwarded into the child vars.
+            if isinstance(input_data, dict):
+                raw_sid = None
+                for k in ("child_session_id", "childSessionId", "session_id", "sessionId"):
+                    if k in input_data:
+                        raw_sid = input_data.get(k)
+                        break
+                if isinstance(raw_sid, str) and raw_sid.strip():
+                    # Keep the key name stable for the runtime handler.
+                    eff = pending.get("_pending_effect")
+                    if isinstance(eff, dict):
+                        eff["session_id"] = raw_sid.strip()
+                        pending["_pending_effect"] = eff
+
+            return pending
+
+        return handler
+
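Shape of the value the subflow handler returns (illustrative ids, not lines from the diff):

    {
        "output": None,
        "_pending_effect": {
            "type": "start_subworkflow",
            "workflow_id": "child-flow",
            "vars": {"query": "..."},
            "async": True,
            "wait": True,
            # plus "inherit_context": True and/or "session_id": "..." when requested
        },
    }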
+    def _create_event_handler(event_type: str, data: Dict[str, Any]):
+        # Event nodes are special: they bridge external inputs / runtime vars into the graph.
+        #
+        # Critical constraint: RunState.vars must remain JSON-serializable for durable execution.
+        # The runtime persists per-node outputs in `vars["_temp"]["node_outputs"]`. If an event node
+        # returns the full `run.vars` dict (which contains `_temp`), we create a self-referential
+        # cycle: `_temp -> node_outputs -> <start_output>['_temp'] -> _temp`, which explodes during
+        # persistence (e.g. JsonFileRunStore uses dataclasses.asdict()).
+        #
+        # Therefore, `on_flow_start` must *not* leak internal namespaces like `_temp` into outputs.
+        start_pin_ids: list[str] = []
+        pins = data.get("outputs") if isinstance(data, dict) else None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("type") == "execution":
+                    continue
+                pid = p.get("id")
+                if isinstance(pid, str) and pid:
+                    start_pin_ids.append(pid)
+
+        def handler(input_data):
+            if event_type == "on_flow_start":
+                # Prefer explicit pins: the visual editor treats non-exec output pins as
+                # "Flow Start Parameters" (initial vars). Only expose those by default.
+                if isinstance(input_data, dict):
+                    defaults_raw = data.get("pinDefaults") if isinstance(data, dict) else None
+                    defaults = defaults_raw if isinstance(defaults_raw, dict) else {}
+                    if start_pin_ids:
+                        out: Dict[str, Any] = {}
+                        for pid in start_pin_ids:
+                            if pid in input_data:
+                                out[pid] = input_data.get(pid)
+                                continue
+                            if isinstance(pid, str) and pid in defaults:
+                                dv = defaults.get(pid)
+                                out[pid] = _clone_default(dv)
+                                # Also seed run.vars for downstream Get Variable / debugging.
+                                if not pid.startswith("_") and pid not in input_data:
+                                    input_data[pid] = _clone_default(dv)
+                                continue
+                            out[pid] = None
+                        return out
+                    # Backward-compat: older/test-created flows may omit pin metadata.
+                    # In that case, expose non-internal keys only (avoid `_temp`, `_limits`, ...).
+                    out2 = {k: v for k, v in input_data.items() if isinstance(k, str) and not k.startswith("_")}
+                    # If pinDefaults exist, apply them for missing non-internal keys.
+                    for k, dv in defaults.items():
+                        if not isinstance(k, str) or not k or k.startswith("_"):
+                            continue
+                        if k in out2 or k in input_data:
+                            continue
+                        out2[k] = _clone_default(dv)
+                        input_data[k] = _clone_default(dv)
+                    return out2
+
+                # Non-dict input: if there is a single declared pin, map into it; otherwise
+                # keep a generic `input` key.
+                if start_pin_ids and len(start_pin_ids) == 1:
+                    return {start_pin_ids[0]: input_data}
+                return {"input": input_data}
+            if event_type == "on_user_request":
+                message = input_data.get("message", "") if isinstance(input_data, dict) else str(input_data)
+                context = input_data.get("context", {}) if isinstance(input_data, dict) else {}
+                return {"message": message, "context": context}
+            if event_type == "on_agent_message":
+                sender = input_data.get("sender", "unknown") if isinstance(input_data, dict) else "unknown"
+                message = input_data.get("message", "") if isinstance(input_data, dict) else str(input_data)
+                channel = data.get("eventConfig", {}).get("channel", "")
+                return {"sender": sender, "message": message, "channel": channel}
+            return input_data
+
+        return handler
+
+    def _create_flow_end_handler(data: Dict[str, Any]):
+        pin_ids: list[str] = []
+        pins = data.get("inputs") if isinstance(data, dict) else None
+        if isinstance(pins, list):
+            for p in pins:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("type") == "execution":
+                    continue
+                pid = p.get("id")
+                if isinstance(pid, str) and pid:
+                    pin_ids.append(pid)
+
+        def handler(input_data: Any):
+            if not pin_ids:
+                if isinstance(input_data, dict):
+                    return dict(input_data)
+                return input_data
+
+            if not isinstance(input_data, dict):
+                if len(pin_ids) == 1:
+                    return {pin_ids[0]: input_data}
+                return {"response": input_data}
+
+            return {pid: input_data.get(pid) for pid in pin_ids}
+
+        return handler
+
+    def _create_expression_handler(expression: str):
+        def handler(input_data):
+            namespace = {"x": input_data, "input": input_data}
+            if isinstance(input_data, dict):
+                namespace.update(input_data)
+            try:
+                return eval(expression, {"__builtins__": {}}, namespace)
+            except Exception as e:
+                return {"error": str(e)}
+
+        return handler
+
+    def _create_if_handler(data: Dict[str, Any]):
+        def handler(input_data):
+            condition = input_data.get("condition") if isinstance(input_data, dict) else bool(input_data)
+            return {"branch": "true" if condition else "false", "condition": condition}
+
+        return handler
+
+    def _create_switch_handler(data: Dict[str, Any]):
+        def handler(input_data):
+            value = input_data.get("value") if isinstance(input_data, dict) else input_data
+
+            config = data.get("switchConfig", {}) if isinstance(data, dict) else {}
+            raw_cases = config.get("cases", []) if isinstance(config, dict) else []
+
+            value_str = "" if value is None else str(value)
+            if isinstance(raw_cases, list):
+                for case in raw_cases:
+                    if not isinstance(case, dict):
+                        continue
+                    case_id = case.get("id")
+                    case_value = case.get("value")
+                    if not isinstance(case_id, str) or not case_id:
+                        continue
+                    if case_value is None:
+                        continue
+                    if value_str == str(case_value):
+                        return {"branch": f"case:{case_id}", "value": value, "matched": str(case_value)}
+
+            return {"branch": "default", "value": value}
+
+        return handler
+
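Illustrative switch outputs (hypothetical config): cases compare by string equality, so a numeric case value still matches string input:

    # switchConfig == {"cases": [{"id": "c1", "value": 404}]}
    handler({"value": "404"})  # -> {"branch": "case:c1", "value": "404", "matched": "404"}
    handler({"value": "500"})  # -> {"branch": "default", "value": "500"}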
+    def _create_while_handler(data: Dict[str, Any]):
+        def handler(input_data):
+            condition = input_data.get("condition") if isinstance(input_data, dict) else bool(input_data)
+            return {"condition": bool(condition)}
+
+        return handler
+
+    def _create_for_handler(data: Dict[str, Any]):
+        def handler(input_data):
+            payload = input_data if isinstance(input_data, dict) else {}
+            start = payload.get("start")
+            end = payload.get("end")
+            step = payload.get("step")
+            return {"start": start, "end": end, "step": step}
+
+        return handler
+
+    def _create_loop_handler(data: Dict[str, Any]):
+        def handler(input_data):
+            items = input_data.get("items") if isinstance(input_data, dict) else input_data
+            if items is None:
+                items = []
+            if not isinstance(items, (list, tuple)):
+                items = [items]
+            items_list = list(items) if isinstance(items, tuple) else list(items)  # type: ignore[arg-type]
+            return {"items": items_list, "count": len(items_list)}
+
+        return handler
+
+    def _coerce_bool(value: Any) -> bool:
+        """Best-effort boolean parsing (handles common string forms)."""
+        if value is None:
+            return False
+        if isinstance(value, bool):
+            return value
+        if isinstance(value, (int, float)):
+            try:
+                return float(value) != 0.0
+            except Exception:
+                return False
+        if isinstance(value, str):
+            s = value.strip().lower()
+            if not s:
+                return False
+            if s in {"false", "0", "no", "off"}:
+                return False
+            if s in {"true", "1", "yes", "on"}:
+                return True
+        return False
+
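Illustrative coercions (not lines from the diff):

    _coerce_bool("Yes")    # True  ("true", "1", "yes", "on")
    _coerce_bool("off")    # False ("false", "0", "no", "off")
    _coerce_bool(2)        # True  (non-zero numbers)
    _coerce_bool("maybe")  # False (unrecognized strings fall through to False)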
1336
+ def _create_effect_handler(effect_type: str, data: Dict[str, Any]):
1337
+ effect_config = data.get("effectConfig", {})
1338
+
1339
+ if effect_type == "ask_user":
1340
+ return _create_ask_user_handler(data, effect_config)
1341
+ if effect_type == "answer_user":
1342
+ return _create_answer_user_handler(data, effect_config)
1343
+ if effect_type == "llm_call":
1344
+ return _create_llm_call_handler(data, effect_config)
1345
+ if effect_type == "tool_calls":
1346
+ return _create_tool_calls_handler(data, effect_config)
1347
+ if effect_type == "call_tool":
1348
+ return _create_call_tool_handler(data, effect_config)
1349
+ if effect_type == "wait_until":
1350
+ return _create_wait_until_handler(data, effect_config)
1351
+ if effect_type == "wait_event":
1352
+ return _create_wait_event_handler(data, effect_config)
1353
+ if effect_type == "memory_note":
1354
+ return _create_memory_note_handler(data, effect_config)
1355
+ if effect_type == "memory_query":
1356
+ return _create_memory_query_handler(data, effect_config)
1357
+ if effect_type == "memory_tag":
1358
+ return _create_memory_tag_handler(data, effect_config)
1359
+ if effect_type == "memory_compact":
1360
+ return _create_memory_compact_handler(data, effect_config)
1361
+ if effect_type == "memory_rehydrate":
1362
+ return _create_memory_rehydrate_handler(data, effect_config)
1363
+ if effect_type == "memory_kg_assert":
1364
+ return _create_memory_kg_assert_handler(data, effect_config)
1365
+ if effect_type == "memory_kg_query":
1366
+ return _create_memory_kg_query_handler(data, effect_config)
1367
+ if effect_type == "memory_kg_resolve":
1368
+ return _create_memory_kg_resolve_handler(data, effect_config)
1369
+
1370
+ return lambda x: x
1371
+
1372
+    def _create_tool_calls_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        import json
+
+        allowed_default = None
+        if isinstance(config, dict):
+            raw = config.get("allowed_tools")
+            if raw is None:
+                raw = config.get("allowedTools")
+            allowed_default = raw
+
+        def _normalize_str_list(raw: Any) -> list[str]:
+            if not isinstance(raw, list):
+                return []
+            out: list[str] = []
+            for x in raw:
+                if isinstance(x, str) and x.strip():
+                    out.append(x.strip())
+            return out
+
+        def _normalize_tool_calls(raw: Any) -> list[Dict[str, Any]]:
+            if raw is None:
+                return []
+            if isinstance(raw, dict):
+                return [dict(raw)]
+            if isinstance(raw, list):
+                out: list[Dict[str, Any]] = []
+                for x in raw:
+                    if isinstance(x, dict):
+                        out.append(dict(x))
+                return out
+            if isinstance(raw, str) and raw.strip():
+                # Best-effort: tolerate JSON strings coming from parse_json/text nodes.
+                try:
+                    parsed = json.loads(raw)
+                except Exception:
+                    return []
+                return _normalize_tool_calls(parsed)
+            return []
+
+        def handler(input_data: Any):
+            payload = input_data if isinstance(input_data, dict) else {}
+
+            tool_calls_raw = payload.get("tool_calls")
+            tool_calls = _normalize_tool_calls(tool_calls_raw)
+
+            allow_specified = "allowed_tools" in payload or "allowedTools" in payload
+            allowed_raw = payload.get("allowed_tools")
+            if allowed_raw is None:
+                allowed_raw = payload.get("allowedTools")
+            allowed_tools = _normalize_str_list(allowed_raw) if allow_specified else []
+            if not allow_specified:
+                allowed_tools = _normalize_str_list(allowed_default)
+
+            pending: Dict[str, Any] = {"type": "tool_calls", "tool_calls": tool_calls}
+            # Only include allowlist when explicitly provided (empty list means "allow none").
+            if allow_specified or isinstance(allowed_default, list):
+                pending["allowed_tools"] = allowed_tools
+
+            return {
+                "results": None,
+                "success": None,
+                "_pending_effect": pending,
+            }
+
+        return handler
+
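`_normalize_tool_calls` accepts a single dict, a list of dicts, or a JSON string, and silently drops anything else. A standalone sketch of the same shape tolerance (`normalize_tool_calls` is a copy for illustration, not the package's symbol):

    import json
    from typing import Any, Dict, List

    def normalize_tool_calls(raw: Any) -> List[Dict[str, Any]]:
        # Same shape tolerance as _normalize_tool_calls above: dict, list of
        # dicts, or a JSON string that decodes to one of those.
        if raw is None:
            return []
        if isinstance(raw, dict):
            return [dict(raw)]
        if isinstance(raw, list):
            return [dict(x) for x in raw if isinstance(x, dict)]
        if isinstance(raw, str) and raw.strip():
            try:
                return normalize_tool_calls(json.loads(raw))
            except Exception:
                return []
        return []

    assert normalize_tool_calls({"name": "read_file"}) == [{"name": "read_file"}]
    assert normalize_tool_calls('[{"name": "read_file"}]') == [{"name": "read_file"}]
    assert normalize_tool_calls("not json") == []
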
+    def _create_call_tool_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        import json
+
+        allowed_default = None
+        if isinstance(config, dict):
+            raw = config.get("allowed_tools")
+            if raw is None:
+                raw = config.get("allowedTools")
+            allowed_default = raw
+
+        def _normalize_str_list(raw: Any) -> list[str]:
+            if not isinstance(raw, list):
+                return []
+            out: list[str] = []
+            for x in raw:
+                if isinstance(x, str) and x.strip():
+                    out.append(x.strip())
+            return out
+
+        def _normalize_tool_call(raw: Any) -> Optional[Dict[str, Any]]:
+            if raw is None:
+                return None
+            if isinstance(raw, dict):
+                return dict(raw)
+            if isinstance(raw, str) and raw.strip():
+                # Best-effort: tolerate JSON strings coming from stringify/parse nodes.
+                try:
+                    parsed = json.loads(raw)
+                except Exception:
+                    return None
+                if isinstance(parsed, dict):
+                    return dict(parsed)
+            return None
+
+        def handler(input_data: Any):
+            payload = input_data if isinstance(input_data, dict) else {}
+
+            raw_call = payload.get("tool_call")
+            if raw_call is None:
+                raw_call = payload.get("toolCall")
+            tool_call = _normalize_tool_call(raw_call)
+            tool_calls = [tool_call] if isinstance(tool_call, dict) else []
+
+            allow_specified = "allowed_tools" in payload or "allowedTools" in payload
+            allowed_raw = payload.get("allowed_tools")
+            if allowed_raw is None:
+                allowed_raw = payload.get("allowedTools")
+            allowed_tools = _normalize_str_list(allowed_raw) if allow_specified else []
+            if not allow_specified:
+                allowed_tools = _normalize_str_list(allowed_default)
+
+            pending: Dict[str, Any] = {"type": "tool_calls", "tool_calls": tool_calls}
+            # Only include allowlist when explicitly provided (empty list means "allow none").
+            if allow_specified or isinstance(allowed_default, list):
+                pending["allowed_tools"] = allowed_tools
+
+            return {
+                "result": None,
+                "success": None,
+                "_pending_effect": pending,
+            }
+
+        return handler
+
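Both tool handlers share the allowlist rule spelled out in the comment: `allowed_tools` is attached only when the pin was explicitly wired or the node config carries a list default, because an empty list means "allow none" rather than "allow everything". A minimal standalone sketch of that rule (`build_pending` is illustrative; the real handlers also normalize the values):

    from typing import Any, Dict

    def build_pending(payload: Dict[str, Any], allowed_default: Any) -> Dict[str, Any]:
        # Attach `allowed_tools` only when the pin is explicitly present or the
        # node config provides a list default; an empty list means "allow none".
        specified = "allowed_tools" in payload
        pending: Dict[str, Any] = {"type": "tool_calls"}
        if specified or isinstance(allowed_default, list):
            pending["allowed_tools"] = payload.get("allowed_tools") if specified else allowed_default
        return pending

    assert "allowed_tools" not in build_pending({}, None)                      # unrestricted
    assert build_pending({"allowed_tools": []}, None)["allowed_tools"] == []   # allow none
    assert build_pending({}, ["read_file"])["allowed_tools"] == ["read_file"]  # config default
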
+    def _create_ask_user_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            prompt = input_data.get("prompt", "Please respond:") if isinstance(input_data, dict) else str(input_data)
+            choices = input_data.get("choices", []) if isinstance(input_data, dict) else []
+            allow_free_text = config.get("allowFreeText", True)
+
+            return {
+                "response": f"[User prompt: {prompt}]",
+                "prompt": prompt,
+                "choices": choices,
+                "allow_free_text": allow_free_text,
+                "_pending_effect": {
+                    "type": "ask_user",
+                    "prompt": prompt,
+                    "choices": choices,
+                    "allow_free_text": allow_free_text,
+                },
+            }
+
+        return handler
+
+    def _create_answer_user_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            message = input_data.get("message", "") if isinstance(input_data, dict) else str(input_data or "")
+            raw_level = input_data.get("level") if isinstance(input_data, dict) else None
+            level = str(raw_level).strip().lower() if isinstance(raw_level, str) else ""
+            if level == "warn":
+                level = "warning"
+            if level == "info":
+                level = "message"
+            if level not in {"message", "warning", "error"}:
+                level = "message"
+            return {"message": message, "level": level, "_pending_effect": {"type": "answer_user", "message": message, "level": level}}
+
+        return handler
+
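The level normalization above folds aliases before validating against the allowed set. The same logic as a standalone function (illustrative only):

    def normalize_level(raw: object) -> str:
        # Alias folding used by the answer_user handler above:
        # "warn" -> "warning", "info" -> "message", unknown -> "message".
        level = str(raw).strip().lower() if isinstance(raw, str) else ""
        if level == "warn":
            level = "warning"
        if level == "info":
            level = "message"
        return level if level in {"message", "warning", "error"} else "message"

    assert normalize_level("WARN") == "warning"
    assert normalize_level("info") == "message"
    assert normalize_level(None) == "message"
    assert normalize_level("error") == "error"
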
+    def _create_llm_call_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        provider_default = config.get("provider", "")
+        model_default = config.get("model", "")
+        temperature = config.get("temperature", 0.7)
+        seed_default = config.get("seed", -1)
+        tools_default_raw = config.get("tools")
+        include_context_cfg = config.get("include_context")
+        if include_context_cfg is None:
+            include_context_cfg = config.get("use_context")
+        include_context_default = _coerce_bool(include_context_cfg) if include_context_cfg is not None else False
+
+        max_input_tokens_default = config.get("max_input_tokens")
+        if max_input_tokens_default is None:
+            max_input_tokens_default = config.get("maxInputTokens")
+
+        max_output_tokens_default = config.get("max_output_tokens")
+        if max_output_tokens_default is None:
+            max_output_tokens_default = config.get("maxOutputTokens")
+
+        structured_output_fallback_cfg = config.get("structured_output_fallback")
+        structured_output_fallback_default = (
+            _coerce_bool(structured_output_fallback_cfg) if structured_output_fallback_cfg is not None else False
+        )
+
+        # Tool definitions (ToolSpecs) are required for tool calling. In the visual editor we
+        # store tools as a portable `string[]` allowlist; at execution time we translate to
+        # strict ToolSpecs `{name, description, parameters}` expected by AbstractCore.
+        def _strip_tool_spec(raw: Any) -> Optional[Dict[str, Any]]:
+            if not isinstance(raw, dict):
+                return None
+            name = raw.get("name")
+            if not isinstance(name, str) or not name.strip():
+                return None
+            desc = raw.get("description")
+            params = raw.get("parameters")
+            out: Dict[str, Any] = {
+                "name": name.strip(),
+                "description": str(desc or ""),
+                "parameters": dict(params) if isinstance(params, dict) else {},
+            }
+            return out
+
+        def _normalize_tool_names(raw: Any) -> list[str]:
+            if not isinstance(raw, list):
+                return []
+            out: list[str] = []
+            for t in raw:
+                if isinstance(t, str) and t.strip():
+                    out.append(t.strip())
+            return out
+
+        # Precompute a best-effort "available ToolSpecs by name" map so we can turn tool names
+        # into ToolSpecs without going through the web backend.
+        tool_specs_by_name: Dict[str, Dict[str, Any]] = {}
+        try:
+            from abstractruntime.integrations.abstractcore.default_tools import list_default_tool_specs
+
+            base_specs = list_default_tool_specs()
+            if not isinstance(base_specs, list):
+                base_specs = []
+            for s in base_specs:
+                stripped = _strip_tool_spec(s)
+                if stripped is not None:
+                    tool_specs_by_name[stripped["name"]] = stripped
+        except Exception:
+            pass
+
+        # Optional schema-only runtime tools (used by AbstractAgent). These are useful for
+        # "state machine" autonomy where the graph can route tool-like requests to effect nodes.
+        try:
+            from abstractagent.logic.builtins import (  # type: ignore
+                ASK_USER_TOOL,
+                COMPACT_MEMORY_TOOL,
+                INSPECT_VARS_TOOL,
+                RECALL_MEMORY_TOOL,
+                REMEMBER_TOOL,
+            )
+
+            builtin_defs = [ASK_USER_TOOL, RECALL_MEMORY_TOOL, INSPECT_VARS_TOOL, REMEMBER_TOOL, COMPACT_MEMORY_TOOL]
+            for tool_def in builtin_defs:
+                try:
+                    d = tool_def.to_dict()
+                except Exception:
+                    d = None
+                stripped = _strip_tool_spec(d)
+                if stripped is not None and stripped["name"] not in tool_specs_by_name:
+                    tool_specs_by_name[stripped["name"]] = stripped
+        except Exception:
+            pass
+
+        def _normalize_tools(raw: Any) -> list[Dict[str, Any]]:
+            # Already ToolSpecs (from pins): accept and strip UI-only fields.
+            if isinstance(raw, list) and raw and all(isinstance(x, dict) for x in raw):
+                out: list[Dict[str, Any]] = []
+                for x in raw:
+                    stripped = _strip_tool_spec(x)
+                    if stripped is not None:
+                        out.append(stripped)
+                return out
+
+            # Tool names (portable representation): resolve against known tool specs.
+            names = _normalize_tool_names(raw)
+            out: list[Dict[str, Any]] = []
+            for name in names:
+                spec = tool_specs_by_name.get(name)
+                if spec is not None:
+                    out.append(spec)
+            return out
+
+        def _normalize_response_schema(raw: Any) -> Optional[Dict[str, Any]]:
+            """Normalize a structured-output schema input into a JSON Schema dict.
+
+            Supported inputs (best-effort):
+            - JSON Schema dict: {"type":"object","properties":{...}, ...}
+            - LMStudio/OpenAI-style wrapper: {"type":"json_schema","json_schema": {"schema": {...}}}
+            """
+            if raw is None:
+                return None
+
+            schema: Optional[Dict[str, Any]] = None
+            if isinstance(raw, dict):
+                # Wrapper form (OpenAI "response_format": {type:"json_schema", json_schema:{schema:{...}}})
+                if raw.get("type") == "json_schema" and isinstance(raw.get("json_schema"), dict):
+                    inner = raw.get("json_schema")
+                    if isinstance(inner, dict) and isinstance(inner.get("schema"), dict):
+                        schema = dict(inner.get("schema") or {})
+                else:
+                    # Plain JSON Schema dict
+                    schema = dict(raw)
+
+            if not isinstance(schema, dict) or not schema:
+                return None
+
+            # Resolve stable schema refs (no silent fallback).
+            ref = schema.get("$ref")
+            if isinstance(ref, str) and ref.strip().startswith("abstractsemantics:"):
+                try:
+                    from abstractsemantics import resolve_schema_ref  # type: ignore
+                except Exception as e:
+                    import sys
+
+                    raise RuntimeError(
+                        "Structured-output schema ref requires `abstractsemantics` to be installed in the same "
+                        f"Python environment as the runtime/gateway (cannot resolve $ref={ref!r}). "
+                        f"Current python: {sys.executable}. Install with: `pip install abstractsemantics`."
+                    ) from e
+                resolved = resolve_schema_ref(schema)
+                if isinstance(resolved, dict) and resolved:
+                    return resolved
+                raise RuntimeError(f"Unknown structured-output schema ref: {ref}")
+
+            return schema
+
+        def handler(input_data):
+            if isinstance(input_data, dict):
+                raw_prompt = input_data.get("prompt")
+                prompt = "" if raw_prompt is None else str(raw_prompt)
+            else:
+                prompt = str(input_data)
+            system = input_data.get("system", "") if isinstance(input_data, dict) else ""
+
+            tools_specified = isinstance(input_data, dict) and "tools" in input_data
+            tools_raw = input_data.get("tools") if isinstance(input_data, dict) else None
+            tools = _normalize_tools(tools_raw) if tools_specified else []
+            if not tools_specified:
+                tools = _normalize_tools(tools_default_raw)
+
+            include_context_specified = isinstance(input_data, dict) and (
+                "include_context" in input_data or "use_context" in input_data
+            )
+            if include_context_specified:
+                raw_inc = (
+                    input_data.get("include_context")
+                    if isinstance(input_data, dict) and "include_context" in input_data
+                    else input_data.get("use_context") if isinstance(input_data, dict) else None
+                )
+                include_context_value = _coerce_bool(raw_inc)
+            else:
+                include_context_value = include_context_default
+
+            max_input_tokens_value: Optional[int] = None
+            raw_max_in: Any = None
+            if isinstance(input_data, dict):
+                if "max_in_tokens" in input_data:
+                    raw_max_in = input_data.get("max_in_tokens")
+                elif "max_input_tokens" in input_data:
+                    raw_max_in = input_data.get("max_input_tokens")
+                elif "maxInputTokens" in input_data:
+                    raw_max_in = input_data.get("maxInputTokens")
+            if raw_max_in is None:
+                raw_max_in = max_input_tokens_default
+
+            try:
+                if raw_max_in is not None and not isinstance(raw_max_in, bool):
+                    parsed = int(raw_max_in)
+                    if parsed > 0:
+                        max_input_tokens_value = parsed
+            except Exception:
+                max_input_tokens_value = None
+
+            max_output_tokens_value: Optional[int] = None
+            raw_max_out: Any = None
+            if isinstance(input_data, dict):
+                if "max_out_tokens" in input_data:
+                    raw_max_out = input_data.get("max_out_tokens")
+                elif "max_output_tokens" in input_data:
+                    raw_max_out = input_data.get("max_output_tokens")
+                elif "maxOutputTokens" in input_data:
+                    raw_max_out = input_data.get("maxOutputTokens")
+            if raw_max_out is None:
+                raw_max_out = max_output_tokens_default
+            try:
+                if raw_max_out is not None and not isinstance(raw_max_out, bool):
+                    parsed = int(raw_max_out)
+                    if parsed > 0:
+                        max_output_tokens_value = parsed
+            except Exception:
+                max_output_tokens_value = None
+
+            provider = (
+                input_data.get("provider")
+                if isinstance(input_data, dict) and isinstance(input_data.get("provider"), str)
+                else provider_default
+            )
+            model = (
+                input_data.get("model")
+                if isinstance(input_data, dict) and isinstance(input_data.get("model"), str)
+                else model_default
+            )
+
+            # Allow pins to override sampling params.
+            temperature_value = temperature
+            if isinstance(input_data, dict) and "temperature" in input_data:
+                raw_temp = input_data.get("temperature")
+                try:
+                    if raw_temp is not None and not isinstance(raw_temp, bool):
+                        temperature_value = float(raw_temp)
+                except Exception:
+                    pass
+
+            seed_value_raw: Any = seed_default
+            if isinstance(input_data, dict) and "seed" in input_data:
+                seed_value_raw = input_data.get("seed")
+            seed_value = -1
+            try:
+                if seed_value_raw is not None and not isinstance(seed_value_raw, bool):
+                    seed_value = int(seed_value_raw)
+            except Exception:
+                seed_value = -1
+
+            params: Dict[str, Any] = {"temperature": float(temperature_value)}
+            if seed_value >= 0:
+                params["seed"] = seed_value
+            if isinstance(max_output_tokens_value, int) and max_output_tokens_value > 0:
+                params["max_output_tokens"] = int(max_output_tokens_value)
+
+            # Memory-source access pins (467): pass through for compiler/runtime pre-call scheduling.
+            #
+            # NOTE: we intentionally keep values JSON-safe and mostly unmodified here.
+            # The compiler layer applies policy defaults/clamping and maps these into
+            # MEMORY_* effects or LLM params as needed.
+            mem_cfg: Dict[str, Any] = {}
+            if isinstance(input_data, dict):
+                # v0: memory object pin (preferred). Expand into legacy per-pin keys so
+                # the compiler/runtime logic remains backward compatible.
+                mem_obj = input_data.get("memory")
+                if isinstance(mem_obj, dict):
+                    for bool_key in (
+                        "use_session_attachments",
+                        "use_span_memory",
+                        "use_semantic_search",
+                        "use_kg_memory",
+                    ):
+                        if bool_key in input_data:
+                            continue
+                        if bool_key in mem_obj and mem_obj.get(bool_key) is not None:
+                            mem_cfg[bool_key] = _coerce_bool(mem_obj.get(bool_key))
+
+                    for raw_key in (
+                        "memory_query",
+                        "memory_scope",
+                        "recall_level",
+                        "max_span_messages",
+                        "kg_max_input_tokens",
+                        "kg_limit",
+                        "kg_min_score",
+                    ):
+                        if raw_key in input_data:
+                            continue
+                        if raw_key in mem_obj and mem_obj.get(raw_key) is not None:
+                            mem_cfg[raw_key] = mem_obj.get(raw_key)
+
+                for bool_key in (
+                    "use_session_attachments",
+                    "use_span_memory",
+                    "use_semantic_search",
+                    "use_kg_memory",
+                ):
+                    if bool_key in input_data:
+                        mem_cfg[bool_key] = _coerce_bool(input_data.get(bool_key))
+
+                for raw_key in (
+                    "memory_query",
+                    "memory_scope",
+                    "recall_level",
+                    "max_span_messages",
+                    "kg_max_input_tokens",
+                    "kg_limit",
+                    "kg_min_score",
+                ):
+                    if raw_key in input_data:
+                        mem_cfg[raw_key] = input_data.get(raw_key)
+
+            if not provider or not model:
+                return {
+                    "response": "[LLM Call: missing provider/model]",
+                    "_pending_effect": {
+                        "type": "llm_call",
+                        "prompt": prompt,
+                        "system_prompt": system,
+                        "tools": tools,
+                        "params": dict(params),
+                        "include_context": include_context_value,
+                        **mem_cfg,
+                    },
+                    "error": "Missing provider or model configuration",
+                }
+
+            response_schema = None
+            if isinstance(input_data, dict) and ("resp_schema" in input_data or "response_schema" in input_data):
+                response_schema = _normalize_response_schema(
+                    input_data.get("resp_schema")
+                    if "resp_schema" in input_data
+                    else input_data.get("response_schema")
+                )
+
+            pending: Dict[str, Any] = {
+                "type": "llm_call",
+                "prompt": prompt,
+                "system_prompt": system,
+                "tools": tools,
+                "params": dict(params),
+                "provider": provider,
+                "model": model,
+                "include_context": include_context_value,
+                **mem_cfg,
+            }
+            if isinstance(max_input_tokens_value, int) and max_input_tokens_value > 0:
+                pending["max_input_tokens"] = int(max_input_tokens_value)
+            if isinstance(response_schema, dict) and response_schema:
+                pending["response_schema"] = response_schema
+                # Name is optional; AbstractRuntime will fall back to a safe default.
+                pending["response_schema_name"] = "LLM_StructuredOutput"
+                if structured_output_fallback_default:
+                    pending["structured_output_fallback"] = True
+
+            # Optional explicit context pin: if provided, context.messages overrides inherited run context messages.
+            context_msgs: list[Dict[str, Any]] = []
+            context_media_override: Optional[list[Any]] = None
+            if isinstance(input_data, dict):
+                context_raw = input_data.get("context")
+                context_raw = context_raw if isinstance(context_raw, dict) else {}
+                raw_msgs = context_raw.get("messages")
+                if isinstance(raw_msgs, list):
+                    context_msgs = [dict(m) for m in raw_msgs if isinstance(m, dict)]
+                # Attachments are passed as `context.attachments` and mapped into `payload.media`
+                # for the underlying LLM_CALL effect. This keeps the durable context object portable
+                # while letting LLM execution consume media in a single, flat field.
+                if "attachments" in context_raw:
+                    raw_attachments = context_raw.get("attachments")
+                    if isinstance(raw_attachments, list):
+                        cleaned: list[Any] = []
+                        for a in raw_attachments:
+                            if isinstance(a, dict):
+                                cleaned.append(dict(a))
+                            elif isinstance(a, str) and a.strip():
+                                cleaned.append(a.strip())
+                        # Preserve explicit empty list (means "no attachments for this call").
+                        context_media_override = cleaned
+            if context_media_override is not None:
+                pending["media"] = context_media_override
+            if context_msgs:
+                messages = list(context_msgs)
+                sys_text = str(system or "").strip() if isinstance(system, str) else ""
+                if sys_text:
+                    insert_at = 0
+                    while insert_at < len(messages):
+                        if messages[insert_at].get("role") != "system":
+                            break
+                        insert_at += 1
+                    messages.insert(insert_at, {"role": "system", "content": sys_text})
+                messages.append({"role": "user", "content": str(prompt or "")})
+
+                if isinstance(max_input_tokens_value, int) and max_input_tokens_value > 0:
+                    try:
+                        from abstractruntime.memory.token_budget import (
+                            trim_messages_to_max_input_tokens,
+                        )
+
+                        messages = trim_messages_to_max_input_tokens(
+                            messages,
+                            max_input_tokens=int(max_input_tokens_value),
+                            model=model if isinstance(model, str) else None,
+                        )
+                    except Exception:
+                        pass
+
+                pending["messages"] = messages
+                pending.pop("prompt", None)
+                pending.pop("system_prompt", None)
+
+            return {
+                "response": None,
+                "_pending_effect": pending,
+            }
+
+        return handler
+
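`_normalize_response_schema` accepts either a plain JSON Schema dict or the OpenAI-style `response_format` wrapper. A self-contained sketch of those two dict shapes (the `abstractsemantics` `$ref` resolution branch is omitted; `normalize_schema` is illustrative, not the package's symbol):

    from typing import Any, Dict, Optional

    def normalize_schema(raw: Any) -> Optional[Dict[str, Any]]:
        # The two plain-dict shapes accepted by _normalize_response_schema above.
        if not isinstance(raw, dict):
            return None
        if raw.get("type") == "json_schema" and isinstance(raw.get("json_schema"), dict):
            inner = raw["json_schema"]
            schema = dict(inner["schema"]) if isinstance(inner.get("schema"), dict) else None
        else:
            schema = dict(raw)
        return schema if isinstance(schema, dict) and schema else None

    plain = {"type": "object", "properties": {"name": {"type": "string"}}}
    wrapped = {"type": "json_schema", "json_schema": {"schema": plain}}
    assert normalize_schema(plain) == plain
    assert normalize_schema(wrapped) == plain
    assert normalize_schema("not a schema") is None
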
+    def _create_model_catalog_handler(data: Dict[str, Any]):
+        cfg = data.get("modelCatalogConfig", {}) if isinstance(data, dict) else {}
+        cfg = dict(cfg) if isinstance(cfg, dict) else {}
+
+        allowed_providers_default = cfg.get("allowedProviders")
+        allowed_models_default = cfg.get("allowedModels")
+        index_default = cfg.get("index", 0)
+
+        def _as_str_list(raw: Any) -> list[str]:
+            if not isinstance(raw, list):
+                return []
+            out: list[str] = []
+            for x in raw:
+                if isinstance(x, str) and x.strip():
+                    out.append(x.strip())
+            return out
+
+        def handler(input_data: Any):
+            # Allow pin-based overrides (data edges) while keeping node config as defaults.
+            allowed_providers = _as_str_list(
+                input_data.get("allowed_providers") if isinstance(input_data, dict) else None
+            ) or _as_str_list(allowed_providers_default)
+            allowed_models = _as_str_list(
+                input_data.get("allowed_models") if isinstance(input_data, dict) else None
+            ) or _as_str_list(allowed_models_default)
+
+            idx_raw = input_data.get("index") if isinstance(input_data, dict) else None
+            try:
+                idx = int(idx_raw) if idx_raw is not None else int(index_default or 0)
+            except Exception:
+                idx = 0
+            if idx < 0:
+                idx = 0
+
+            try:
+                from abstractcore.providers.registry import get_all_providers_with_models, get_available_models_for_provider
+            except Exception:
+                return {"providers": [], "models": [], "pair": None, "provider": "", "model": ""}
+
+            providers_meta = get_all_providers_with_models(include_models=False)
+            available_providers: list[str] = []
+            for p in providers_meta:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("status") != "available":
+                    continue
+                name = p.get("name")
+                if isinstance(name, str) and name.strip():
+                    available_providers.append(name.strip())
+
+            if allowed_providers:
+                allow = {x.lower(): x for x in allowed_providers}
+                available_providers = [p for p in available_providers if p.lower() in allow]
+
+            pairs: list[dict[str, str]] = []
+            model_ids: list[str] = []
+
+            allow_models_norm = {m.strip() for m in allowed_models if isinstance(m, str) and m.strip()}
+
+            for provider in available_providers:
+                try:
+                    models = get_available_models_for_provider(provider)
+                except Exception:
+                    models = []
+                if not isinstance(models, list):
+                    models = []
+                for m in models:
+                    if not isinstance(m, str) or not m.strip():
+                        continue
+                    model = m.strip()
+                    mid = f"{provider}/{model}"
+                    if allow_models_norm:
+                        # Accept either full ids or raw model names.
+                        if mid not in allow_models_norm and model not in allow_models_norm:
+                            continue
+                    pairs.append({"provider": provider, "model": model, "id": mid})
+                    model_ids.append(mid)
+
+            selected = pairs[idx] if pairs and idx < len(pairs) else (pairs[0] if pairs else None)
+            return {
+                "providers": available_providers,
+                "models": model_ids,
+                "pair": selected,
+                "provider": selected.get("provider", "") if isinstance(selected, dict) else "",
+                "model": selected.get("model", "") if isinstance(selected, dict) else "",
+            }
+
+        return handler
+
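The catalog filter treats an allowlist entry as either a raw model name or a full `provider/model` id. A standalone sketch of that matching rule (`filter_pairs` is illustrative):

    from typing import Dict, List, Set

    def filter_pairs(provider: str, models: List[str], allow: Set[str]) -> List[Dict[str, str]]:
        # An allowlist entry may be a raw model name or a full "provider/model"
        # id, matching the catalog filter above. An empty allowlist keeps all.
        out: List[Dict[str, str]] = []
        for model in models:
            mid = f"{provider}/{model}"
            if allow and mid not in allow and model not in allow:
                continue
            out.append({"provider": provider, "model": model, "id": mid})
        return out

    pairs = filter_pairs("ollama", ["llama3", "qwen3"], {"ollama/llama3"})
    assert pairs == [{"provider": "ollama", "model": "llama3", "id": "ollama/llama3"}]
    assert len(filter_pairs("ollama", ["llama3", "qwen3"], set())) == 2
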
+    def _create_provider_catalog_handler(data: Dict[str, Any]):
+        def _as_str_list(raw: Any) -> list[str]:
+            if not isinstance(raw, list):
+                return []
+            out: list[str] = []
+            for x in raw:
+                if isinstance(x, str) and x.strip():
+                    out.append(x.strip())
+            return out
+
+        def handler(input_data: Any):
+            allowed_providers = _as_str_list(
+                input_data.get("allowed_providers") if isinstance(input_data, dict) else None
+            )
+
+            try:
+                from abstractcore.providers.registry import get_all_providers_with_models
+            except Exception:
+                return {"providers": []}
+
+            providers_meta = get_all_providers_with_models(include_models=False)
+            available: list[str] = []
+            for p in providers_meta:
+                if not isinstance(p, dict):
+                    continue
+                if p.get("status") != "available":
+                    continue
+                name = p.get("name")
+                if isinstance(name, str) and name.strip():
+                    available.append(name.strip())
+
+            if allowed_providers:
+                allow = {x.lower() for x in allowed_providers}
+                available = [p for p in available if p.lower() in allow]
+
+            return {"providers": available}
+
+        return handler
+
+    def _create_provider_models_handler(data: Dict[str, Any]):
+        cfg = data.get("providerModelsConfig", {}) if isinstance(data, dict) else {}
+        cfg = dict(cfg) if isinstance(cfg, dict) else {}
+
+        def _as_str_list(raw: Any) -> list[str]:
+            if not isinstance(raw, list):
+                return []
+            out: list[str] = []
+            for x in raw:
+                if isinstance(x, str) and x.strip():
+                    out.append(x.strip())
+            return out
+
+        def handler(input_data: Any):
+            provider = None
+            if isinstance(input_data, dict) and isinstance(input_data.get("provider"), str):
+                provider = input_data.get("provider")
+            if not provider and isinstance(cfg.get("provider"), str):
+                provider = cfg.get("provider")
+
+            provider = str(provider or "").strip()
+            if not provider:
+                return {"provider": "", "models": []}
+
+            allowed_models = _as_str_list(
+                input_data.get("allowed_models") if isinstance(input_data, dict) else None
+            )
+            if not allowed_models:
+                # Optional allowlist from node config when the pin isn't connected.
+                allowed_models = _as_str_list(cfg.get("allowedModels")) or _as_str_list(cfg.get("allowed_models"))
+            allow = {m for m in allowed_models if m}
+
+            try:
+                from abstractcore.providers.registry import get_available_models_for_provider
+            except Exception:
+                return {"provider": provider, "models": []}
+
+            try:
+                models = get_available_models_for_provider(provider)
+            except Exception:
+                models = []
+            if not isinstance(models, list):
+                models = []
+
+            out: list[str] = []
+            for m in models:
+                if not isinstance(m, str) or not m.strip():
+                    continue
+                name = m.strip()
+                mid = f"{provider}/{name}"
+                if allow and (name not in allow and mid not in allow):
+                    continue
+                out.append(name)
+
+            return {"provider": provider, "models": out}
+
+        return handler
+
+    def _create_wait_until_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        from datetime import datetime as _dt, timedelta, timezone
+
+        duration_type = config.get("durationType", "seconds")
+
+        def handler(input_data):
+            duration = input_data.get("duration", 0) if isinstance(input_data, dict) else 0
+
+            try:
+                amount = float(duration)
+            except (TypeError, ValueError):
+                amount = 0
+
+            now = _dt.now(timezone.utc)
+            if duration_type == "timestamp":
+                until = str(duration or "")
+            elif duration_type == "minutes":
+                until = (now + timedelta(minutes=amount)).isoformat()
+            elif duration_type == "hours":
+                until = (now + timedelta(hours=amount)).isoformat()
+            else:
+                until = (now + timedelta(seconds=amount)).isoformat()
+
+            return {"_pending_effect": {"type": "wait_until", "until": until}}
+
+        return handler
+
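Relative durations are resolved to an absolute UTC ISO timestamp at handler time, so the durable `wait_until` effect carries a fixed deadline across restarts; only the `timestamp` duration type passes the value through unchanged. A standalone sketch (`compute_until` is illustrative):

    from datetime import datetime, timedelta, timezone

    def compute_until(duration: object, duration_type: str) -> str:
        # Relative durations become an absolute UTC ISO timestamp now;
        # "timestamp" passes the raw value through, as in the handler above.
        try:
            amount = float(duration)  # type: ignore[arg-type]
        except (TypeError, ValueError):
            amount = 0.0
        if duration_type == "timestamp":
            return str(duration or "")
        steps = {"minutes": timedelta(minutes=amount), "hours": timedelta(hours=amount)}
        delta = steps.get(duration_type, timedelta(seconds=amount))
        return (datetime.now(timezone.utc) + delta).isoformat()

    assert compute_until("2026-01-01T00:00:00Z", "timestamp") == "2026-01-01T00:00:00Z"
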
+    def _create_wait_event_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            # `wait_event` is a durable pause that waits for an external signal.
+            #
+            # Input shape (best-effort):
+            # - event_key: str (required; defaults to "default" for backward-compat)
+            # - prompt: str (optional; enables human-in-the-loop UX for EVENT waits)
+            # - choices: list[str] (optional)
+            # - allow_free_text: bool (optional; default True)
+            #
+            # NOTE: The compiler will wrap `_pending_effect` into an AbstractRuntime Effect payload.
+            event_key = input_data.get("event_key", "default") if isinstance(input_data, dict) else str(input_data)
+            prompt = None
+            choices = None
+            allow_free_text = True
+            if isinstance(input_data, dict):
+                p = input_data.get("prompt")
+                if isinstance(p, str) and p.strip():
+                    prompt = p
+                ch = input_data.get("choices")
+                if isinstance(ch, list):
+                    # Keep choices JSON-safe and predictable.
+                    choices = [str(c) for c in ch if isinstance(c, str) and str(c).strip()]
+                aft = input_data.get("allow_free_text")
+                if aft is None:
+                    aft = input_data.get("allowFreeText")
+                if aft is not None:
+                    allow_free_text = bool(aft)
+
+            pending: Dict[str, Any] = {"type": "wait_event", "wait_key": event_key}
+            if prompt is not None:
+                pending["prompt"] = prompt
+            if isinstance(choices, list):
+                pending["choices"] = choices
+            # Always include allow_free_text so hosts can render consistent UX.
+            pending["allow_free_text"] = allow_free_text
+            return {
+                "event_data": {},
+                "event_key": event_key,
+                "_pending_effect": pending,
+            }
+
+        return handler
+
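A standalone sketch of the pending payload this handler builds, showing the choice filtering and the always-present `allow_free_text` flag (`build_wait_event` is illustrative and skips the camelCase `allowFreeText` alias handled above):

    from typing import Any, Dict

    def build_wait_event(payload: Dict[str, Any]) -> Dict[str, Any]:
        # Optional prompt and choices, plus an always-present allow_free_text
        # flag so hosts can render a consistent waiting UX.
        pending: Dict[str, Any] = {"type": "wait_event", "wait_key": payload.get("event_key", "default")}
        prompt = payload.get("prompt")
        if isinstance(prompt, str) and prompt.strip():
            pending["prompt"] = prompt
        choices = payload.get("choices")
        if isinstance(choices, list):
            pending["choices"] = [str(c) for c in choices if isinstance(c, str) and c.strip()]
        pending["allow_free_text"] = bool(payload.get("allow_free_text", True))
        return pending

    out = build_wait_event({"event_key": "approval", "choices": ["yes", "", "no"]})
    assert out["wait_key"] == "approval"
    assert out["choices"] == ["yes", "no"]
    assert out["allow_free_text"] is True
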
+    def _create_memory_note_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            content = input_data.get("content", "") if isinstance(input_data, dict) else str(input_data)
+            tags = input_data.get("tags") if isinstance(input_data, dict) else None
+            sources = input_data.get("sources") if isinstance(input_data, dict) else None
+            location = input_data.get("location") if isinstance(input_data, dict) else None
+            scope = input_data.get("scope") if isinstance(input_data, dict) else None
+
+            pending: Dict[str, Any] = {"type": "memory_note", "note": content, "tags": tags if isinstance(tags, dict) else {}}
+            if isinstance(sources, dict):
+                pending["sources"] = sources
+            if isinstance(location, str) and location.strip():
+                pending["location"] = location.strip()
+            if isinstance(scope, str) and scope.strip():
+                pending["scope"] = scope.strip()
+
+            keep_in_context_specified = isinstance(input_data, dict) and (
+                "keep_in_context" in input_data or "keepInContext" in input_data
+            )
+            if keep_in_context_specified:
+                raw_keep = (
+                    input_data.get("keep_in_context")
+                    if isinstance(input_data, dict) and "keep_in_context" in input_data
+                    else input_data.get("keepInContext") if isinstance(input_data, dict) else None
+                )
+                keep_in_context = _coerce_bool(raw_keep)
+            else:
+                # Visual-editor config (checkbox) default.
+                keep_cfg = None
+                if isinstance(config, dict):
+                    keep_cfg = config.get("keep_in_context")
+                    if keep_cfg is None:
+                        keep_cfg = config.get("keepInContext")
+                keep_in_context = _coerce_bool(keep_cfg)
+            if keep_in_context:
+                pending["keep_in_context"] = True
+
+            return {"note_id": None, "_pending_effect": pending}
+
+        return handler
+
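The precedence here is pin first, config checkbox second, in either naming convention. A minimal sketch of that resolution (illustrative; the real code coerces through `_coerce_bool`):

    from typing import Any, Dict

    def resolve_keep_in_context(payload: Dict[str, Any], config: Dict[str, Any]) -> bool:
        # An explicit pin (snake_case or camelCase) wins over the
        # visual-editor checkbox default, as in the handler above.
        if "keep_in_context" in payload or "keepInContext" in payload:
            raw = payload.get("keep_in_context", payload.get("keepInContext"))
        else:
            raw = config.get("keep_in_context", config.get("keepInContext"))
        return bool(raw)

    assert resolve_keep_in_context({"keep_in_context": False}, {"keepInContext": True}) is False
    assert resolve_keep_in_context({}, {"keepInContext": True}) is True
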
+    def _create_memory_query_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            query = input_data.get("query", "") if isinstance(input_data, dict) else str(input_data)
+            limit = input_data.get("limit", 10) if isinstance(input_data, dict) else 10
+            recall_level = input_data.get("recall_level") if isinstance(input_data, dict) else None
+            tags = input_data.get("tags") if isinstance(input_data, dict) else None
+            tags_mode = input_data.get("tags_mode") if isinstance(input_data, dict) else None
+            usernames = input_data.get("usernames") if isinstance(input_data, dict) else None
+            locations = input_data.get("locations") if isinstance(input_data, dict) else None
+            since = input_data.get("since") if isinstance(input_data, dict) else None
+            until = input_data.get("until") if isinstance(input_data, dict) else None
+            scope = input_data.get("scope") if isinstance(input_data, dict) else None
+            try:
+                limit_int = int(limit) if limit is not None else 10
+            except Exception:
+                limit_int = 10
+
+            pending: Dict[str, Any] = {"type": "memory_query", "query": query, "limit_spans": limit_int, "return": "both"}
+            if isinstance(recall_level, str) and recall_level.strip():
+                pending["recall_level"] = recall_level.strip()
+            if isinstance(tags, dict):
+                pending["tags"] = tags
+            if isinstance(tags_mode, str) and tags_mode.strip():
+                pending["tags_mode"] = tags_mode.strip()
+            if isinstance(usernames, list):
+                pending["usernames"] = [str(x).strip() for x in usernames if isinstance(x, str) and str(x).strip()]
+            if isinstance(locations, list):
+                pending["locations"] = [str(x).strip() for x in locations if isinstance(x, str) and str(x).strip()]
+            if since is not None:
+                pending["since"] = since
+            if until is not None:
+                pending["until"] = until
+            if isinstance(scope, str) and scope.strip():
+                pending["scope"] = scope.strip()
+
+            return {"results": [], "rendered": "", "_pending_effect": pending}
+
+        return handler
+
+    def _create_memory_kg_assert_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def _normalize_assertions(raw: Any) -> list[Dict[str, Any]]:
+            if raw is None:
+                return []
+            if isinstance(raw, dict):
+                return [dict(raw)]
+            if isinstance(raw, list):
+                out: list[Dict[str, Any]] = []
+                for x in raw:
+                    if isinstance(x, dict):
+                        out.append(dict(x))
+                return out
+            return []
+
+        def handler(input_data):
+            payload = input_data if isinstance(input_data, dict) else {}
+            assertions_raw = payload.get("assertions")
+            if assertions_raw is None:
+                assertions_raw = payload.get("triples")
+            if assertions_raw is None:
+                assertions_raw = payload.get("items")
+
+            assertions = _normalize_assertions(assertions_raw)
+
+            pending: Dict[str, Any] = {"type": "memory_kg_assert", "assertions": assertions}
+            scope = payload.get("scope")
+            if isinstance(scope, str) and scope.strip():
+                pending["scope"] = scope.strip()
+            owner_id = payload.get("owner_id")
+            if isinstance(owner_id, str) and owner_id.strip():
+                pending["owner_id"] = owner_id.strip()
+            span_id = payload.get("span_id")
+            if isinstance(span_id, str) and span_id.strip():
+                pending["span_id"] = span_id.strip()
+            attributes_defaults = payload.get("attributes_defaults")
+            if isinstance(attributes_defaults, dict) and attributes_defaults:
+                pending["attributes_defaults"] = dict(attributes_defaults)
+            allow_custom = payload.get("allow_custom_predicates")
+            if isinstance(allow_custom, bool):
+                pending["allow_custom_predicates"] = bool(allow_custom)
+
+            return {"assertion_ids": [], "count": 0, "_pending_effect": pending}
+
+        return handler
+
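The alias chain lets graphs feed this node from pins named `assertions`, `triples`, or `items`, each carrying a single dict or a list of dicts. A standalone sketch (`pick_assertions` is illustrative):

    from typing import Any, Dict, List

    def pick_assertions(payload: Dict[str, Any]) -> List[Dict[str, Any]]:
        # Alias chain: "assertions" wins, then "triples", then "items";
        # each may be a single dict or a list of dicts, as above.
        raw: Any = payload.get("assertions")
        if raw is None:
            raw = payload.get("triples")
        if raw is None:
            raw = payload.get("items")
        if isinstance(raw, dict):
            return [dict(raw)]
        if isinstance(raw, list):
            return [dict(x) for x in raw if isinstance(x, dict)]
        return []

    triple = {"subject": "alice", "predicate": "knows", "object": "bob"}
    assert pick_assertions({"triples": triple}) == [triple]
    assert pick_assertions({"items": [triple, "junk"]}) == [triple]
    assert pick_assertions({}) == []
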
+    def _create_memory_kg_query_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            payload = input_data if isinstance(input_data, dict) else {}
+            pending: Dict[str, Any] = {"type": "memory_kg_query"}
+
+            for k in (
+                "subject",
+                "predicate",
+                "object",
+                "recall_level",
+                "scope",
+                "owner_id",
+                "since",
+                "until",
+                "active_at",
+                "query_text",
+                "order",
+                # Optional packetization / packing controls (Active Memory mapping).
+                "model",
+            ):
+                v = payload.get(k)
+                if isinstance(v, str) and v.strip():
+                    pending[k] = v.strip()
+
+            max_input_tokens = payload.get("max_input_tokens")
+            if max_input_tokens is None:
+                max_input_tokens = payload.get("max_in_tokens")
+            if max_input_tokens is not None and not isinstance(max_input_tokens, bool):
+                try:
+                    pending["max_input_tokens"] = int(float(max_input_tokens))
+                except Exception:
+                    pass
+
+            min_score = payload.get("min_score")
+            if min_score is not None and not isinstance(min_score, bool):
+                try:
+                    pending["min_score"] = float(min_score)
+                except Exception:
+                    pass
+
+            limit = payload.get("limit")
+            if limit is not None and not isinstance(limit, bool):
+                try:
+                    pending["limit"] = int(limit)
+                except Exception:
+                    pass
+
+            return {"items": [], "count": 0, "_pending_effect": pending}
+
+        return handler
+
+    def _create_memory_kg_resolve_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            payload = input_data if isinstance(input_data, dict) else {}
+            pending: Dict[str, Any] = {"type": "memory_kg_resolve"}
+
+            for k in ("label", "expected_type", "recall_level", "scope", "owner_id"):
+                v = payload.get(k)
+                if isinstance(v, str) and v.strip():
+                    pending[k] = v.strip()
+
+            include_semantic = payload.get("include_semantic")
+            if include_semantic is None:
+                include_semantic = payload.get("includeSemantic")
+            if isinstance(include_semantic, bool):
+                pending["include_semantic"] = bool(include_semantic)
+
+            min_score = payload.get("min_score")
+            if min_score is None:
+                min_score = payload.get("minScore")
+            if min_score is not None and not isinstance(min_score, bool):
+                try:
+                    pending["min_score"] = float(min_score)
+                except Exception:
+                    pass
+
+            max_candidates = payload.get("max_candidates")
+            if max_candidates is None:
+                max_candidates = payload.get("maxCandidates")
+            if max_candidates is None:
+                max_candidates = payload.get("limit")
+            if max_candidates is not None and not isinstance(max_candidates, bool):
+                try:
+                    pending["max_candidates"] = int(float(max_candidates))
+                except Exception:
+                    pass
+
+            return {"candidates": [], "count": 0, "_pending_effect": pending}
+
+        return handler
+
+    def _create_memory_tag_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            span_id = None
+            tags: Dict[str, Any] = {}
+            merge = None
+            scope = None
+            if isinstance(input_data, dict):
+                span_id = input_data.get("span_id")
+                if span_id is None:
+                    span_id = input_data.get("spanId")
+                raw_tags = input_data.get("tags")
+                if isinstance(raw_tags, dict):
+                    tags = raw_tags
+                if "merge" in input_data:
+                    merge = _coerce_bool(input_data.get("merge"))
+                if isinstance(input_data.get("scope"), str):
+                    scope = str(input_data.get("scope") or "").strip() or None
+
+            pending: Dict[str, Any] = {"type": "memory_tag", "span_id": span_id, "tags": tags}
+            if merge is not None:
+                pending["merge"] = bool(merge)
+            if scope is not None:
+                pending["scope"] = scope
+            return {"rendered": "", "success": False, "_pending_effect": pending}
+
+        return handler
+
+    def _create_memory_compact_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            preserve_recent = input_data.get("preserve_recent") if isinstance(input_data, dict) else None
+            compression_mode = input_data.get("compression_mode") if isinstance(input_data, dict) else None
+            focus = input_data.get("focus") if isinstance(input_data, dict) else None
+
+            pending: Dict[str, Any] = {"type": "memory_compact"}
+            if preserve_recent is not None:
+                pending["preserve_recent"] = preserve_recent
+            if isinstance(compression_mode, str) and compression_mode.strip():
+                pending["compression_mode"] = compression_mode.strip()
+            if isinstance(focus, str) and focus.strip():
+                pending["focus"] = focus.strip()
+
+            return {"span_id": None, "_pending_effect": pending}
+
+        return handler
+
+    def _create_memory_rehydrate_handler(data: Dict[str, Any], config: Dict[str, Any]):
+        def handler(input_data):
+            raw = input_data.get("span_ids") if isinstance(input_data, dict) else None
+            if raw is None and isinstance(input_data, dict):
+                raw = input_data.get("span_id")
+            span_ids: list[Any] = []
+            if isinstance(raw, list):
+                span_ids = list(raw)
+            elif raw is not None:
+                span_ids = [raw]
+
+            placement = input_data.get("placement") if isinstance(input_data, dict) else None
+            placement_str = str(placement).strip() if isinstance(placement, str) else "after_summary"
+            if placement_str not in {"after_summary", "after_system", "end"}:
+                placement_str = "after_summary"
+
+            max_messages = input_data.get("max_messages") if isinstance(input_data, dict) else None
+            recall_level = input_data.get("recall_level") if isinstance(input_data, dict) else None
+
+            pending: Dict[str, Any] = {"type": "memory_rehydrate", "span_ids": span_ids, "placement": placement_str}
+            if max_messages is not None:
+                pending["max_messages"] = max_messages
+            if isinstance(recall_level, str) and recall_level.strip():
+                pending["recall_level"] = recall_level.strip()
+            return {"inserted": 0, "skipped": 0, "_pending_effect": pending}
+
+        return handler
+
+    def _create_handler(node_type: NodeType, data: Dict[str, Any]) -> Any:
+        type_str = node_type.value if isinstance(node_type, NodeType) else str(node_type)
+
+        if type_str == "get_var":
+            return _create_get_var_handler(data)
+
+        if type_str == "get_context":
+            return _create_get_context_handler(data)
+
+        if type_str == "bool_var":
+            return _create_bool_var_handler(data)
+
+        if type_str == "var_decl":
+            return _create_var_decl_handler(data)
+
+        if type_str == "set_var":
+            return _create_set_var_handler(data)
+
+        if type_str == "concat":
+            return _create_concat_handler(data)
+
+        if type_str == "make_array":
+            return _create_make_array_handler(data)
+
+        if type_str == "array_concat":
+            return _create_array_concat_handler(data)
+
+        if type_str == "read_file":
+            return _create_read_file_handler(data)
+
+        if type_str == "write_file":
+            return _create_write_file_handler(data)
+
+        # Sequence / Parallel are scheduler nodes compiled specially by `compile_flow`.
+        # Their runtime semantics are handled in `abstractflow.adapters.control_adapter`.
+        if type_str in ("sequence", "parallel"):
+            return lambda x: x
+
+        builtin = get_builtin_handler(type_str)
+        if builtin:
+            return _wrap_builtin(builtin, data)
+
+        if type_str == "code":
+            code = data.get("code", "def transform(input):\n return input")
+            function_name = data.get("functionName", "transform")
+            return create_code_handler(code, function_name)
+
+        if type_str == "agent":
+            return _create_agent_input_handler(data)
+
+        if type_str == "model_catalog":
+            return _create_model_catalog_handler(data)
+
+        if type_str == "provider_catalog":
+            return _create_provider_catalog_handler(data)
+
+        if type_str == "provider_models":
+            return _create_provider_models_handler(data)
+
+        if type_str == "subflow":
+            return _create_subflow_effect_builder(data)
+
+        if type_str == "break_object":
+            return _create_break_object_handler(data)
+
+        if type_str == "tool_parameters":
+            return _create_tool_parameters_handler(data)
+
+        if type_str == "function":
+            if "code" in data:
+                return create_code_handler(data["code"], data.get("functionName", "transform"))
+            if "expression" in data:
+                return _create_expression_handler(data["expression"])
+            return lambda x: x
+
+        if type_str == "on_flow_end":
+            return _create_flow_end_handler(data)
+
+        if type_str in ("on_flow_start", "on_user_request", "on_agent_message"):
+            return _create_event_handler(type_str, data)
+
+        if type_str == "if":
+            return _create_if_handler(data)
+        if type_str == "switch":
+            return _create_switch_handler(data)
+        if type_str == "while":
+            return _create_while_handler(data)
+        if type_str == "for":
+            return _create_for_handler(data)
+        if type_str == "loop":
+            return _create_loop_handler(data)
+
+        if type_str in EFFECT_NODE_TYPES:
+            return _create_effect_handler(type_str, data)
+
+        return lambda x: x
+
+    for node in visual.nodes:
+        type_str = node.type.value if hasattr(node.type, "value") else str(node.type)
+
+        if type_str in LITERAL_NODE_TYPES:
+            continue
+
+        base_handler = _create_handler(node.type, node.data)
+
+        if not _has_execution_pins(type_str, node.data):
+            pure_base_handlers[node.id] = base_handler
+            pure_node_ids.add(node.id)
+            if type_str in {"get_var", "get_context", "bool_var", "var_decl"}:
+                volatile_pure_node_ids.add(node.id)
+            continue
+
+        # Ignore disconnected/unreachable execution nodes.
+        if reachable_exec and node.id not in reachable_exec:
+            continue
+
+        wrapped_handler = _create_data_aware_handler(
+            node_id=node.id,
+            base_handler=base_handler,
+            data_edges=data_edge_map.get(node.id, {}),
+            pin_defaults=pin_defaults_by_node_id.get(node.id),
+            node_outputs=flow._node_outputs,  # type: ignore[attr-defined]
+            ensure_node_output=_ensure_node_output,
+            volatile_node_ids=volatile_pure_node_ids,
+        )
+
+        input_key = node.data.get("inputKey")
+        output_key = node.data.get("outputKey")
+
+        effect_type: Optional[str] = None
+        effect_config: Optional[Dict[str, Any]] = None
+        if type_str in EFFECT_NODE_TYPES:
+            effect_type = type_str
+            effect_config = node.data.get("effectConfig", {})
+        elif type_str == "on_schedule":
+            # Schedule trigger: compiles into WAIT_UNTIL under the hood.
+            effect_type = "on_schedule"
+            effect_config = node.data.get("eventConfig", {})
+        elif type_str == "on_event":
+            # Custom event listener (Blueprint-style "Custom Event").
+            # Compiles into WAIT_EVENT under the hood.
+            effect_type = "on_event"
+            effect_config = node.data.get("eventConfig", {})
+        elif type_str == "agent":
+            effect_type = "agent"
+            raw_cfg = node.data.get("agentConfig", {})
+            cfg = dict(raw_cfg) if isinstance(raw_cfg, dict) else {}
+            cfg.setdefault(
+                "_react_workflow_id",
+                visual_react_workflow_id(flow_id=visual.id, node_id=node.id),
+            )
+            effect_config = cfg
+        elif type_str in ("sequence", "parallel"):
+            # Control-flow scheduler nodes. Store pin order so compilation can
+            # execute branches deterministically (Blueprint-style).
+            effect_type = type_str
+
+            pins = node.data.get("outputs") if isinstance(node.data, dict) else None
+            exec_ids: list[str] = []
+            if isinstance(pins, list):
+                for p in pins:
+                    if not isinstance(p, dict):
+                        continue
+                    if p.get("type") != "execution":
+                        continue
+                    pid = p.get("id")
+                    if isinstance(pid, str) and pid:
+                        exec_ids.append(pid)
+
+            def _then_key(h: str) -> int:
+                try:
+                    if h.startswith("then:"):
+                        return int(h.split(":", 1)[1])
+                except Exception:
+                    pass
+                return 10**9
+
+            then_handles = sorted([h for h in exec_ids if h.startswith("then:")], key=_then_key)
+            cfg = {"then_handles": then_handles}
+            if type_str == "parallel":
+                cfg["completed_handle"] = "completed"
+            effect_config = cfg
+        elif type_str == "loop":
+            # Control-flow scheduler node (Blueprint-style foreach).
+            # Runtime semantics are handled in `abstractflow.adapters.control_adapter`.
+            effect_type = type_str
+            effect_config = {}
+        elif type_str == "while":
+            # Control-flow scheduler node (Blueprint-style while).
+            # Runtime semantics are handled in `abstractflow.adapters.control_adapter`.
+            effect_type = type_str
+            effect_config = {}
+        elif type_str == "for":
+            # Control-flow scheduler node (Blueprint-style numeric for).
+            # Runtime semantics are handled in `abstractflow.adapters.control_adapter`.
+            effect_type = type_str
+            effect_config = {}
+        elif type_str == "subflow":
+            effect_type = "start_subworkflow"
+            subflow_id = node.data.get("subflowId") or node.data.get("flowId")
+            output_pin_ids: list[str] = []
+            outs = node.data.get("outputs")
+            if isinstance(outs, list):
+                for p in outs:
+                    if not isinstance(p, dict):
+                        continue
+                    if p.get("type") == "execution":
+                        continue
+                    pid = p.get("id")
+                    if isinstance(pid, str) and pid and pid != "output":
+                        output_pin_ids.append(pid)
+            effect_config = {"workflow_id": subflow_id, "output_pins": output_pin_ids}
+
+        # Always attach minimal visual metadata for downstream compilation/wrapping.
+        meta_cfg: Dict[str, Any] = {"_visual_type": type_str}
+        if isinstance(effect_config, dict):
+            meta_cfg.update(effect_config)
+        effect_config = meta_cfg
+
+        flow.add_node(
+            node_id=node.id,
+            handler=wrapped_handler,
+            input_key=input_key,
+            output_key=output_key,
+            effect_type=effect_type,
+            effect_config=effect_config,
+        )
+
+    for edge in visual.edges:
+        if edge.targetHandle == "exec-in":
+            if edge.source in flow.nodes and edge.target in flow.nodes:
+                flow.add_edge(edge.source, edge.target, source_handle=edge.sourceHandle)
+
+    if visual.entryNode and visual.entryNode in flow.nodes:
+        flow.set_entry(visual.entryNode)
+    else:
+        targets = {e.target for e in visual.edges if e.targetHandle == "exec-in"}
+        for node_id in flow.nodes:
+            if node_id not in targets:
+                flow.set_entry(node_id)
+                break
+        if not flow.entry_node and flow.nodes:
+            flow.set_entry(next(iter(flow.nodes)))
+
+    # Pure (no-exec) nodes are cached in `flow._node_outputs` for data-edge resolution.
+    # Some schedulers (While, On Event, On Schedule) must invalidate these caches between iterations.
+    flow._pure_node_ids = pure_node_ids  # type: ignore[attr-defined]
+
+    return flow
+
+
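The entry-node fallback above picks the first node that is never the target of an execution edge, and only then falls back to the first node outright. A standalone sketch of that selection (`pick_entry` is illustrative):

    from typing import Iterable, List, Optional, Tuple

    def pick_entry(nodes: List[str], exec_edges: Iterable[Tuple[str, str]]) -> Optional[str]:
        # The entry is the first node that is never the target of an
        # execution edge; failing that, the first node in insertion order.
        targets = {target for (_, target) in exec_edges}
        for node_id in nodes:
            if node_id not in targets:
                return node_id
        return nodes[0] if nodes else None

    assert pick_entry(["a", "b", "c"], [("a", "b"), ("b", "c")]) == "a"
    assert pick_entry(["a", "b"], [("a", "b"), ("b", "a")]) == "a"  # cycle: fall back to first
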
+def _create_data_aware_handler(
+    node_id: str,
+    base_handler,
+    data_edges: Dict[str, tuple[str, str]],
+    pin_defaults: Optional[Dict[str, Any]],
+    node_outputs: Dict[str, Dict[str, Any]],
+    *,
+    ensure_node_output=None,
+    volatile_node_ids: Optional[set[str]] = None,
+):
+    """Wrap a handler to resolve data edge inputs before execution."""
+
+    volatile: set[str] = volatile_node_ids if isinstance(volatile_node_ids, set) else set()
+
+    def wrapped_handler(input_data):
+        resolved_input: Dict[str, Any] = {}
+
+        if isinstance(input_data, dict):
+            resolved_input.update(input_data)
+
+        for target_pin, (source_node, source_pin) in data_edges.items():
+            if ensure_node_output is not None and (source_node not in node_outputs or source_node in volatile):
+                ensure_node_output(source_node)
+            if source_node in node_outputs:
+                source_output = node_outputs[source_node]
+                if isinstance(source_output, dict) and source_pin in source_output:
+                    resolved_input[target_pin] = source_output[source_pin]
+                elif source_pin in ("result", "output"):
+                    resolved_input[target_pin] = source_output
+
+        if pin_defaults:
+            for pin_id, value in pin_defaults.items():
+                # Connected pins always win (even if the upstream value is None).
+                if pin_id in data_edges:
+                    continue
+                if pin_id not in resolved_input:
+                    # Clone object/array defaults so handlers can't mutate the shared default.
+                    if isinstance(value, (dict, list)):
+                        try:
+                            import copy
+
+                            resolved_input[pin_id] = copy.deepcopy(value)
+                        except Exception:
+                            resolved_input[pin_id] = value
+                    else:
+                        resolved_input[pin_id] = value
+
+        result = base_handler(resolved_input if resolved_input else input_data)
+        node_outputs[node_id] = result
+        return result
+
+    return wrapped_handler
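The wrapper resolves inputs in a fixed order: the exec payload first, then data edges (which overwrite it), then pin defaults for pins that are neither wired nor already present, deep-copying mutable defaults. A self-contained sketch of that precedence (`resolve_inputs` is illustrative and omits the `result`/`output` whole-value fallback shown above):

    import copy
    from typing import Any, Dict, Tuple

    def resolve_inputs(
        input_data: Dict[str, Any],
        data_edges: Dict[str, Tuple[str, str]],
        pin_defaults: Dict[str, Any],
        node_outputs: Dict[str, Dict[str, Any]],
    ) -> Dict[str, Any]:
        # Exec payload first, then data edges overwrite it, then defaults fill
        # only unconnected, still-missing pins (deep-copied when mutable).
        resolved = dict(input_data)
        for pin, (src_node, src_pin) in data_edges.items():
            out = node_outputs.get(src_node)
            if isinstance(out, dict) and src_pin in out:
                resolved[pin] = out[src_pin]
        for pin, value in pin_defaults.items():
            if pin not in data_edges and pin not in resolved:
                resolved[pin] = copy.deepcopy(value) if isinstance(value, (dict, list)) else value
        return resolved

    outputs = {"n1": {"text": "hello"}}
    resolved = resolve_inputs({}, {"prompt": ("n1", "text")}, {"prompt": "default", "tags": {}}, outputs)
    assert resolved == {"prompt": "hello", "tags": {}}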