abstractflow 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractflow/__init__.py +75 -95
- abstractflow/__main__.py +2 -0
- abstractflow/adapters/__init__.py +11 -0
- abstractflow/adapters/agent_adapter.py +124 -0
- abstractflow/adapters/control_adapter.py +615 -0
- abstractflow/adapters/effect_adapter.py +645 -0
- abstractflow/adapters/event_adapter.py +307 -0
- abstractflow/adapters/function_adapter.py +97 -0
- abstractflow/adapters/subflow_adapter.py +74 -0
- abstractflow/adapters/variable_adapter.py +317 -0
- abstractflow/cli.py +2 -0
- abstractflow/compiler.py +2027 -0
- abstractflow/core/__init__.py +5 -0
- abstractflow/core/flow.py +247 -0
- abstractflow/py.typed +2 -0
- abstractflow/runner.py +348 -0
- abstractflow/visual/__init__.py +43 -0
- abstractflow/visual/agent_ids.py +29 -0
- abstractflow/visual/builtins.py +789 -0
- abstractflow/visual/code_executor.py +214 -0
- abstractflow/visual/event_ids.py +33 -0
- abstractflow/visual/executor.py +2789 -0
- abstractflow/visual/interfaces.py +347 -0
- abstractflow/visual/models.py +252 -0
- abstractflow/visual/session_runner.py +168 -0
- abstractflow/visual/workspace_scoped_tools.py +261 -0
- abstractflow-0.3.0.dist-info/METADATA +413 -0
- abstractflow-0.3.0.dist-info/RECORD +32 -0
- {abstractflow-0.1.0.dist-info → abstractflow-0.3.0.dist-info}/licenses/LICENSE +2 -0
- abstractflow-0.1.0.dist-info/METADATA +0 -238
- abstractflow-0.1.0.dist-info/RECORD +0 -10
- {abstractflow-0.1.0.dist-info → abstractflow-0.3.0.dist-info}/WHEEL +0 -0
- {abstractflow-0.1.0.dist-info → abstractflow-0.3.0.dist-info}/entry_points.txt +0 -0
- {abstractflow-0.1.0.dist-info → abstractflow-0.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,347 @@
|
|
|
1
|
+
"""VisualFlow interface contracts (portable host validation).
|
|
2
|
+
|
|
3
|
+
This module defines *declarative* workflow interface markers and best-effort
|
|
4
|
+
validators so hosts (e.g. AbstractCode) can safely treat a workflow as a
|
|
5
|
+
specialized capability with a known IO contract.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from dataclasses import dataclass
|
|
11
|
+
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple
|
|
12
|
+
|
|
13
|
+
from .models import VisualFlow
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
ABSTRACTCODE_AGENT_V1 = "abstractcode.agent.v1"
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass(frozen=True)
class VisualFlowInterfaceSpec:
    """Declarative IO contract a workflow must satisfy to implement an interface.

    `required_*` pin maps ({pin_id -> type string}) must be present (and
    type-match) on the On Flow Start / On Flow End nodes for validation to
    pass; `recommended_*` maps are optional scaffolding hints and may be None.
    """

    interface_id: str
    label: str
    description: str
    required_start_outputs: Mapping[str, str]
    required_end_inputs: Mapping[str, str]
    # Annotated Optional so the None default is type-correct (the original
    # annotation needed a "type: ignore[assignment]"); consumers already guard
    # these with isinstance(..., Mapping) before use.
    recommended_start_outputs: Optional[Mapping[str, str]] = None
    recommended_end_inputs: Optional[Mapping[str, str]] = None
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _pin_types(pins: Any) -> Dict[str, str]:
|
|
31
|
+
"""Return {pin_id -> type_str} for a pin list.
|
|
32
|
+
|
|
33
|
+
VisualFlow stores pins inside the node's `data.inputs/outputs` lists.
|
|
34
|
+
"""
|
|
35
|
+
out: Dict[str, str] = {}
|
|
36
|
+
if not isinstance(pins, list):
|
|
37
|
+
return out
|
|
38
|
+
for p in pins:
|
|
39
|
+
if not isinstance(p, dict):
|
|
40
|
+
continue
|
|
41
|
+
pid = p.get("id")
|
|
42
|
+
if not isinstance(pid, str) or not pid:
|
|
43
|
+
continue
|
|
44
|
+
ptype = p.get("type")
|
|
45
|
+
t = ptype.value if hasattr(ptype, "value") else str(ptype or "")
|
|
46
|
+
out[pid] = t
|
|
47
|
+
return out
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _node_type_str(node: Any) -> str:
|
|
51
|
+
t = getattr(node, "type", None)
|
|
52
|
+
return t.value if hasattr(t, "value") else str(t or "")
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _iter_nodes(flow: VisualFlow) -> Iterable[Any]:
|
|
56
|
+
for n in getattr(flow, "nodes", []) or []:
|
|
57
|
+
yield n
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def get_interface_specs() -> Dict[str, VisualFlowInterfaceSpec]:
    """Return the registry of known interface specs, keyed by interface id."""
    agent_v1 = VisualFlowInterfaceSpec(
        interface_id=ABSTRACTCODE_AGENT_V1,
        label="AbstractCode Agent (v1)",
        description=(
            "Host-configurable request → response contract for running a workflow as an AbstractCode agent."
        ),
        # Host routing/policy pins (provider/model/tools) are required so a
        # workflow can be driven by AbstractCode without hardcoding node configs.
        required_start_outputs={
            "request": "string",
            "provider": "provider",
            "model": "model",
            "tools": "tools",
        },
        required_end_inputs={"response": "string"},
        recommended_start_outputs={
            "context": "object",
            "max_iterations": "number",
        },
        recommended_end_inputs={
            "meta": "object",
            "scratchpad": "object",
            "raw_result": "object",
        },
    )
    return {ABSTRACTCODE_AGENT_V1: agent_v1}
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def validate_visual_flow_interface(flow: VisualFlow, interface_id: str) -> List[str]:
    """Validate that a VisualFlow implements a known interface contract.

    Checks, in order: the interface id is known, the flow declares it in its
    `interfaces` list, exactly one On Flow Start node exists, at least one
    On Flow End node exists, and the required pins (with matching types) are
    present on those nodes.

    Returns a list of human-friendly error strings (empty when valid).
    Structural failures (missing/duplicate start, missing end) short-circuit:
    pin-level checks are skipped because they would be meaningless.
    """
    errors: List[str] = []
    iid = str(interface_id or "").strip()
    if not iid:
        return ["interface_id is required"]

    spec = get_interface_specs().get(iid)
    if spec is None:
        return [f"Unknown interface_id: {iid}"]

    # The flow must self-declare the interface; hosts use this marker to
    # discover capable workflows, so validation enforces it explicitly.
    declared = getattr(flow, "interfaces", None)
    declared_list = list(declared) if isinstance(declared, list) else []
    if iid not in declared_list:
        errors.append(f"Flow must declare interfaces: ['{iid}']")

    starts = [n for n in _iter_nodes(flow) if _node_type_str(n) == "on_flow_start"]
    if not starts:
        errors.append("Flow must include an On Flow Start node (type=on_flow_start).")
        return errors
    if len(starts) > 1:
        errors.append("Flow must include exactly one On Flow Start node (found multiple).")
        return errors

    ends = [n for n in _iter_nodes(flow) if _node_type_str(n) == "on_flow_end"]
    if not ends:
        errors.append("Flow must include at least one On Flow End node (type=on_flow_end).")
        return errors

    # Pins live in the node's `data` dict (see _pin_types); a non-dict `data`
    # simply yields an empty pin map and produces missing-pin errors below.
    start = starts[0]
    start_data = getattr(start, "data", None)
    start_out = _pin_types(start_data.get("outputs") if isinstance(start_data, dict) else None)

    for pin_id, expected_type in dict(spec.required_start_outputs).items():
        if pin_id not in start_out:
            errors.append(f"On Flow Start must expose an output pin '{pin_id}' ({expected_type}).")
            continue
        # An empty/unknown actual type is tolerated; only a concrete mismatch errors.
        actual = start_out.get(pin_id) or ""
        if expected_type and actual and actual != expected_type:
            errors.append(
                f"On Flow Start pin '{pin_id}' must be type '{expected_type}' (got '{actual}')."
            )

    # Validate all end nodes: whichever executes must satisfy the contract.
    for end in ends:
        end_data = getattr(end, "data", None)
        end_in = _pin_types(end_data.get("inputs") if isinstance(end_data, dict) else None)
        for pin_id, expected_type in dict(spec.required_end_inputs).items():
            if pin_id not in end_in:
                errors.append(
                    f"On Flow End node '{getattr(end, 'id', '')}' must expose an input pin '{pin_id}' ({expected_type})."
                )
                continue
            actual = end_in.get(pin_id) or ""
            if expected_type and actual and actual != expected_type:
                errors.append(
                    f"On Flow End node '{getattr(end, 'id', '')}' pin '{pin_id}' must be type '{expected_type}' (got '{actual}')."
                )

    return errors
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def apply_visual_flow_interface_scaffold(
    flow: VisualFlow,
    interface_id: str,
    *,
    include_recommended: bool = True,
) -> bool:
    """Best-effort: apply a known interface's pin scaffolding to a VisualFlow.

    This is intended for authoring UX:
    - When a workflow is marked as implementing an interface, we ensure the
      required pins exist on the expected nodes (On Flow Start / On Flow End).
    - If those nodes are missing, we create them (unconnected) so the author
      has a correct starting point.

    Mutates `flow` in place (nodes, their `data` pin lists, and `entryNode`).
    Returns True if the flow was mutated.
    """
    iid = str(interface_id or "").strip()
    spec = get_interface_specs().get(iid)
    if spec is None:
        # Unknown interface: nothing to scaffold, nothing mutated.
        return False

    def _pin_dict(pin_id: str, type_str: str, *, label: Optional[str] = None) -> Dict[str, Any]:
        # Minimal pin record in the visual-flow JSON shape; label falls back to the id.
        return {"id": pin_id, "label": label or pin_id, "type": type_str}

    def _ensure_pin(
        pins: list[Any],
        *,
        pin_id: str,
        type_str: str,
        label: Optional[str] = None,
    ) -> bool:
        # Returns True when the pin list was changed (pin added or type corrected).
        for p in pins:
            if isinstance(p, dict) and p.get("id") == pin_id:
                # Ensure type matches the interface contract.
                if p.get("type") != type_str:
                    p["type"] = type_str
                    return True
                return False
        pins.append(_pin_dict(pin_id, type_str, label=label))
        return True

    def _ensure_exec_pin(pins: list[Any], *, pin_id: str, direction: str) -> bool:
        # We keep exec pins present because most authoring UX expects them, even though the
        # interface contract itself only speaks about data pins.
        if not isinstance(direction, str) or direction not in {"in", "out"}:
            direction = "out"
        changed = False
        for p in pins:
            if isinstance(p, dict) and p.get("id") == pin_id:
                if p.get("type") != "execution":
                    p["type"] = "execution"
                    changed = True
                # exec pins typically have empty label; keep existing label if present.
                return changed
        # Prepend exec pins for readability.
        pins.insert(0, {"id": pin_id, "label": "", "type": "execution"})
        return True

    # Desired pins (required + optional recommended).
    start_pins = dict(spec.required_start_outputs)
    end_pins = dict(spec.required_end_inputs)
    if include_recommended:
        # recommended_* may be None on the spec; only merge real mappings.
        # setdefault keeps the required type when a pin id appears in both maps.
        if isinstance(spec.recommended_start_outputs, Mapping):
            for k, v in dict(spec.recommended_start_outputs).items():
                start_pins.setdefault(str(k), str(v))
        if isinstance(spec.recommended_end_inputs, Mapping):
            for k, v in dict(spec.recommended_end_inputs).items():
                end_pins.setdefault(str(k), str(v))

    # Locate nodes.
    nodes = list(getattr(flow, "nodes", []) or [])
    used_ids = {str(getattr(n, "id", "") or "") for n in nodes}

    def _unique_node_id(base: str) -> str:
        # Return `base`, or the first `base-2`, `base-3`, ... not already taken.
        # Reserves the chosen id in `used_ids` so later calls stay unique.
        b = str(base or "").strip() or "node"
        if b not in used_ids:
            used_ids.add(b)
            return b
        i = 2
        while True:
            cand = f"{b}-{i}"
            if cand not in used_ids:
                used_ids.add(cand)
                return cand
            i += 1

    def _ensure_nodes() -> Tuple[Any, List[Any], bool]:
        # Returns (start_node_or_None, end_nodes, mutated). Creates missing
        # start/end nodes (unconnected) with default visual properties.
        changed_local = False
        starts = [n for n in nodes if _node_type_str(n) == "on_flow_start"]
        ends = [n for n in nodes if _node_type_str(n) == "on_flow_end"]

        if not starts:
            try:
                # Local import keeps model construction optional: scaffolding of
                # an existing flow works even if this import ever failed.
                from .models import NodeType, Position, VisualNode
            except Exception:
                # Should not happen in normal installs; bail out gracefully.
                return (None, ends, False)
            start_id = _unique_node_id("start")
            start = VisualNode(
                id=start_id,
                type=NodeType.ON_FLOW_START,
                position=Position(x=-420.0, y=120.0),
                data={
                    "nodeType": "on_flow_start",
                    "label": "On Flow Start",
                    "icon": "🏁",
                    "headerColor": "#C0392B",
                    "inputs": [],
                    "outputs": [{"id": "exec-out", "label": "", "type": "execution"}],
                },
            )
            nodes.insert(0, start)
            changed_local = True
            starts = [start]

        if not ends:
            try:
                from .models import NodeType, Position, VisualNode
            except Exception:
                return (starts[0], [], changed_local)
            end_id = _unique_node_id("end")
            end = VisualNode(
                id=end_id,
                type=NodeType.ON_FLOW_END,
                position=Position(x=260.0, y=120.0),
                data={
                    "nodeType": "on_flow_end",
                    "label": "On Flow End",
                    "icon": "⏹",
                    "headerColor": "#C0392B",
                    "inputs": [{"id": "exec-in", "label": "", "type": "execution"}],
                    "outputs": [],
                },
            )
            nodes.append(end)
            changed_local = True
            ends = [end]

        return (starts[0], ends, changed_local)

    start_node, end_nodes, changed = _ensure_nodes()
    if start_node is None:
        return False

    # Ensure pins on start.
    start_data = getattr(start_node, "data", None)
    if not isinstance(start_data, dict):
        # Repair a missing/invalid data payload before touching pin lists.
        start_data = {}
        setattr(start_node, "data", start_data)
        changed = True
    outputs = start_data.get("outputs")
    if not isinstance(outputs, list):
        outputs = []
        start_data["outputs"] = outputs
        changed = True
    changed = _ensure_exec_pin(outputs, pin_id="exec-out", direction="out") or changed
    for pid, t in start_pins.items():
        changed = _ensure_pin(outputs, pin_id=str(pid), type_str=str(t), label=str(pid)) or changed

    # Ensure pins on all end nodes.
    for end in end_nodes:
        end_data = getattr(end, "data", None)
        if not isinstance(end_data, dict):
            end_data = {}
            setattr(end, "data", end_data)
            changed = True
        inputs = end_data.get("inputs")
        if not isinstance(inputs, list):
            inputs = []
            end_data["inputs"] = inputs
            changed = True
        changed = _ensure_exec_pin(inputs, pin_id="exec-in", direction="in") or changed
        for pid, t in end_pins.items():
            changed = _ensure_pin(inputs, pin_id=str(pid), type_str=str(t), label=str(pid)) or changed

    # Write back nodes list if it was reconstructed.
    try:
        flow.nodes = nodes  # type: ignore[assignment]
    except Exception:
        pass

    # Ensure entryNode points at the start when missing/empty.
    try:
        entry = getattr(flow, "entryNode", None)
        if not isinstance(entry, str) or not entry.strip():
            flow.entryNode = str(getattr(start_node, "id", "") or "") or None
            changed = True
    except Exception:
        pass

    return bool(changed)
|
|
347
|
+
|
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
"""Pydantic models for the AbstractFlow visual workflow JSON format.
|
|
2
|
+
|
|
3
|
+
These models are intentionally kept in the `abstractflow` package so workflows
|
|
4
|
+
authored in the visual editor can be loaded and executed from any host (CLI,
|
|
5
|
+
AbstractCode, servers), not only the web backend.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from enum import Enum
|
|
11
|
+
from datetime import datetime, timezone
|
|
12
|
+
import uuid
|
|
13
|
+
from typing import Any, Dict, List, Optional
|
|
14
|
+
|
|
15
|
+
from pydantic import BaseModel, Field
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class PinType(str, Enum):
    """Types of pins with their colors.

    The member values are the wire-format strings stored in the visual-flow
    JSON; the hex colors in the trailing comments mirror how the editor
    renders each pin kind.
    """

    EXECUTION = "execution"  # White #FFFFFF - Flow control
    STRING = "string"  # Magenta #FF00FF - Text data
    NUMBER = "number"  # Green #00FF00 - Integer/Float
    BOOLEAN = "boolean"  # Red #FF0000 - True/False
    OBJECT = "object"  # Cyan #00FFFF - JSON objects
    ARRAY = "array"  # Orange #FF8800 - Collections
    TOOLS = "tools"  # Orange - Tool allowlist (string[])
    PROVIDER = "provider"  # Cyan-blue - LLM provider id/name (string-like)
    MODEL = "model"  # Purple - LLM model id/name (string-like)
    AGENT = "agent"  # Blue #4488FF - Agent reference
    ANY = "any"  # Gray #888888 - Accepts any type
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class NodeType(str, Enum):
    """Types of nodes in the visual editor.

    Member values are the `type` strings serialized in the visual-flow JSON.
    Grouped by role: triggers, flow IO, core execution, math, string,
    control, data, literals, and side-effecting nodes.
    """

    # Event/Trigger nodes (entry points)
    ON_FLOW_START = "on_flow_start"
    ON_USER_REQUEST = "on_user_request"
    ON_AGENT_MESSAGE = "on_agent_message"
    ON_SCHEDULE = "on_schedule"
    ON_EVENT = "on_event"
    # Flow IO nodes
    ON_FLOW_END = "on_flow_end"
    # Core execution nodes
    AGENT = "agent"
    FUNCTION = "function"
    CODE = "code"
    SUBFLOW = "subflow"
    # Math
    ADD = "add"
    SUBTRACT = "subtract"
    MULTIPLY = "multiply"
    DIVIDE = "divide"
    MODULO = "modulo"
    POWER = "power"
    ABS = "abs"
    ROUND = "round"
    # String
    CONCAT = "concat"
    SPLIT = "split"
    JOIN = "join"
    FORMAT = "format"
    STRING_TEMPLATE = "string_template"
    UPPERCASE = "uppercase"
    LOWERCASE = "lowercase"
    TRIM = "trim"
    SUBSTRING = "substring"
    LENGTH = "length"
    # Control
    IF = "if"
    SWITCH = "switch"
    LOOP = "loop"
    WHILE = "while"
    FOR = "for"
    SEQUENCE = "sequence"
    PARALLEL = "parallel"
    COMPARE = "compare"
    NOT = "not"
    AND = "and"
    OR = "or"
    COALESCE = "coalesce"
    # Data
    GET = "get"
    SET = "set"
    MERGE = "merge"
    MAKE_ARRAY = "make_array"
    ARRAY_MAP = "array_map"
    ARRAY_FILTER = "array_filter"
    ARRAY_CONCAT = "array_concat"
    ARRAY_LENGTH = "array_length"
    ARRAY_APPEND = "array_append"
    ARRAY_DEDUP = "array_dedup"
    GET_VAR = "get_var"
    SET_VAR = "set_var"
    SET_VARS = "set_vars"
    SET_VAR_PROPERTY = "set_var_property"
    PARSE_JSON = "parse_json"
    STRINGIFY_JSON = "stringify_json"
    AGENT_TRACE_REPORT = "agent_trace_report"
    BREAK_OBJECT = "break_object"
    SYSTEM_DATETIME = "system_datetime"
    MODEL_CATALOG = "model_catalog"
    PROVIDER_CATALOG = "provider_catalog"
    PROVIDER_MODELS = "provider_models"
    # Literals
    LITERAL_STRING = "literal_string"
    LITERAL_NUMBER = "literal_number"
    LITERAL_BOOLEAN = "literal_boolean"
    LITERAL_JSON = "literal_json"
    JSON_SCHEMA = "json_schema"
    LITERAL_ARRAY = "literal_array"
    # Effects
    ASK_USER = "ask_user"
    ANSWER_USER = "answer_user"
    LLM_CALL = "llm_call"
    WAIT_UNTIL = "wait_until"
    WAIT_EVENT = "wait_event"
    EMIT_EVENT = "emit_event"
    READ_FILE = "read_file"
    WRITE_FILE = "write_file"
    MEMORY_NOTE = "memory_note"
    MEMORY_QUERY = "memory_query"
    MEMORY_REHYDRATE = "memory_rehydrate"
    TOOL_CALLS = "tool_calls"
    TOOLS_ALLOWLIST = "tools_allowlist"
    BOOL_VAR = "bool_var"
    VAR_DECL = "var_decl"
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
class Pin(BaseModel):
    """A connection point on a node."""

    # Pin id; edges reference it via sourceHandle/targetHandle.
    id: str
    # Display label shown in the editor (may be empty for execution pins).
    label: str
    # Data/flow kind of this pin; determines what it may connect to.
    type: PinType
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
class Position(BaseModel):
    """2D position on canvas (editor coordinates)."""

    x: float
    y: float
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
class VisualNode(BaseModel):
    """A node in the visual flow editor."""

    # Compact random id: first 8 hex chars of a UUID4.
    id: str = Field(default_factory=lambda: str(uuid.uuid4())[:8])
    type: NodeType
    position: Position
    # Free-form node payload (pin lists, literals, per-node config) as JSON.
    data: Dict[str, Any] = Field(default_factory=dict)
    # Node display properties (from template)
    label: Optional[str] = None
    icon: Optional[str] = None
    headerColor: Optional[str] = None
    inputs: List[Pin] = Field(default_factory=list)
    outputs: List[Pin] = Field(default_factory=list)
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
class VisualEdge(BaseModel):
    """An edge connecting two nodes."""

    # Compact random id: first 8 hex chars of a UUID4.
    id: str = Field(default_factory=lambda: str(uuid.uuid4())[:8])
    # Source/target are node ids; the handles are pin ids on those nodes.
    source: str
    sourceHandle: str  # Pin ID on source node
    target: str
    targetHandle: str  # Pin ID on target node
    # Editor-only rendering hint (animated edge stroke).
    animated: bool = False
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
class VisualFlow(BaseModel):
    """A complete visual flow definition."""

    # Compact random id: first 8 hex chars of a UUID4.
    id: str = Field(default_factory=lambda: str(uuid.uuid4())[:8])
    name: str
    description: str = ""
    # Optional interface markers (host contracts).
    # Example: ["abstractcode.agent.v1"] to indicate this workflow can be run as an AbstractCode agent.
    interfaces: List[str] = Field(default_factory=list)
    nodes: List[VisualNode] = Field(default_factory=list)
    edges: List[VisualEdge] = Field(default_factory=list)
    # Id of the node execution starts from (typically the On Flow Start node).
    entryNode: Optional[str] = None
    # Timestamps are opaque strings here; format is set by the host that writes them.
    created_at: Optional[str] = None
    updated_at: Optional[str] = None
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
class FlowCreateRequest(BaseModel):
    """Request to create a new flow.

    Mirrors VisualFlow minus server-assigned fields (id, timestamps).
    """

    name: str
    description: str = ""
    interfaces: List[str] = Field(default_factory=list)
    nodes: List[VisualNode] = Field(default_factory=list)
    edges: List[VisualEdge] = Field(default_factory=list)
    entryNode: Optional[str] = None
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class FlowUpdateRequest(BaseModel):
    """Request to update an existing flow.

    All fields are optional; None means "leave the stored value unchanged".
    """

    name: Optional[str] = None
    description: Optional[str] = None
    interfaces: Optional[List[str]] = None
    nodes: Optional[List[VisualNode]] = None
    edges: Optional[List[VisualEdge]] = None
    entryNode: Optional[str] = None
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
class FlowRunRequest(BaseModel):
    """Request to execute a flow."""

    # Arbitrary JSON payload handed to the flow's entry node as input.
    input_data: Dict[str, Any] = Field(default_factory=dict)
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
class FlowRunResult(BaseModel):
    """Result of a flow execution."""

    success: bool
    result: Optional[Any] = None
    error: Optional[str] = None
    run_id: Optional[str] = None
    # When True the run is suspended awaiting external input; the fields
    # below describe what is being waited on.
    waiting: bool = False
    # Opaque key used to resume the suspended run.
    wait_key: Optional[str] = None
    # User-facing question plus optional answer choices for ask-user waits.
    prompt: Optional[str] = None
    choices: Optional[List[str]] = None
    allow_free_text: Optional[bool] = None
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
class ExecutionMetrics(BaseModel):
    """Optional per-step (or whole-run) execution metrics.

    These fields are best-effort and may be omitted depending on host/runtime capabilities.
    """

    duration_ms: Optional[float] = None
    input_tokens: Optional[int] = None
    output_tokens: Optional[int] = None
    tokens_per_s: Optional[float] = None
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
class ExecutionEvent(BaseModel):
    """Real-time execution event for WebSocket."""

    type: str  # "node_start", "node_complete", "flow_complete", "flow_error"
    # ISO 8601 UTC timestamp for event emission (host-side observability).
    ts: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
    runId: Optional[str] = None
    nodeId: Optional[str] = None
    # Node/flow output on success; error message on failure.
    result: Optional[Any] = None
    error: Optional[str] = None
    meta: Optional[ExecutionMetrics] = None
|