abstractflow 0.1.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry; it is provided for informational purposes only.
- abstractflow/__init__.py +74 -94
- abstractflow/__main__.py +2 -0
- abstractflow/adapters/__init__.py +11 -0
- abstractflow/adapters/agent_adapter.py +5 -0
- abstractflow/adapters/control_adapter.py +5 -0
- abstractflow/adapters/effect_adapter.py +5 -0
- abstractflow/adapters/event_adapter.py +5 -0
- abstractflow/adapters/function_adapter.py +5 -0
- abstractflow/adapters/subflow_adapter.py +5 -0
- abstractflow/adapters/variable_adapter.py +5 -0
- abstractflow/cli.py +75 -28
- abstractflow/compiler.py +23 -0
- abstractflow/core/__init__.py +5 -0
- abstractflow/core/flow.py +11 -0
- abstractflow/py.typed +2 -0
- abstractflow/runner.py +402 -0
- abstractflow/visual/__init__.py +43 -0
- abstractflow/visual/agent_ids.py +5 -0
- abstractflow/visual/builtins.py +5 -0
- abstractflow/visual/code_executor.py +5 -0
- abstractflow/visual/event_ids.py +33 -0
- abstractflow/visual/executor.py +968 -0
- abstractflow/visual/interfaces.py +440 -0
- abstractflow/visual/models.py +277 -0
- abstractflow/visual/session_runner.py +182 -0
- abstractflow/visual/workspace_scoped_tools.py +29 -0
- abstractflow/workflow_bundle.py +290 -0
- abstractflow-0.3.1.dist-info/METADATA +186 -0
- abstractflow-0.3.1.dist-info/RECORD +33 -0
- {abstractflow-0.1.0.dist-info → abstractflow-0.3.1.dist-info}/WHEEL +1 -1
- {abstractflow-0.1.0.dist-info → abstractflow-0.3.1.dist-info}/licenses/LICENSE +2 -0
- abstractflow-0.1.0.dist-info/METADATA +0 -238
- abstractflow-0.1.0.dist-info/RECORD +0 -10
- {abstractflow-0.1.0.dist-info → abstractflow-0.3.1.dist-info}/entry_points.txt +0 -0
- {abstractflow-0.1.0.dist-info → abstractflow-0.3.1.dist-info}/top_level.txt +0 -0
abstractflow/runner.py
ADDED
@@ -0,0 +1,402 @@
"""FlowRunner - executes flows using AbstractRuntime."""

from __future__ import annotations

from typing import Any, Dict, Optional, TYPE_CHECKING

from .core.flow import Flow
from .compiler import compile_flow

if TYPE_CHECKING:
    from abstractruntime.core.models import RunState
    from abstractruntime.core.runtime import Runtime
    from abstractruntime.core.spec import WorkflowSpec


class FlowRunner:
    """Executes flows using AbstractRuntime.

    FlowRunner provides a high-level interface for running flows. It handles:
    - Compiling the flow to a WorkflowSpec
    - Creating a default runtime if not provided
    - Managing run lifecycle (start, step, run, resume)

    Example:
        >>> flow = Flow("my_flow")
        >>> flow.add_node("start", lambda x: x * 2, input_key="value")
        >>> flow.set_entry("start")
        >>>
        >>> runner = FlowRunner(flow)
        >>> result = runner.run({"value": 21})
        >>> print(result)  # {'result': 42, 'success': True}
    """

    def __init__(
        self,
        flow: Flow,
        runtime: Optional["Runtime"] = None,
    ):
        """Initialize a FlowRunner.

        Args:
            flow: The Flow definition to run
            runtime: Optional AbstractRuntime instance. If not provided,
                a default in-memory runtime will be created.
        """
        self.flow = flow
        self.workflow: "WorkflowSpec" = compile_flow(flow)
        self.runtime = runtime or self._create_default_runtime()
        self._current_run_id: Optional[str] = None

    def _create_default_runtime(self) -> "Runtime":
        """Create a default in-memory runtime."""
        try:
            from abstractruntime import Runtime, InMemoryRunStore, InMemoryLedgerStore  # type: ignore
        except Exception:  # pragma: no cover
            from abstractruntime.core.runtime import Runtime  # type: ignore
            from abstractruntime.storage.in_memory import InMemoryLedgerStore, InMemoryRunStore  # type: ignore

        return Runtime(
            run_store=InMemoryRunStore(),
            ledger_store=InMemoryLedgerStore(),
        )

    @property
    def run_id(self) -> Optional[str]:
        """Get the current run ID."""
        return self._current_run_id

    @staticmethod
    def _normalize_completed_output(raw: Any) -> Dict[str, Any]:
        """Normalize workflow completion output for host callers.

        Runtime-level workflows may complete with various output shapes:
        - VisualFlow On Flow End: {"my_output": ..., "success": True}
        - Terminal node returning scalar: {"response": 123, "success": True}
        - Legacy / explicit: {"result": ..., "success": True}

        AbstractFlow's public contract is: {"success": bool, "result": Any, ...}.
        """
        if not isinstance(raw, dict):
            return {"success": True, "result": raw}

        success = raw.get("success")
        if success is False:
            # Preserve error shape (tests + callers expect top-level "error"/"node", etc).
            return raw

        # Prefer explicit `result` when present (visual flows may also keep
        # top-level keys for UI/WS convenience).
        if "result" in raw:
            return {"success": True, "result": raw.get("result")}

        payload = {k: v for k, v in raw.items() if k != "success"}
        if len(payload) == 1:
            (only_key, only_val) = next(iter(payload.items()))
            if only_key in {"result", "response"}:
                return {"success": True, "result": only_val}

        return {"success": True, "result": payload}

    def start(
        self,
        input_data: Optional[Dict[str, Any]] = None,
        *,
        actor_id: Optional[str] = None,
        session_id: Optional[str] = None,
    ) -> str:
        """Start flow execution.

        Args:
            input_data: Initial variables for the flow

        Returns:
            The run ID for this execution
        """
        vars_dict = input_data or {}
        self._current_run_id = self.runtime.start(
            workflow=self.workflow,
            vars=vars_dict,
            actor_id=actor_id,
            session_id=session_id,
        )
        return self._current_run_id

    def step(self, max_steps: int = 1) -> "RunState":
        """Execute one or more steps.

        Args:
            max_steps: Maximum number of steps to execute

        Returns:
            The current RunState after stepping

        Raises:
            ValueError: If no run has been started
        """
        if not self._current_run_id:
            raise ValueError("No active run. Call start() first.")

        return self.runtime.tick(
            workflow=self.workflow,
            run_id=self._current_run_id,
            max_steps=max_steps,
        )

    def run(
        self,
        input_data: Optional[Dict[str, Any]] = None,
        *,
        actor_id: Optional[str] = None,
        session_id: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Execute flow to completion.

        This method starts the flow and runs until it completes, fails,
        or enters a waiting state.

        Args:
            input_data: Initial variables for the flow

        Returns:
            The flow's output dictionary. If the flow is waiting,
            returns {"waiting": True, "state": <RunState>}.

        Raises:
            RuntimeError: If the flow fails
        """
        from abstractruntime.core.models import RunStatus, WaitReason

        self.start(input_data, actor_id=actor_id, session_id=session_id)

        while True:
            state = self.runtime.tick(
                workflow=self.workflow,
                run_id=self._current_run_id,
            )

            if state.status == RunStatus.COMPLETED:
                return self._normalize_completed_output(state.output)

            if state.status == RunStatus.FAILED:
                raise RuntimeError(f"Flow failed: {state.error}")

            if state.status == RunStatus.WAITING:
                # Convenience: when waiting on a SUBWORKFLOW, FlowRunner.run() can
                # auto-drive the child to completion and resume the parent.
                #
                # Visual Agent nodes use async+wait START_SUBWORKFLOW so web hosts
                # can stream traces. In non-interactive contexts (unit tests, CLI),
                # we still want a synchronous `run()` to complete when possible.
                wait = getattr(state, "waiting", None)
                if (
                    wait is not None
                    and getattr(wait, "reason", None) == WaitReason.SUBWORKFLOW
                    and getattr(self.runtime, "workflow_registry", None) is not None
                ):
                    registry = getattr(self.runtime, "workflow_registry", None)

                    def _extract_sub_run_id(wait_state: Any) -> Optional[str]:
                        details2 = getattr(wait_state, "details", None)
                        if isinstance(details2, dict):
                            rid2 = details2.get("sub_run_id")
                            if isinstance(rid2, str) and rid2:
                                return rid2
                        wk = getattr(wait_state, "wait_key", None)
                        if isinstance(wk, str) and wk.startswith("subworkflow:"):
                            return wk.split("subworkflow:", 1)[1] or None
                        return None

                    def _spec_for(run_state: Any):
                        wf_id = getattr(run_state, "workflow_id", None)
                        # FlowRunner always has the root workflow spec (self.workflow).
                        # The runtime registry is required only for *child* workflows.
                        #
                        # Without this fallback, synchronous `FlowRunner.run()` can hang on
                        # SUBWORKFLOW waits if callers register only subworkflows (common in
                        # unit tests where the parent spec is not registered).
                        if wf_id == getattr(self.workflow, "workflow_id", None):
                            return self.workflow
                        return registry.get(wf_id) if registry is not None else None

                    top_run_id = self._current_run_id  # type: ignore[assignment]
                    if isinstance(top_run_id, str) and top_run_id:
                        # Find the deepest run in a SUBWORKFLOW wait chain.
                        target_run_id = top_run_id
                        for _ in range(50):
                            cur_state = self.runtime.get_state(target_run_id)
                            if cur_state.status != RunStatus.WAITING or cur_state.waiting is None:
                                break
                            if cur_state.waiting.reason != WaitReason.SUBWORKFLOW:
                                break
                            next_id = _extract_sub_run_id(cur_state.waiting)
                            if not next_id:
                                break
                            target_run_id = next_id

                        # Drive runs bottom-up: tick the deepest runnable run, then bubble completion
                        # payloads to waiting parents until we either block on external input or
                        # the chain unwinds.
                        current_run_id = target_run_id
                        for _ in range(10_000):
                            cur_state = self.runtime.get_state(current_run_id)
                            if cur_state.status == RunStatus.RUNNING:
                                wf = _spec_for(cur_state)
                                if wf is None:
                                    break
                                cur_state = self.runtime.tick(workflow=wf, run_id=current_run_id)

                            if cur_state.status == RunStatus.WAITING:
                                # If this is a subworkflow wait, descend further.
                                if cur_state.waiting is not None and cur_state.waiting.reason == WaitReason.SUBWORKFLOW:
                                    next_id = _extract_sub_run_id(cur_state.waiting)
                                    if next_id:
                                        current_run_id = next_id
                                        continue
                                # Blocked on non-subworkflow input (ASK_USER / EVENT / UNTIL).
                                break

                            if cur_state.status == RunStatus.FAILED:
                                raise RuntimeError(f"Subworkflow failed: {cur_state.error}")
                            if cur_state.status == RunStatus.CANCELLED:
                                raise RuntimeError("Subworkflow cancelled")
                            if cur_state.status != RunStatus.COMPLETED:
                                break

                            parent_id = getattr(cur_state, "parent_run_id", None)
                            if not isinstance(parent_id, str) or not parent_id:
                                break

                            parent_state = self.runtime.get_state(parent_id)
                            if (
                                parent_state.status == RunStatus.WAITING
                                and parent_state.waiting is not None
                                and parent_state.waiting.reason == WaitReason.SUBWORKFLOW
                            ):
                                parent_wf = _spec_for(parent_state)
                                if parent_wf is None:
                                    break

                                node_traces = None
                                try:
                                    node_traces = self.runtime.get_node_traces(cur_state.run_id)
                                except Exception:
                                    node_traces = None

                                self.runtime.resume(
                                    workflow=parent_wf,
                                    run_id=parent_id,
                                    wait_key=None,
                                    payload={
                                        "sub_run_id": cur_state.run_id,
                                        "output": cur_state.output,
                                        "node_traces": node_traces,
                                    },
                                    max_steps=0,
                                )
                                current_run_id = parent_id
                                # Continue bubbling (and ticking resumed parents) until we unwind.
                                continue

                            break

                    # After driving/bubbling, re-enter the main loop and tick the top run again.
                    continue

                # Flow is waiting for external input
                return {
                    "waiting": True,
                    "state": state,
                    "wait_key": state.waiting.wait_key if state.waiting else None,
                }

    def resume(
        self,
        wait_key: Optional[str] = None,
        payload: Optional[Dict[str, Any]] = None,
        *,
        max_steps: int = 100,
    ) -> "RunState":
        """Resume a waiting flow.

        Args:
            wait_key: The wait key to resume (optional, uses current if not specified)
            payload: Data to provide to the waiting node

        Returns:
            The RunState after resuming
        """
        if not self._current_run_id:
            raise ValueError("No active run to resume.")

        state = self.runtime.resume(
            workflow=self.workflow,
            run_id=self._current_run_id,
            wait_key=wait_key,
            payload=payload or {},
            max_steps=max_steps,
        )
        try:
            from abstractruntime.core.models import RunStatus

            if getattr(state, "status", None) == RunStatus.COMPLETED:
                state.output = self._normalize_completed_output(getattr(state, "output", None))  # type: ignore[attr-defined]
        except Exception:
            pass
        return state

    def get_state(self) -> Optional["RunState"]:
        """Get the current run state.

        Returns:
            The current RunState, or None if no run is active
        """
        if not self._current_run_id:
            return None
        return self.runtime.get_state(self._current_run_id)

    def get_ledger(self) -> list:
        """Get the execution ledger for the current run.

        Returns:
            List of step records, or empty list if no run
        """
        if not self._current_run_id:
            return []
        return self.runtime.get_ledger(self._current_run_id)

    def is_running(self) -> bool:
        """Check if the flow is currently running."""
        from abstractruntime.core.models import RunStatus

        state = self.get_state()
        return state is not None and state.status == RunStatus.RUNNING

    def is_waiting(self) -> bool:
        """Check if the flow is waiting for input."""
        from abstractruntime.core.models import RunStatus

        state = self.get_state()
        return state is not None and state.status == RunStatus.WAITING

    def is_complete(self) -> bool:
        """Check if the flow has completed."""
        from abstractruntime.core.models import RunStatus

        state = self.get_state()
        return state is not None and state.status == RunStatus.COMPLETED

    def is_failed(self) -> bool:
        """Check if the flow has failed."""
        from abstractruntime.core.models import RunStatus

        state = self.get_state()
        return state is not None and state.status == RunStatus.FAILED

    def __repr__(self) -> str:
        status = "not started"
        if self._current_run_id:
            state = self.get_state()
            if state:
                status = state.status.value
        return f"FlowRunner(flow={self.flow.flow_id!r}, status={status!r})"
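The class docstring above already sketches the intended call pattern; put together as a standalone script it looks roughly like the following. This is a minimal sketch: the `Flow` import path and the exact `add_node`/`set_entry` signatures are taken from the docstring example rather than verified against the rest of this diff, so treat them as assumptions.

from abstractflow import Flow  # assumption: Flow is re-exported at package level (defined in abstractflow/core/flow.py)
from abstractflow.runner import FlowRunner

# Build the one-node flow from the FlowRunner docstring example.
flow = Flow("my_flow")
flow.add_node("start", lambda x: x * 2, input_key="value")
flow.set_entry("start")

# run() compiles the flow, creates a default in-memory runtime, and drives the run to completion.
runner = FlowRunner(flow)
result = runner.run({"value": 21})
print(result)  # expected per the docstring: {'success': True, 'result': 42}

# The runner keeps the run around for inspection afterwards.
print(runner.is_complete(), len(runner.get_ledger()))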
abstractflow/visual/__init__.py
ADDED
@@ -0,0 +1,43 @@
"""Portable utilities for AbstractFlow visual workflows.

The visual editor saves flows as JSON (nodes/edges). These helpers compile that
representation into an `abstractflow.Flow` / `abstractruntime.WorkflowSpec` so
the same workflow can be executed from other hosts (e.g. AbstractCode, CLI),
not only the web backend.
"""

from .executor import create_visual_runner, execute_visual_flow, visual_to_flow
from .models import (
    ExecutionEvent,
    FlowCreateRequest,
    FlowRunRequest,
    FlowRunResult,
    FlowUpdateRequest,
    NodeType,
    Pin,
    PinType,
    Position,
    VisualEdge,
    VisualFlow,
    VisualNode,
)

__all__ = [
    "create_visual_runner",
    "execute_visual_flow",
    "visual_to_flow",
    # Models
    "ExecutionEvent",
    "FlowCreateRequest",
    "FlowRunRequest",
    "FlowRunResult",
    "FlowUpdateRequest",
    "NodeType",
    "Pin",
    "PinType",
    "Position",
    "VisualEdge",
    "VisualFlow",
    "VisualNode",
]
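The module docstring describes a JSON-to-Flow round trip, but the signatures of `visual_to_flow` and `create_visual_runner` are not part of this diff. A hedged sketch of how a host might use the re-exported names, assuming `VisualFlow` validates the editor's saved JSON and that both helpers accept a `VisualFlow` instance:

import json

from abstractflow.visual import VisualFlow, create_visual_runner, visual_to_flow

# Assumption: the editor's saved document deserializes directly into VisualFlow.
with open("my_flow.json") as fh:
    visual = VisualFlow(**json.load(fh))

# Assumption: visual_to_flow yields an abstractflow.Flow, and create_visual_runner
# wraps the compiled flow in a runner exposing a run() method.
flow = visual_to_flow(visual)
runner = create_visual_runner(visual)
result = runner.run({})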
abstractflow/visual/event_ids.py
ADDED
@@ -0,0 +1,33 @@
"""Deterministic workflow IDs for VisualFlow custom event listeners.

Visual "On Event" nodes are compiled into dedicated listener workflows and started
alongside the main workflow run.

IDs must be stable across hosts so a VisualFlow JSON document can be executed
outside the web editor (CLI, AbstractCode, third-party apps).
"""

from __future__ import annotations

import re


_SAFE_ID_RE = re.compile(r"[^a-zA-Z0-9_-]+")


def _sanitize(value: str) -> str:
    value = str(value or "").strip()
    if not value:
        return "unknown"
    value = _SAFE_ID_RE.sub("_", value)
    return value or "unknown"


def visual_event_listener_workflow_id(*, flow_id: str, node_id: str) -> str:
    """Return the workflow_id used for a VisualFlow `on_event` listener workflow."""
    return f"visual_event_listener_{_sanitize(flow_id)}_{_sanitize(node_id)}"
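Because `_sanitize` collapses every run of characters outside `[a-zA-Z0-9_-]` into a single underscore and falls back to "unknown" for empty input, listener workflow IDs can be predicted from the flow and node IDs alone:

from abstractflow.visual.event_ids import visual_event_listener_workflow_id

visual_event_listener_workflow_id(flow_id="My Flow", node_id="on event #1")
# -> "visual_event_listener_My_Flow_on_event_1"

visual_event_listener_workflow_id(flow_id="", node_id="n1")
# -> "visual_event_listener_unknown_n1"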