abstractgateway 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractgateway/__init__.py +11 -0
- abstractgateway/app.py +55 -0
- abstractgateway/cli.py +30 -0
- abstractgateway/config.py +94 -0
- abstractgateway/hosts/__init__.py +6 -0
- abstractgateway/hosts/bundle_host.py +626 -0
- abstractgateway/hosts/visualflow_host.py +213 -0
- abstractgateway/routes/__init__.py +5 -0
- abstractgateway/routes/gateway.py +393 -0
- abstractgateway/runner.py +429 -0
- abstractgateway/security/__init__.py +5 -0
- abstractgateway/security/gateway_security.py +504 -0
- abstractgateway/service.py +134 -0
- abstractgateway/stores.py +34 -0
- abstractgateway-0.1.0.dist-info/METADATA +101 -0
- abstractgateway-0.1.0.dist-info/RECORD +18 -0
- abstractgateway-0.1.0.dist-info/WHEEL +4 -0
- abstractgateway-0.1.0.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,626 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import os
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any, Dict, Optional, Tuple
|
|
8
|
+
|
|
9
|
+
from abstractruntime import Runtime, WorkflowRegistry, WorkflowSpec
|
|
10
|
+
from abstractruntime.visualflow_compiler import compile_visualflow
|
|
11
|
+
from abstractruntime.workflow_bundle import WorkflowBundle, WorkflowBundleError, open_workflow_bundle
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _namespace(bundle_id: str, flow_id: str) -> str:
|
|
18
|
+
return f"{bundle_id}:{flow_id}"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _coerce_namespaced_id(*, bundle_id: Optional[str], flow_id: str, default_bundle_id: Optional[str]) -> str:
|
|
22
|
+
fid = str(flow_id or "").strip()
|
|
23
|
+
if not fid:
|
|
24
|
+
raise ValueError("flow_id is required")
|
|
25
|
+
|
|
26
|
+
bid = str(bundle_id or "").strip() if isinstance(bundle_id, str) else ""
|
|
27
|
+
if bid:
|
|
28
|
+
# If the caller passed a fully-qualified id (bundle:flow), allow it as-is so
|
|
29
|
+
# clients can safely send both {bundle_id, flow_id} without producing a
|
|
30
|
+
# double-namespace like "bundle:bundle:flow".
|
|
31
|
+
if ":" in fid:
|
|
32
|
+
prefix = fid.split(":", 1)[0].strip()
|
|
33
|
+
if prefix == bid:
|
|
34
|
+
return fid
|
|
35
|
+
raise ValueError(
|
|
36
|
+
f"flow_id '{fid}' is already namespaced, but bundle_id '{bid}' was also provided; "
|
|
37
|
+
"omit bundle_id or pass a non-namespaced flow_id"
|
|
38
|
+
)
|
|
39
|
+
return _namespace(bid, fid)
|
|
40
|
+
|
|
41
|
+
# Allow passing a fully-qualified id as flow_id.
|
|
42
|
+
if ":" in fid:
|
|
43
|
+
return fid
|
|
44
|
+
|
|
45
|
+
if default_bundle_id:
|
|
46
|
+
return _namespace(default_bundle_id, fid)
|
|
47
|
+
|
|
48
|
+
raise ValueError("bundle_id is required when multiple bundles are loaded (or pass flow_id as 'bundle:flow')")
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _namespace_visualflow_raw(
    *,
    raw: Dict[str, Any],
    bundle_id: str,
    flow_id: str,
    id_map: Dict[str, str],
) -> Dict[str, Any]:
    """Return a namespaced copy of VisualFlow JSON, rewriting internal subflow references.

    The returned dict is a shallow copy of ``raw`` whose ``id`` is replaced by
    the namespaced flow id; subflow nodes get their flow-reference keys mapped
    through ``id_map``, and agent nodes get a precomputed ``_react_workflow_id``
    in a copied ``agentConfig`` (only when the AbstractRuntime helper is
    importable). Nodes and edges are shallow-copied so ``raw`` is not mutated.

    Raises:
        ValueError: when ``flow_id`` is blank.
    """
    fid = str(flow_id or "").strip()
    if not fid:
        raise ValueError("flow_id is required")

    # Prefer the precomputed mapping; fall back to deriving "bundle:flow".
    namespaced_id = id_map.get(fid) or _namespace(bundle_id, fid)

    def _maybe_rewrite(v: Any) -> Any:
        # Rewrite a string value to its namespaced form when it names a known flow.
        if isinstance(v, str):
            s = v.strip()
            if s in id_map:
                return id_map[s]
        return v

    out: Dict[str, Any] = dict(raw)
    out["id"] = namespaced_id

    # Copy/normalize nodes to avoid mutating the original object and to ensure nested dicts
    # are not shared by reference.
    nodes_raw = out.get("nodes")
    if isinstance(nodes_raw, list):
        new_nodes: list[Any] = []
        try:
            from abstractruntime.visualflow_compiler.visual.agent_ids import visual_react_workflow_id
        except Exception:  # pragma: no cover
            # Agent id precomputation is optional; skip it when the helper is unavailable.
            visual_react_workflow_id = None  # type: ignore[assignment]

        for n_any in nodes_raw:
            n = n_any if isinstance(n_any, dict) else None
            if n is None:
                # Non-dict entries pass through untouched.
                new_nodes.append(n_any)
                continue

            n2: Dict[str, Any] = dict(n)
            node_type = n2.get("type")
            # Enum-like node types expose ``.value``; otherwise stringify.
            type_str = node_type.value if hasattr(node_type, "value") else str(node_type or "")
            data0 = n2.get("data")
            data = dict(data0) if isinstance(data0, dict) else {}

            if type_str == "subflow":
                # Any of these keys may carry a reference to a sibling flow.
                for key in ("subflowId", "flowId", "workflowId", "workflow_id"):
                    if key in data:
                        data[key] = _maybe_rewrite(data.get(key))

            if type_str == "agent":
                cfg0 = data.get("agentConfig")
                cfg = dict(cfg0) if isinstance(cfg0, dict) else {}
                node_id = str(n2.get("id") or "").strip()
                if node_id and callable(visual_react_workflow_id):
                    # Precompute the deterministic ReAct workflow id for this agent node.
                    cfg["_react_workflow_id"] = visual_react_workflow_id(flow_id=namespaced_id, node_id=node_id)
                if cfg:
                    data["agentConfig"] = cfg

            n2["data"] = data
            new_nodes.append(n2)

        out["nodes"] = new_nodes

    # Shallow-copy edges for consistency (not strictly required).
    edges_raw = out.get("edges")
    if isinstance(edges_raw, list):
        out["edges"] = [dict(e) if isinstance(e, dict) else e for e in edges_raw]

    return out
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def _env(name: str, fallback: Optional[str] = None) -> Optional[str]:
|
|
125
|
+
v = os.getenv(name)
|
|
126
|
+
if v is not None and str(v).strip():
|
|
127
|
+
return v
|
|
128
|
+
if fallback:
|
|
129
|
+
v2 = os.getenv(fallback)
|
|
130
|
+
if v2 is not None and str(v2).strip():
|
|
131
|
+
return v2
|
|
132
|
+
return None
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def _node_type_from_raw(n: Any) -> str:
|
|
136
|
+
if not isinstance(n, dict):
|
|
137
|
+
return ""
|
|
138
|
+
t = n.get("type")
|
|
139
|
+
return t.value if hasattr(t, "value") else str(t or "")
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def _scan_flows_for_llm_defaults(flows_by_id: Dict[str, Dict[str, Any]]) -> Optional[Tuple[str, str]]:
    """Return a best-effort (provider, model) pair from VisualFlow node configs.

    Scans flows in order and returns the first node declaring both a non-blank
    provider and model: ``llm_call`` nodes carry the pair in ``data.effectConfig``
    and ``agent`` nodes in ``data.agentConfig``. The provider is lowercased; the
    model keeps its original case. Returns None when no node declares a usable
    pair. (The previous implementation duplicated the extraction logic for the
    two node types; it is consolidated here via a type -> config-key map.)
    """
    cfg_key_by_type = {"llm_call": "effectConfig", "agent": "agentConfig"}
    for raw in (flows_by_id or {}).values():
        nodes = raw.get("nodes")
        if not isinstance(nodes, list):
            continue
        for n in nodes:
            cfg_key = cfg_key_by_type.get(_node_type_from_raw(n))
            if cfg_key is None:
                continue
            data = n.get("data") if isinstance(n, dict) else None
            if not isinstance(data, dict):
                data = {}
            cfg = data.get(cfg_key)
            cfg = cfg if isinstance(cfg, dict) else {}
            provider = cfg.get("provider")
            model = cfg.get("model")
            if isinstance(provider, str) and provider.strip() and isinstance(model, str) and model.strip():
                return (provider.strip().lower(), model.strip())
    return None
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def _flow_uses_llm(raw: Dict[str, Any]) -> bool:
    """True when the flow JSON contains at least one llm_call or agent node."""
    nodes = raw.get("nodes")
    if not isinstance(nodes, list):
        return False
    return any(_node_type_from_raw(node) in ("llm_call", "agent") for node in nodes)
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def _flow_uses_tools(raw: Dict[str, Any]) -> bool:
    """True when the flow JSON contains at least one tool_calls or agent node."""
    nodes = raw.get("nodes")
    if not isinstance(nodes, list):
        return False
    return any(_node_type_from_raw(node) in ("tool_calls", "agent") for node in nodes)
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
def _collect_agent_nodes(raw: Dict[str, Any]) -> list[tuple[str, Dict[str, Any]]]:
    """Collect (node_id, agentConfig copy) pairs for every agent node in the flow.

    Nodes without a usable string id are skipped; a missing or malformed
    agentConfig yields an empty dict so callers always receive a dict copy.
    """
    nodes = raw.get("nodes")
    if not isinstance(nodes, list):
        return []

    pairs: list[tuple[str, Dict[str, Any]]] = []
    for node in nodes:
        if _node_type_from_raw(node) != "agent" or not isinstance(node, dict):
            continue
        node_id = str(node.get("id") or "").strip()
        if not node_id:
            continue
        data = node.get("data")
        if not isinstance(data, dict):
            data = {}
        cfg_raw = data.get("agentConfig")
        pairs.append((node_id, dict(cfg_raw) if isinstance(cfg_raw, dict) else {}))
    return pairs
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def _visual_event_listener_workflow_id(*, flow_id: str, node_id: str) -> str:
|
|
217
|
+
# Local copy of the canonical id scheme (kept simple and deterministic).
|
|
218
|
+
import re
|
|
219
|
+
|
|
220
|
+
safe_re = re.compile(r"[^a-zA-Z0-9_-]+")
|
|
221
|
+
|
|
222
|
+
def _sanitize(v: str) -> str:
|
|
223
|
+
s = str(v or "").strip()
|
|
224
|
+
if not s:
|
|
225
|
+
return "unknown"
|
|
226
|
+
s = safe_re.sub("_", s)
|
|
227
|
+
return s or "unknown"
|
|
228
|
+
|
|
229
|
+
return f"visual_event_listener_{_sanitize(flow_id)}_{_sanitize(node_id)}"
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
@dataclass(frozen=True)
class WorkflowBundleGatewayHost:
    """Gateway host that starts/ticks runs from WorkflowBundles (no AbstractFlow import).

    Compiles `manifest.flows` (VisualFlow JSON) via AbstractRuntime's VisualFlow compiler
    (single semantics).
    """

    # Resolved path the bundles were loaded from (a directory or a single .flow file).
    bundles_dir: Path
    # Loaded bundles keyed by manifest.bundle_id.
    bundles: Dict[str, WorkflowBundle]
    # Runtime used to start/tick all runs for these bundles.
    runtime: Runtime
    # Registry holding every compiled workflow spec (flows plus derived workflows).
    workflow_registry: WorkflowRegistry
    # Compiled specs keyed by workflow id ("bundle:flow" and derived listener/agent ids).
    specs: Dict[str, WorkflowSpec]
    # "On Event" listener workflow ids, keyed by their root workflow id.
    event_listener_specs_by_root: Dict[str, list[str]]
    # Bundle id assumed when exactly one bundle was loaded; None otherwise.
    _default_bundle_id: Optional[str]
|
|
247
|
+
|
|
248
|
+
    @staticmethod
    def load_from_dir(
        *,
        bundles_dir: Path,
        run_store: Any,
        ledger_store: Any,
        artifact_store: Any,
    ) -> "WorkflowBundleGatewayHost":
        """Load every *.flow bundle under *bundles_dir* and build a ready host.

        Compiles all VisualFlow JSON flows (namespaced per bundle), constructs a
        Runtime appropriate for the flows' needs (LLM, tools, or plain), and
        registers derived per-Agent-node ReAct workflows and per-OnEvent-node
        listener workflows.

        Args:
            bundles_dir: Directory containing *.flow files, or a single bundle file.
            run_store / ledger_store / artifact_store: Persistence backends
                handed to the Runtime.

        Raises:
            FileNotFoundError: when the path does not exist or no bundle loads.
            WorkflowBundleError: on empty/invalid manifests, compile failures,
                or missing optional integrations required by the flows.
        """
        base = Path(bundles_dir).expanduser().resolve()
        if not base.exists():
            raise FileNotFoundError(f"bundles_dir does not exist: {base}")

        bundle_paths: list[Path] = []
        if base.is_file():
            bundle_paths = [base]
        else:
            bundle_paths = sorted([p for p in base.glob("*.flow") if p.is_file()])

        # Best-effort load: a corrupt bundle is logged and skipped rather than fatal.
        bundles: Dict[str, WorkflowBundle] = {}
        for p in bundle_paths:
            try:
                b = open_workflow_bundle(p)
                bundles[str(b.manifest.bundle_id)] = b
            except Exception as e:
                logger.warning("Failed to load bundle %s: %s", p, e)

        if not bundles:
            raise FileNotFoundError(f"No bundles found in {base} (expected *.flow)")

        # With a single bundle, callers may omit bundle_id entirely.
        default_bundle_id = next(iter(bundles.keys())) if len(bundles) == 1 else None

        # Build runtime + registry and register all workflow specs.
        wf_reg: WorkflowRegistry = WorkflowRegistry()
        specs: Dict[str, WorkflowSpec] = {}
        flows_by_namespaced_id: Dict[str, Dict[str, Any]] = {}

        for bid, b in bundles.items():
            man = b.manifest
            if not man.flows:
                raise WorkflowBundleError(f"Bundle '{bid}' has no flows (manifest.flows is empty)")

            flow_ids = set(man.flows.keys())
            id_map = {flow_id: _namespace(bid, flow_id) for flow_id in flow_ids}

            for flow_id, rel in man.flows.items():
                raw = b.read_json(rel)
                if not isinstance(raw, dict):
                    raise WorkflowBundleError(f"VisualFlow JSON for '{bid}:{flow_id}' must be an object")
                namespaced_raw = _namespace_visualflow_raw(
                    raw=raw,
                    bundle_id=bid,
                    flow_id=flow_id,
                    id_map=id_map,
                )
                flows_by_namespaced_id[str(namespaced_raw.get("id") or _namespace(bid, flow_id))] = namespaced_raw
                try:
                    spec = compile_visualflow(namespaced_raw)
                except Exception as e:
                    raise WorkflowBundleError(f"Failed compiling VisualFlow '{bid}:{flow_id}': {e}") from e
                wf_reg.register(spec)
                specs[str(spec.workflow_id)] = spec

        needs_llm = any(_flow_uses_llm(raw) for raw in flows_by_namespaced_id.values())
        needs_tools = any(_flow_uses_tools(raw) for raw in flows_by_namespaced_id.values())

        # Optional AbstractCore integration for LLM_CALL + TOOL_CALLS.
        if needs_llm or needs_tools:
            try:
                from abstractruntime.integrations.abstractcore.default_tools import build_default_tool_map
                # NOTE(review): AbstractCoreToolExecutor appears unused below —
                # presumably imported to validate integration availability; confirm.
                from abstractruntime.integrations.abstractcore.tool_executor import (
                    AbstractCoreToolExecutor,
                    MappingToolExecutor,
                    PassthroughToolExecutor,
                )
            except Exception as e:  # pragma: no cover
                raise WorkflowBundleError(
                    "This bundle requires LLM/tool execution, but AbstractRuntime was installed "
                    "without AbstractCore integration. Install `abstractruntime[abstractcore]` "
                    "(and ensure `abstractcore` is importable)."
                ) from e

            tool_mode = str(_env("ABSTRACTGATEWAY_TOOL_MODE") or "passthrough").strip().lower()
            if tool_mode == "local":
                # Dev-only: execute the default tool map in-process without relying on the
                # AbstractCore global registry (which is typically empty in gateway mode).
                tool_executor: Any = MappingToolExecutor(build_default_tool_map())
            else:
                # Default safe mode: do not execute tools in-process; enter a durable wait instead.
                tool_executor = PassthroughToolExecutor(mode="approval_required")

            if needs_llm:
                try:
                    from abstractruntime.integrations.abstractcore.factory import create_local_runtime
                except Exception as e:  # pragma: no cover
                    raise WorkflowBundleError(
                        "LLM nodes require AbstractRuntime AbstractCore integration. "
                        "Install `abstractruntime[abstractcore]`."
                    ) from e

                # Provider/model resolution order: gateway env vars, legacy
                # ABSTRACTFLOW_* env vars, then values scanned from the flow JSON.
                provider = _env("ABSTRACTGATEWAY_PROVIDER") or _env("ABSTRACTFLOW_PROVIDER") or ""
                model = _env("ABSTRACTGATEWAY_MODEL") or _env("ABSTRACTFLOW_MODEL") or ""
                provider = provider.strip().lower()
                model = model.strip()

                if not provider or not model:
                    detected = _scan_flows_for_llm_defaults(flows_by_namespaced_id)
                    if detected is not None:
                        provider, model = detected

                if not provider or not model:
                    raise WorkflowBundleError(
                        "Bundle contains LLM nodes but no default provider/model is configured. "
                        "Set ABSTRACTGATEWAY_PROVIDER and ABSTRACTGATEWAY_MODEL (or ensure the flow JSON "
                        "includes provider/model on at least one llm_call/agent node)."
                    )

                runtime = create_local_runtime(
                    provider=provider,
                    model=model,
                    run_store=run_store,
                    ledger_store=ledger_store,
                    artifact_store=artifact_store,
                    tool_executor=tool_executor,
                )
                runtime.set_workflow_registry(wf_reg)
            else:
                # Tools-only runtime: avoid constructing an LLM client.
                from abstractruntime.core.models import EffectType
                from abstractruntime.integrations.abstractcore.effect_handlers import make_tool_calls_handler

                runtime = Runtime(
                    run_store=run_store,
                    ledger_store=ledger_store,
                    workflow_registry=wf_reg,
                    artifact_store=artifact_store,
                    effect_handlers={
                        EffectType.TOOL_CALLS: make_tool_calls_handler(tools=tool_executor),
                    },
                )
        else:
            # No LLM/tool nodes anywhere: a plain runtime suffices.
            runtime = Runtime(
                run_store=run_store,
                ledger_store=ledger_store,
                workflow_registry=wf_reg,
                artifact_store=artifact_store,
            )

        # Register derived workflows required by VisualFlow semantics:
        # - per-Agent-node ReAct subworkflows
        # - per-OnEvent-node listener workflows (Blueprint-style)
        event_listener_specs_by_root: Dict[str, list[str]] = {}

        agent_pairs: list[tuple[str, Dict[str, Any]]] = []
        for flow_id, raw in flows_by_namespaced_id.items():
            for node_id, cfg in _collect_agent_nodes(raw):
                agent_pairs.append((flow_id, {"node_id": node_id, "cfg": cfg}))

        if agent_pairs:
            try:
                from abstractagent.adapters.react_runtime import create_react_workflow
                from abstractagent.logic.react import ReActLogic
            except Exception as e:  # pragma: no cover
                raise WorkflowBundleError(
                    "Bundle contains Visual Agent nodes, but AbstractAgent is not installed/importable. "
                    "Install `abstractagent` to execute Agent nodes."
                ) from e

            from abstractcore.tools import ToolDefinition

            try:
                from abstractruntime.integrations.abstractcore.default_tools import list_default_tool_specs
            except Exception as e:  # pragma: no cover
                raise WorkflowBundleError(
                    "Visual Agent nodes require AbstractCore tool schemas (abstractruntime[abstractcore])."
                ) from e

            def _tool_defs_from_specs(specs0: list[dict[str, Any]]) -> list[ToolDefinition]:
                # Convert raw tool-spec dicts to ToolDefinition, skipping malformed entries.
                out: list[ToolDefinition] = []
                for s in specs0:
                    if not isinstance(s, dict):
                        continue
                    name = s.get("name")
                    if not isinstance(name, str) or not name.strip():
                        continue
                    desc = s.get("description")
                    params = s.get("parameters")
                    out.append(
                        ToolDefinition(
                            name=name.strip(),
                            description=str(desc or ""),
                            parameters=dict(params) if isinstance(params, dict) else {},
                        )
                    )
                return out

            def _normalize_tool_names(raw_tools: Any) -> list[str]:
                # Keep only non-blank strings, trimmed; anything else yields [].
                if not isinstance(raw_tools, list):
                    return []
                out: list[str] = []
                for t in raw_tools:
                    if isinstance(t, str) and t.strip():
                        out.append(t.strip())
                return out

            all_tool_defs = _tool_defs_from_specs(list_default_tool_specs())
            # Schema-only builtins (executed as runtime effects by AbstractAgent adapters).
            try:
                from abstractagent.logic.builtins import (  # type: ignore
                    ASK_USER_TOOL,
                    COMPACT_MEMORY_TOOL,
                    INSPECT_VARS_TOOL,
                    RECALL_MEMORY_TOOL,
                    REMEMBER_TOOL,
                )

                # De-duplicate by tool name against the defaults already collected.
                builtin_defs = [ASK_USER_TOOL, RECALL_MEMORY_TOOL, INSPECT_VARS_TOOL, REMEMBER_TOOL, COMPACT_MEMORY_TOOL]
                seen_names = {t.name for t in all_tool_defs if getattr(t, "name", None)}
                for t in builtin_defs:
                    if getattr(t, "name", None) and t.name not in seen_names:
                        all_tool_defs.append(t)
                        seen_names.add(t.name)
            except Exception:
                # Builtins are optional; absence is tolerated silently.
                pass

            logic = ReActLogic(tools=all_tool_defs)

            from abstractruntime.visualflow_compiler.visual.agent_ids import visual_react_workflow_id

            for flow_id, meta in agent_pairs:
                node_id = str(meta.get("node_id") or "").strip()
                cfg = meta.get("cfg") if isinstance(meta.get("cfg"), dict) else {}
                cfg2 = dict(cfg) if isinstance(cfg, dict) else {}
                # Prefer the id precomputed during namespacing; otherwise derive it.
                workflow_id_raw = cfg2.get("_react_workflow_id")
                react_workflow_id = (
                    workflow_id_raw.strip()
                    if isinstance(workflow_id_raw, str) and workflow_id_raw.strip()
                    else visual_react_workflow_id(flow_id=flow_id, node_id=node_id)
                )
                tools_selected = _normalize_tool_names(cfg2.get("tools"))
                spec = create_react_workflow(
                    logic=logic,
                    workflow_id=react_workflow_id,
                    provider=None,
                    model=None,
                    allowed_tools=tools_selected,
                )
                wf_reg.register(spec)
                specs[str(spec.workflow_id)] = spec

        # Custom event listeners ("On Event" nodes) are compiled into dedicated listener workflows.
        for flow_id, raw in flows_by_namespaced_id.items():
            nodes = raw.get("nodes")
            if not isinstance(nodes, list):
                continue
            for n in nodes:
                if _node_type_from_raw(n) != "on_event":
                    continue
                if not isinstance(n, dict):
                    continue
                node_id = str(n.get("id") or "").strip()
                if not node_id:
                    continue
                listener_wid = _visual_event_listener_workflow_id(flow_id=flow_id, node_id=node_id)

                # Derive a listener workflow with entryNode = on_event node.
                derived: Dict[str, Any] = dict(raw)
                derived["id"] = listener_wid
                derived["entryNode"] = node_id
                try:
                    spec = compile_visualflow(derived)
                except Exception as e:
                    raise WorkflowBundleError(f"Failed compiling On Event listener '{listener_wid}': {e}") from e
                wf_reg.register(spec)
                specs[str(spec.workflow_id)] = spec
                event_listener_specs_by_root.setdefault(flow_id, []).append(str(spec.workflow_id))

        return WorkflowBundleGatewayHost(
            bundles_dir=base,
            bundles=bundles,
            runtime=runtime,
            workflow_registry=wf_reg,
            specs=specs,
            event_listener_specs_by_root=event_listener_specs_by_root,
            _default_bundle_id=default_bundle_id,
        )
|
|
533
|
+
|
|
534
|
+
@property
|
|
535
|
+
def run_store(self) -> Any:
|
|
536
|
+
return self.runtime.run_store
|
|
537
|
+
|
|
538
|
+
@property
|
|
539
|
+
def ledger_store(self) -> Any:
|
|
540
|
+
return self.runtime.ledger_store
|
|
541
|
+
|
|
542
|
+
@property
|
|
543
|
+
def artifact_store(self) -> Any:
|
|
544
|
+
return self.runtime.artifact_store
|
|
545
|
+
|
|
546
|
+
    def start_run(
        self,
        *,
        flow_id: str,
        input_data: Dict[str, Any],
        actor_id: str = "gateway",
        bundle_id: Optional[str] = None,
    ) -> str:
        """Start a run for a bundle flow and return the new run id.

        Args:
            flow_id: Plain or namespaced ("bundle:flow") flow id. May be blank
                when the selected bundle exposes a single entrypoint or declares
                manifest.default_entrypoint.
            input_data: Initial workflow variables (shallow-copied before use).
            actor_id: Actor recorded as the run's initiator.
            bundle_id: Optional bundle selector used to namespace flow_id.

        Raises:
            ValueError: when the id inputs are ambiguous or insufficient.
            KeyError: when the bundle or compiled workflow cannot be found.
        """
        fid = str(flow_id or "").strip()
        bid = str(bundle_id or "").strip() if isinstance(bundle_id, str) else ""
        bundle_id_clean = bid or None

        if not fid:
            # Default entrypoint selection for the common case:
            # start {bundle_id, input_data} without needing flow_id.
            selected_bundle_id = bundle_id_clean or self._default_bundle_id
            if not selected_bundle_id:
                raise ValueError(
                    "flow_id is required when multiple bundles are loaded; "
                    "provide bundle_id (or pass flow_id as 'bundle:flow')"
                )
            bundle = self.bundles.get(str(selected_bundle_id))
            if bundle is None:
                raise KeyError(f"Bundle '{selected_bundle_id}' not found")
            entrypoints = list(getattr(bundle.manifest, "entrypoints", None) or [])
            default_ep = str(getattr(bundle.manifest, "default_entrypoint", "") or "").strip()
            # A single entrypoint wins outright; otherwise fall back to the
            # manifest's declared default.
            if len(entrypoints) == 1:
                ep_fid = str(getattr(entrypoints[0], "flow_id", "") or "").strip()
            elif default_ep:
                ep_fid = default_ep
            else:
                raise ValueError(
                    f"Bundle '{selected_bundle_id}' has {len(entrypoints)} entrypoints; "
                    "specify flow_id to select which entrypoint to start "
                    "(or set manifest.default_entrypoint)"
                )
            if not ep_fid:
                raise ValueError(f"Bundle '{selected_bundle_id}' entrypoint flow_id is empty")
            workflow_id = _namespace(str(selected_bundle_id), ep_fid)
        else:
            workflow_id = _coerce_namespaced_id(
                bundle_id=bundle_id_clean, flow_id=fid, default_bundle_id=self._default_bundle_id
            )

        spec = self.specs.get(workflow_id)
        if spec is None:
            raise KeyError(f"Workflow '{workflow_id}' not found")
        run_id = str(self.runtime.start(workflow=spec, vars=dict(input_data or {}), actor_id=actor_id))

        # Start session-scoped event listener workflows (best-effort).
        listener_ids = self.event_listener_specs_by_root.get(workflow_id) or []
        for wid in listener_ids:
            listener_spec = self.specs.get(wid)
            if listener_spec is None:
                continue
            try:
                child_run_id = self.runtime.start(
                    workflow=listener_spec,
                    vars={},
                    session_id=run_id,
                    parent_run_id=run_id,
                    actor_id=actor_id,
                )
                # Advance to the first WAIT_EVENT.
                self.runtime.tick(workflow=listener_spec, run_id=child_run_id, max_steps=10)
            except Exception:
                # Listener startup is best-effort; a failing listener must not
                # abort the root run that was already started.
                continue

        return run_id
|
|
615
|
+
|
|
616
|
+
def runtime_and_workflow_for_run(self, run_id: str) -> tuple[Runtime, Any]:
|
|
617
|
+
run = self.run_store.load(str(run_id))
|
|
618
|
+
if run is None:
|
|
619
|
+
raise KeyError(f"Run '{run_id}' not found")
|
|
620
|
+
workflow_id = getattr(run, "workflow_id", None)
|
|
621
|
+
if not isinstance(workflow_id, str) or not workflow_id:
|
|
622
|
+
raise ValueError(f"Run '{run_id}' missing workflow_id")
|
|
623
|
+
spec = self.specs.get(workflow_id)
|
|
624
|
+
if spec is None:
|
|
625
|
+
raise KeyError(f"Workflow '{workflow_id}' not registered")
|
|
626
|
+
return (self.runtime, spec)
|