aethergraph 0.1.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aethergraph/__init__.py +49 -0
- aethergraph/config/__init__.py +0 -0
- aethergraph/config/config.py +121 -0
- aethergraph/config/context.py +16 -0
- aethergraph/config/llm.py +26 -0
- aethergraph/config/loader.py +60 -0
- aethergraph/config/runtime.py +9 -0
- aethergraph/contracts/errors/errors.py +44 -0
- aethergraph/contracts/services/artifacts.py +142 -0
- aethergraph/contracts/services/channel.py +72 -0
- aethergraph/contracts/services/continuations.py +23 -0
- aethergraph/contracts/services/eventbus.py +12 -0
- aethergraph/contracts/services/kv.py +24 -0
- aethergraph/contracts/services/llm.py +17 -0
- aethergraph/contracts/services/mcp.py +22 -0
- aethergraph/contracts/services/memory.py +108 -0
- aethergraph/contracts/services/resume.py +28 -0
- aethergraph/contracts/services/state_stores.py +33 -0
- aethergraph/contracts/services/wakeup.py +28 -0
- aethergraph/core/execution/base_scheduler.py +77 -0
- aethergraph/core/execution/forward_scheduler.py +777 -0
- aethergraph/core/execution/global_scheduler.py +634 -0
- aethergraph/core/execution/retry_policy.py +22 -0
- aethergraph/core/execution/step_forward.py +411 -0
- aethergraph/core/execution/step_result.py +18 -0
- aethergraph/core/execution/wait_types.py +72 -0
- aethergraph/core/graph/graph_builder.py +192 -0
- aethergraph/core/graph/graph_fn.py +219 -0
- aethergraph/core/graph/graph_io.py +67 -0
- aethergraph/core/graph/graph_refs.py +154 -0
- aethergraph/core/graph/graph_spec.py +115 -0
- aethergraph/core/graph/graph_state.py +59 -0
- aethergraph/core/graph/graphify.py +128 -0
- aethergraph/core/graph/interpreter.py +145 -0
- aethergraph/core/graph/node_handle.py +33 -0
- aethergraph/core/graph/node_spec.py +46 -0
- aethergraph/core/graph/node_state.py +63 -0
- aethergraph/core/graph/task_graph.py +747 -0
- aethergraph/core/graph/task_node.py +82 -0
- aethergraph/core/graph/utils.py +37 -0
- aethergraph/core/graph/visualize.py +239 -0
- aethergraph/core/runtime/ad_hoc_context.py +61 -0
- aethergraph/core/runtime/base_service.py +153 -0
- aethergraph/core/runtime/bind_adapter.py +42 -0
- aethergraph/core/runtime/bound_memory.py +69 -0
- aethergraph/core/runtime/execution_context.py +220 -0
- aethergraph/core/runtime/graph_runner.py +349 -0
- aethergraph/core/runtime/lifecycle.py +26 -0
- aethergraph/core/runtime/node_context.py +203 -0
- aethergraph/core/runtime/node_services.py +30 -0
- aethergraph/core/runtime/recovery.py +159 -0
- aethergraph/core/runtime/run_registration.py +33 -0
- aethergraph/core/runtime/runtime_env.py +157 -0
- aethergraph/core/runtime/runtime_registry.py +32 -0
- aethergraph/core/runtime/runtime_services.py +224 -0
- aethergraph/core/runtime/wakeup_watcher.py +40 -0
- aethergraph/core/tools/__init__.py +10 -0
- aethergraph/core/tools/builtins/channel_tools.py +194 -0
- aethergraph/core/tools/builtins/toolset.py +134 -0
- aethergraph/core/tools/toolkit.py +510 -0
- aethergraph/core/tools/waitable.py +109 -0
- aethergraph/plugins/channel/__init__.py +0 -0
- aethergraph/plugins/channel/adapters/__init__.py +0 -0
- aethergraph/plugins/channel/adapters/console.py +106 -0
- aethergraph/plugins/channel/adapters/file.py +102 -0
- aethergraph/plugins/channel/adapters/slack.py +285 -0
- aethergraph/plugins/channel/adapters/telegram.py +302 -0
- aethergraph/plugins/channel/adapters/webhook.py +104 -0
- aethergraph/plugins/channel/adapters/webui.py +134 -0
- aethergraph/plugins/channel/routes/__init__.py +0 -0
- aethergraph/plugins/channel/routes/console_routes.py +86 -0
- aethergraph/plugins/channel/routes/slack_routes.py +49 -0
- aethergraph/plugins/channel/routes/telegram_routes.py +26 -0
- aethergraph/plugins/channel/routes/webui_routes.py +136 -0
- aethergraph/plugins/channel/utils/__init__.py +0 -0
- aethergraph/plugins/channel/utils/slack_utils.py +278 -0
- aethergraph/plugins/channel/utils/telegram_utils.py +324 -0
- aethergraph/plugins/channel/websockets/slack_ws.py +68 -0
- aethergraph/plugins/channel/websockets/telegram_polling.py +151 -0
- aethergraph/plugins/mcp/fs_server.py +128 -0
- aethergraph/plugins/mcp/http_server.py +101 -0
- aethergraph/plugins/mcp/ws_server.py +180 -0
- aethergraph/plugins/net/http.py +10 -0
- aethergraph/plugins/utils/data_io.py +359 -0
- aethergraph/runner/__init__.py +5 -0
- aethergraph/runtime/__init__.py +62 -0
- aethergraph/server/__init__.py +3 -0
- aethergraph/server/app_factory.py +84 -0
- aethergraph/server/start.py +122 -0
- aethergraph/services/__init__.py +10 -0
- aethergraph/services/artifacts/facade.py +284 -0
- aethergraph/services/artifacts/factory.py +35 -0
- aethergraph/services/artifacts/fs_store.py +656 -0
- aethergraph/services/artifacts/jsonl_index.py +123 -0
- aethergraph/services/artifacts/paths.py +23 -0
- aethergraph/services/artifacts/sqlite_index.py +209 -0
- aethergraph/services/artifacts/utils.py +124 -0
- aethergraph/services/auth/dev.py +16 -0
- aethergraph/services/channel/channel_bus.py +293 -0
- aethergraph/services/channel/factory.py +44 -0
- aethergraph/services/channel/session.py +511 -0
- aethergraph/services/channel/wait_helpers.py +57 -0
- aethergraph/services/clock/clock.py +9 -0
- aethergraph/services/container/default_container.py +320 -0
- aethergraph/services/continuations/continuation.py +56 -0
- aethergraph/services/continuations/factory.py +34 -0
- aethergraph/services/continuations/stores/fs_store.py +264 -0
- aethergraph/services/continuations/stores/inmem_store.py +95 -0
- aethergraph/services/eventbus/inmem.py +21 -0
- aethergraph/services/features/static.py +10 -0
- aethergraph/services/kv/ephemeral.py +90 -0
- aethergraph/services/kv/factory.py +27 -0
- aethergraph/services/kv/layered.py +41 -0
- aethergraph/services/kv/sqlite_kv.py +128 -0
- aethergraph/services/llm/factory.py +157 -0
- aethergraph/services/llm/generic_client.py +542 -0
- aethergraph/services/llm/providers.py +3 -0
- aethergraph/services/llm/service.py +105 -0
- aethergraph/services/logger/base.py +36 -0
- aethergraph/services/logger/compat.py +50 -0
- aethergraph/services/logger/formatters.py +106 -0
- aethergraph/services/logger/std.py +203 -0
- aethergraph/services/mcp/helpers.py +23 -0
- aethergraph/services/mcp/http_client.py +70 -0
- aethergraph/services/mcp/mcp_tools.py +21 -0
- aethergraph/services/mcp/registry.py +14 -0
- aethergraph/services/mcp/service.py +100 -0
- aethergraph/services/mcp/stdio_client.py +70 -0
- aethergraph/services/mcp/ws_client.py +115 -0
- aethergraph/services/memory/bound.py +106 -0
- aethergraph/services/memory/distillers/episode.py +116 -0
- aethergraph/services/memory/distillers/rolling.py +74 -0
- aethergraph/services/memory/facade.py +633 -0
- aethergraph/services/memory/factory.py +78 -0
- aethergraph/services/memory/hotlog_kv.py +27 -0
- aethergraph/services/memory/indices.py +74 -0
- aethergraph/services/memory/io_helpers.py +72 -0
- aethergraph/services/memory/persist_fs.py +40 -0
- aethergraph/services/memory/resolver.py +152 -0
- aethergraph/services/metering/noop.py +4 -0
- aethergraph/services/prompts/file_store.py +41 -0
- aethergraph/services/rag/chunker.py +29 -0
- aethergraph/services/rag/facade.py +593 -0
- aethergraph/services/rag/index/base.py +27 -0
- aethergraph/services/rag/index/faiss_index.py +121 -0
- aethergraph/services/rag/index/sqlite_index.py +134 -0
- aethergraph/services/rag/index_factory.py +52 -0
- aethergraph/services/rag/parsers/md.py +7 -0
- aethergraph/services/rag/parsers/pdf.py +14 -0
- aethergraph/services/rag/parsers/txt.py +7 -0
- aethergraph/services/rag/utils/hybrid.py +39 -0
- aethergraph/services/rag/utils/make_fs_key.py +62 -0
- aethergraph/services/redactor/simple.py +16 -0
- aethergraph/services/registry/key_parsing.py +44 -0
- aethergraph/services/registry/registry_key.py +19 -0
- aethergraph/services/registry/unified_registry.py +185 -0
- aethergraph/services/resume/multi_scheduler_resume_bus.py +65 -0
- aethergraph/services/resume/router.py +73 -0
- aethergraph/services/schedulers/registry.py +41 -0
- aethergraph/services/secrets/base.py +7 -0
- aethergraph/services/secrets/env.py +8 -0
- aethergraph/services/state_stores/externalize.py +135 -0
- aethergraph/services/state_stores/graph_observer.py +131 -0
- aethergraph/services/state_stores/json_store.py +67 -0
- aethergraph/services/state_stores/resume_policy.py +119 -0
- aethergraph/services/state_stores/serialize.py +249 -0
- aethergraph/services/state_stores/utils.py +91 -0
- aethergraph/services/state_stores/validate.py +78 -0
- aethergraph/services/tracing/noop.py +18 -0
- aethergraph/services/waits/wait_registry.py +91 -0
- aethergraph/services/wakeup/memory_queue.py +57 -0
- aethergraph/services/wakeup/scanner_producer.py +56 -0
- aethergraph/services/wakeup/worker.py +31 -0
- aethergraph/tools/__init__.py +25 -0
- aethergraph/utils/optdeps.py +8 -0
- aethergraph-0.1.0a1.dist-info/METADATA +410 -0
- aethergraph-0.1.0a1.dist-info/RECORD +182 -0
- aethergraph-0.1.0a1.dist-info/WHEEL +5 -0
- aethergraph-0.1.0a1.dist-info/entry_points.txt +2 -0
- aethergraph-0.1.0a1.dist-info/licenses/LICENSE +176 -0
- aethergraph-0.1.0a1.dist-info/licenses/NOTICE +31 -0
- aethergraph-0.1.0a1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,411 @@
|
|
|
1
|
+
from datetime import datetime, timedelta
|
|
2
|
+
import functools
|
|
3
|
+
import inspect
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from aethergraph.services.continuations.continuation import Continuation
|
|
7
|
+
|
|
8
|
+
from ..graph.graph_refs import RESERVED_INJECTABLES # {"context", "resume", "self"}
|
|
9
|
+
from ..graph.task_node import NodeStatus, TaskNodeRuntime
|
|
10
|
+
from ..runtime.execution_context import ExecutionContext
|
|
11
|
+
from ..runtime.node_context import NodeContext
|
|
12
|
+
from .retry_policy import RetryPolicy
|
|
13
|
+
from .step_result import StepResult
|
|
14
|
+
from .wait_types import WaitRequested
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
async def maybe_await(func, *args, **kwargs):
    """Call *func* and await the result when necessary.

    ``inspect.iscoroutinefunction`` alone misses async callables hidden behind
    ``functools.partial`` or objects with an async ``__call__``; for those the
    original check would hand back an un-awaited coroutine. We therefore also
    await any awaitable result.

    Args:
        func: The callable to invoke (sync or async).
        *args: Positional arguments forwarded to ``func``.
        **kwargs: Keyword arguments forwarded to ``func``.

    Returns:
        The result of ``func``, awaited if it was awaitable.
    """
    if inspect.iscoroutinefunction(func):
        return await func(*args, **kwargs)
    result = func(*args, **kwargs)
    if inspect.isawaitable(result):
        return await result
    return result
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _normalize_result(res):
|
|
24
|
+
if res is None:
|
|
25
|
+
return {}
|
|
26
|
+
if isinstance(res, dict):
|
|
27
|
+
return res
|
|
28
|
+
if isinstance(res, tuple):
|
|
29
|
+
return {f"out{i}": v for i, v in enumerate(res)}
|
|
30
|
+
return {"result": res}
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _waiting_status(kind: str) -> str:
    """Map a wait kind to the corresponding WAITING_* node status.

    An empty/None kind falls back to WAITING_EXTERNAL.
    """
    if not kind:
        return NodeStatus.WAITING_EXTERNAL
    return NodeStatus.from_kind(kind)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def unwrap_callable(fn):
    """Peel wrapper layers off a callable and return the innermost one.

    Layers handled, checked in this order on every pass:
    - ``__aether_impl__`` attributes left by @tool / @waitable_tool decorators
    - ``functools.partial`` objects (unwrapped to ``.func``)
    - bound methods (unwrapped to ``.__func__``)

    A set of already-visited object ids guards against self-referential
    wrapper chains, so unwrapping always terminates.

    Args:
        fn: The callable to unwrap.

    Returns:
        The innermost callable (or *fn* itself if nothing matched).
    """
    visited = set()
    while id(fn) not in visited:
        visited.add(id(fn))
        if hasattr(fn, "__aether_impl__"):
            fn = fn.__aether_impl__
        elif isinstance(fn, functools.partial):
            fn = fn.func
        elif inspect.ismethod(fn):
            fn = fn.__func__
        else:
            break
    return fn
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def _flatten_inputs(resolved_inputs: dict[str, Any]) -> dict[str, Any]:
|
|
74
|
+
"""Copy, then expand nested 'kwargs' dict into top-level keys."""
|
|
75
|
+
out = dict(resolved_inputs) if resolved_inputs else {}
|
|
76
|
+
nested = out.pop("kwargs", None)
|
|
77
|
+
if isinstance(nested, dict):
|
|
78
|
+
# only fill missing keys to let explicit top-level override nested
|
|
79
|
+
for k, v in nested.items():
|
|
80
|
+
out.setdefault(k, v)
|
|
81
|
+
return out
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def build_call_kwargs(
    logic_fn,
    resolved_inputs: dict[str, Any],
    *,
    node_ctx: NodeContext,
    runtime_ctx: "ExecutionContext" = None,
) -> dict[str, Any]:
    """Build the kwargs used to invoke a logic function.

    Steps:
    - flatten resolved_inputs (expand a nested 'kwargs' dict)
    - inject framework arguments by name ('context', 'resume')
    - drop arguments the function does not accept (unless it takes **kwargs)
    - validate that all required parameters are provided

    NOTE: the input context is the full ExecutionContext, not a limited
    NodeContext. The 'context' value injected into the returned kwargs is a
    NodeContext, used when the logic function declares a 'context' parameter.

    Args:
        logic_fn: The callable whose signature drives kwarg selection.
        resolved_inputs: Graph-resolved inputs for the node.
        node_ctx: NodeContext injected as 'context' when requested.
        runtime_ctx: Full ExecutionContext; supplies the resume payload.

    Returns:
        A dict of kwargs ready for ``logic_fn(**kwargs)``.

    Raises:
        RuntimeError: If node_ctx or runtime_ctx is missing.
        TypeError: If required arguments are missing after merging.
    """
    if runtime_ctx is None or node_ctx is None:
        raise RuntimeError("build_call_kwargs: node_ctx and runtime_ctx are required")

    sig = inspect.signature(logic_fn)
    params = sig.parameters
    has_var_kw = any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values())

    flat = _flatten_inputs(resolved_inputs)
    # _flatten_inputs expands only one level; a 'kwargs' key re-introduced by
    # the nested dict itself is expanded here as well.
    if "kwargs" in flat and isinstance(flat["kwargs"], dict):
        flat = {**flat, **flat["kwargs"]}
        flat.pop("kwargs", None)

    # Framework injectables (authoritative: they override user-supplied values)
    inject_pool = {
        "context": node_ctx,  # always NodeContext
        "resume": getattr(runtime_ctx, "resume_payload", None),
    }

    merged = dict(flat)
    for k in RESERVED_INJECTABLES:
        if k == "self":
            continue
        if k in params or has_var_kw:
            merged[k] = inject_pool.get(k)

    if not has_var_kw:
        # No **kwargs on the callee: keep only declared parameters.
        merged = {k: v for k, v in merged.items() if k in params}
    merged.pop("self", None)
    merged.pop("kwargs", None)

    # Required = parameters without defaults that must be supplied by keyword.
    required = [
        name
        for name, p in params.items()
        if name != "self"
        and p.default is inspect.Parameter.empty  # public API (was private inspect._empty)
        and p.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)
    ]
    missing = [k for k in required if k not in merged]
    if missing:
        raise TypeError(
            f"{getattr(logic_fn, '__name__', type(logic_fn).__name__)} missing required arguments: {missing}. "
            f"Provided keys: {sorted(merged.keys())}"
        )
    return merged
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
async def step_forward(
    *, node: "TaskNodeRuntime", ctx: "ExecutionContext", retry_policy: "RetryPolicy"
) -> StepResult:
    """
    Execute one node forward:
    - resolve & inject kwargs (node/context/memory/logger/resume)
    - await async logic
    - apply should_run gate
    - route subgraph to a dedicated handler (NotImplemented here)
    - distinguish waits vs failures
    - persist Continuation on wait (token, deadline/poll, channel)
    Returns a StepResult; the runner is responsible for mutating node state.

    Args:
        node: Runtime node to execute.
        ctx: Execution context providing logic lookup, input resolution,
            node-context creation, and (optionally) a logger factory.
        retry_policy: Decides whether a raised exception schedules a retry.
    """
    # Best-effort per-node logger; stays None when no factory is configured.
    lg = None
    if getattr(ctx, "logger_factory", None) and hasattr(ctx.logger_factory, "for_node_ctx"):
        lg = ctx.logger_factory.for_node_ctx(
            run_id=ctx.run_id, node_id=node.node_id, graph_id=getattr(ctx, "graph_id", None)
        )
    attempts = getattr(node, "attempts", 0)

    # Strip decorator/partial/bound-method wrappers to reach the raw callable.
    logic_fn = unwrap_callable(ctx.get_logic(node.logic))

    # Resolve graph inputs; a resolution failure fails the node immediately.
    try:
        resolved_inputs = await ctx.resolve_inputs(node)
    except Exception as e:
        if lg:
            lg.exception("input resolution error")
        return StepResult(status=NodeStatus.FAILED, error=e)

    # should_run gate: if the gate itself raises, default to running the node.
    should = True
    if hasattr(ctx, "should_run") and callable(ctx.should_run):
        try:
            should = (
                await ctx.should_run(node, resolved_inputs)
                if inspect.iscoroutinefunction(ctx.should_run)
                else ctx.should_run(node, resolved_inputs)
            )
        except Exception as e:
            if lg:
                lg.warning(f"should_run raised {e!r}; defaulting to run=True")
    if not should:
        # Falls back to the string "SKIPPED" if NodeStatus lacks that member.
        return StepResult(
            status=getattr(NodeStatus, "SKIPPED", "SKIPPED"), outputs={"skipped": True}
        )

    # create NodeContext once
    node_ctx = ctx.create_node_context(node)

    # Build kwargs with node_ctx as 'context' and the full ctx as 'runtime'
    kwargs = build_call_kwargs(
        logic_fn,
        resolved_inputs=resolved_inputs,
        node_ctx=node_ctx,  # <-- pass node_ctx explicitly for convenience
        runtime_ctx=ctx,  # <-- pass runtime explicitly to resolve resume payload
    )
    try:
        # Await coroutine functions and objects with an async __call__;
        # call plain sync callables directly.
        result = (
            await logic_fn(**kwargs)
            if inspect.iscoroutinefunction(logic_fn)
            or (callable(logic_fn) and inspect.iscoroutinefunction(logic_fn.__call__))
            else logic_fn(**kwargs)
        )

        outputs = _normalize_result(result)
        if lg:
            lg.info("done")
        return StepResult(status=NodeStatus.DONE, outputs=outputs)

    except WaitRequested as w:
        # persist a Continuation and return StepResult with WAITING_*
        if lg:
            lg.info("wait requested: %s", getattr(w, "kind", None))
        return await _enter_wait(
            node=node, ctx=ctx, node_ctx=node_ctx, lg=lg, spec=w.to_dict(), attempts=attempts
        )

    except Exception as e:
        if lg:
            lg.exception("tool error")
        # NOTE(review): on a retryable error only node.attempts is bumped and
        # FAILED is still returned — presumably the scheduler re-queues FAILED
        # nodes with attempts < max_attempts; confirm against the scheduler.
        if attempts < retry_policy.max_attempts and retry_policy.should_retry(e):
            backoff = retry_policy.backoff(attempts)
            if lg:
                lg.warning(f"retry scheduled in {backoff}")
            node.attempts = attempts + 1
        return StepResult(status=NodeStatus.FAILED, error=e)
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
# ---- wait path ---------------------------------------------------------------
|
|
242
|
+
def _parse_deadline(deadline: Any, now_fn) -> datetime | None:
|
|
243
|
+
if not deadline:
|
|
244
|
+
return None
|
|
245
|
+
if isinstance(deadline, datetime):
|
|
246
|
+
return deadline
|
|
247
|
+
try:
|
|
248
|
+
return datetime.fromisoformat(deadline)
|
|
249
|
+
except Exception:
|
|
250
|
+
# allow "in N seconds" style if ever passed
|
|
251
|
+
try:
|
|
252
|
+
sec = int(deadline)
|
|
253
|
+
return now_fn() + timedelta(seconds=sec)
|
|
254
|
+
except Exception:
|
|
255
|
+
return None
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def normalize_wait_spec(spec: dict[str, Any], *, node_ctx: "NodeContext") -> dict[str, Any]:
    """Normalize a WaitRequested spec into the canonical dict used by
    channel/continuation code.

    Accepted (WaitSpec-shaped) input fields:
    - kind: str e.g. "approval" | "user_input" | "human" | "robot" | "external" | "time" | "event" | ...
    - prompt: str | dict
    - resume_schema: dict
    - channel: str | None (it may be None)
    - deadline: datetime | str (ISO) | int (seconds from now)
    - poll: dict

    Guarantees on the returned dict:
    - kind: str (default "external")
    - prompt: str | dict | None
    - resume_schema: dict | None
    - channel: str (default from node_ctx or "console:stdin")
    - deadline: datetime | None
    - poll: dict | None (a copy — the caller's dict is never mutated)

    NOTE: in channel, we only allow kind to be "approval" or "user_input" for
    external interaction. Other kinds simply push a notification without
    expecting a user response.
    """
    from datetime import datetime, timezone

    out = dict(spec or {})
    out["kind"] = out.get("kind") or "external"
    out["prompt"] = out.get("prompt")
    out["resume_schema"] = out.get("resume_schema")

    # Channel resolution via node_ctx; a dict-valued channel is treated as unset.
    ch = out.get("channel")
    if isinstance(ch, dict):
        ch = None
    if not ch:
        ch = node_ctx.channel()._resolve_default_key() or "console:stdin"
    out["channel"] = ch

    # Deadline: prefer the node context clock, falling back to UTC now.
    now_fn = getattr(node_ctx, "_now", None)
    if now_fn is None:

        def now_fn():
            return datetime.now(timezone.utc)

    out["deadline"] = _parse_deadline(out.get("deadline"), now_fn)

    # Poll: copy before coercing interval_sec. dict(spec) above is a shallow
    # copy, so writing into the original poll dict would mutate the caller's
    # nested data — that was a real side-effect bug.
    poll = out.get("poll")
    if poll:
        poll = dict(poll)
        try:
            poll["interval_sec"] = int(poll.get("interval_sec", 30))
        except Exception:
            poll["interval_sec"] = 30
    out["poll"] = poll
    return out
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
async def _enter_wait(
    *, node, ctx, node_ctx, lg, spec: dict[str, Any], attempts: int
) -> StepResult:
    """Persist a Continuation for a requested wait and build the WAITING_* result.

    Flow: normalize the wait spec, record it on the node state, load-or-mint a
    Continuation (reusing an existing token when the spec carries one),
    schedule the next wakeup, persist, optionally dispatch an inline resume,
    and notify the channel unless the tool already did.

    Args:
        node: Runtime node entering the wait; its state.wait_spec is updated.
        ctx: Execution context (services, clock, resume router, channels).
        node_ctx: NodeContext used to resolve channel/clock defaults.
        lg: Optional per-node logger (may be None).
        spec: Raw wait spec from WaitRequested.to_dict().
        attempts: Current attempt count, stored on the continuation.
    """
    spec = normalize_wait_spec(spec, node_ctx=node_ctx)

    # 1) Reuse token if present
    token = spec.get("token")
    store = ctx.services.continuation_store

    # Add wait spec in node state for reference -> This has not been used anywhere yet,
    # We need save it with TaskGraph when state changes to WAITING_*
    node.state.wait_spec = {
        "kind": spec["kind"],  # "text" | "approval" | "files" | ...
        "channel": spec.get("channel"),
        "prompt": spec.get("prompt"),
        "options": spec.get("options"),
        "meta": spec.get("meta", {}),
    }

    # Look up an existing continuation by token; any store error is treated
    # as "not found" and falls through to minting a fresh one.
    cont = None
    if token:
        try:
            cont = await store.get_by_token(token)
        except Exception:
            cont = None

    if cont is None:
        # fall back to minting (legacy path)
        token = token or await store.mint_token(ctx.run_id, node.node_id, attempts)
        cont = Continuation(
            run_id=ctx.run_id,
            node_id=node.node_id,
            kind=spec["kind"],
            token=token,
            prompt=spec.get("prompt"),
            resume_schema=spec.get("resume_schema"),
            channel=spec["channel"],
            deadline=spec.get("deadline"),
            poll=spec.get("poll"),
            next_wakeup_at=None,
            created_at=ctx.now(),
            attempts=attempts,
        )
    else:
        # update mutable fields on the reused continuation
        cont.kind = spec.get("kind", cont.kind)
        cont.prompt = spec.get("prompt", cont.prompt)
        cont.resume_schema = spec.get("resume_schema", cont.resume_schema)
        cont.channel = spec.get("channel", cont.channel)
        cont.deadline = spec.get("deadline", cont.deadline)
        cont.poll = spec.get("poll", cont.poll)
        cont.attempts = attempts

    # schedule next wakeup: poll interval takes priority over a hard deadline
    if cont.poll and "interval_sec" in cont.poll:
        from datetime import timedelta

        cont.next_wakeup_at = ctx.now() + timedelta(seconds=int(cont.poll["interval_sec"]))
    elif cont.deadline:
        cont.next_wakeup_at = cont.deadline
    else:
        cont.next_wakeup_at = None

    # persist (create or update)
    await store.save(cont)

    # 2) If inline payload was captured during setup, resume immediately;
    # a failed inline resume is logged and the normal notify path continues.
    inline = spec.get("inline_payload")
    if inline is not None:
        try:
            await ctx.resume_router.resume(cont.run_id, cont.node_id, cont.token, inline)
            if lg:
                lg.debug("inline resume dispatched for token=%s", cont.token)
            # No need to notify again
            return StepResult(
                status=_waiting_status(cont.kind),
                continuation=cont,
                next_wakeup_at=cont.next_wakeup_at,
            )
        except Exception as e:
            if lg:
                lg.warning(f"inline resume failed: {e!r}; will proceed without it")

    # 3) Notify only if the tool hasn't already done it; notify failures are
    # logged but do not fail the wait.
    if not spec.get("notified", False):
        try:
            await ctx.channels.notify(cont)
            if lg:
                lg.debug("notified channel=%s", cont.channel)
        except Exception as e:
            if lg:
                lg.error(f"notify failed: {e}")

    return StepResult(
        status=_waiting_status(cont.kind),
        continuation=cont,
        next_wakeup_at=cont.next_wakeup_at,
    )
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from aethergraph.contracts.services.artifacts import Artifact
|
|
8
|
+
from aethergraph.services.continuations.continuation import Continuation
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass
class StepResult:
    """Outcome of executing one node step.

    Produced by step_forward; the runner (not this type) is responsible for
    mutating node state based on it.
    """

    status: str  # NodeStatus value (DONE / FAILED / WAITING_* / SKIPPED)
    outputs: dict[str, Any] | None = None  # outputs if completed
    artifacts: list[Artifact] = field(default_factory=list)  # artifacts produced this step
    # step_forward passes the raised exception object here, not just a message.
    error: Exception | str | None = None  # error if failed
    continuation: Continuation | None = None  # continuation if waiting
    next_wakeup_at: datetime | None = None  # next wakeup time (for time-based waits)
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
_WAIT_KEY = "__wait__"
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
|
|
11
|
+
class WaitSpec:
|
|
12
|
+
kind: str = "external" # "human" | "ask_text" | "external" | "time" | "event" | ... This is more generic than channel wait kinds
|
|
13
|
+
prompt: dict[str, Any] | str | None = None # for human/robot
|
|
14
|
+
resume_schema: dict[str, Any] | None = None # for human/robot validation
|
|
15
|
+
channel: str | None = None # for external/event
|
|
16
|
+
deadline: datetime | str | None = None # ISO timestamp or datetime
|
|
17
|
+
poll: dict[str, Any] | None = (
|
|
18
|
+
None # {"interval_sec": 30, "endpoint": "...", "extract": "$.path"}
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
# resume handles
|
|
22
|
+
token: str | None = None # internal opaque continuation id (do NOT expose to untrusted clients)
|
|
23
|
+
resume_key: str | None = None # short alias safe to surface in UI/buttons
|
|
24
|
+
notified: bool = False # internal flag: whether continuation notification has been sent out
|
|
25
|
+
inline_payload: dict[str, Any] | None = (
|
|
26
|
+
None # internal: optional inline payload returned from notification step
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
# Optional grab-bag for extensions; avoids new fields churn later
|
|
30
|
+
meta: dict[str, Any] = field(default_factory=dict)
|
|
31
|
+
|
|
32
|
+
def to_dict(self) -> dict[str, Any]:
|
|
33
|
+
# Only include non-None fields to preserve backward compatibility with consumers
|
|
34
|
+
d = {
|
|
35
|
+
"kind": self.kind,
|
|
36
|
+
"prompt": self.prompt,
|
|
37
|
+
"resume_schema": self.resume_schema,
|
|
38
|
+
"channel": self.channel,
|
|
39
|
+
"deadline": self.deadline,
|
|
40
|
+
"poll": self.poll,
|
|
41
|
+
"token": self.token,
|
|
42
|
+
"resume_key": self.resume_key,
|
|
43
|
+
"notified": self.notified,
|
|
44
|
+
"inline_payload": self.inline_payload,
|
|
45
|
+
"meta": self.meta or None,
|
|
46
|
+
}
|
|
47
|
+
return {k: v for k, v in d.items() if v is not None}
|
|
48
|
+
|
|
49
|
+
def sanitized_for_transport(self) -> dict[str, Any]:
|
|
50
|
+
"""
|
|
51
|
+
Strip sensitive fields for UI/adapters/webhooks.
|
|
52
|
+
Prefer exposing `resume_key` (short alias) over raw `token`.
|
|
53
|
+
"""
|
|
54
|
+
d = self.to_dict()
|
|
55
|
+
d.pop("token", None)
|
|
56
|
+
return d
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def wait_sentinel(spec: WaitSpec | dict[str, Any]) -> dict[str, Any]:
    """Return the canonical sentinel the executor understands as 'please wait'."""
    payload = spec if isinstance(spec, dict) else spec.__dict__
    return {_WAIT_KEY: payload}
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class WaitRequested(RuntimeError):
    """Raised by a tool to signal that it wants to suspend and wait."""

    def __init__(self, spec: dict[str, Any]):
        super().__init__(f"Wait requested: {spec}")
        self.spec = spec

    def to_dict(self):
        """Return the wait spec as a plain dict."""
        if isinstance(self.spec, dict):
            return self.spec
        return self.spec.to_dict()
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Iterable
|
|
4
|
+
from contextlib import contextmanager
|
|
5
|
+
from contextvars import ContextVar
|
|
6
|
+
import itertools
|
|
7
|
+
from typing import Any
|
|
8
|
+
import uuid
|
|
9
|
+
|
|
10
|
+
from .graph_refs import GRAPH_INPUTS_NODE_ID, RESERVED_INJECTABLES
|
|
11
|
+
from .graph_spec import TaskGraphSpec
|
|
12
|
+
from .node_spec import TaskNodeSpec
|
|
13
|
+
from .task_graph import TaskGraph
|
|
14
|
+
|
|
15
|
+
_GRAPH_CTX: ContextVar[GraphBuilder | None] = ContextVar("_GRAPH_CTX", default=None) # Async-safe
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def current_builder() -> GraphBuilder | None:
    """Return the GraphBuilder bound to the current (async-safe) context, or None."""
    active = _GRAPH_CTX.get()
    return active
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class GraphBuilder:
    """Incrementally assembles a TaskGraphSpec/TaskGraph (builder pattern)."""

    # Class-wide monotonically increasing counter for auto-generated node ids.
    _auto_counter = itertools.count(1)
|
|
25
|
+
def __init__(self, *, name: str = "default_graph"):
|
|
26
|
+
self.spec = TaskGraphSpec(graph_id=name, nodes={}, meta={})
|
|
27
|
+
self.graph = TaskGraph(spec=self.spec)
|
|
28
|
+
self.graph.ensure_inputs_node()
|
|
29
|
+
|
|
30
|
+
self._auto_counter_by_logic = {} # logic_name -> counter
|
|
31
|
+
|
|
32
|
+
# index for quick lookup
|
|
33
|
+
self._alias_index: dict[str, str] = {} # alias -> node_id
|
|
34
|
+
self._logic_index: dict[str, list[str]] = {} # logic -> [node_id, ...]
|
|
35
|
+
self._label_index: dict[str, list[str]] = {} # label -> {node_id, ...}
|
|
36
|
+
|
|
37
|
+
    def add_node(self, node_spec: TaskNodeSpec) -> GraphBuilder:
        """Register a pre-built node spec on the graph.

        Args:
            node_spec: Fully-specified node; its node_id must be unique
                within this graph.

        Returns:
            The builder itself, enabling call chaining. (The previous
            ``-> str`` annotation was wrong: the method returns ``self``.)

        Raises:
            ValueError: If a node with the same id is already registered.
        """
        if node_spec.node_id in self.spec.nodes:
            raise ValueError(
                f"Node ID '{node_spec.node_id}' already exists in graph '{self.spec.graph_id}'"
            )
        self.spec.nodes[node_spec.node_id] = node_spec
        return self
|
|
44
|
+
|
|
45
|
+
def add_tool_node(
|
|
46
|
+
self,
|
|
47
|
+
*,
|
|
48
|
+
node_id: str,
|
|
49
|
+
logic: str,
|
|
50
|
+
inputs: dict,
|
|
51
|
+
expected_input_keys: Iterable[str] | None = None,
|
|
52
|
+
expected_output_keys: Iterable[str] | None = None,
|
|
53
|
+
after: Iterable[str] | None = None,
|
|
54
|
+
inject: list[str] | None = None,
|
|
55
|
+
tool_name: str | None = None,
|
|
56
|
+
tool_version: str | None = None,
|
|
57
|
+
) -> GraphBuilder:
|
|
58
|
+
"""Add a tool node to the graph."""
|
|
59
|
+
|
|
60
|
+
if node_id in self.spec.nodes:
|
|
61
|
+
raise ValueError(f"Node with id {node_id} already exists in the graph.")
|
|
62
|
+
|
|
63
|
+
# Initialize injection and pure input mappings. Injection is for reserved keywords that should be passed from the context.
|
|
64
|
+
deps = set(after or [])
|
|
65
|
+
inject = inject or []
|
|
66
|
+
pure_inputs = {}
|
|
67
|
+
has_arg = False
|
|
68
|
+
|
|
69
|
+
for k, v in list(inputs.items()):
|
|
70
|
+
if k in RESERVED_INJECTABLES:
|
|
71
|
+
inject.append(k)
|
|
72
|
+
else:
|
|
73
|
+
pure_inputs[k] = v
|
|
74
|
+
|
|
75
|
+
# infer dependencies from input Refs
|
|
76
|
+
def _walk_refs(x):
|
|
77
|
+
# Recursively walk input bindings to find Ref dependencies
|
|
78
|
+
nonlocal has_arg
|
|
79
|
+
if isinstance(x, dict):
|
|
80
|
+
if x.get("_type") == "ref" and "from" in x:
|
|
81
|
+
yield x["from"]
|
|
82
|
+
elif x.get("_type") == "arg":
|
|
83
|
+
has_arg = True
|
|
84
|
+
else:
|
|
85
|
+
for v in x.values():
|
|
86
|
+
yield from _walk_refs(v)
|
|
87
|
+
elif isinstance(x, list | tuple):
|
|
88
|
+
for v in x:
|
|
89
|
+
yield from _walk_refs(v)
|
|
90
|
+
|
|
91
|
+
deps = set(_walk_refs(pure_inputs))
|
|
92
|
+
if has_arg:
|
|
93
|
+
deps.add(GRAPH_INPUTS_NODE_ID) # ensure inputs node is a dependency
|
|
94
|
+
if after:
|
|
95
|
+
for a in after:
|
|
96
|
+
deps.add(a.node_id if hasattr(a, "node_id") else a)
|
|
97
|
+
|
|
98
|
+
node = TaskNodeSpec(
|
|
99
|
+
node_id=node_id,
|
|
100
|
+
type="tool",
|
|
101
|
+
logic=logic,
|
|
102
|
+
inputs=inputs,
|
|
103
|
+
dependencies=list(deps),
|
|
104
|
+
expected_input_keys=expected_input_keys,
|
|
105
|
+
expected_output_keys=expected_output_keys,
|
|
106
|
+
metadata={},
|
|
107
|
+
tool_name=tool_name or logic or "unknown_tool",
|
|
108
|
+
tool_version=tool_version, # could be set to a version string if available
|
|
109
|
+
)
|
|
110
|
+
return self.add_node(node)
|
|
111
|
+
|
|
112
|
+
def ensure_inputs_node(self):
|
|
113
|
+
"""Ensure the special inputs node exists in the graph."""
|
|
114
|
+
if GRAPH_INPUTS_NODE_ID not in self.spec.nodes:
|
|
115
|
+
self.spec.nodes[GRAPH_INPUTS_NODE_ID] = TaskNodeSpec(
|
|
116
|
+
node_id=GRAPH_INPUTS_NODE_ID,
|
|
117
|
+
type="inputs",
|
|
118
|
+
logic=None,
|
|
119
|
+
inputs={},
|
|
120
|
+
dependencies=[],
|
|
121
|
+
expected_input_keys=[],
|
|
122
|
+
expected_output_keys=[],
|
|
123
|
+
metadata={"synthetic": True},
|
|
124
|
+
)
|
|
125
|
+
return self
|
|
126
|
+
|
|
127
|
+
def freeze(self) -> TaskGraphSpec:
|
|
128
|
+
"""Frozen dataclass / validate topo order"""
|
|
129
|
+
return self.spec
|
|
130
|
+
|
|
131
|
+
def expose(self, name: str, value: Any):
|
|
132
|
+
self.graph.expose(name, value)
|
|
133
|
+
|
|
134
|
+
# ---- ids and utils ----
|
|
135
|
+
def next_id(self, logic_name: str | None = None) -> str:
|
|
136
|
+
"""Generate a unique node ID."""
|
|
137
|
+
base = (logic_name or "node").rstrip("_")
|
|
138
|
+
return f"{base}_{next(self._auto_counter)}_{uuid.uuid4().hex[:6]}"
|
|
139
|
+
|
|
140
|
+
def _next_readable_id(self, logic_name: str | None = None) -> str:
|
|
141
|
+
"""Generate a more human-readable node ID, but may not be unique."""
|
|
142
|
+
n = self._auto_counter_by_logic.get(logic_name, 0) + 1
|
|
143
|
+
self._auto_counter_by_logic[logic_name] = n
|
|
144
|
+
return f"{logic_name}_{n}" # deterministic and readable
|
|
145
|
+
|
|
146
|
+
def to_graph(self) -> TaskGraph:
|
|
147
|
+
self.graph.spec.metadata["graph_io"] = self.graph.io_signature()
|
|
148
|
+
return self.graph
|
|
149
|
+
|
|
150
|
+
def register_alias(self, alias: str, node_id: str):
|
|
151
|
+
if alias in self._alias_index and self._alias_index[alias] != node_id:
|
|
152
|
+
raise ValueError(
|
|
153
|
+
f"Alias '{alias}' already registered for node '{self._alias_index[alias]}', cannot re-register for '{node_id}'"
|
|
154
|
+
)
|
|
155
|
+
self._alias_index[alias] = node_id
|
|
156
|
+
|
|
157
|
+
def register_logic_name(self, logic_name: str, node_id: str):
|
|
158
|
+
self._logic_index.setdefault(logic_name, []).append(node_id)
|
|
159
|
+
|
|
160
|
+
def register_labels(self, labels: Iterable[str], node_id: str):
|
|
161
|
+
for label in labels or []:
|
|
162
|
+
self._label_index.setdefault(label, set()).add(node_id)
|
|
163
|
+
|
|
164
|
+
# ergonomic accessors
|
|
165
|
+
def find_by_alias(self, alias: str) -> str | None:
|
|
166
|
+
return self._alias_index.get(alias)
|
|
167
|
+
|
|
168
|
+
def find_by_logic(self, logic_prefix: str) -> list[str]:
|
|
169
|
+
exact = self._logic_index.get(logic_prefix, [])
|
|
170
|
+
if exact:
|
|
171
|
+
return list(exact)
|
|
172
|
+
# fuzzy match: logic_name contained in key
|
|
173
|
+
out = []
|
|
174
|
+
for k, v in self._logic_index.items():
|
|
175
|
+
if k.startswith(logic_prefix):
|
|
176
|
+
out.extend(v)
|
|
177
|
+
return out
|
|
178
|
+
|
|
179
|
+
def find_by_label(self, label: str) -> list[str]:
|
|
180
|
+
return sorted(self._label_index.get(label, set()))
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
@contextmanager
def graph(*, name: str = "default_graph"):
    """Open a graph-building scope, yielding the TaskGraph of a fresh GraphBuilder.

    While the `with` body runs, the builder is published through the
    async-safe context variable so current_builder() resolves to it; on exit
    the graph's runtime nodes are reified and the context variable restored.
    """
    gb = GraphBuilder(name=name)
    ctx_token = _GRAPH_CTX.set(gb)
    try:
        yield gb.graph
    finally:
        gb.graph.__post_init__()  # reify runtime nodes before leaving scope
        _GRAPH_CTX.reset(ctx_token)
|