AbstractRuntime 0.0.0__py3-none-any.whl → 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractruntime/__init__.py +104 -2
- abstractruntime/core/__init__.py +19 -0
- abstractruntime/core/models.py +239 -0
- abstractruntime/core/policy.py +166 -0
- abstractruntime/core/runtime.py +581 -0
- abstractruntime/core/spec.py +53 -0
- abstractruntime/identity/__init__.py +7 -0
- abstractruntime/identity/fingerprint.py +57 -0
- abstractruntime/integrations/__init__.py +11 -0
- abstractruntime/integrations/abstractcore/__init__.py +43 -0
- abstractruntime/integrations/abstractcore/effect_handlers.py +89 -0
- abstractruntime/integrations/abstractcore/factory.py +150 -0
- abstractruntime/integrations/abstractcore/llm_client.py +296 -0
- abstractruntime/integrations/abstractcore/logging.py +27 -0
- abstractruntime/integrations/abstractcore/tool_executor.py +89 -0
- abstractruntime/scheduler/__init__.py +13 -0
- abstractruntime/scheduler/convenience.py +324 -0
- abstractruntime/scheduler/registry.py +101 -0
- abstractruntime/scheduler/scheduler.py +431 -0
- abstractruntime/storage/__init__.py +25 -0
- abstractruntime/storage/artifacts.py +488 -0
- abstractruntime/storage/base.py +107 -0
- abstractruntime/storage/in_memory.py +119 -0
- abstractruntime/storage/json_files.py +208 -0
- abstractruntime/storage/ledger_chain.py +153 -0
- abstractruntime/storage/snapshots.py +217 -0
- abstractruntime-0.0.1.dist-info/METADATA +163 -0
- abstractruntime-0.0.1.dist-info/RECORD +30 -0
- {abstractruntime-0.0.0.dist-info → abstractruntime-0.0.1.dist-info}/licenses/LICENSE +3 -1
- abstractruntime-0.0.0.dist-info/METADATA +0 -89
- abstractruntime-0.0.0.dist-info/RECORD +0 -5
- {abstractruntime-0.0.0.dist-info → abstractruntime-0.0.1.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,581 @@
|
|
|
1
|
+
"""abstractruntime.core.runtime
|
|
2
|
+
|
|
3
|
+
Minimal durable graph runner (v0.1).
|
|
4
|
+
|
|
5
|
+
Key semantics:
|
|
6
|
+
- `tick()` progresses a run until it blocks (WAITING) or completes.
|
|
7
|
+
- Blocking is represented by a persisted WaitState in RunState.
|
|
8
|
+
- `resume()` injects an external payload to unblock a waiting run.
|
|
9
|
+
|
|
10
|
+
Durability note:
|
|
11
|
+
This MVP persists checkpoints + a ledger, but does NOT attempt to implement
|
|
12
|
+
full Temporal-like replay/determinism guarantees.
|
|
13
|
+
|
|
14
|
+
We keep the design explicitly modular:
|
|
15
|
+
- stores: RunStore + LedgerStore
|
|
16
|
+
- effect handlers: pluggable registry
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
from dataclasses import dataclass
|
|
22
|
+
from datetime import datetime, timezone
|
|
23
|
+
from typing import Any, Callable, Dict, Optional
|
|
24
|
+
import inspect
|
|
25
|
+
|
|
26
|
+
from .models import (
|
|
27
|
+
Effect,
|
|
28
|
+
EffectType,
|
|
29
|
+
RunState,
|
|
30
|
+
RunStatus,
|
|
31
|
+
StepPlan,
|
|
32
|
+
StepRecord,
|
|
33
|
+
StepStatus,
|
|
34
|
+
WaitReason,
|
|
35
|
+
WaitState,
|
|
36
|
+
)
|
|
37
|
+
from .spec import WorkflowSpec
|
|
38
|
+
from .policy import DefaultEffectPolicy, EffectPolicy
|
|
39
|
+
from ..storage.base import LedgerStore, RunStore
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def utc_now_iso() -> str:
    """Return the current UTC wall-clock time as an ISO-8601 string."""
    now = datetime.now(timezone.utc)
    return now.isoformat()
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@dataclass
class DefaultRunContext:
    """Fallback RunContext used when the Runtime is constructed without an
    explicit `context`. Provides wall-clock time only."""

    def now_iso(self) -> str:
        """Return the current UTC time as an ISO-8601 string."""
        return utc_now_iso()
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
# NOTE:
# Effect handlers are given the node's `next_node` as `default_next_node` so that
# waiting effects (ask_user / wait_until / tool passthrough) can safely resume
# into the next node without forcing every node to duplicate `resume_to_node`
# into the effect payload.
# Signature: (run, effect, default_next_node) -> EffectOutcome.
EffectHandler = Callable[[RunState, Effect, Optional[str]], "EffectOutcome"]
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@dataclass(frozen=True)
class EffectOutcome:
    """Result of executing an effect.

    Instances should be built via the three factory classmethods below;
    `status` is one of "completed" | "waiting" | "failed", and exactly the
    matching optional field (result / wait / error) is populated.
    """

    status: str  # "completed" | "waiting" | "failed"
    result: Optional[Dict[str, Any]] = None  # set when status == "completed"
    wait: Optional[WaitState] = None  # set when status == "waiting"
    error: Optional[str] = None  # set when status == "failed"

    @classmethod
    def completed(cls, result: Optional[Dict[str, Any]] = None) -> "EffectOutcome":
        """Outcome for an effect that finished, optionally with a result dict."""
        return cls(status="completed", result=result)

    @classmethod
    def waiting(cls, wait: WaitState) -> "EffectOutcome":
        """Outcome for an effect that suspended the run with `wait`."""
        return cls(status="waiting", wait=wait)

    @classmethod
    def failed(cls, error: str) -> "EffectOutcome":
        """Outcome for an effect that failed with a diagnostic message."""
        return cls(status="failed", error=error)
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class Runtime:
|
|
83
|
+
"""Durable graph runner."""
|
|
84
|
+
|
|
85
|
+
    def __init__(
        self,
        *,
        run_store: RunStore,
        ledger_store: LedgerStore,
        effect_handlers: Optional[Dict[EffectType, EffectHandler]] = None,
        context: Optional[Any] = None,
        workflow_registry: Optional[Any] = None,
        artifact_store: Optional[Any] = None,
        effect_policy: Optional[EffectPolicy] = None,
    ):
        """Create a runtime bound to persistence stores.

        Args:
            run_store: Persists RunState checkpoints.
            ledger_store: Append-only store of step records.
            effect_handlers: Optional extra handlers; entries with the same
                EffectType override the built-in handlers.
            context: Dependency-injection surface handed to node handlers
                (defaults to DefaultRunContext).
            workflow_registry: Optional registry used by START_SUBWORKFLOW.
            artifact_store: Optional store for large payloads.
            effect_policy: Retry/idempotency policy (defaults to
                DefaultEffectPolicy).
        """
        self._run_store = run_store
        self._ledger_store = ledger_store
        self._ctx = context or DefaultRunContext()
        self._workflow_registry = workflow_registry
        self._artifact_store = artifact_store
        self._effect_policy: EffectPolicy = effect_policy or DefaultEffectPolicy()

        # Built-ins are registered first so caller-supplied handlers can
        # override them via the dict update below.
        self._handlers: Dict[EffectType, EffectHandler] = {}
        self._register_builtin_handlers()
        if effect_handlers:
            self._handlers.update(effect_handlers)
|
|
107
|
+
|
|
108
|
+
# ---------------------------------------------------------------------
|
|
109
|
+
# Public API
|
|
110
|
+
# ---------------------------------------------------------------------
|
|
111
|
+
|
|
112
|
+
    @property
    def run_store(self) -> RunStore:
        """The store holding persisted run checkpoints."""
        return self._run_store

    @property
    def ledger_store(self) -> LedgerStore:
        """The append-only step ledger store."""
        return self._ledger_store

    @property
    def workflow_registry(self) -> Optional[Any]:
        """Registry used to resolve subworkflows, or None when unset."""
        return self._workflow_registry

    def set_workflow_registry(self, registry: Any) -> None:
        """Install the registry that START_SUBWORKFLOW resolves against."""
        self._workflow_registry = registry

    @property
    def artifact_store(self) -> Optional[Any]:
        """Store for large payloads, or None when unset."""
        return self._artifact_store

    def set_artifact_store(self, store: Any) -> None:
        """Install the store used for large payload support."""
        self._artifact_store = store

    @property
    def effect_policy(self) -> EffectPolicy:
        """Policy controlling effect retries and idempotency keys."""
        return self._effect_policy

    def set_effect_policy(self, policy: EffectPolicy) -> None:
        """Replace the retry/idempotency policy."""
        self._effect_policy = policy
|
|
148
|
+
|
|
149
|
+
    def start(self, *, workflow: WorkflowSpec, vars: Optional[Dict[str, Any]] = None, actor_id: Optional[str] = None, parent_run_id: Optional[str] = None) -> str:
        """Create and persist a new run positioned at the workflow's entry node.

        The run is saved in its initial state but NOT executed; call `tick()`
        to advance it.

        Args:
            workflow: Spec providing workflow_id and entry_node.
            vars: Initial run variables.
            actor_id: Optional actor on whose behalf the run executes.
            parent_run_id: Optional parent run id (set for subworkflows).

        Returns:
            The new run's run_id.
        """
        run = RunState.new(workflow_id=workflow.workflow_id, entry_node=workflow.entry_node, vars=vars, actor_id=actor_id, parent_run_id=parent_run_id)
        self._run_store.save(run)
        return run.run_id
|
|
153
|
+
|
|
154
|
+
    def cancel_run(self, run_id: str, *, reason: Optional[str] = None) -> RunState:
        """Cancel a run.

        Sets the run status to CANCELLED. Only RUNNING or WAITING runs can be cancelled.
        COMPLETED, FAILED, or already CANCELLED runs are returned unchanged.

        Args:
            run_id: The run to cancel.
            reason: Optional cancellation reason (stored in error field).

        Returns:
            The updated RunState.

        Raises:
            KeyError: If run_id not found.
        """
        run = self.get_state(run_id)

        # Terminal states cannot be cancelled
        if run.status in (RunStatus.COMPLETED, RunStatus.FAILED, RunStatus.CANCELLED):
            return run

        run.status = RunStatus.CANCELLED
        run.error = reason or "Cancelled"
        # Drop any pending wait state so the run can no longer be resumed.
        run.waiting = None
        run.updated_at = utc_now_iso()
        self._run_store.save(run)
        return run
|
|
182
|
+
|
|
183
|
+
def get_state(self, run_id: str) -> RunState:
|
|
184
|
+
run = self._run_store.load(run_id)
|
|
185
|
+
if run is None:
|
|
186
|
+
raise KeyError(f"Unknown run_id: {run_id}")
|
|
187
|
+
return run
|
|
188
|
+
|
|
189
|
+
    def get_ledger(self, run_id: str) -> list[dict[str, Any]]:
        """Return the run's ledger records (as dicts), as listed by the store."""
        return self._ledger_store.list(run_id)
|
|
191
|
+
|
|
192
|
+
def tick(self, *, workflow: WorkflowSpec, run_id: str, max_steps: int = 100) -> RunState:
|
|
193
|
+
run = self.get_state(run_id)
|
|
194
|
+
if run.status in (RunStatus.COMPLETED, RunStatus.FAILED):
|
|
195
|
+
return run
|
|
196
|
+
if run.status == RunStatus.WAITING:
|
|
197
|
+
# For WAIT_UNTIL we can auto-unblock if time passed
|
|
198
|
+
if run.waiting and run.waiting.reason == WaitReason.UNTIL and run.waiting.until:
|
|
199
|
+
if utc_now_iso() >= run.waiting.until:
|
|
200
|
+
self._apply_resume_payload(run, payload={}, override_node=run.waiting.resume_to_node)
|
|
201
|
+
else:
|
|
202
|
+
return run
|
|
203
|
+
else:
|
|
204
|
+
return run
|
|
205
|
+
|
|
206
|
+
steps = 0
|
|
207
|
+
while steps < max_steps:
|
|
208
|
+
steps += 1
|
|
209
|
+
|
|
210
|
+
handler = workflow.get_node(run.current_node)
|
|
211
|
+
plan = handler(run, self._ctx)
|
|
212
|
+
|
|
213
|
+
# Completion
|
|
214
|
+
if plan.complete_output is not None:
|
|
215
|
+
run.status = RunStatus.COMPLETED
|
|
216
|
+
run.output = plan.complete_output
|
|
217
|
+
run.updated_at = utc_now_iso()
|
|
218
|
+
self._run_store.save(run)
|
|
219
|
+
# ledger: completion record (no effect)
|
|
220
|
+
rec = StepRecord.start(run=run, node_id=plan.node_id, effect=None)
|
|
221
|
+
rec.status = StepStatus.COMPLETED
|
|
222
|
+
rec.result = {"completed": True}
|
|
223
|
+
rec.ended_at = utc_now_iso()
|
|
224
|
+
self._ledger_store.append(rec)
|
|
225
|
+
return run
|
|
226
|
+
|
|
227
|
+
# Pure transition
|
|
228
|
+
if plan.effect is None:
|
|
229
|
+
if not plan.next_node:
|
|
230
|
+
raise ValueError(f"Node '{plan.node_id}' returned no effect and no next_node")
|
|
231
|
+
run.current_node = plan.next_node
|
|
232
|
+
run.updated_at = utc_now_iso()
|
|
233
|
+
self._run_store.save(run)
|
|
234
|
+
continue
|
|
235
|
+
|
|
236
|
+
# Effectful step - check for prior completed result (idempotency)
|
|
237
|
+
idempotency_key = self._effect_policy.idempotency_key(
|
|
238
|
+
run=run, node_id=plan.node_id, effect=plan.effect
|
|
239
|
+
)
|
|
240
|
+
prior_result = self._find_prior_completed_result(run.run_id, idempotency_key)
|
|
241
|
+
|
|
242
|
+
if prior_result is not None:
|
|
243
|
+
# Reuse prior result - skip re-execution
|
|
244
|
+
outcome = EffectOutcome.completed(prior_result)
|
|
245
|
+
else:
|
|
246
|
+
# Execute with retry logic
|
|
247
|
+
outcome = self._execute_effect_with_retry(
|
|
248
|
+
run=run,
|
|
249
|
+
node_id=plan.node_id,
|
|
250
|
+
effect=plan.effect,
|
|
251
|
+
idempotency_key=idempotency_key,
|
|
252
|
+
default_next_node=plan.next_node,
|
|
253
|
+
)
|
|
254
|
+
|
|
255
|
+
if outcome.status == "failed":
|
|
256
|
+
run.status = RunStatus.FAILED
|
|
257
|
+
run.error = outcome.error or "unknown error"
|
|
258
|
+
run.updated_at = utc_now_iso()
|
|
259
|
+
self._run_store.save(run)
|
|
260
|
+
return run
|
|
261
|
+
|
|
262
|
+
if outcome.status == "waiting":
|
|
263
|
+
assert outcome.wait is not None
|
|
264
|
+
run.status = RunStatus.WAITING
|
|
265
|
+
run.waiting = outcome.wait
|
|
266
|
+
run.updated_at = utc_now_iso()
|
|
267
|
+
self._run_store.save(run)
|
|
268
|
+
return run
|
|
269
|
+
|
|
270
|
+
# completed
|
|
271
|
+
if plan.effect.result_key and outcome.result is not None:
|
|
272
|
+
_set_nested(run.vars, plan.effect.result_key, outcome.result)
|
|
273
|
+
|
|
274
|
+
if not plan.next_node:
|
|
275
|
+
raise ValueError(f"Node '{plan.node_id}' executed effect but did not specify next_node")
|
|
276
|
+
run.current_node = plan.next_node
|
|
277
|
+
run.updated_at = utc_now_iso()
|
|
278
|
+
self._run_store.save(run)
|
|
279
|
+
|
|
280
|
+
return run
|
|
281
|
+
|
|
282
|
+
    def resume(self, *, workflow: WorkflowSpec, run_id: str, wait_key: Optional[str], payload: Dict[str, Any]) -> RunState:
        """Unblock a WAITING run with an external payload, then tick it.

        Args:
            workflow: Spec used to continue execution after unblocking.
            run_id: The waiting run.
            wait_key: Optional key to match against the run's WaitState; a
                mismatch is rejected. None skips the check.
            payload: External data; stored under the wait's result_key (if any).

        Returns:
            The RunState after ticking the resumed run.

        Raises:
            ValueError: If the run is not waiting, or wait_key mismatches.
            KeyError: If run_id is unknown.
        """
        run = self.get_state(run_id)
        if run.status != RunStatus.WAITING or run.waiting is None:
            raise ValueError("Run is not waiting")

        # Validate wait_key if provided
        if wait_key is not None and run.waiting.wait_key is not None and wait_key != run.waiting.wait_key:
            raise ValueError(f"wait_key mismatch: expected '{run.waiting.wait_key}', got '{wait_key}'")

        # Capture these before _apply_resume_payload clears run.waiting.
        resume_to = run.waiting.resume_to_node
        result_key = run.waiting.result_key

        if result_key:
            _set_nested(run.vars, result_key, payload)

        self._apply_resume_payload(run, payload=payload, override_node=resume_to)
        run.updated_at = utc_now_iso()
        self._run_store.save(run)

        return self.tick(workflow=workflow, run_id=run_id)
|
|
302
|
+
|
|
303
|
+
# ---------------------------------------------------------------------
|
|
304
|
+
# Internals
|
|
305
|
+
# ---------------------------------------------------------------------
|
|
306
|
+
|
|
307
|
+
    def _register_builtin_handlers(self) -> None:
        """Install the wait/ask/subworkflow handlers shipped with the runtime.

        Caller-supplied handlers (see __init__) may override any of these.
        """
        self._handlers[EffectType.WAIT_EVENT] = self._handle_wait_event
        self._handlers[EffectType.WAIT_UNTIL] = self._handle_wait_until
        self._handlers[EffectType.ASK_USER] = self._handle_ask_user
        self._handlers[EffectType.START_SUBWORKFLOW] = self._handle_start_subworkflow
|
|
312
|
+
|
|
313
|
+
def _find_prior_completed_result(
|
|
314
|
+
self, run_id: str, idempotency_key: str
|
|
315
|
+
) -> Optional[Dict[str, Any]]:
|
|
316
|
+
"""Find a prior completed result for an idempotency key.
|
|
317
|
+
|
|
318
|
+
Scans the ledger for a completed step with the same idempotency key.
|
|
319
|
+
Returns the result if found, None otherwise.
|
|
320
|
+
"""
|
|
321
|
+
records = self._ledger_store.list(run_id)
|
|
322
|
+
for record in records:
|
|
323
|
+
if record.get("idempotency_key") == idempotency_key:
|
|
324
|
+
if record.get("status") == StepStatus.COMPLETED.value:
|
|
325
|
+
return record.get("result")
|
|
326
|
+
return None
|
|
327
|
+
|
|
328
|
+
    def _execute_effect_with_retry(
        self,
        *,
        run: RunState,
        node_id: str,
        effect: Effect,
        idempotency_key: str,
        default_next_node: Optional[str],
    ) -> EffectOutcome:
        """Execute an effect with retry logic.

        Retries according to the effect policy. Records each attempt
        in the ledger with attempt number and idempotency key.
        "completed" and "waiting" outcomes return immediately; only failures
        (including exceptions raised by the handler) are retried, with a
        blocking sleep between attempts as dictated by the policy.
        """
        import time  # local import: only needed for retry backoff

        max_attempts = self._effect_policy.max_attempts(effect)
        last_error: Optional[str] = None

        for attempt in range(1, max_attempts + 1):
            # Record attempt start
            rec = StepRecord.start(
                run=run,
                node_id=node_id,
                effect=effect,
                attempt=attempt,
                idempotency_key=idempotency_key,
            )
            self._ledger_store.append(rec)

            # Execute the effect (catch exceptions as failures)
            try:
                outcome = self._execute_effect(run, effect, default_next_node)
            except Exception as e:
                outcome = EffectOutcome.failed(f"Effect handler raised exception: {e}")

            if outcome.status == "completed":
                rec.finish_success(outcome.result)
                self._ledger_store.append(rec)
                return outcome

            if outcome.status == "waiting":
                rec.finish_waiting(outcome.wait)
                self._ledger_store.append(rec)
                return outcome

            # Failed - record and maybe retry
            last_error = outcome.error or "unknown error"
            rec.finish_failure(last_error)
            self._ledger_store.append(rec)

            if attempt < max_attempts:
                # Wait before retry
                backoff = self._effect_policy.backoff_seconds(
                    effect=effect, attempt=attempt
                )
                if backoff > 0:
                    time.sleep(backoff)

        # All attempts exhausted
        return EffectOutcome.failed(
            f"Effect failed after {max_attempts} attempts: {last_error}"
        )
|
|
391
|
+
|
|
392
|
+
def _execute_effect(self, run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
|
|
393
|
+
if effect.type not in self._handlers:
|
|
394
|
+
return EffectOutcome.failed(f"No effect handler registered for {effect.type.value}")
|
|
395
|
+
handler = self._handlers[effect.type]
|
|
396
|
+
|
|
397
|
+
# Backward compatibility: allow older handlers with signature (run, effect).
|
|
398
|
+
# New handlers can accept (run, effect, default_next_node) to implement
|
|
399
|
+
# correct resume semantics for waiting effects without duplicating payload fields.
|
|
400
|
+
try:
|
|
401
|
+
sig = inspect.signature(handler)
|
|
402
|
+
params = list(sig.parameters.values())
|
|
403
|
+
has_varargs = any(p.kind == inspect.Parameter.VAR_POSITIONAL for p in params)
|
|
404
|
+
if has_varargs or len(params) >= 3:
|
|
405
|
+
return handler(run, effect, default_next_node)
|
|
406
|
+
return handler(run, effect)
|
|
407
|
+
except Exception:
|
|
408
|
+
# If signature inspection fails, fall back to attempting the new call form,
|
|
409
|
+
# then the legacy form.
|
|
410
|
+
try:
|
|
411
|
+
return handler(run, effect, default_next_node)
|
|
412
|
+
except TypeError:
|
|
413
|
+
return handler(run, effect)
|
|
414
|
+
|
|
415
|
+
    def _apply_resume_payload(self, run: RunState, *, payload: Dict[str, Any], override_node: Optional[str]) -> None:
        """Flip a run back to RUNNING and clear its wait state.

        NOTE(review): `payload` is not read here - the payload is written into
        run.vars by the caller (see resume()) before this is invoked. The
        parameter is kept for interface symmetry; confirm before removing.
        """
        run.status = RunStatus.RUNNING
        run.waiting = None
        # Only override the position when the wait recorded a resume target.
        if override_node:
            run.current_node = override_node
|
|
420
|
+
|
|
421
|
+
# Built-in wait handlers ------------------------------------------------
|
|
422
|
+
|
|
423
|
+
def _handle_wait_event(self, run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
|
|
424
|
+
wait_key = effect.payload.get("wait_key")
|
|
425
|
+
if not wait_key:
|
|
426
|
+
return EffectOutcome.failed("wait_event requires payload.wait_key")
|
|
427
|
+
resume_to = effect.payload.get("resume_to_node") or default_next_node
|
|
428
|
+
wait = WaitState(
|
|
429
|
+
reason=WaitReason.EVENT,
|
|
430
|
+
wait_key=str(wait_key),
|
|
431
|
+
resume_to_node=resume_to,
|
|
432
|
+
result_key=effect.result_key,
|
|
433
|
+
)
|
|
434
|
+
return EffectOutcome.waiting(wait)
|
|
435
|
+
|
|
436
|
+
    def _handle_wait_until(self, run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
        """Suspend the run until an ISO timestamp; completes immediately when
        the deadline has already passed."""
        until = effect.payload.get("until")
        if not until:
            return EffectOutcome.failed("wait_until requires payload.until (ISO timestamp)")

        resume_to = effect.payload.get("resume_to_node") or default_next_node
        # NOTE(review): lexicographic string comparison - only correct when
        # `until` uses the same ISO-8601 layout and UTC offset as
        # utc_now_iso(); a deadline with a different offset may compare wrong.
        if utc_now_iso() >= str(until):
            # immediate
            return EffectOutcome.completed({"until": str(until), "ready": True})

        wait = WaitState(
            reason=WaitReason.UNTIL,
            until=str(until),
            resume_to_node=resume_to,
            result_key=effect.result_key,
        )
        return EffectOutcome.waiting(wait)
|
|
453
|
+
|
|
454
|
+
def _handle_ask_user(self, run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
|
|
455
|
+
prompt = effect.payload.get("prompt")
|
|
456
|
+
if not prompt:
|
|
457
|
+
return EffectOutcome.failed("ask_user requires payload.prompt")
|
|
458
|
+
|
|
459
|
+
resume_to = effect.payload.get("resume_to_node") or default_next_node
|
|
460
|
+
wait_key = effect.payload.get("wait_key") or f"user:{run.run_id}:{run.current_node}"
|
|
461
|
+
choices = effect.payload.get("choices")
|
|
462
|
+
allow_free_text = bool(effect.payload.get("allow_free_text", True))
|
|
463
|
+
|
|
464
|
+
wait = WaitState(
|
|
465
|
+
reason=WaitReason.USER,
|
|
466
|
+
wait_key=str(wait_key),
|
|
467
|
+
resume_to_node=resume_to,
|
|
468
|
+
result_key=effect.result_key,
|
|
469
|
+
prompt=str(prompt),
|
|
470
|
+
choices=list(choices) if isinstance(choices, list) else None,
|
|
471
|
+
allow_free_text=allow_free_text,
|
|
472
|
+
)
|
|
473
|
+
return EffectOutcome.waiting(wait)
|
|
474
|
+
|
|
475
|
+
    def _handle_start_subworkflow(
        self, run: RunState, effect: Effect, default_next_node: Optional[str]
    ) -> EffectOutcome:
        """Handle START_SUBWORKFLOW effect.

        Payload:
            workflow_id: str - ID of the subworkflow to start (required)
            vars: dict - Initial variables for the subworkflow (optional)
            async: bool - If True, don't wait for completion (optional, default False)

        Sync mode (async=False):
            - Starts the subworkflow and runs it until completion or waiting
            - If subworkflow completes: returns its output
            - If subworkflow waits: parent also waits (WaitReason.SUBWORKFLOW)

        Async mode (async=True):
            - Starts the subworkflow and returns immediately
            - Returns {"sub_run_id": "..."} so parent can track it
        """
        workflow_id = effect.payload.get("workflow_id")
        if not workflow_id:
            return EffectOutcome.failed("start_subworkflow requires payload.workflow_id")

        if self._workflow_registry is None:
            return EffectOutcome.failed(
                "start_subworkflow requires a workflow_registry. "
                "Set it via Runtime(workflow_registry=...) or runtime.set_workflow_registry(...)"
            )

        # Look up the subworkflow
        sub_workflow = self._workflow_registry.get(workflow_id)
        if sub_workflow is None:
            return EffectOutcome.failed(f"Workflow '{workflow_id}' not found in registry")

        sub_vars = effect.payload.get("vars") or {}
        is_async = bool(effect.payload.get("async", False))
        resume_to = effect.payload.get("resume_to_node") or default_next_node

        # Start the subworkflow with parent tracking
        sub_run_id = self.start(
            workflow=sub_workflow,
            vars=sub_vars,
            actor_id=run.actor_id,  # Inherit actor from parent
            parent_run_id=run.run_id,  # Track parent for hierarchy
        )

        if is_async:
            # Async mode: return immediately with sub_run_id
            # The child is started but not ticked - caller is responsible for driving it
            return EffectOutcome.completed({"sub_run_id": sub_run_id, "async": True})

        # Sync mode: run the subworkflow until completion or waiting
        try:
            sub_state = self.tick(workflow=sub_workflow, run_id=sub_run_id)
        except Exception as e:
            # Child raised an exception - propagate as failure
            return EffectOutcome.failed(f"Subworkflow '{workflow_id}' failed: {e}")

        if sub_state.status == RunStatus.COMPLETED:
            # Subworkflow completed - return its output
            return EffectOutcome.completed({
                "sub_run_id": sub_run_id,
                "output": sub_state.output,
            })

        if sub_state.status == RunStatus.FAILED:
            # Subworkflow failed - propagate error
            return EffectOutcome.failed(
                f"Subworkflow '{workflow_id}' failed: {sub_state.error}"
            )

        if sub_state.status == RunStatus.WAITING:
            # Subworkflow is waiting - parent must also wait. The child's own
            # wait details are surfaced so callers can route the resume.
            wait = WaitState(
                reason=WaitReason.SUBWORKFLOW,
                wait_key=f"subworkflow:{sub_run_id}",
                resume_to_node=resume_to,
                result_key=effect.result_key,
                details={
                    "sub_run_id": sub_run_id,
                    "sub_workflow_id": workflow_id,
                    "sub_waiting": {
                        "reason": sub_state.waiting.reason.value if sub_state.waiting else None,
                        "wait_key": sub_state.waiting.wait_key if sub_state.waiting else None,
                    },
                },
            )
            return EffectOutcome.waiting(wait)

        # Unexpected status
        return EffectOutcome.failed(f"Unexpected subworkflow status: {sub_state.status.value}")
|
|
566
|
+
|
|
567
|
+
|
|
568
|
+
def _set_nested(target: Dict[str, Any], dotted_key: str, value: Any) -> None:
|
|
569
|
+
"""Set nested dict value using dot notation."""
|
|
570
|
+
|
|
571
|
+
parts = dotted_key.split(".")
|
|
572
|
+
cur: Dict[str, Any] = target
|
|
573
|
+
for p in parts[:-1]:
|
|
574
|
+
nxt = cur.get(p)
|
|
575
|
+
if not isinstance(nxt, dict):
|
|
576
|
+
nxt = {}
|
|
577
|
+
cur[p] = nxt
|
|
578
|
+
cur = nxt
|
|
579
|
+
cur[parts[-1]] = value
|
|
580
|
+
|
|
581
|
+
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""abstractruntime.core.spec
|
|
2
|
+
|
|
3
|
+
Workflow specification and node contract.
|
|
4
|
+
|
|
5
|
+
In v0.1 we keep the spec simple:
|
|
6
|
+
- The workflow is a graph: nodes identified by string IDs.
|
|
7
|
+
- Nodes are implemented as Python callables (handlers).
|
|
8
|
+
- This kernel does not define a UI DSL; AbstractFlow can produce specs.
|
|
9
|
+
|
|
10
|
+
Durability note:
|
|
11
|
+
- We persist *RunState* and a *ledger*.
|
|
12
|
+
- We assume the workflow spec + node handlers are available at resume time.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
from dataclasses import dataclass
|
|
18
|
+
from typing import Callable, Dict, Optional, Protocol
|
|
19
|
+
|
|
20
|
+
from .models import RunState, StepPlan
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class RunContext(Protocol):
    """Dependency injection surface for node handlers.

    This is intentionally small and can be extended later.
    """

    # Current wall-clock time as an ISO-8601 string (injected so handlers
    # can be tested with a fake clock).
    def now_iso(self) -> str: ...
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# A node handler maps (current run state, injected context) to a StepPlan.
NodeHandler = Callable[[RunState, RunContext], StepPlan]
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass(frozen=True)
class WorkflowSpec:
    """Immutable workflow graph: node handlers keyed by string node ids."""

    workflow_id: str  # stable identifier for this workflow
    entry_node: str  # node id where new runs begin
    nodes: Dict[str, NodeHandler]  # node id -> handler callable

    def get_node(self, node_id: str) -> NodeHandler:
        """Return the handler for `node_id`; unknown ids raise KeyError."""
        if node_id not in self.nodes:
            raise KeyError(f"Unknown node_id '{node_id}' in workflow '{self.workflow_id}'")
        return self.nodes[node_id]

    def is_terminal(self, node_id: str) -> bool:
        """A workflow is terminal when the node returns StepPlan.complete_output.

        The runtime decides termination based on StepPlan, not a dedicated node type.
        """

        return False  # evaluated at runtime
|
|
53
|
+
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""abstractruntime.identity.fingerprint
|
|
2
|
+
|
|
3
|
+
AI fingerprint / provenance (v0.1 - data model + hashing only).
|
|
4
|
+
|
|
5
|
+
Important:
|
|
6
|
+
- This does NOT implement cryptographic signatures yet (no non-forgeability).
|
|
7
|
+
- It provides a stable, deterministic *identifier* given stable inputs.
|
|
8
|
+
|
|
9
|
+
Backlog item will define:
|
|
10
|
+
- keypair-based identity (public key) and signing of ledger chains.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import hashlib
|
|
16
|
+
import json
|
|
17
|
+
from dataclasses import dataclass, asdict
|
|
18
|
+
from typing import Any, Dict, Optional
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _canonical_json(data: Dict[str, Any]) -> str:
|
|
22
|
+
return json.dumps(data, sort_keys=True, separators=(",", ":"), ensure_ascii=False)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def sha256_hex(text: str) -> str:
    """Hex-encoded SHA-256 digest of `text` (UTF-8 encoded)."""
    digest = hashlib.sha256(text.encode("utf-8"))
    return digest.hexdigest()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass(frozen=True)
class ActorFingerprint:
    """A stable identifier for an actor (agent/service/human).

    This is intentionally minimal. For accountability you typically want:
    - stable actor id
    - metadata about the owner/org
    - (future) signature key to make logs tamper-evident
    """

    actor_id: str
    kind: str  # "agent" | "human" | "service"
    display_name: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None

    @classmethod
    def from_metadata(cls, *, kind: str, display_name: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> "ActorFingerprint":
        """Build an actor whose id is derived deterministically from its attributes.

        Identical inputs always yield the same `ar_`-prefixed id, because the
        attributes are hashed in canonical JSON form.
        """
        meta = metadata or {}
        fingerprint_input = {
            "kind": kind,
            "display_name": display_name,
            "metadata": meta,
        }
        digest = sha256_hex(_canonical_json(fingerprint_input))
        return cls(
            actor_id=f"ar_{digest[:24]}",
            kind=kind,
            display_name=display_name,
            metadata=meta,
        )

    def to_dict(self) -> Dict[str, Any]:
        """Plain-dict view of all fields (JSON-friendly)."""
        return asdict(self)
|
|
56
|
+
|
|
57
|
+
|