AbstractRuntime 0.0.0__py3-none-any.whl → 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractruntime/__init__.py +104 -2
- abstractruntime/core/__init__.py +19 -0
- abstractruntime/core/models.py +239 -0
- abstractruntime/core/policy.py +166 -0
- abstractruntime/core/runtime.py +581 -0
- abstractruntime/core/spec.py +53 -0
- abstractruntime/identity/__init__.py +7 -0
- abstractruntime/identity/fingerprint.py +57 -0
- abstractruntime/integrations/__init__.py +11 -0
- abstractruntime/integrations/abstractcore/__init__.py +43 -0
- abstractruntime/integrations/abstractcore/effect_handlers.py +89 -0
- abstractruntime/integrations/abstractcore/factory.py +150 -0
- abstractruntime/integrations/abstractcore/llm_client.py +296 -0
- abstractruntime/integrations/abstractcore/logging.py +27 -0
- abstractruntime/integrations/abstractcore/tool_executor.py +89 -0
- abstractruntime/scheduler/__init__.py +13 -0
- abstractruntime/scheduler/convenience.py +324 -0
- abstractruntime/scheduler/registry.py +101 -0
- abstractruntime/scheduler/scheduler.py +431 -0
- abstractruntime/storage/__init__.py +25 -0
- abstractruntime/storage/artifacts.py +488 -0
- abstractruntime/storage/base.py +107 -0
- abstractruntime/storage/in_memory.py +119 -0
- abstractruntime/storage/json_files.py +208 -0
- abstractruntime/storage/ledger_chain.py +153 -0
- abstractruntime/storage/snapshots.py +217 -0
- abstractruntime-0.0.1.dist-info/METADATA +163 -0
- abstractruntime-0.0.1.dist-info/RECORD +30 -0
- {abstractruntime-0.0.0.dist-info → abstractruntime-0.0.1.dist-info}/licenses/LICENSE +3 -1
- abstractruntime-0.0.0.dist-info/METADATA +0 -89
- abstractruntime-0.0.0.dist-info/RECORD +0 -5
- {abstractruntime-0.0.0.dist-info → abstractruntime-0.0.1.dist-info}/WHEEL +0 -0
abstractruntime/__init__.py
CHANGED
|
@@ -1,8 +1,110 @@
|
|
|
1
1
|
"""
|
|
2
2
|
AbstractRuntime
|
|
3
3
|
|
|
4
|
-
|
|
5
|
-
|
|
4
|
+
Durable graph runner (interrupt → checkpoint → resume).
|
|
5
|
+
|
|
6
|
+
This package provides a minimal execution substrate:
|
|
7
|
+
- workflow graphs (state machines)
|
|
8
|
+
- durable RunState with WAITING / RESUME semantics
|
|
9
|
+
- append-only execution journal (ledger)
|
|
10
|
+
|
|
11
|
+
Higher-level orchestration and UI graph authoring is expected to live in AbstractFlow.
|
|
6
12
|
"""
|
|
7
13
|
|
|
14
|
+
from .core.models import (
|
|
15
|
+
Effect,
|
|
16
|
+
EffectType,
|
|
17
|
+
RunState,
|
|
18
|
+
RunStatus,
|
|
19
|
+
StepPlan,
|
|
20
|
+
WaitReason,
|
|
21
|
+
WaitState,
|
|
22
|
+
)
|
|
23
|
+
from .core.runtime import Runtime
|
|
24
|
+
from .core.spec import WorkflowSpec
|
|
25
|
+
from .core.policy import (
|
|
26
|
+
EffectPolicy,
|
|
27
|
+
DefaultEffectPolicy,
|
|
28
|
+
RetryPolicy,
|
|
29
|
+
NoRetryPolicy,
|
|
30
|
+
compute_idempotency_key,
|
|
31
|
+
)
|
|
32
|
+
from .storage.base import QueryableRunStore
|
|
33
|
+
from .storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
|
|
34
|
+
from .storage.json_files import JsonFileRunStore, JsonlLedgerStore
|
|
35
|
+
from .storage.ledger_chain import HashChainedLedgerStore, verify_ledger_chain
|
|
36
|
+
from .storage.snapshots import Snapshot, SnapshotStore, InMemorySnapshotStore, JsonSnapshotStore
|
|
37
|
+
from .storage.artifacts import (
|
|
38
|
+
Artifact,
|
|
39
|
+
ArtifactMetadata,
|
|
40
|
+
ArtifactStore,
|
|
41
|
+
InMemoryArtifactStore,
|
|
42
|
+
FileArtifactStore,
|
|
43
|
+
artifact_ref,
|
|
44
|
+
is_artifact_ref,
|
|
45
|
+
get_artifact_id,
|
|
46
|
+
resolve_artifact,
|
|
47
|
+
compute_artifact_id,
|
|
48
|
+
)
|
|
49
|
+
from .identity.fingerprint import ActorFingerprint
|
|
50
|
+
from .scheduler import (
|
|
51
|
+
WorkflowRegistry,
|
|
52
|
+
Scheduler,
|
|
53
|
+
SchedulerStats,
|
|
54
|
+
ScheduledRuntime,
|
|
55
|
+
create_scheduled_runtime,
|
|
56
|
+
)
|
|
57
|
+
|
|
58
|
+
__all__ = [
|
|
59
|
+
# Core models
|
|
60
|
+
"Effect",
|
|
61
|
+
"EffectType",
|
|
62
|
+
"RunState",
|
|
63
|
+
"RunStatus",
|
|
64
|
+
"StepPlan",
|
|
65
|
+
"WaitReason",
|
|
66
|
+
"WaitState",
|
|
67
|
+
# Spec + runtime
|
|
68
|
+
"WorkflowSpec",
|
|
69
|
+
"Runtime",
|
|
70
|
+
# Scheduler
|
|
71
|
+
"WorkflowRegistry",
|
|
72
|
+
"Scheduler",
|
|
73
|
+
"SchedulerStats",
|
|
74
|
+
"ScheduledRuntime",
|
|
75
|
+
"create_scheduled_runtime",
|
|
76
|
+
# Storage backends
|
|
77
|
+
"QueryableRunStore",
|
|
78
|
+
"InMemoryRunStore",
|
|
79
|
+
"InMemoryLedgerStore",
|
|
80
|
+
"JsonFileRunStore",
|
|
81
|
+
"JsonlLedgerStore",
|
|
82
|
+
"HashChainedLedgerStore",
|
|
83
|
+
"verify_ledger_chain",
|
|
84
|
+
"Snapshot",
|
|
85
|
+
"SnapshotStore",
|
|
86
|
+
"InMemorySnapshotStore",
|
|
87
|
+
"JsonSnapshotStore",
|
|
88
|
+
# Artifacts
|
|
89
|
+
"Artifact",
|
|
90
|
+
"ArtifactMetadata",
|
|
91
|
+
"ArtifactStore",
|
|
92
|
+
"InMemoryArtifactStore",
|
|
93
|
+
"FileArtifactStore",
|
|
94
|
+
"artifact_ref",
|
|
95
|
+
"is_artifact_ref",
|
|
96
|
+
"get_artifact_id",
|
|
97
|
+
"resolve_artifact",
|
|
98
|
+
"compute_artifact_id",
|
|
99
|
+
# Identity
|
|
100
|
+
"ActorFingerprint",
|
|
101
|
+
# Effect policies
|
|
102
|
+
"EffectPolicy",
|
|
103
|
+
"DefaultEffectPolicy",
|
|
104
|
+
"RetryPolicy",
|
|
105
|
+
"NoRetryPolicy",
|
|
106
|
+
"compute_idempotency_key",
|
|
107
|
+
]
|
|
108
|
+
|
|
109
|
+
|
|
8
110
|
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""Core runtime primitives."""
|
|
2
|
+
|
|
3
|
+
from .models import Effect, EffectType, RunState, RunStatus, StepPlan, WaitReason, WaitState
|
|
4
|
+
from .runtime import Runtime
|
|
5
|
+
from .spec import WorkflowSpec
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"Effect",
|
|
9
|
+
"EffectType",
|
|
10
|
+
"RunState",
|
|
11
|
+
"RunStatus",
|
|
12
|
+
"StepPlan",
|
|
13
|
+
"WaitReason",
|
|
14
|
+
"WaitState",
|
|
15
|
+
"WorkflowSpec",
|
|
16
|
+
"Runtime",
|
|
17
|
+
]
|
|
18
|
+
|
|
19
|
+
|
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
"""abstractruntime.core.models
|
|
2
|
+
|
|
3
|
+
Core data model for AbstractRuntime (v0.1).
|
|
4
|
+
|
|
5
|
+
Design intent:
|
|
6
|
+
- Keep everything JSON-serializable (durable execution)
|
|
7
|
+
- Separate *what to do* (Effect) from *how to do it* (EffectHandler)
|
|
8
|
+
- Represent long pauses explicitly (WaitState), never by keeping Python stacks alive
|
|
9
|
+
|
|
10
|
+
We intentionally keep this module dependency-light (stdlib only).
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
from dataclasses import dataclass, field
|
|
16
|
+
from datetime import datetime, timezone
|
|
17
|
+
from enum import Enum
|
|
18
|
+
from typing import Any, Dict, List, Optional
|
|
19
|
+
import uuid
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def utc_now() -> datetime:
    """Return the current moment as a timezone-aware UTC datetime."""
    return datetime.now(tz=timezone.utc)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class RunStatus(str, Enum):
    """Lifecycle status of a whole workflow run (stored on RunState.status).

    Mixes in ``str`` so values serialize directly as plain JSON strings.
    """

    RUNNING = "running"
    WAITING = "waiting"  # durably paused; see WaitState for how to resume
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class WaitReason(str, Enum):
    """Why a run is durably paused (recorded on WaitState.reason)."""

    EVENT = "event"  # arbitrary external signal
    UNTIL = "until"  # time-based
    USER = "user"  # human-in-the-loop
    JOB = "job"  # external job completion
    SUBWORKFLOW = "subworkflow"  # waiting for child workflow
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class EffectType(str, Enum):
    """Side-effects a node can request.

    Mixes in ``str`` so values serialize directly as plain JSON strings.
    """

    # Pure waiting primitives
    WAIT_EVENT = "wait_event"  # pause until an external signal arrives
    WAIT_UNTIL = "wait_until"  # pause until a point in time
    ASK_USER = "ask_user"  # pause for human-in-the-loop input

    # Integrations (implemented via pluggable handlers)
    LLM_CALL = "llm_call"
    TOOL_CALLS = "tool_calls"

    # Composition
    START_SUBWORKFLOW = "start_subworkflow"  # launch a child workflow run
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
@dataclass(frozen=True)
class Effect:
    """A request for an external side-effect.

    Notes:
    - Effects must be serializable (payload is JSON-like).
    - `result_key` specifies where the effect result is stored in run state variables.
    """

    # Which kind of side-effect is requested.
    type: EffectType
    # JSON-like arguments interpreted by the effect's handler.
    payload: Dict[str, Any] = field(default_factory=dict)
    # Optional key in RunState.vars where the effect result is stored.
    result_key: Optional[str] = None
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
@dataclass(frozen=True)
class StepPlan:
    """What the runtime should do next for a node."""

    # Node this plan was produced for.
    node_id: str
    # Side-effect to execute, if any.
    effect: Optional[Effect] = None
    # Node to transition to next, if any.
    next_node: Optional[str] = None

    # If set, the runtime completes the run immediately.
    complete_output: Optional[Dict[str, Any]] = None
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
@dataclass
class WaitState:
    """Represents a durable pause.

    The run can be resumed by calling `resume(run_id, event)`.

    - For EVENT/USER/JOB: `wait_key` identifies which event unblocks the run.
    - For UNTIL: `until` specifies when the run can continue.

    `resume_to_node` defines where execution continues after resume.
    `result_key` tells where to store the resume payload.
    """

    # Why the run is paused (see WaitReason).
    reason: WaitReason
    # Identifier of the event that unblocks EVENT/USER/JOB waits.
    wait_key: Optional[str] = None
    until: Optional[str] = None  # ISO timestamp

    # Where execution continues after resume, and which RunState.vars key
    # receives the resume payload.
    resume_to_node: Optional[str] = None
    result_key: Optional[str] = None

    # Presentation metadata for human-in-the-loop waits: question text,
    # optional fixed choices, and whether a free-text answer is accepted.
    prompt: Optional[str] = None
    choices: Optional[List[str]] = None
    allow_free_text: bool = True

    # Optional structured details for non-user waits (e.g. tool passthrough).
    # Must be JSON-serializable.
    details: Optional[Dict[str, Any]] = None
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
@dataclass
class RunState:
    """Durable state for a workflow run.

    Every field is JSON-serializable so the run can be checkpointed and
    later resumed (see module docstring).
    """

    # Unique run identifier (uuid4; see `new`).
    run_id: str
    # Identifier of the workflow graph this run executes.
    workflow_id: str
    status: RunStatus
    # Node the run is currently at (or paused on).
    current_node: str

    # Workflow variables, including stored effect results (see Effect.result_key).
    vars: Dict[str, Any] = field(default_factory=dict)

    # Describes how to resume when the run is paused (see WaitState).
    waiting: Optional[WaitState] = None
    # Final output once the run completes (see StepPlan.complete_output).
    output: Optional[Dict[str, Any]] = None
    # Failure description when the run fails.
    error: Optional[str] = None

    # ISO-8601 UTC timestamps.
    created_at: str = field(default_factory=lambda: utc_now().isoformat())
    updated_at: str = field(default_factory=lambda: utc_now().isoformat())

    # Optional provenance fields
    actor_id: Optional[str] = None
    parent_run_id: Optional[str] = None  # For subworkflow tracking

    @classmethod
    def new(cls, *, workflow_id: str, entry_node: str, actor_id: Optional[str] = None, vars: Optional[Dict[str, Any]] = None, parent_run_id: Optional[str] = None) -> "RunState":
        """Create a fresh RUNNING run positioned at `entry_node`.

        A uuid4 `run_id` is generated; `vars` defaults to an empty dict.
        """
        return cls(
            run_id=str(uuid.uuid4()),
            workflow_id=workflow_id,
            status=RunStatus.RUNNING,
            current_node=entry_node,
            vars=vars or {},
            actor_id=actor_id,
            parent_run_id=parent_run_id,
        )
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
class StepStatus(str, Enum):
    """Outcome of a single ledger step (stored on StepRecord.status)."""

    STARTED = "started"
    COMPLETED = "completed"
    WAITING = "waiting"
    FAILED = "failed"
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
@dataclass
class StepRecord:
    """One append-only entry in the execution journal (ledger).

    A record is opened via `start` in STARTED state and closed exactly once
    through one of the `finish_*` methods, which set a terminal status and
    stamp `ended_at`.
    """

    run_id: str
    step_id: str
    node_id: str
    status: StepStatus

    effect: Optional[Dict[str, Any]] = None
    result: Optional[Dict[str, Any]] = None
    error: Optional[str] = None

    started_at: str = field(default_factory=lambda: utc_now().isoformat())
    ended_at: Optional[str] = None

    # Optional provenance/integrity
    actor_id: Optional[str] = None

    # Retry and idempotency fields
    attempt: int = 1  # Current attempt number (1-indexed)
    idempotency_key: Optional[str] = None  # For deduplication on restart

    # Tamper-evident chain fields (optional in v0.1; filled by a chained LedgerStore).
    prev_hash: Optional[str] = None
    record_hash: Optional[str] = None
    signature: Optional[str] = None

    @classmethod
    def start(
        cls,
        *,
        run: RunState,
        node_id: str,
        effect: Optional[Effect],
        attempt: int = 1,
        idempotency_key: Optional[str] = None,
    ) -> "StepRecord":
        """Open a STARTED ledger entry for one node execution of `run`."""
        # Serialize the effect (if any) to a JSON-friendly dict for the journal.
        effect_doc: Optional[Dict[str, Any]] = None
        if effect is not None:
            effect_doc = {
                "type": effect.type.value,
                "payload": effect.payload,
                "result_key": effect.result_key,
            }
        return cls(
            run_id=run.run_id,
            step_id=str(uuid.uuid4()),
            node_id=node_id,
            status=StepStatus.STARTED,
            effect=effect_doc,
            actor_id=run.actor_id,
            attempt=attempt,
            idempotency_key=idempotency_key,
        )

    def _close(self, status: StepStatus) -> "StepRecord":
        # Shared epilogue: set the terminal status and stamp the end time.
        self.status = status
        self.ended_at = utc_now().isoformat()
        return self

    def finish_success(self, result: Optional[Dict[str, Any]] = None) -> "StepRecord":
        """Close this entry as COMPLETED with an optional result payload."""
        self.result = result
        return self._close(StepStatus.COMPLETED)

    def finish_waiting(self, wait_state: WaitState) -> "StepRecord":
        """Close this entry as WAITING, journaling the wait parameters."""
        wait_doc: Dict[str, Any] = {
            "reason": wait_state.reason.value,
            "wait_key": wait_state.wait_key,
            "until": wait_state.until,
            "resume_to_node": wait_state.resume_to_node,
            "result_key": wait_state.result_key,
            # Optional fields for richer audit/debugging
            "prompt": wait_state.prompt,
            "choices": wait_state.choices,
            "allow_free_text": wait_state.allow_free_text,
            "details": wait_state.details,
        }
        self.result = {"wait": wait_doc}
        return self._close(StepStatus.WAITING)

    def finish_failure(self, error: str) -> "StepRecord":
        """Close this entry as FAILED with an error description."""
        self.error = error
        return self._close(StepStatus.FAILED)
|
|
238
|
+
|
|
239
|
+
|
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
"""abstractruntime.core.policy
|
|
2
|
+
|
|
3
|
+
Effect execution policies for retry and idempotency.
|
|
4
|
+
|
|
5
|
+
Policies control:
|
|
6
|
+
- How many times to retry a failed effect
|
|
7
|
+
- Backoff timing between retries
|
|
8
|
+
- Idempotency keys for deduplication on crash recovery
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import hashlib
|
|
14
|
+
import json
|
|
15
|
+
from dataclasses import dataclass
|
|
16
|
+
from typing import Any, Dict, Optional, Protocol
|
|
17
|
+
|
|
18
|
+
from .models import Effect, RunState
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class EffectPolicy(Protocol):
    """Protocol for effect execution policies.

    Implementations control retry behavior and idempotency.
    `DefaultEffectPolicy` is the standard concrete implementation.
    """

    def max_attempts(self, effect: Effect) -> int:
        """Maximum number of attempts for an effect.

        Args:
            effect: The effect being executed.

        Returns:
            Maximum attempts (1 = no retries, 2 = one retry, etc.)
        """
        ...

    def backoff_seconds(self, *, effect: Effect, attempt: int) -> float:
        """Seconds to wait before retry.

        Args:
            effect: The effect being retried.
            attempt: Current attempt number (1-indexed).

        Returns:
            Seconds to wait before next attempt.
        """
        ...

    def idempotency_key(
        self, *, run: RunState, node_id: str, effect: Effect
    ) -> str:
        """Compute idempotency key for an effect.

        Effects with the same idempotency key are considered duplicates.
        If a prior completed result exists for this key, it will be reused.

        Args:
            run: Current run state.
            node_id: Current node ID.
            effect: The effect being executed.

        Returns:
            Idempotency key string.
        """
        ...
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@dataclass
class DefaultEffectPolicy:
    """Default effect policy with configurable retry and idempotency.

    Attributes:
        default_max_attempts: Default max attempts for all effects.
        default_backoff_base: Base backoff in seconds (exponential).
        default_backoff_max: Maximum backoff in seconds.
        effect_max_attempts: Per-effect-type max attempts override,
            keyed by EffectType value (e.g. "llm_call"). May be passed as
            None; normalized to an empty dict in __post_init__.
    """

    default_max_attempts: int = 1  # No retries by default
    default_backoff_base: float = 1.0
    default_backoff_max: float = 60.0
    # A mutable `{}` default is not allowed on a dataclass field, so None is
    # the sentinel and __post_init__ normalizes it. The annotation is now
    # honest (previously `Dict[str, int] = None` with a `# type: ignore`).
    effect_max_attempts: Optional[Dict[str, int]] = None

    def __post_init__(self) -> None:
        # Normalize so lookups in max_attempts never need a None check.
        if self.effect_max_attempts is None:
            self.effect_max_attempts = {}

    def max_attempts(self, effect: Effect) -> int:
        """Get max attempts for an effect type."""
        effect_type = effect.type.value
        return self.effect_max_attempts.get(effect_type, self.default_max_attempts)

    def backoff_seconds(self, *, effect: Effect, attempt: int) -> float:
        """Exponential backoff capped at max."""
        # Exponential: base * 2^(attempt-1), capped at max
        delay = self.default_backoff_base * (2 ** (attempt - 1))
        return min(delay, self.default_backoff_max)

    def idempotency_key(
        self, *, run: RunState, node_id: str, effect: Effect
    ) -> str:
        """Compute idempotency key from run_id, node_id, and effect.

        The key is a hash of:
        - run_id: Unique to this run
        - node_id: Current node
        - effect type and payload: What we're doing

        This ensures the same effect at the same point in the same run
        gets the same key, enabling deduplication on restart.
        """
        key_data = {
            "run_id": run.run_id,
            "node_id": node_id,
            "effect_type": effect.type.value,
            "effect_payload": effect.payload,
        }
        # Canonical JSON (sorted keys, compact separators) so the hash is
        # stable across processes and restarts.
        key_json = json.dumps(key_data, sort_keys=True, separators=(",", ":"))
        return hashlib.sha256(key_json.encode()).hexdigest()[:32]
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class RetryPolicy(DefaultEffectPolicy):
    """Policy with retries enabled for LLM and tool calls."""

    def __init__(
        self,
        *,
        llm_max_attempts: int = 3,
        tool_max_attempts: int = 2,
        backoff_base: float = 1.0,
        backoff_max: float = 30.0,
    ):
        # Only LLM and tool-call effects get retries; every other effect
        # type keeps a single attempt.
        per_effect_attempts = {
            "llm_call": llm_max_attempts,
            "tool_calls": tool_max_attempts,
        }
        super().__init__(
            default_max_attempts=1,
            default_backoff_base=backoff_base,
            default_backoff_max=backoff_max,
            effect_max_attempts=per_effect_attempts,
        )
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
class NoRetryPolicy(DefaultEffectPolicy):
    """Policy that never retries: every effect fails on its first error."""

    def __init__(self):
        # A single attempt for every effect type; no per-type overrides.
        super().__init__(default_max_attempts=1)
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def compute_idempotency_key(
    *, run_id: str, node_id: str, effect: Effect
) -> str:
    """Standalone function to compute idempotency key.

    Useful when you need to compute a key without a full policy.
    """
    # Canonical JSON (sorted keys, compact separators) keeps the hash stable
    # for identical inputs across processes.
    canonical = json.dumps(
        {
            "run_id": run_id,
            "node_id": node_id,
            "effect_type": effect.type.value,
            "effect_payload": effect.payload,
        },
        sort_keys=True,
        separators=(",", ":"),
    )
    return hashlib.sha256(canonical.encode()).hexdigest()[:32]
|