AbstractRuntime 0.0.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. abstractruntime/__init__.py +104 -2
  2. abstractruntime/core/__init__.py +26 -0
  3. abstractruntime/core/config.py +101 -0
  4. abstractruntime/core/models.py +282 -0
  5. abstractruntime/core/policy.py +166 -0
  6. abstractruntime/core/runtime.py +736 -0
  7. abstractruntime/core/spec.py +53 -0
  8. abstractruntime/core/vars.py +94 -0
  9. abstractruntime/identity/__init__.py +7 -0
  10. abstractruntime/identity/fingerprint.py +57 -0
  11. abstractruntime/integrations/__init__.py +11 -0
  12. abstractruntime/integrations/abstractcore/__init__.py +47 -0
  13. abstractruntime/integrations/abstractcore/effect_handlers.py +119 -0
  14. abstractruntime/integrations/abstractcore/factory.py +187 -0
  15. abstractruntime/integrations/abstractcore/llm_client.py +397 -0
  16. abstractruntime/integrations/abstractcore/logging.py +27 -0
  17. abstractruntime/integrations/abstractcore/tool_executor.py +168 -0
  18. abstractruntime/scheduler/__init__.py +13 -0
  19. abstractruntime/scheduler/convenience.py +324 -0
  20. abstractruntime/scheduler/registry.py +101 -0
  21. abstractruntime/scheduler/scheduler.py +431 -0
  22. abstractruntime/storage/__init__.py +25 -0
  23. abstractruntime/storage/artifacts.py +519 -0
  24. abstractruntime/storage/base.py +107 -0
  25. abstractruntime/storage/in_memory.py +119 -0
  26. abstractruntime/storage/json_files.py +208 -0
  27. abstractruntime/storage/ledger_chain.py +153 -0
  28. abstractruntime/storage/snapshots.py +217 -0
  29. abstractruntime-0.2.0.dist-info/METADATA +163 -0
  30. abstractruntime-0.2.0.dist-info/RECORD +32 -0
  31. {abstractruntime-0.0.0.dist-info → abstractruntime-0.2.0.dist-info}/licenses/LICENSE +3 -1
  32. abstractruntime-0.0.0.dist-info/METADATA +0 -89
  33. abstractruntime-0.0.0.dist-info/RECORD +0 -5
  34. {abstractruntime-0.0.0.dist-info → abstractruntime-0.2.0.dist-info}/WHEEL +0 -0
@@ -1,8 +1,110 @@
  """
  AbstractRuntime

- Placeholder package for a durable graph runner (interrupt → checkpoint → resume).
- Implementation intentionally not included yet.
+ Durable graph runner (interrupt → checkpoint → resume).
+
+ This package provides a minimal execution substrate:
+ - workflow graphs (state machines)
+ - durable RunState with WAITING / RESUME semantics
+ - append-only execution journal (ledger)
+
+ Higher-level orchestration and UI graph authoring is expected to live in AbstractFlow.
  """

+ from .core.models import (
+     Effect,
+     EffectType,
+     RunState,
+     RunStatus,
+     StepPlan,
+     WaitReason,
+     WaitState,
+ )
+ from .core.runtime import Runtime
+ from .core.spec import WorkflowSpec
+ from .core.policy import (
+     EffectPolicy,
+     DefaultEffectPolicy,
+     RetryPolicy,
+     NoRetryPolicy,
+     compute_idempotency_key,
+ )
+ from .storage.base import QueryableRunStore
+ from .storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
+ from .storage.json_files import JsonFileRunStore, JsonlLedgerStore
+ from .storage.ledger_chain import HashChainedLedgerStore, verify_ledger_chain
+ from .storage.snapshots import Snapshot, SnapshotStore, InMemorySnapshotStore, JsonSnapshotStore
+ from .storage.artifacts import (
+     Artifact,
+     ArtifactMetadata,
+     ArtifactStore,
+     InMemoryArtifactStore,
+     FileArtifactStore,
+     artifact_ref,
+     is_artifact_ref,
+     get_artifact_id,
+     resolve_artifact,
+     compute_artifact_id,
+ )
+ from .identity.fingerprint import ActorFingerprint
+ from .scheduler import (
+     WorkflowRegistry,
+     Scheduler,
+     SchedulerStats,
+     ScheduledRuntime,
+     create_scheduled_runtime,
+ )
+
+ __all__ = [
+     # Core models
+     "Effect",
+     "EffectType",
+     "RunState",
+     "RunStatus",
+     "StepPlan",
+     "WaitReason",
+     "WaitState",
+     # Spec + runtime
+     "WorkflowSpec",
+     "Runtime",
+     # Scheduler
+     "WorkflowRegistry",
+     "Scheduler",
+     "SchedulerStats",
+     "ScheduledRuntime",
+     "create_scheduled_runtime",
+     # Storage backends
+     "QueryableRunStore",
+     "InMemoryRunStore",
+     "InMemoryLedgerStore",
+     "JsonFileRunStore",
+     "JsonlLedgerStore",
+     "HashChainedLedgerStore",
+     "verify_ledger_chain",
+     "Snapshot",
+     "SnapshotStore",
+     "InMemorySnapshotStore",
+     "JsonSnapshotStore",
+     # Artifacts
+     "Artifact",
+     "ArtifactMetadata",
+     "ArtifactStore",
+     "InMemoryArtifactStore",
+     "FileArtifactStore",
+     "artifact_ref",
+     "is_artifact_ref",
+     "get_artifact_id",
+     "resolve_artifact",
+     "compute_artifact_id",
+     # Identity
+     "ActorFingerprint",
+     # Effect policies
+     "EffectPolicy",
+     "DefaultEffectPolicy",
+     "RetryPolicy",
+     "NoRetryPolicy",
+     "compute_idempotency_key",
+ ]
+
+

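
For orientation, the re-exports above define the package's new public surface. A minimal, illustrative sketch of that surface follows (not part of the diff; it uses only names and signatures visible in the modules shown later in this diff):

# Illustrative sketch only — exercises the top-level re-exports shown above.
from abstractruntime import (
    Effect,
    EffectType,
    RunState,
    RunStatus,
    compute_idempotency_key,
)

# RunState.new() and Effect are defined in core/models.py (later in this diff).
run = RunState.new(workflow_id="demo", entry_node="start")
assert run.status is RunStatus.RUNNING

effect = Effect(
    type=EffectType.ASK_USER,
    payload={"prompt": "Continue?"},
    result_key="answer",
)

# compute_idempotency_key() is defined in core/policy.py (later in this diff).
key = compute_idempotency_key(run_id=run.run_id, node_id=run.current_node, effect=effect)
print(key)  # 32-hex-character prefix of a SHA-256 digest
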
@@ -0,0 +1,26 @@
+ """Core runtime primitives."""
+
+ from .config import RuntimeConfig
+ from .models import Effect, EffectType, LimitWarning, RunState, RunStatus, StepPlan, WaitReason, WaitState
+ from .runtime import Runtime
+ from .spec import WorkflowSpec
+ from .vars import LIMITS, ensure_limits, get_limits
+
+ __all__ = [
+     "Effect",
+     "EffectType",
+     "LimitWarning",
+     "LIMITS",
+     "RunState",
+     "RunStatus",
+     "Runtime",
+     "RuntimeConfig",
+     "StepPlan",
+     "WaitReason",
+     "WaitState",
+     "WorkflowSpec",
+     "ensure_limits",
+     "get_limits",
+ ]
+
+
@@ -0,0 +1,101 @@
+ """abstractruntime.core.config
+
+ Runtime configuration for resource limits and model capabilities.
+
+ This module provides a RuntimeConfig dataclass that centralizes configuration
+ for runtime resource limits (iterations, tokens, history) and model capabilities.
+ The config is used to initialize the `_limits` namespace in RunState.vars.
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import Any, Dict, Optional
+
+
+ @dataclass(frozen=True)
+ class RuntimeConfig:
+     """Configuration for runtime resource limits and model capabilities.
+
+     This configuration is used by the Runtime to:
+     1. Initialize the `_limits` namespace in RunState.vars when starting a run
+     2. Provide model capability information for resource tracking
+     3. Configure warning thresholds for proactive notifications
+
+     Attributes:
+         max_iterations: Maximum number of reasoning iterations (default: 25)
+         warn_iterations_pct: Percentage threshold for iteration warnings (default: 80)
+         max_tokens: Maximum context window tokens (None = use model capabilities)
+         max_output_tokens: Maximum tokens for LLM response (None = provider default)
+         warn_tokens_pct: Percentage threshold for token warnings (default: 80)
+         max_history_messages: Maximum conversation history messages (-1 = unlimited)
+         model_capabilities: Dict of model capabilities from LLM provider
+
+     Example:
+         >>> config = RuntimeConfig(max_iterations=50, max_tokens=65536)
+         >>> limits = config.to_limits_dict()
+         >>> limits["max_iterations"]
+         50
+     """
+
+     # Iteration control
+     max_iterations: int = 25
+     warn_iterations_pct: int = 80
+
+     # Token/context window management
+     max_tokens: Optional[int] = None  # None = query from model capabilities
+     max_output_tokens: Optional[int] = None  # None = use provider default
+     warn_tokens_pct: int = 80
+
+     # History management
+     max_history_messages: int = -1  # -1 = unlimited (send all messages)
+
+     # Model capabilities (populated from LLM client)
+     model_capabilities: Dict[str, Any] = field(default_factory=dict)
+
+     def to_limits_dict(self) -> Dict[str, Any]:
+         """Convert to _limits namespace dict for RunState.vars.
+
+         Returns:
+             Dict with canonical limit values for storage in RunState.vars["_limits"].
+             Uses model_capabilities as fallback for max_tokens if not explicitly set.
+         """
+         return {
+             # Iteration control
+             "max_iterations": self.max_iterations,
+             "current_iteration": 0,
+
+             # Token management
+             "max_tokens": self.max_tokens or self.model_capabilities.get("max_tokens", 32768),
+             "max_output_tokens": self.max_output_tokens,
+             "estimated_tokens_used": 0,
+
+             # History management
+             "max_history_messages": self.max_history_messages,
+
+             # Warning thresholds
+             "warn_iterations_pct": self.warn_iterations_pct,
+             "warn_tokens_pct": self.warn_tokens_pct,
+         }
+
+     def with_capabilities(self, capabilities: Dict[str, Any]) -> "RuntimeConfig":
+         """Create a new RuntimeConfig with updated model capabilities.
+
+         This is useful for merging model capabilities from an LLM client
+         into an existing configuration.
+
+         Args:
+             capabilities: Dict of model capabilities (e.g., from get_model_capabilities())
+
+         Returns:
+             New RuntimeConfig with merged capabilities
+         """
+         return RuntimeConfig(
+             max_iterations=self.max_iterations,
+             warn_iterations_pct=self.warn_iterations_pct,
+             max_tokens=self.max_tokens,
+             max_output_tokens=self.max_output_tokens,
+             warn_tokens_pct=self.warn_tokens_pct,
+             max_history_messages=self.max_history_messages,
+             model_capabilities=capabilities,
+         )
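
A short usage sketch for the RuntimeConfig added above (illustrative only; the shape of the capabilities dict passed to with_capabilities() is an assumption):

# Illustrative sketch only — based on the RuntimeConfig API shown in this hunk.
from abstractruntime.core.config import RuntimeConfig

config = RuntimeConfig(max_iterations=50)

# Merge capabilities reported by an LLM client; the dict shape here is assumed.
config = config.with_capabilities({"max_tokens": 65536})

limits = config.to_limits_dict()
print(limits["max_iterations"])     # 50
print(limits["max_tokens"])         # 65536 (explicit max_tokens unset, so capabilities win; default fallback is 32768)
print(limits["current_iteration"])  # 0
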
@@ -0,0 +1,282 @@
+ """abstractruntime.core.models
+
+ Core data model for AbstractRuntime (v0.1).
+
+ Design intent:
+ - Keep everything JSON-serializable (durable execution)
+ - Separate *what to do* (Effect) from *how to do it* (EffectHandler)
+ - Represent long pauses explicitly (WaitState), never by keeping Python stacks alive
+
+ We intentionally keep this module dependency-light (stdlib only).
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from datetime import datetime, timezone
+ from enum import Enum
+ from typing import Any, Dict, List, Optional
+ import uuid
+
+
+ def utc_now() -> datetime:
+     return datetime.now(timezone.utc)
+
+
+ class RunStatus(str, Enum):
+     RUNNING = "running"
+     WAITING = "waiting"
+     COMPLETED = "completed"
+     FAILED = "failed"
+     CANCELLED = "cancelled"
+
+
+ class WaitReason(str, Enum):
+     EVENT = "event"  # arbitrary external signal
+     UNTIL = "until"  # time-based
+     USER = "user"  # human-in-the-loop
+     JOB = "job"  # external job completion
+     SUBWORKFLOW = "subworkflow"  # waiting for child workflow
+
+
+ class EffectType(str, Enum):
+     """Side-effects a node can request."""
+
+     # Pure waiting primitives
+     WAIT_EVENT = "wait_event"
+     WAIT_UNTIL = "wait_until"
+     ASK_USER = "ask_user"
+
+     # Integrations (implemented via pluggable handlers)
+     LLM_CALL = "llm_call"
+     TOOL_CALLS = "tool_calls"
+
+     # Composition
+     START_SUBWORKFLOW = "start_subworkflow"
+
+
+ @dataclass(frozen=True)
+ class Effect:
+     """A request for an external side-effect.
+
+     Notes:
+     - Effects must be serializable (payload is JSON-like).
+     - `result_key` specifies where the effect result is stored in run state variables.
+     """
+
+     type: EffectType
+     payload: Dict[str, Any] = field(default_factory=dict)
+     result_key: Optional[str] = None
+
+
+ @dataclass(frozen=True)
+ class StepPlan:
+     """What the runtime should do next for a node."""
+
+     node_id: str
+     effect: Optional[Effect] = None
+     next_node: Optional[str] = None
+
+     # If set, the runtime completes the run immediately.
+     complete_output: Optional[Dict[str, Any]] = None
+
+
+ @dataclass
+ class WaitState:
+     """Represents a durable pause.
+
+     The run can be resumed by calling `resume(run_id, event)`.
+
+     - For EVENT/USER/JOB: `wait_key` identifies which event unblocks the run.
+     - For UNTIL: `until` specifies when the run can continue.
+
+     `resume_to_node` defines where execution continues after resume.
+     `result_key` tells where to store the resume payload.
+     """
+
+     reason: WaitReason
+     wait_key: Optional[str] = None
+     until: Optional[str] = None  # ISO timestamp
+
+     resume_to_node: Optional[str] = None
+     result_key: Optional[str] = None
+
+     prompt: Optional[str] = None
+     choices: Optional[List[str]] = None
+     allow_free_text: bool = True
+
+     # Optional structured details for non-user waits (e.g. tool passthrough).
+     # Must be JSON-serializable.
+     details: Optional[Dict[str, Any]] = None
+
+
+ @dataclass
+ class RunState:
+     """Durable state for a workflow run."""
+
+     run_id: str
+     workflow_id: str
+     status: RunStatus
+     current_node: str
+
+     vars: Dict[str, Any] = field(default_factory=dict)
+
+     waiting: Optional[WaitState] = None
+     output: Optional[Dict[str, Any]] = None
+     error: Optional[str] = None
+
+     created_at: str = field(default_factory=lambda: utc_now().isoformat())
+     updated_at: str = field(default_factory=lambda: utc_now().isoformat())
+
+     # Optional provenance fields
+     actor_id: Optional[str] = None
+     session_id: Optional[str] = None
+     parent_run_id: Optional[str] = None  # For subworkflow tracking
+
+     @classmethod
+     def new(
+         cls,
+         *,
+         workflow_id: str,
+         entry_node: str,
+         actor_id: Optional[str] = None,
+         session_id: Optional[str] = None,
+         vars: Optional[Dict[str, Any]] = None,
+         parent_run_id: Optional[str] = None,
+     ) -> "RunState":
+         return cls(
+             run_id=str(uuid.uuid4()),
+             workflow_id=workflow_id,
+             status=RunStatus.RUNNING,
+             current_node=entry_node,
+             vars=vars or {},
+             actor_id=actor_id,
+             session_id=session_id,
+             parent_run_id=parent_run_id,
+         )
+
+
+ class StepStatus(str, Enum):
+     STARTED = "started"
+     COMPLETED = "completed"
+     WAITING = "waiting"
+     FAILED = "failed"
+
+
+ @dataclass
+ class StepRecord:
+     """One append-only ledger entry (execution journal)."""
+
+     run_id: str
+     step_id: str
+     node_id: str
+     status: StepStatus
+
+     effect: Optional[Dict[str, Any]] = None
+     result: Optional[Dict[str, Any]] = None
+     error: Optional[str] = None
+
+     started_at: str = field(default_factory=lambda: utc_now().isoformat())
+     ended_at: Optional[str] = None
+
+     # Optional provenance/integrity
+     actor_id: Optional[str] = None
+     session_id: Optional[str] = None
+
+     # Retry and idempotency fields
+     attempt: int = 1  # Current attempt number (1-indexed)
+     idempotency_key: Optional[str] = None  # For deduplication on restart
+
+     # Tamper-evident chain fields (optional in v0.1; filled by a chained LedgerStore).
+     prev_hash: Optional[str] = None
+     record_hash: Optional[str] = None
+     signature: Optional[str] = None
+
+     @classmethod
+     def start(
+         cls,
+         *,
+         run: RunState,
+         node_id: str,
+         effect: Optional[Effect],
+         attempt: int = 1,
+         idempotency_key: Optional[str] = None,
+     ) -> "StepRecord":
+         return cls(
+             run_id=run.run_id,
+             step_id=str(uuid.uuid4()),
+             node_id=node_id,
+             status=StepStatus.STARTED,
+             effect={
+                 "type": effect.type.value,
+                 "payload": effect.payload,
+                 "result_key": effect.result_key,
+             } if effect else None,
+             actor_id=run.actor_id,
+             session_id=getattr(run, "session_id", None),
+             attempt=attempt,
+             idempotency_key=idempotency_key,
+         )
+
+     def finish_success(self, result: Optional[Dict[str, Any]] = None) -> "StepRecord":
+         self.status = StepStatus.COMPLETED
+         self.result = result
+         self.ended_at = utc_now().isoformat()
+         return self
+
+     def finish_waiting(self, wait_state: WaitState) -> "StepRecord":
+         self.status = StepStatus.WAITING
+         self.result = {
+             "wait": {
+                 "reason": wait_state.reason.value,
+                 "wait_key": wait_state.wait_key,
+                 "until": wait_state.until,
+                 "resume_to_node": wait_state.resume_to_node,
+                 "result_key": wait_state.result_key,
+                 # Optional fields for richer audit/debugging
+                 "prompt": wait_state.prompt,
+                 "choices": wait_state.choices,
+                 "allow_free_text": wait_state.allow_free_text,
+                 "details": wait_state.details,
+             }
+         }
+         self.ended_at = utc_now().isoformat()
+         return self
+
+     def finish_failure(self, error: str) -> "StepRecord":
+         self.status = StepStatus.FAILED
+         self.error = error
+         self.ended_at = utc_now().isoformat()
+         return self
+
+
+ @dataclass
+ class LimitWarning:
+     """Warning about approaching or exceeding a runtime limit.
+
+     Generated by Runtime.check_limits() to proactively notify about
+     resource constraints before they cause failures.
+
+     Attributes:
+         limit_type: Type of limit ("iterations", "tokens", "history")
+         status: Warning status ("warning" at threshold, "exceeded" at limit)
+         current: Current value of the resource
+         maximum: Maximum allowed value
+         pct: Percentage of limit used (computed in __post_init__)
+
+     Example:
+         >>> warning = LimitWarning("iterations", "warning", 20, 25)
+         >>> warning.pct
+         80.0
+     """
+
+     limit_type: str  # "iterations", "tokens", "history"
+     status: str  # "warning", "exceeded"
+     current: int
+     maximum: int
+     pct: float = 0.0
+
+     def __post_init__(self) -> None:
+         if self.maximum > 0:
+             self.pct = round(self.current / self.maximum * 100, 1)
+
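
A brief, illustrative sketch of how these models compose into a ledger entry, using only constructors and methods shown in this hunk (the node and key names are invented for the example):

# Illustrative sketch only — the StepRecord lifecycle implied by the models above.
from abstractruntime.core.models import (
    Effect,
    EffectType,
    LimitWarning,
    RunState,
    StepRecord,
    StepStatus,
    WaitReason,
    WaitState,
)

run = RunState.new(workflow_id="demo", entry_node="ask_user")
effect = Effect(type=EffectType.ASK_USER, payload={"prompt": "Deploy?"}, result_key="answer")

# Journal the step, then mark it as waiting for a human.
record = StepRecord.start(run=run, node_id=run.current_node, effect=effect)
record.finish_waiting(
    WaitState(
        reason=WaitReason.USER,
        wait_key="deploy-approval",       # invented key for the example
        resume_to_node="after_ask_user",  # invented node id for the example
        result_key="answer",
        prompt="Deploy?",
    )
)
assert record.status is StepStatus.WAITING
assert record.result["wait"]["reason"] == "user"

# LimitWarning fills in pct in __post_init__.
assert LimitWarning("iterations", "warning", 20, 25).pct == 80.0
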
@@ -0,0 +1,166 @@
+ """abstractruntime.core.policy
+
+ Effect execution policies for retry and idempotency.
+
+ Policies control:
+ - How many times to retry a failed effect
+ - Backoff timing between retries
+ - Idempotency keys for deduplication on crash recovery
+ """
+
+ from __future__ import annotations
+
+ import hashlib
+ import json
+ from dataclasses import dataclass
+ from typing import Any, Dict, Optional, Protocol
+
+ from .models import Effect, RunState
+
+
+ class EffectPolicy(Protocol):
+     """Protocol for effect execution policies.
+
+     Implementations control retry behavior and idempotency.
+     """
+
+     def max_attempts(self, effect: Effect) -> int:
+         """Maximum number of attempts for an effect.
+
+         Args:
+             effect: The effect being executed.
+
+         Returns:
+             Maximum attempts (1 = no retries, 2 = one retry, etc.)
+         """
+         ...
+
+     def backoff_seconds(self, *, effect: Effect, attempt: int) -> float:
+         """Seconds to wait before retry.
+
+         Args:
+             effect: The effect being retried.
+             attempt: Current attempt number (1-indexed).
+
+         Returns:
+             Seconds to wait before next attempt.
+         """
+         ...
+
+     def idempotency_key(
+         self, *, run: RunState, node_id: str, effect: Effect
+     ) -> str:
+         """Compute idempotency key for an effect.
+
+         Effects with the same idempotency key are considered duplicates.
+         If a prior completed result exists for this key, it will be reused.
+
+         Args:
+             run: Current run state.
+             node_id: Current node ID.
+             effect: The effect being executed.
+
+         Returns:
+             Idempotency key string.
+         """
+         ...
+
+
+ @dataclass
+ class DefaultEffectPolicy:
+     """Default effect policy with configurable retry and idempotency.
+
+     Attributes:
+         default_max_attempts: Default max attempts for all effects.
+         default_backoff_base: Base backoff in seconds (exponential).
+         default_backoff_max: Maximum backoff in seconds.
+         effect_max_attempts: Per-effect-type max attempts override.
+     """
+
+     default_max_attempts: int = 1  # No retries by default
+     default_backoff_base: float = 1.0
+     default_backoff_max: float = 60.0
+     effect_max_attempts: Dict[str, int] = None  # type: ignore
+
+     def __post_init__(self):
+         if self.effect_max_attempts is None:
+             self.effect_max_attempts = {}
+
+     def max_attempts(self, effect: Effect) -> int:
+         """Get max attempts for an effect type."""
+         effect_type = effect.type.value
+         return self.effect_max_attempts.get(effect_type, self.default_max_attempts)
+
+     def backoff_seconds(self, *, effect: Effect, attempt: int) -> float:
+         """Exponential backoff capped at max."""
+         # Exponential: base * 2^(attempt-1), capped at max
+         delay = self.default_backoff_base * (2 ** (attempt - 1))
+         return min(delay, self.default_backoff_max)
+
+     def idempotency_key(
+         self, *, run: RunState, node_id: str, effect: Effect
+     ) -> str:
+         """Compute idempotency key from run_id, node_id, and effect.
+
+         The key is a hash of:
+         - run_id: Unique to this run
+         - node_id: Current node
+         - effect type and payload: What we're doing
+
+         This ensures the same effect at the same point in the same run
+         gets the same key, enabling deduplication on restart.
+         """
+         key_data = {
+             "run_id": run.run_id,
+             "node_id": node_id,
+             "effect_type": effect.type.value,
+             "effect_payload": effect.payload,
+         }
+         key_json = json.dumps(key_data, sort_keys=True, separators=(",", ":"))
+         return hashlib.sha256(key_json.encode()).hexdigest()[:32]
+
+
+ class RetryPolicy(DefaultEffectPolicy):
+     """Policy with retries enabled for LLM and tool calls."""
+
+     def __init__(
+         self,
+         *,
+         llm_max_attempts: int = 3,
+         tool_max_attempts: int = 2,
+         backoff_base: float = 1.0,
+         backoff_max: float = 30.0,
+     ):
+         super().__init__(
+             default_max_attempts=1,
+             default_backoff_base=backoff_base,
+             default_backoff_max=backoff_max,
+             effect_max_attempts={
+                 "llm_call": llm_max_attempts,
+                 "tool_calls": tool_max_attempts,
+             },
+         )
+
+
+ class NoRetryPolicy(DefaultEffectPolicy):
+     """Policy with no retries (fail immediately)."""
+
+     def __init__(self):
+         super().__init__(default_max_attempts=1)
+
+
+ def compute_idempotency_key(
+     *, run_id: str, node_id: str, effect: Effect
+ ) -> str:
+     """Standalone function to compute idempotency key.
+
+     Useful when you need to compute a key without a full policy.
+     """
+     key_data = {
+         "run_id": run_id,
+         "node_id": node_id,
+         "effect_type": effect.type.value,
+         "effect_payload": effect.payload,
+     }
+     key_json = json.dumps(key_data, sort_keys=True, separators=(",", ":"))
+     return hashlib.sha256(key_json.encode()).hexdigest()[:32]
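
A small, illustrative usage sketch for the policy classes above (the values follow directly from the code in this hunk; nothing here is part of the released package):

# Illustrative sketch only — retry/backoff/idempotency behaviour of the policies above.
from abstractruntime.core.models import Effect, EffectType
from abstractruntime.core.policy import NoRetryPolicy, RetryPolicy, compute_idempotency_key

llm_effect = Effect(type=EffectType.LLM_CALL, payload={"prompt": "hello"})

policy = RetryPolicy(llm_max_attempts=3, backoff_base=1.0, backoff_max=30.0)
print(policy.max_attempts(llm_effect))                       # 3 (per-type override for "llm_call")
print(policy.backoff_seconds(effect=llm_effect, attempt=3))  # 4.0 == 1.0 * 2**(3-1), capped at 30.0
print(NoRetryPolicy().max_attempts(llm_effect))              # 1 (fail immediately)

# The standalone helper matches DefaultEffectPolicy.idempotency_key() for the same inputs.
print(compute_idempotency_key(run_id="run-123", node_id="call_llm", effect=llm_effect))
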