edda-framework 0.14.0__py3-none-any.whl → 0.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,144 @@
+ """Marker nodes for durable graph operations.
+
+ These are special marker classes that tell DurableGraph to perform
+ workflow-level operations (wait_event, sleep) outside of activities.
+
+ This design keeps activities pure (atomic, retryable units of work)
+ while allowing graphs to wait for external events or sleep.
+
+ These classes inherit from pydantic-graph's BaseNode so they can be:
+ 1. Included in return type annotations without type: ignore
+ 2. Registered in Graph for proper type validation
+ 3. Detected by graph visualization tools
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING, Any, Generic, TypeVar
+
+ NextT = TypeVar("NextT")
+
+ # Try to import BaseNode for inheritance
+ # If pydantic-graph is not installed, use a fallback
+ try:
+     from pydantic_graph import BaseNode
+
+     _HAS_PYDANTIC_GRAPH = True
+ except ImportError:
+     _HAS_PYDANTIC_GRAPH = False
+
+     # Fallback base class when pydantic-graph is not installed
+     class BaseNode:  # type: ignore[no-redef]
+         pass
+
+
+ if TYPE_CHECKING:
+     from pydantic_graph import BaseNode as _BaseNode
+
+     # For type checking, we need the actual BaseNode
+     _MarkerBase = _BaseNode[Any, Any, Any]
+ else:
+     _MarkerBase = BaseNode
+
+
+ @dataclass
+ class WaitForEvent(_MarkerBase, Generic[NextT]):  # type: ignore[misc,valid-type]
+     """
+     Marker node that tells DurableGraph to wait for an external event.
+
+     When a node returns this marker, DurableGraph will:
+     1. Complete the current node's activity
+     2. Call wait_event() at the workflow level (outside activities)
+     3. Store the received event data in ctx.last_event
+     4. Continue execution with next_node
+
+     IMPORTANT: Register this class in your Graph for type checking:
+         graph = Graph(nodes=[MyNode1, MyNode2, WaitForEvent])
+
+     Example:
+         @dataclass
+         class WaitForPaymentNode(BaseNode[OrderState, None, str]):
+             async def run(
+                 self, ctx: DurableGraphContext
+             ) -> WaitForEvent[ProcessPaymentNode] | End[str]:
+                 ctx.state.waiting_for = "payment"
+                 return WaitForEvent(
+                     event_type=f"payment.{ctx.state.order_id}",
+                     next_node=ProcessPaymentNode(),
+                     timeout_seconds=3600,
+                 )
+
+         @dataclass
+         class ProcessPaymentNode(BaseNode[OrderState, None, str]):
+             async def run(self, ctx: DurableGraphContext) -> End[str]:
+                 event = ctx.last_event
+                 if event.data.get("status") == "success":
+                     return End("payment_received")
+                 return End("payment_failed")
+
+         # Register WaitForEvent in the Graph
+         graph = Graph(nodes=[WaitForPaymentNode, ProcessPaymentNode, WaitForEvent])
+     """
+
+     event_type: str
+     next_node: NextT
+     timeout_seconds: int | None = None
+
+     async def run(self, _ctx: Any) -> Any:
+         """Never called - DurableGraph intercepts this marker."""
+         raise RuntimeError(
+             "WaitForEvent marker should not be executed directly. "
+             "Use DurableGraph.run() instead of Graph.run()."
+         )
+
+
+ @dataclass
+ class Sleep(_MarkerBase, Generic[NextT]):  # type: ignore[misc,valid-type]
+     """
+     Marker node that tells DurableGraph to sleep before continuing.
+
+     When a node returns this marker, DurableGraph will:
+     1. Complete the current node's activity
+     2. Call sleep() at the workflow level (outside activities)
+     3. Continue execution with next_node
+
+     IMPORTANT: Register this class in your Graph for type checking:
+         graph = Graph(nodes=[MyNode1, MyNode2, Sleep])
+
+     Example:
+         @dataclass
+         class RateLimitNode(BaseNode[ApiState, None, str]):
+             async def run(
+                 self, ctx: DurableGraphContext
+             ) -> Sleep[RetryApiNode] | End[str]:
+                 if rate_limited:
+                     return Sleep(seconds=60, next_node=RetryApiNode())
+                 return End("success")
+
+         # Register Sleep in the Graph
+         graph = Graph(nodes=[RateLimitNode, RetryApiNode, Sleep])
+     """
+
+     seconds: int
+     next_node: NextT
+
+     async def run(self, _ctx: Any) -> Any:
+         """Never called - DurableGraph intercepts this marker."""
+         raise RuntimeError(
+             "Sleep marker should not be executed directly. "
+             "Use DurableGraph.run() instead of Graph.run()."
+         )
+
+
+ @dataclass
+ class ReceivedEvent:
+     """
+     Event data received from wait_event.
+
+     This is stored in DurableGraphContext.last_event after WaitForEvent completes.
+     """
+
+     event_type: str
+     data: dict[str, Any] = field(default_factory=dict)
+     metadata: dict[str, Any] = field(default_factory=dict)
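
Note: the marker contract described in the docstrings above (finish the node's activity, perform the wait or sleep at the workflow level, then continue with next_node) can be sketched roughly as follows. This is an illustrative sketch only, not DurableGraph's actual implementation: run_node_as_activity is a hypothetical stand-in for the runner's internal activity wrapper, the assignment to ctx.last_event mirrors what the docstrings describe, and the edda.channels helpers are assumed to have the signatures used by the LlamaIndex integration later in this diff.

    # Illustrative sketch only, not the real DurableGraph loop.
    from edda.channels import sleep, wait_event  # assumed workflow-level helpers

    async def _drive(ctx, node):
        while True:
            # Hypothetical helper: run one graph node inside an activity and
            # return whatever the node returned (a node, a marker, or End).
            result = await run_node_as_activity(ctx, node)

            if isinstance(result, WaitForEvent):
                # Wait durably at the workflow level, outside any activity.
                received = await wait_event(
                    ctx, result.event_type, timeout_seconds=result.timeout_seconds
                )
                # The real runner exposes this on its context as last_event.
                ctx.last_event = ReceivedEvent(
                    event_type=result.event_type,
                    data=getattr(received, "data", {}),
                )
                node = result.next_node
            elif isinstance(result, Sleep):
                await sleep(ctx, result.seconds)
                node = result.next_node
            else:
                # Ordinary transition or End(...); pydantic-graph handles these
                # in the real runner, so the sketch simply stops here.
                return result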
@@ -0,0 +1,51 @@
+ """
+ LlamaIndex Workflow Integration for Edda.
+
+ This module provides integration between LlamaIndex Workflow and Edda's durable
+ execution framework, making workflow execution crash-recoverable and supporting
+ durable wait operations.
+
+ Example:
+     from llama_index.core.workflow import Workflow, step, Event, StartEvent, StopEvent, Context
+     from edda import workflow, WorkflowContext
+     from edda.integrations.llamaindex import DurableWorkflowRunner, DurableSleepEvent
+
+     # Define events
+     class ProcessedEvent(Event):
+         data: str
+
+     # Define workflow
+     class MyWorkflow(Workflow):
+         @step
+         async def process(self, ctx: Context, ev: StartEvent) -> ProcessedEvent:
+             return ProcessedEvent(data=f"processed: {ev.input}")
+
+         @step
+         async def finalize(self, ctx: Context, ev: ProcessedEvent) -> StopEvent:
+             return StopEvent(result={"status": "done", "data": ev.data})
+
+     # Create durable runner
+     runner = DurableWorkflowRunner(MyWorkflow)
+
+     # Use in Edda workflow
+     @workflow
+     async def my_workflow(ctx: WorkflowContext, input_data: str) -> dict:
+         result = await runner.run(ctx, input=input_data)
+         return result
+
+ Installation:
+     pip install 'edda-framework[llamaindex]'
+ """
+
+ from .events import DurableSleepEvent, DurableWaitEvent, ResumeEvent
+ from .exceptions import WorkflowExecutionError, WorkflowReplayError
+ from .workflow import DurableWorkflowRunner
+
+ __all__ = [
+     "DurableWorkflowRunner",
+     "DurableSleepEvent",
+     "DurableWaitEvent",
+     "ResumeEvent",
+     "WorkflowExecutionError",
+     "WorkflowReplayError",
+ ]
@@ -0,0 +1,160 @@
+ """Durable events for LlamaIndex Workflow integration.
+
+ These events signal to the DurableWorkflow that a durable operation
+ (sleep or wait for external event) should be performed.
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any
+
+ # Lazy import to avoid requiring llama-index at import time
+ if TYPE_CHECKING:
+     pass
+
+
+ def _import_event_class() -> type[Any]:
+     """Import Event class with helpful error message."""
+     try:
+         from llama_index.core.workflow import Event  # type: ignore[import-not-found]
+
+         return Event  # type: ignore[no-any-return]
+     except ImportError as e:
+         msg = (
+             "llama-index-core is not installed. Install with:\n"
+             " pip install llama-index-core\n"
+             "or\n"
+             " pip install 'edda-framework[llamaindex]'"
+         )
+         raise ImportError(msg) from e
+
+
+ class DurableSleepEvent:
+     """
+     Event that signals a durable sleep operation.
+
+     When a step returns this event, the DurableWorkflow will:
+     1. Record the step completion
+     2. Call Edda's sleep() function (durable timer)
+     3. Resume with the specified resume_data after the sleep completes
+
+     Example:
+         @step
+         async def rate_limited_step(self, ctx: Context, ev: SomeEvent) -> DurableSleepEvent:
+             # Hit rate limit, need to wait
+             return DurableSleepEvent(
+                 seconds=60,
+                 resume_data={"retry_count": ev.retry_count + 1},
+             )
+     """
+
+     def __init__(
+         self,
+         seconds: float,
+         resume_data: dict[str, Any] | None = None,
+     ) -> None:
+         """
+         Initialize a durable sleep event.
+
+         Args:
+             seconds: Number of seconds to sleep
+             resume_data: Data to include when resuming after sleep
+         """
+         self.seconds = seconds
+         self.resume_data = resume_data or {}
+
+     def to_dict(self) -> dict[str, Any]:
+         """Serialize to dictionary."""
+         return {
+             "_type": "DurableSleepEvent",
+             "seconds": self.seconds,
+             "resume_data": self.resume_data,
+         }
+
+     @classmethod
+     def from_dict(cls, data: dict[str, Any]) -> DurableSleepEvent:
+         """Deserialize from dictionary."""
+         return cls(
+             seconds=data["seconds"],
+             resume_data=data.get("resume_data", {}),
+         )
+
+
+ class DurableWaitEvent:
+     """
+     Event that signals waiting for an external event.
+
+     When a step returns this event, the DurableWorkflow will:
+     1. Record the step completion
+     2. Call Edda's wait_event() function (durable event subscription)
+     3. Resume with the received event data after the event arrives
+
+     Example:
+         @step
+         async def wait_for_approval(self, ctx: Context, ev: OrderEvent) -> DurableWaitEvent:
+             return DurableWaitEvent(
+                 event_type=f"approval.{ev.order_id}",
+                 timeout_seconds=3600,  # 1 hour timeout
+             )
+     """
+
+     def __init__(
+         self,
+         event_type: str,
+         timeout_seconds: float | None = None,
+     ) -> None:
+         """
+         Initialize a durable wait event.
+
+         Args:
+             event_type: The event type to wait for (e.g., "payment.completed")
+             timeout_seconds: Optional timeout in seconds
+         """
+         self.event_type = event_type
+         self.timeout_seconds = timeout_seconds
+
+     def to_dict(self) -> dict[str, Any]:
+         """Serialize to dictionary."""
+         return {
+             "_type": "DurableWaitEvent",
+             "event_type": self.event_type,
+             "timeout_seconds": self.timeout_seconds,
+         }
+
+     @classmethod
+     def from_dict(cls, data: dict[str, Any]) -> DurableWaitEvent:
+         """Deserialize from dictionary."""
+         return cls(
+             event_type=data["event_type"],
+             timeout_seconds=data.get("timeout_seconds"),
+         )
+
+
+ class ResumeEvent:
+     """
+     Event used to resume workflow after a durable operation.
+
+     This is an internal event type used by DurableWorkflow to resume
+     execution after a DurableSleepEvent or DurableWaitEvent completes.
+     """
+
+     def __init__(self, data: dict[str, Any] | None = None) -> None:
+         """
+         Initialize a resume event.
+
+         Args:
+             data: Data from the completed operation (sleep resume_data or received event)
+         """
+         self.data = data or {}
+
+     def to_dict(self) -> dict[str, Any]:
+         """Serialize to dictionary."""
+         return {
+             "_type": "ResumeEvent",
+             "data": self.data,
+         }
+
+     @classmethod
+     def from_dict(cls, data: dict[str, Any]) -> ResumeEvent:
+         """Deserialize from dictionary."""
+         return cls(data=data.get("data", {}))
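
Note: to_dict()/from_dict() give these events a stable dictionary form, which is what lets the runner persist a step's result and rebuild it on replay. A small round-trip illustration (the event names and values are arbitrary):

    wait = DurableWaitEvent(event_type="approval.order-42", timeout_seconds=3600)
    payload = wait.to_dict()
    # {"_type": "DurableWaitEvent", "event_type": "approval.order-42", "timeout_seconds": 3600}
    restored = DurableWaitEvent.from_dict(payload)
    assert restored.event_type == "approval.order-42"

    # After the external event arrives, the runner feeds its payload back to the
    # workflow wrapped in a ResumeEvent:
    resume = ResumeEvent(data={"status": "approved"})
    assert resume.data["status"] == "approved"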
@@ -0,0 +1,15 @@
+ """Exceptions for LlamaIndex Workflow integration."""
+
+
+ class WorkflowExecutionError(Exception):
+     """Error during workflow execution."""
+
+     def __init__(self, message: str, step_name: str | None = None) -> None:
+         self.step_name = step_name
+         super().__init__(message)
+
+
+ class WorkflowReplayError(Exception):
+     """Error during workflow replay."""
+
+     pass
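
Note: WorkflowExecutionError carries the name of the failing step, so callers can report which LlamaIndex step broke. A minimal illustration (run_safely is a hypothetical wrapper, not part of the package):

    import logging

    logger = logging.getLogger(__name__)

    async def run_safely(runner, ctx, **kwargs):
        try:
            return await runner.run(ctx, **kwargs)
        except WorkflowExecutionError as e:
            # step_name is populated by _run_step when a step is missing or raises.
            logger.error("LlamaIndex step %r failed: %s", e.step_name, e)
            raise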
@@ -0,0 +1,306 @@
+ """DurableWorkflow - makes LlamaIndex Workflow execution durable via Edda.
+
+ This module provides integration between LlamaIndex Workflow and Edda's durable
+ execution framework, making workflow execution crash-recoverable and supporting
+ durable wait operations.
+ """
+
+ from __future__ import annotations
+
+ import importlib
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Any, TypeVar
+
+ from edda.activity import activity
+ from edda.pydantic_utils import to_json_dict
+
+ from .events import DurableSleepEvent, DurableWaitEvent, ResumeEvent
+ from .exceptions import WorkflowExecutionError
+
+ if TYPE_CHECKING:
+     from edda.context import WorkflowContext
+
+ T = TypeVar("T")
+
+
+ def _import_llamaindex_workflow() -> Any:
+     """Import llama_index.core.workflow with helpful error message."""
+     try:
+         from llama_index.core import workflow  # type: ignore[import-not-found]
+
+         return workflow
+     except ImportError as e:
+         msg = (
+             "llama-index-core is not installed. Install with:\n"
+             " pip install llama-index-core\n"
+             "or\n"
+             " pip install 'edda-framework[llamaindex]'"
+         )
+         raise ImportError(msg) from e
+
+
+ def _serialize_event(event: Any) -> dict[str, Any]:
+     """Serialize a LlamaIndex Event to a dictionary."""
+     if isinstance(event, DurableSleepEvent):
+         return event.to_dict()
+     if isinstance(event, DurableWaitEvent):
+         return event.to_dict()
+     if isinstance(event, ResumeEvent):
+         return event.to_dict()
+
+     # For LlamaIndex events, use model_dump if available (Pydantic)
+     if hasattr(event, "model_dump"):
+         data = event.model_dump()
+     elif hasattr(event, "__dict__"):
+         data = {k: v for k, v in event.__dict__.items() if not k.startswith("_")}
+     else:
+         data = {}
+
+     return {
+         "_type": f"{event.__class__.__module__}:{event.__class__.__qualname__}",
+         "_data": to_json_dict(data),
+     }
+
+
+ def _deserialize_event(data: dict[str, Any]) -> Any:
+     """Deserialize a dictionary to a LlamaIndex Event."""
+     event_type = data.get("_type", "")
+
+     # Handle our special events
+     if event_type == "DurableSleepEvent":
+         return DurableSleepEvent.from_dict(data)
+     if event_type == "DurableWaitEvent":
+         return DurableWaitEvent.from_dict(data)
+     if event_type == "ResumeEvent":
+         return ResumeEvent.from_dict(data)
+
+     # Handle LlamaIndex events
+     if ":" in event_type:
+         module_path, class_name = event_type.rsplit(":", 1)
+         module = importlib.import_module(module_path)
+         event_class = getattr(module, class_name)
+         event_data = data.get("_data", {})
+
+         # Use model_validate for Pydantic models
+         if hasattr(event_class, "model_validate"):
+             return event_class.model_validate(event_data)
+         return event_class(**event_data)
+
+     raise ValueError(f"Unknown event type: {event_type}")
+
+
+ @dataclass
+ class StepResult:
+     """Result of a step execution."""
+
+     event: Any
+     step_name: str
+
+
+ @activity
+ async def _run_step(
+     ctx: WorkflowContext,  # noqa: ARG001 - Used by @activity decorator
+     workflow_class_path: str,
+     step_name: str,
+     event_data: dict[str, Any],
+     context_data: dict[str, Any],
+ ) -> dict[str, Any]:
+     """
+     Execute a single workflow step as a durable activity.
+
+     This activity is the core of DurableWorkflow - it runs one step and returns
+     the serialized result event.
+     """
+     # Import the workflow class
+     module_path, class_name = workflow_class_path.rsplit(":", 1)
+     module = importlib.import_module(module_path)
+     workflow_class = getattr(module, class_name)
+
+     # Create workflow instance
+     workflow_instance = workflow_class()
+
+     # Deserialize the input event
+     input_event = _deserialize_event(event_data)
+
+     # Get the step method
+     step_method = getattr(workflow_instance, step_name, None)
+     if step_method is None:
+         raise WorkflowExecutionError(f"Step '{step_name}' not found", step_name)
+
+     # Create a minimal context for the step
+     # Note: LlamaIndex Context is created per-run, we create a simple mock
+
+     # Execute the step
+     try:
+         # The step method signature is: async def step(self, ctx, event) -> Event
+         # We need to provide a context - use a simple object with store
+         @dataclass
+         class SimpleContext:
+             store: dict[str, Any]
+
+         simple_ctx = SimpleContext(store=context_data.get("store", {}))
+         result_event = await step_method(simple_ctx, input_event)
+     except Exception as e:
+         raise WorkflowExecutionError(f"Step '{step_name}' failed: {e}", step_name) from e
+
+     # Serialize the result
+     return {
+         "event": _serialize_event(result_event),
+         "context_store": simple_ctx.store,
+     }
+
+
+ class DurableWorkflowRunner:
+     """
+     Runner that executes a LlamaIndex Workflow with Edda durability.
+
+     This class wraps a LlamaIndex Workflow and executes it step-by-step,
+     recording each step as an Edda Activity for crash recovery.
+
+     Example:
+         from llama_index.core.workflow import Workflow, step, StartEvent, StopEvent, Context
+
+         class MyWorkflow(Workflow):
+             @step
+             async def process(self, ctx: Context, ev: StartEvent) -> StopEvent:
+                 return StopEvent(result="done")
+
+         runner = DurableWorkflowRunner(MyWorkflow)
+
+         @workflow
+         async def my_edda_workflow(ctx: WorkflowContext) -> str:
+             result = await runner.run(ctx, input_data="hello")
+             return result
+     """
+
+     def __init__(self, workflow_class: type) -> None:
+         """
+         Initialize DurableWorkflowRunner.
+
+         Args:
+             workflow_class: A LlamaIndex Workflow class (not instance)
+         """
+         self._workflow_class = workflow_class
+         self._class_path = f"{workflow_class.__module__}:{workflow_class.__qualname__}"
+
+         # Validate it's a Workflow subclass
+         llamaindex_workflow = _import_llamaindex_workflow()
+         if not issubclass(workflow_class, llamaindex_workflow.Workflow):
+             raise TypeError(f"Expected a Workflow subclass, got {workflow_class.__name__}")
+
+     async def run(
+         self,
+         ctx: WorkflowContext,
+         **kwargs: Any,
+     ) -> Any:
+         """
+         Execute the workflow durably with Edda crash recovery.
+
+         Args:
+             ctx: Edda WorkflowContext
+             **kwargs: Arguments passed to StartEvent
+
+         Returns:
+             The result from StopEvent
+         """
+         from edda.channels import sleep as edda_sleep
+         from edda.channels import wait_event as edda_wait_event
+
+         llamaindex_workflow = _import_llamaindex_workflow()
+
+         # Create a workflow instance to analyze its steps
+         workflow_instance = self._workflow_class()
+
+         # Build step registry from the workflow
+         step_registry = self._build_step_registry(workflow_instance)
+
+         # Start with StartEvent
+         start_event_class = llamaindex_workflow.StartEvent
+         current_event = start_event_class(**kwargs)
+         context_store: dict[str, Any] = {}
+
+         # Main execution loop
+         while True:
+             # Find the step that handles this event type
+             event_type = type(current_event)
+             step_name = self._find_step_for_event(step_registry, event_type)
+
+             if step_name is None:
+                 # No step found - check if it's a stop event
+                 if isinstance(current_event, llamaindex_workflow.StopEvent):
+                     return current_event.result
+                 raise WorkflowExecutionError(f"No step found for event type: {event_type.__name__}")
+
+             # Execute the step as an activity
+             result = await _run_step(  # type: ignore[misc,call-arg]
+                 ctx,  # type: ignore[arg-type]
+                 self._class_path,
+                 step_name,
+                 _serialize_event(current_event),
+                 {"store": context_store},
+             )
+
+             # Update context store
+             context_store = result.get("context_store", {})
+
+             # Deserialize the result event
+             result_event = _deserialize_event(result["event"])
+
+             # Handle special durable events
+             if isinstance(result_event, DurableSleepEvent):
+                 # Durable sleep
+                 await edda_sleep(ctx, int(result_event.seconds))
+                 # Resume with ResumeEvent containing the sleep's resume_data
+                 current_event = ResumeEvent(data=result_event.resume_data)
+
+             elif isinstance(result_event, DurableWaitEvent):
+                 # Durable wait for external event
+                 received = await edda_wait_event(
+                     ctx,
+                     result_event.event_type,
+                     timeout_seconds=(
+                         int(result_event.timeout_seconds) if result_event.timeout_seconds else None
+                     ),
+                 )
+                 # Resume with ResumeEvent containing the received data
+                 current_event = ResumeEvent(data=received.data if hasattr(received, "data") else {})
+
+             elif isinstance(result_event, llamaindex_workflow.StopEvent):
+                 # Workflow completed
+                 return result_event.result
+
+             else:
+                 # Normal event transition
+                 current_event = result_event
+
+     def _build_step_registry(self, workflow_instance: Any) -> dict[str, list[type]]:
+         """Build a registry mapping step names to their input event types."""
+         registry: dict[str, list[type]] = {}
+
+         # Look for methods decorated with @step in the class
+         workflow_class = type(workflow_instance)
+         for name in dir(workflow_class):
+             if name.startswith("_"):
+                 continue
+
+             # Get the raw function from the class (not bound method)
+             method = getattr(workflow_class, name, None)
+             if method is None or not callable(method):
+                 continue
+
+             # Check if it's a step (has _step_config attribute from @step decorator)
+             if hasattr(method, "_step_config"):
+                 step_config = method._step_config
+                 # Get accepted event types directly from step config
+                 if hasattr(step_config, "accepted_events"):
+                     registry[name] = list(step_config.accepted_events)
+
+         return registry
+
+     def _find_step_for_event(self, registry: dict[str, list[type]], event_type: type) -> str | None:
+         """Find the step that handles the given event type."""
+         for step_name, accepted_types in registry.items():
+             for accepted_type in accepted_types:
+                 if issubclass(event_type, accepted_type):
+                     return step_name
+         return None
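
Note: the "_type" = "module:qualname" scheme used by _serialize_event/_deserialize_event is what allows _run_step to rebuild events in a separate activity invocation. A rough round-trip illustration, assuming llama-index-core is installed and using a hypothetical OrderEvent defined at module level (nested or local classes would not import back cleanly):

    from llama_index.core.workflow import Event

    class OrderEvent(Event):
        order_id: str

    payload = _serialize_event(OrderEvent(order_id="o-42"))
    # payload["_type"] is "<defining module>:OrderEvent"; payload["_data"] holds
    # the JSON-safe field values produced via model_dump()/to_json_dict().
    restored = _deserialize_event(payload)
    assert restored.order_id == "o-42"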