edda-framework 0.14.1__py3-none-any.whl → 0.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,58 @@
1
+ """
2
+ Durable Graph Integration for Edda.
3
+
4
+ This module provides integration between pydantic-graph and Edda's durable
5
+ execution framework, making pydantic-graph execution crash-recoverable and
6
+ supporting durable wait operations.
7
+
8
+ Example:
9
+ from dataclasses import dataclass
10
+ from pydantic_graph import BaseNode, Graph, End
11
+ from edda import workflow, WorkflowContext
12
+ from edda.integrations.graph import DurableGraph, DurableGraphContext
13
+
14
+ @dataclass
15
+ class MyState:
16
+ counter: int = 0
17
+
18
+ @dataclass
19
+ class IncrementNode(BaseNode[MyState, None, int]):
20
+ async def run(self, ctx: DurableGraphContext) -> "CheckNode":
21
+ ctx.state.counter += 1
22
+ return CheckNode()
23
+
24
+ @dataclass
25
+ class CheckNode(BaseNode[MyState, None, int]):
26
+ async def run(self, ctx: DurableGraphContext) -> IncrementNode | End[int]:
27
+ if ctx.state.counter >= 5:
28
+ return End(ctx.state.counter)
29
+ return IncrementNode()
30
+
31
+ graph = Graph(nodes=[IncrementNode, CheckNode])
32
+ durable = DurableGraph(graph)
33
+
34
+ @workflow
35
+ async def counter_workflow(ctx: WorkflowContext) -> int:
36
+ return await durable.run(
37
+ ctx,
38
+ start_node=IncrementNode(),
39
+ state=MyState(),
40
+ )
41
+
42
+ Installation:
43
+ pip install 'edda-framework[graph]'
44
+ """
45
+
46
+ from .context import DurableGraphContext
47
+ from .exceptions import GraphExecutionError
48
+ from .graph import DurableGraph
49
+ from .nodes import ReceivedEvent, Sleep, WaitForEvent
50
+
51
+ __all__ = [
52
+ "DurableGraph",
53
+ "DurableGraphContext",
54
+ "GraphExecutionError",
55
+ "ReceivedEvent",
56
+ "Sleep",
57
+ "WaitForEvent",
58
+ ]
@@ -0,0 +1,81 @@
1
+ """DurableGraphContext - bridges pydantic-graph and Edda contexts."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import TYPE_CHECKING, Generic, TypeVar
7
+
8
+ if TYPE_CHECKING:
9
+ from edda.context import WorkflowContext
10
+
11
+ from .nodes import ReceivedEvent
12
+
13
+ StateT = TypeVar("StateT")
14
+ DepsT = TypeVar("DepsT")
15
+
16
+
17
@dataclass
class DurableGraphContext(Generic[StateT, DepsT]):
    """
    Execution context handed to node ``run()`` methods by DurableGraph.

    It bridges two worlds: pydantic-graph's per-run ``state``/``deps`` pair
    and Edda's durable WorkflowContext. Nodes use the ``state`` and ``deps``
    properties exactly as they would with plain pydantic-graph, and can
    inspect ``last_event`` after a WaitForEvent marker has completed.

    Durable waits are expressed by *returning* WaitForEvent / Sleep marker
    nodes from ``run()`` rather than by calling methods on this context:

        from edda.integrations.graph import WaitForEvent, Sleep

        @dataclass
        class MyNode(BaseNode[MyState, None, str]):
            async def run(self, ctx: DurableGraphContext) -> WaitForEvent[NextNode]:
                # Return a marker to wait for an event
                return WaitForEvent(
                    event_type="payment.completed",
                    next_node=NextNode(),
                    timeout_seconds=3600,
                )

        @dataclass
        class NextNode(BaseNode[MyState, None, str]):
            async def run(self, ctx: DurableGraphContext) -> End[str]:
                # Access the received event
                event = ctx.last_event
                return End(event.data.get("status", "unknown"))

    Attributes:
        state: Mutable graph state shared across nodes in this run.
        deps: Read-only dependencies object.
        last_event: Event delivered by the most recent WaitForEvent, or None.
        workflow_ctx: The underlying Edda WorkflowContext.
    """

    # Field order is part of the generated __init__ signature - keep it stable.
    _state: StateT
    _deps: DepsT
    workflow_ctx: WorkflowContext
    last_event: ReceivedEvent | None = None

    @property
    def state(self) -> StateT:
        """Mutable graph state shared by every node in this run."""
        return self._state

    @property
    def deps(self) -> DepsT:
        """Dependencies supplied to DurableGraph.run() (treat as immutable)."""
        return self._deps

    @property
    def instance_id(self) -> str:
        """ID of the owning Edda workflow instance."""
        return self.workflow_ctx.instance_id

    @property
    def is_replaying(self) -> bool:
        """True while Edda is replaying previously recorded history."""
        return self.workflow_ctx.is_replaying
@@ -0,0 +1,9 @@
1
+ """Exceptions for durable graph integration."""
2
+
3
+
4
class GraphExecutionError(Exception):
    """Raised when executing a graph node fails.

    Attributes:
        node_name: Class name of the node that failed, when known.
    """

    def __init__(self, message: str, node_name: str | None = None) -> None:
        super().__init__(message)
        self.node_name = node_name
@@ -0,0 +1,385 @@
1
+ """DurableGraph - makes pydantic-graph execution durable via Edda."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import dataclasses
6
+ import importlib
7
+ from typing import TYPE_CHECKING, Any, Generic, TypeVar
8
+
9
+ from edda.activity import activity
10
+ from edda.pydantic_utils import to_json_dict
11
+
12
+ from .context import DurableGraphContext
13
+ from .exceptions import GraphExecutionError
14
+ from .nodes import ReceivedEvent, Sleep, WaitForEvent
15
+
16
+ if TYPE_CHECKING:
17
+ from edda.context import WorkflowContext
18
+
19
+ StateT = TypeVar("StateT")
20
+ DepsT = TypeVar("DepsT")
21
+ RunEndT = TypeVar("RunEndT")
22
+
23
+
24
+ def _import_pydantic_graph() -> Any:
25
+ """Import pydantic_graph with helpful error message."""
26
+ try:
27
+ import pydantic_graph
28
+
29
+ return pydantic_graph
30
+ except ImportError as e:
31
+ msg = (
32
+ "pydantic-graph is not installed. Install with:\n"
33
+ " pip install pydantic-graph\n"
34
+ "or\n"
35
+ " pip install 'edda-framework[graph]'"
36
+ )
37
+ raise ImportError(msg) from e
38
+
39
+
40
+ def _get_class_path(cls: type) -> str:
41
+ """Get fully qualified class path for serialization."""
42
+ return f"{cls.__module__}:{cls.__qualname__}"
43
+
44
+
45
+ def _import_class(path: str) -> type:
46
+ """Import a class from its fully qualified path."""
47
+ module_path, class_name = path.rsplit(":", 1)
48
+ module = importlib.import_module(module_path)
49
+ return getattr(module, class_name) # type: ignore[no-any-return]
50
+
51
+
52
def _serialize_node(node: Any) -> dict[str, Any]:
    """Serialize a node instance into a JSON-friendly dict.

    Dataclass instances carry their field values in "_data"; any other
    node serializes with an empty payload (reconstructed via no-arg init).
    """
    is_dataclass_instance = dataclasses.is_dataclass(node) and not isinstance(node, type)
    payload = dataclasses.asdict(node) if is_dataclass_instance else {}
    return {
        "_class_path": _get_class_path(node.__class__),
        "_data": payload,
    }
63
+
64
+
65
def _deserialize_node(data: dict[str, Any]) -> Any:
    """Rebuild a node instance from a dict made by ``_serialize_node``."""
    node_cls = _import_class(data["_class_path"])
    kwargs = data.get("_data", {})
    return node_cls(**kwargs)
69
+
70
+
71
+ def _serialize_state(state: Any) -> dict[str, Any]:
72
+ """Serialize state to a dict."""
73
+ if state is None:
74
+ return {"_none": True}
75
+ if dataclasses.is_dataclass(state) and not isinstance(state, type):
76
+ return {
77
+ "_class_path": _get_class_path(state.__class__),
78
+ "_data": dataclasses.asdict(state),
79
+ }
80
+ if hasattr(state, "model_dump"):
81
+ return {
82
+ "_class_path": _get_class_path(state.__class__),
83
+ "_data": state.model_dump(),
84
+ }
85
+ return {"_raw": str(state)}
86
+
87
+
88
+ def _serialize_deps(deps: Any) -> dict[str, Any] | None:
89
+ """Serialize deps to a dict."""
90
+ if deps is None:
91
+ return None
92
+ if dataclasses.is_dataclass(deps) and not isinstance(deps, type):
93
+ return {
94
+ "_class_path": _get_class_path(deps.__class__),
95
+ "_data": dataclasses.asdict(deps),
96
+ }
97
+ if hasattr(deps, "model_dump"):
98
+ return {
99
+ "_class_path": _get_class_path(deps.__class__),
100
+ "_data": deps.model_dump(),
101
+ }
102
+ # For simple types (int, str, etc.), return as-is wrapped
103
+ return {"_value": deps}
104
+
105
+
106
+ def _deserialize_deps(data: dict[str, Any] | None) -> Any:
107
+ """Deserialize deps from a dict."""
108
+ if data is None:
109
+ return None
110
+ if "_value" in data:
111
+ return data["_value"]
112
+ cls = _import_class(data["_class_path"])
113
+ if dataclasses.is_dataclass(cls):
114
+ return cls(**data["_data"])
115
+ if hasattr(cls, "model_validate"):
116
+ return cls.model_validate(data["_data"])
117
+ return cls(**data["_data"])
118
+
119
+
120
+ def _deserialize_state(data: dict[str, Any]) -> Any:
121
+ """Deserialize state from a dict."""
122
+ if data.get("_none"):
123
+ return None
124
+ if "_raw" in data:
125
+ raise ValueError(f"Cannot deserialize state from raw: {data['_raw']}")
126
+ cls = _import_class(data["_class_path"])
127
+ if dataclasses.is_dataclass(cls):
128
+ return cls(**data["_data"])
129
+ if hasattr(cls, "model_validate"):
130
+ return cls.model_validate(data["_data"])
131
+ return cls(**data["_data"])
132
+
133
+
134
+ def _restore_state(source: Any, target: Any) -> None:
135
+ """Copy state from source to target object."""
136
+ if dataclasses.is_dataclass(source) and not isinstance(source, type):
137
+ for field in dataclasses.fields(source):
138
+ setattr(target, field.name, getattr(source, field.name))
139
+ elif hasattr(source, "__dict__"):
140
+ for key, value in source.__dict__.items():
141
+ if not key.startswith("_"):
142
+ setattr(target, key, value)
143
+
144
+
145
@activity
async def _run_graph_node(
    ctx: WorkflowContext,
    node_data: dict[str, Any],
    state_data: dict[str, Any],
    deps_data: dict[str, Any] | None,
    last_event_data: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """
    Execute a single graph node as a durable activity.

    This activity is the core of DurableGraph - it runs one node and returns
    the serialized result (next node, End, WaitForEvent, or Sleep) along
    with the updated state.

    Args:
        ctx: Edda workflow context (injected by the @activity machinery).
        node_data: Output of _serialize_node for the node to execute.
        state_data: Output of _serialize_state for the current graph state.
        deps_data: Output of _serialize_deps, or None when there are no deps.
        last_event_data: Serialized event from a preceding WaitForEvent,
            or None when the previous transition was not an event wait.

    Returns:
        A dict with "_type" of "End" | "WaitForEvent" | "Sleep" | "Node",
        type-specific payload keys, and "_state" with the post-run state.

    Raises:
        GraphExecutionError: Wraps any exception raised while running the
            node (or while serializing its result - see try scope below).
    """
    pg = _import_pydantic_graph()

    # Deserialize node, state, and deps
    node = _deserialize_node(node_data)
    state = _deserialize_state(state_data)
    deps = _deserialize_deps(deps_data)

    # Reconstruct last_event if provided
    last_event: ReceivedEvent | None = None
    if last_event_data:
        last_event = ReceivedEvent(
            event_type=last_event_data.get("event_type", ""),
            data=last_event_data.get("data", {}),
            metadata=last_event_data.get("metadata", {}),
        )

    # Create durable context
    # NOTE: keyword args target the dataclass fields _state/_deps directly;
    # nodes read them through the state/deps properties.
    durable_ctx = DurableGraphContext(
        _state=state,
        _deps=deps,
        workflow_ctx=ctx,
        last_event=last_event,
    )

    # The try block deliberately covers both node.run() and result
    # serialization so either failure surfaces as GraphExecutionError.
    try:
        # Execute the node
        result = await node.run(durable_ctx)

        # Serialize result based on type; "_state" always carries the
        # (possibly mutated) state back to the workflow-level loop.
        if isinstance(result, pg.End):
            return {
                "_type": "End",
                "_data": to_json_dict(result.data),
                "_state": _serialize_state(state),
            }
        elif isinstance(result, WaitForEvent):
            # Marker: the workflow loop performs the actual wait_event().
            return {
                "_type": "WaitForEvent",
                "_event_type": result.event_type,
                "_timeout_seconds": result.timeout_seconds,
                "_next_node": _serialize_node(result.next_node),
                "_state": _serialize_state(state),
            }
        elif isinstance(result, Sleep):
            # Marker: the workflow loop performs the actual sleep().
            return {
                "_type": "Sleep",
                "_seconds": result.seconds,
                "_next_node": _serialize_node(result.next_node),
                "_state": _serialize_state(state),
            }
        else:
            # Regular node transition
            return {
                "_type": "Node",
                "_node": _serialize_node(result),
                "_state": _serialize_state(state),
            }

    except Exception as e:
        raise GraphExecutionError(
            f"Node {node.__class__.__name__} failed: {e}",
            node.__class__.__name__,
        ) from e
223
+
224
+
225
class DurableGraph(Generic[StateT, DepsT, RunEndT]):
    """
    Wrapper that makes pydantic-graph execution durable.

    DurableGraph wraps a pydantic-graph Graph and executes it with Edda's
    durability guarantees:

    - Each node execution is recorded as an Edda Activity
    - On replay, completed nodes return cached results (no re-execution)
    - Crash recovery: workflows resume from the last completed node
    - WaitForEvent/Sleep markers enable durable wait operations

    Example:
        from dataclasses import dataclass
        from pydantic_graph import BaseNode, Graph, End
        from edda import workflow, WorkflowContext
        from edda.integrations.graph import (
            DurableGraph,
            DurableGraphContext,
            WaitForEvent,
        )

        @dataclass
        class OrderState:
            order_id: str | None = None

        @dataclass
        class ProcessOrder(BaseNode[OrderState, None, str]):
            order_id: str

            async def run(self, ctx: DurableGraphContext) -> WaitForEvent[WaitPayment]:
                ctx.state.order_id = self.order_id
                return WaitForEvent(
                    event_type="payment.completed",
                    next_node=WaitPayment(),
                )

        @dataclass
        class WaitPayment(BaseNode[OrderState, None, str]):
            async def run(self, ctx: DurableGraphContext) -> End[str]:
                # Access the received event
                event = ctx.last_event
                if event and event.data.get("status") == "success":
                    return End("completed")
                return End("failed")

        graph = Graph(nodes=[ProcessOrder, WaitPayment])
        durable = DurableGraph(graph)

        @workflow
        async def order_workflow(ctx: WorkflowContext, order_id: str) -> str:
            return await durable.run(
                ctx,
                start_node=ProcessOrder(order_id=order_id),
                state=OrderState(),
            )
    """

    def __init__(self, graph: Any) -> None:
        """
        Initialize DurableGraph wrapper.

        Args:
            graph: A pydantic-graph Graph instance

        Raises:
            TypeError: If graph is not a pydantic-graph Graph instance
        """
        # Lazy import keeps this module importable without pydantic-graph.
        pg = _import_pydantic_graph()
        if not isinstance(graph, pg.Graph):
            raise TypeError(f"Expected pydantic_graph.Graph, got {type(graph).__name__}")
        self._graph = graph

    @property
    def graph(self) -> Any:
        """Get the underlying pydantic-graph Graph instance."""
        return self._graph

    async def run(
        self,
        ctx: WorkflowContext,
        start_node: Any,
        *,
        state: StateT,
        deps: DepsT = None,  # type: ignore[assignment]
    ) -> RunEndT:
        """
        Execute the graph durably with Edda crash recovery.

        Each loop iteration runs exactly one node inside an Edda activity;
        wait_event/sleep happen at the workflow level so they remain
        durable (an activity must stay an atomic, retryable unit).

        Args:
            ctx: Edda WorkflowContext
            start_node: The initial node to start execution from
            state: Initial graph state (will be mutated during execution)
            deps: Optional dependencies accessible via ctx.deps

        Returns:
            The final result (End.data value)

        Raises:
            GraphExecutionError: If graph execution fails
        """
        # Imported here to avoid import cycles at module load time.
        from edda.channels import sleep as edda_sleep
        from edda.channels import wait_event as edda_wait_event

        current_node = start_node
        last_event_data: dict[str, Any] | None = None

        # Execute nodes until End is reached
        while True:
            # Serialize inputs
            node_data = _serialize_node(current_node)
            state_data = _serialize_state(state)
            deps_data = _serialize_deps(deps)

            # Run node as activity (handles replay/caching automatically)
            # The @activity decorator transforms the function signature
            result = await _run_graph_node(  # type: ignore[misc,call-arg]
                ctx,  # type: ignore[arg-type]
                node_data,
                state_data,
                deps_data,
                last_event_data,
            )

            # Restore state from result - mutate the caller's state object
            # in place so references held by the caller stay valid.
            restored_state = _deserialize_state(result["_state"])
            _restore_state(restored_state, state)

            # Handle result based on type
            if result["_type"] == "End":
                return result["_data"]  # type: ignore[no-any-return]

            elif result["_type"] == "WaitForEvent":
                # Wait for event at workflow level (outside activity)
                event = await edda_wait_event(
                    ctx,
                    result["_event_type"],
                    timeout_seconds=result.get("_timeout_seconds"),
                )
                # Store event data for next node
                # Note: edda.channels.ReceivedEvent uses 'type' not 'event_type'
                last_event_data = {
                    "event_type": getattr(event, "type", result["_event_type"]),
                    "data": event.data if isinstance(event.data, dict) else {},
                    "metadata": getattr(event, "extensions", {}),
                }
                # Move to next node
                current_node = _deserialize_node(result["_next_node"])

            elif result["_type"] == "Sleep":
                # Sleep at workflow level (outside activity)
                await edda_sleep(ctx, result["_seconds"])
                # Clear last_event since this wasn't an event wait
                last_event_data = None
                # Move to next node
                current_node = _deserialize_node(result["_next_node"])

            else:
                # Regular node transition
                last_event_data = None  # Clear last_event for regular transitions
                current_node = _deserialize_node(result["_node"])
@@ -0,0 +1,144 @@
1
+ """Marker nodes for durable graph operations.
2
+
3
+ These are special marker classes that tell DurableGraph to perform
4
+ workflow-level operations (wait_event, sleep) outside of activities.
5
+
6
+ This design keeps activities pure (atomic, retryable units of work)
7
+ while allowing graphs to wait for external events or sleep.
8
+
9
+ These classes inherit from pydantic-graph's BaseNode so they can be:
10
+ 1. Included in return type annotations without type: ignore
11
+ 2. Registered in Graph for proper type validation
12
+ 3. Detected by graph visualization tools
13
+ """
14
+
15
+ from __future__ import annotations
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import TYPE_CHECKING, Any, Generic, TypeVar
19
+
20
+ NextT = TypeVar("NextT")
21
+
22
# Try to import BaseNode for inheritance
# If pydantic-graph is not installed, use a fallback so this module stays
# importable (the error surfaces later, when the graph is actually run).
try:
    from pydantic_graph import BaseNode

    _HAS_PYDANTIC_GRAPH = True
except ImportError:
    _HAS_PYDANTIC_GRAPH = False

    # Fallback base class when pydantic-graph is not installed
    class BaseNode:  # type: ignore[no-redef]
        pass


if TYPE_CHECKING:
    from pydantic_graph import BaseNode as _BaseNode

    # For type checking, we need the actual BaseNode, parameterized.
    # Subscripting is done only here: doing it at runtime would require
    # pydantic-graph to be installed.
    _MarkerBase = _BaseNode[Any, Any, Any]
else:
    # At runtime the markers inherit the unparameterized (possibly fallback)
    # BaseNode resolved by the try/except above.
    _MarkerBase = BaseNode
43
+
44
+
45
@dataclass
class WaitForEvent(_MarkerBase, Generic[NextT]):  # type: ignore[misc,valid-type]
    """
    Marker node: suspend the graph until an external event arrives.

    DurableGraph intercepts this return value instead of executing it. It
    finishes the current node's activity, performs a workflow-level
    wait_event() (durable across crashes), stores the delivered event on
    ``ctx.last_event``, and resumes execution with ``next_node``.

    Register the class in your Graph so type validation passes:

        graph = Graph(nodes=[WaitForPaymentNode, ProcessPaymentNode, WaitForEvent])

    Example:
        @dataclass
        class WaitForPaymentNode(BaseNode[OrderState, None, str]):
            async def run(
                self, ctx: DurableGraphContext
            ) -> WaitForEvent[ProcessPaymentNode] | End[str]:
                ctx.state.waiting_for = "payment"
                return WaitForEvent(
                    event_type=f"payment.{ctx.state.order_id}",
                    next_node=ProcessPaymentNode(),
                    timeout_seconds=3600,
                )

        @dataclass
        class ProcessPaymentNode(BaseNode[OrderState, None, str]):
            async def run(self, ctx: DurableGraphContext) -> End[str]:
                event = ctx.last_event
                if event.data.get("status") == "success":
                    return End("payment_received")
                return End("payment_failed")
    """

    # Event type to subscribe to (e.g. "payment.completed")
    event_type: str
    # Node instance to resume with once the event arrives
    next_node: NextT
    # Optional wait timeout; None waits indefinitely
    timeout_seconds: int | None = None

    async def run(self, _ctx: Any) -> Any:
        """Guard against direct execution; DurableGraph consumes markers."""
        raise RuntimeError(
            "WaitForEvent marker should not be executed directly. "
            "Use DurableGraph.run() instead of Graph.run()."
        )
94
+
95
+
96
@dataclass
class Sleep(_MarkerBase, Generic[NextT]):  # type: ignore[misc,valid-type]
    """
    Marker node: pause the graph for a fixed duration before continuing.

    DurableGraph intercepts this return value instead of executing it. It
    finishes the current node's activity, performs a workflow-level
    sleep() (a durable timer), and resumes execution with ``next_node``.

    Register the class in your Graph so type validation passes:

        graph = Graph(nodes=[RateLimitNode, RetryApiNode, Sleep])

    Example:
        @dataclass
        class RateLimitNode(BaseNode[ApiState, None, str]):
            async def run(
                self, ctx: DurableGraphContext
            ) -> Sleep[RetryApiNode] | End[str]:
                if rate_limited:
                    return Sleep(seconds=60, next_node=RetryApiNode())
                return End("success")
    """

    # Duration of the durable sleep, in seconds
    seconds: int
    # Node instance to resume with once the timer fires
    next_node: NextT

    async def run(self, _ctx: Any) -> Any:
        """Guard against direct execution; DurableGraph consumes markers."""
        raise RuntimeError(
            "Sleep marker should not be executed directly. "
            "Use DurableGraph.run() instead of Graph.run()."
        )
132
+
133
+
134
@dataclass
class ReceivedEvent:
    """Payload of an external event delivered through WaitForEvent.

    An instance is placed on ``DurableGraphContext.last_event`` once the
    corresponding wait completes, so the next node can inspect it.
    """

    # Type string of the delivered event (e.g. "payment.completed")
    event_type: str
    # Event payload; empty dict when the event carried no data
    data: dict[str, Any] = field(default_factory=dict)
    # Transport-level metadata / extensions
    metadata: dict[str, Any] = field(default_factory=dict)
@@ -0,0 +1,51 @@
1
+ """
2
+ LlamaIndex Workflow Integration for Edda.
3
+
4
+ This module provides integration between LlamaIndex Workflow and Edda's durable
5
+ execution framework, making workflow execution crash-recoverable and supporting
6
+ durable wait operations.
7
+
8
+ Example:
9
+ from llama_index.core.workflow import Workflow, step, Event, StartEvent, StopEvent
10
+ from edda import workflow, WorkflowContext
11
+ from edda.integrations.llamaindex import DurableWorkflowRunner, DurableSleepEvent
12
+
13
+ # Define events
14
+ class ProcessedEvent(Event):
15
+ data: str
16
+
17
+ # Define workflow
18
+ class MyWorkflow(Workflow):
19
+ @step
20
+ async def process(self, ctx: Context, ev: StartEvent) -> ProcessedEvent:
21
+ return ProcessedEvent(data=f"processed: {ev.input}")
22
+
23
+ @step
24
+ async def finalize(self, ctx: Context, ev: ProcessedEvent) -> StopEvent:
25
+ return StopEvent(result={"status": "done", "data": ev.data})
26
+
27
+ # Create durable runner
28
+ runner = DurableWorkflowRunner(MyWorkflow)
29
+
30
+ # Use in Edda workflow
31
+ @workflow
32
+ async def my_workflow(ctx: WorkflowContext, input_data: str) -> dict:
33
+ result = await runner.run(ctx, input=input_data)
34
+ return result
35
+
36
+ Installation:
37
+ pip install 'edda-framework[llamaindex]'
38
+ """
39
+
40
+ from .events import DurableSleepEvent, DurableWaitEvent, ResumeEvent
41
+ from .exceptions import WorkflowExecutionError, WorkflowReplayError
42
+ from .workflow import DurableWorkflowRunner
43
+
44
+ __all__ = [
45
+ "DurableWorkflowRunner",
46
+ "DurableSleepEvent",
47
+ "DurableWaitEvent",
48
+ "ResumeEvent",
49
+ "WorkflowExecutionError",
50
+ "WorkflowReplayError",
51
+ ]
@@ -0,0 +1,160 @@
1
+ """Durable events for LlamaIndex Workflow integration.
2
+
3
+ These events signal to the DurableWorkflow that a durable operation
4
+ (sleep or wait for external event) should be performed.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from typing import TYPE_CHECKING, Any
10
+
11
+ # Lazy import to avoid requiring llama-index at import time
12
+ if TYPE_CHECKING:
13
+ pass
14
+
15
+
16
+ def _import_event_class() -> type[Any]:
17
+ """Import Event class with helpful error message."""
18
+ try:
19
+ from llama_index.core.workflow import Event # type: ignore[import-not-found]
20
+
21
+ return Event # type: ignore[no-any-return]
22
+ except ImportError as e:
23
+ msg = (
24
+ "llama-index-core is not installed. Install with:\n"
25
+ " pip install llama-index-core\n"
26
+ "or\n"
27
+ " pip install 'edda-framework[llamaindex]'"
28
+ )
29
+ raise ImportError(msg) from e
30
+
31
+
32
class DurableSleepEvent:
    """
    Marker event requesting a durable (crash-safe) sleep.

    Returning this from a step tells the durable runner to record the
    step's completion, arm an Edda durable timer for ``seconds``, and
    then resume the workflow carrying ``resume_data``.

    Example:
        @step
        async def rate_limited_step(self, ctx: Context, ev: SomeEvent) -> DurableSleepEvent:
            # Hit rate limit, need to wait
            return DurableSleepEvent(
                seconds=60,
                resume_data={"retry_count": ev.retry_count + 1},
            )
    """

    def __init__(
        self,
        seconds: float,
        resume_data: dict[str, Any] | None = None,
    ) -> None:
        """
        Args:
            seconds: How long to sleep, in seconds.
            resume_data: Payload delivered to the workflow on resume;
                defaults to an empty dict.
        """
        self.seconds = seconds
        self.resume_data = resume_data or {}

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-safe representation of this event."""
        return {
            "_type": "DurableSleepEvent",
            "seconds": self.seconds,
            "resume_data": self.resume_data,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> DurableSleepEvent:
        """Rebuild an instance from ``to_dict`` output."""
        return cls(data["seconds"], data.get("resume_data", {}))
81
+
82
+
83
class DurableWaitEvent:
    """
    Marker event requesting a durable wait for an external event.

    Returning this from a step tells the durable runner to record the
    step's completion, subscribe durably via Edda's wait_event(), and
    resume the workflow once the matching event arrives (or the timeout
    elapses).

    Example:
        @step
        async def wait_for_approval(self, ctx: Context, ev: OrderEvent) -> DurableWaitEvent:
            return DurableWaitEvent(
                event_type=f"approval.{ev.order_id}",
                timeout_seconds=3600,  # 1 hour timeout
            )
    """

    def __init__(
        self,
        event_type: str,
        timeout_seconds: float | None = None,
    ) -> None:
        """
        Args:
            event_type: Event type to wait for (e.g. "payment.completed").
            timeout_seconds: Optional timeout; None waits indefinitely.
        """
        self.event_type = event_type
        self.timeout_seconds = timeout_seconds

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-safe representation of this event."""
        return {
            "_type": "DurableWaitEvent",
            "event_type": self.event_type,
            "timeout_seconds": self.timeout_seconds,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> DurableWaitEvent:
        """Rebuild an instance from ``to_dict`` output."""
        return cls(data["event_type"], data.get("timeout_seconds"))
131
+
132
+
133
class ResumeEvent:
    """
    Internal event that hands control back to a workflow after a durable
    operation (DurableSleepEvent or DurableWaitEvent) completes.
    """

    def __init__(self, data: dict[str, Any] | None = None) -> None:
        """
        Args:
            data: Result of the completed operation - the sleep's
                resume_data or the received external event payload.
        """
        self.data = data if data else {}

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-safe representation of this event."""
        return {"_type": "ResumeEvent", "data": self.data}

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> ResumeEvent:
        """Rebuild an instance from ``to_dict`` output."""
        return cls(data.get("data", {}))
@@ -0,0 +1,15 @@
1
+ """Exceptions for LlamaIndex Workflow integration."""
2
+
3
+
4
class WorkflowExecutionError(Exception):
    """Raised when a workflow step fails during durable execution.

    Attributes:
        step_name: Name of the failing step, when known.
    """

    def __init__(self, message: str, step_name: str | None = None) -> None:
        super().__init__(message)
        self.step_name = step_name
10
+
11
+
12
class WorkflowReplayError(Exception):
    """Error during workflow replay.

    Raised when re-running recorded workflow history diverges or cannot
    be reconstructed.
    """
@@ -0,0 +1,306 @@
1
+ """DurableWorkflow - makes LlamaIndex Workflow execution durable via Edda.
2
+
3
+ This module provides integration between LlamaIndex Workflow and Edda's durable
4
+ execution framework, making workflow execution crash-recoverable and supporting
5
+ durable wait operations.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import importlib
11
+ from dataclasses import dataclass
12
+ from typing import TYPE_CHECKING, Any, TypeVar
13
+
14
+ from edda.activity import activity
15
+ from edda.pydantic_utils import to_json_dict
16
+
17
+ from .events import DurableSleepEvent, DurableWaitEvent, ResumeEvent
18
+ from .exceptions import WorkflowExecutionError
19
+
20
+ if TYPE_CHECKING:
21
+ from edda.context import WorkflowContext
22
+
23
+ T = TypeVar("T")
24
+
25
+
26
+ def _import_llamaindex_workflow() -> Any:
27
+ """Import llama_index.core.workflow with helpful error message."""
28
+ try:
29
+ from llama_index.core import workflow # type: ignore[import-not-found]
30
+
31
+ return workflow
32
+ except ImportError as e:
33
+ msg = (
34
+ "llama-index-core is not installed. Install with:\n"
35
+ " pip install llama-index-core\n"
36
+ "or\n"
37
+ " pip install 'edda-framework[llamaindex]'"
38
+ )
39
+ raise ImportError(msg) from e
40
+
41
+
42
def _serialize_event(event: Any) -> dict[str, Any]:
    """Serialize a LlamaIndex Event (or one of our durable events) to a dictionary."""
    # Our own durable event types know how to serialize themselves.
    if isinstance(event, (DurableSleepEvent, DurableWaitEvent, ResumeEvent)):
        return event.to_dict()

    # LlamaIndex events: prefer Pydantic's model_dump, fall back to the
    # instance __dict__ (public attributes only), then to an empty payload.
    if hasattr(event, "model_dump"):
        payload = event.model_dump()
    elif hasattr(event, "__dict__"):
        payload = {
            key: value
            for key, value in event.__dict__.items()
            if not key.startswith("_")
        }
    else:
        payload = {}

    # Tag with "module:QualName" so _deserialize_event can re-import the class.
    klass = event.__class__
    return {
        "_type": f"{klass.__module__}:{klass.__qualname__}",
        "_data": to_json_dict(payload),
    }
63
+
64
+
65
def _deserialize_event(data: dict[str, Any]) -> Any:
    """Deserialize a dictionary produced by _serialize_event back into an event."""
    type_tag = data.get("_type", "")

    # Our special durable events are tagged with their bare class names.
    special_events = {
        "DurableSleepEvent": DurableSleepEvent,
        "DurableWaitEvent": DurableWaitEvent,
        "ResumeEvent": ResumeEvent,
    }
    if type_tag in special_events:
        return special_events[type_tag].from_dict(data)

    # LlamaIndex events are tagged "module:QualName" - re-import the class.
    if ":" in type_tag:
        module_name, _, class_name = type_tag.rpartition(":")
        event_class = getattr(importlib.import_module(module_name), class_name)
        payload = data.get("_data", {})

        # Pydantic models validate their payload; plain classes get kwargs.
        if hasattr(event_class, "model_validate"):
            return event_class.model_validate(payload)
        return event_class(**payload)

    raise ValueError(f"Unknown event type: {type_tag}")
90
+
91
+
92
@dataclass
class StepResult:
    """Result of executing a single workflow step."""

    # Event produced by the step.
    event: Any
    # Name of the step method that produced it.
    step_name: str
98
+
99
+
100
@activity
async def _run_step(
    ctx: WorkflowContext,  # noqa: ARG001 - Used by @activity decorator
    workflow_class_path: str,
    step_name: str,
    event_data: dict[str, Any],
    context_data: dict[str, Any],
) -> dict[str, Any]:
    """
    Execute a single workflow step as a durable activity.

    This activity is the core of DurableWorkflow - it runs one step and returns
    the serialized result event.

    Args:
        ctx: Edda workflow context (consumed by the @activity decorator).
        workflow_class_path: "module:QualName" path of the Workflow class.
        step_name: Name of the step method to invoke on a fresh instance.
        event_data: Serialized input event (format of _serialize_event).
        context_data: Carries the inter-step "store" dict under key "store".

    Returns:
        Dict with "event" (serialized result event) and "context_store"
        (the store, possibly mutated by the step, to thread into the next step).

    Raises:
        WorkflowExecutionError: If the step is missing or raises.
    """
    # Import the workflow class from its "module:QualName" path.
    module_path, class_name = workflow_class_path.rsplit(":", 1)
    module = importlib.import_module(module_path)
    workflow_class = getattr(module, class_name)

    # Create workflow instance (a fresh one per step execution).
    workflow_instance = workflow_class()

    # Deserialize the input event
    input_event = _deserialize_event(event_data)

    # Get the step method (bound); None means the class has no such step.
    step_method = getattr(workflow_instance, step_name, None)
    if step_method is None:
        raise WorkflowExecutionError(f"Step '{step_name}' not found", step_name)

    # Create a minimal context for the step
    # Note: LlamaIndex Context is created per-run, we create a simple mock

    # Execute the step
    try:
        # The step method signature is: async def step(self, ctx, event) -> Event
        # We need to provide a context - use a simple object with store
        # NOTE(review): this stand-in only exposes `.store`; steps relying on
        # other Context APIs would fail here - confirm step coverage.
        @dataclass
        class SimpleContext:
            store: dict[str, Any]

        simple_ctx = SimpleContext(store=context_data.get("store", {}))
        result_event = await step_method(simple_ctx, input_event)
    except Exception as e:
        # Wrap any step failure so callers get the failing step's name.
        raise WorkflowExecutionError(f"Step '{step_name}' failed: {e}", step_name) from e

    # Serialize the result; the store may have been mutated by the step.
    return {
        "event": _serialize_event(result_event),
        "context_store": simple_ctx.store,
    }
151
+
152
+
153
class DurableWorkflowRunner:
    """
    Runner that executes a LlamaIndex Workflow with Edda durability.

    This class wraps a LlamaIndex Workflow and executes it step-by-step,
    recording each step as an Edda Activity for crash recovery.

    Example:
        from llama_index.core.workflow import Workflow, step, StartEvent, StopEvent

        class MyWorkflow(Workflow):
            @step
            async def process(self, ctx: Context, ev: StartEvent) -> StopEvent:
                return StopEvent(result="done")

        runner = DurableWorkflowRunner(MyWorkflow)

        @workflow
        async def my_edda_workflow(ctx: WorkflowContext) -> str:
            result = await runner.run(ctx, input_data="hello")
            return result
    """

    def __init__(self, workflow_class: type) -> None:
        """
        Initialize DurableWorkflowRunner.

        Args:
            workflow_class: A LlamaIndex Workflow class (not instance)

        Raises:
            TypeError: If workflow_class is not a Workflow subclass.
        """
        # Validate before storing any state, so a bad argument fails fast.
        llamaindex_workflow = _import_llamaindex_workflow()
        # The isinstance guard keeps issubclass from raising its own opaque
        # TypeError when a non-class (e.g. an instance) is passed in.
        if not isinstance(workflow_class, type) or not issubclass(
            workflow_class, llamaindex_workflow.Workflow
        ):
            # Fix: report the offending object itself. The previous message
            # used type(workflow_class).__name__, which for any class argument
            # printed the metaclass name (e.g. "type") instead of the class.
            raise TypeError(f"Expected a Workflow subclass, got {workflow_class!r}")

        self._workflow_class = workflow_class
        self._class_path = f"{workflow_class.__module__}:{workflow_class.__qualname__}"

    async def run(
        self,
        ctx: "WorkflowContext",
        **kwargs: Any,
    ) -> Any:
        """
        Execute the workflow durably with Edda crash recovery.

        Args:
            ctx: Edda WorkflowContext
            **kwargs: Arguments passed to StartEvent

        Returns:
            The result from StopEvent

        Raises:
            WorkflowExecutionError: If an event has no step that accepts it.
        """
        from edda.channels import sleep as edda_sleep
        from edda.channels import wait_event as edda_wait_event

        llamaindex_workflow = _import_llamaindex_workflow()

        # Create a workflow instance only to analyze its steps.
        workflow_instance = self._workflow_class()

        # Build step registry (step name -> accepted event types).
        step_registry = self._build_step_registry(workflow_instance)

        # Execution starts from a StartEvent built from **kwargs.
        start_event_class = llamaindex_workflow.StartEvent
        current_event = start_event_class(**kwargs)
        context_store: dict[str, Any] = {}

        # Main execution loop: dispatch each event to its step until a
        # StopEvent terminates the workflow.
        while True:
            # Find the step that handles this event type.
            event_type = type(current_event)
            step_name = self._find_step_for_event(step_registry, event_type)

            if step_name is None:
                # No step found - only acceptable if this is a stop event.
                if isinstance(current_event, llamaindex_workflow.StopEvent):
                    return current_event.result
                raise WorkflowExecutionError(f"No step found for event type: {event_type.__name__}")

            # Execute the step as a durable activity (replayed on recovery).
            result = await _run_step(  # type: ignore[misc,call-arg]
                ctx,  # type: ignore[arg-type]
                self._class_path,
                step_name,
                _serialize_event(current_event),
                {"store": context_store},
            )

            # Thread the (possibly mutated) store into the next step.
            context_store = result.get("context_store", {})

            # Deserialize the result event.
            result_event = _deserialize_event(result["event"])

            # Handle special durable events.
            if isinstance(result_event, DurableSleepEvent):
                # Durable sleep, then resume with the sleep's resume_data.
                await edda_sleep(ctx, int(result_event.seconds))
                current_event = ResumeEvent(data=result_event.resume_data)

            elif isinstance(result_event, DurableWaitEvent):
                # Durable wait for external event.
                # Fix: compare against None explicitly so a timeout of 0 is
                # honored rather than silently meaning "no timeout".
                timeout = result_event.timeout_seconds
                received = await edda_wait_event(
                    ctx,
                    result_event.event_type,
                    timeout_seconds=int(timeout) if timeout is not None else None,
                )
                # Resume with ResumeEvent containing the received data.
                current_event = ResumeEvent(data=received.data if hasattr(received, "data") else {})

            elif isinstance(result_event, llamaindex_workflow.StopEvent):
                # Workflow completed.
                return result_event.result

            else:
                # Normal event transition.
                current_event = result_event

    def _build_step_registry(self, workflow_instance: Any) -> dict[str, list[type]]:
        """Build a registry mapping step names to their accepted input event types."""
        registry: dict[str, list[type]] = {}

        # Scan public class attributes for @step-decorated callables.
        workflow_class = type(workflow_instance)
        for name in dir(workflow_class):
            if name.startswith("_"):
                continue

            # Get the raw function from the class (not a bound method).
            method = getattr(workflow_class, name, None)
            if method is None or not callable(method):
                continue

            # The @step decorator attaches _step_config to the function;
            # accepted_events lists the event types this step handles.
            if hasattr(method, "_step_config"):
                step_config = method._step_config
                if hasattr(step_config, "accepted_events"):
                    registry[name] = list(step_config.accepted_events)

        return registry

    def _find_step_for_event(self, registry: dict[str, list[type]], event_type: type) -> str | None:
        """Return the name of the first step whose accepted types cover event_type."""
        for step_name, accepted_types in registry.items():
            for accepted_type in accepted_types:
                # Subclass check so steps accepting a base event also match.
                if issubclass(event_type, accepted_type):
                    return step_name
        return None
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: edda-framework
3
- Version: 0.14.1
3
+ Version: 0.15.0
4
4
  Summary: Lightweight Durable Execution Framework
5
5
  Project-URL: Homepage, https://github.com/i2y/edda
6
6
  Project-URL: Documentation, https://github.com/i2y/edda#readme
@@ -42,6 +42,10 @@ Requires-Dist: starlette>=0.40.0; extra == 'dev'
42
42
  Requires-Dist: testcontainers[mysql]>=4.0.0; extra == 'dev'
43
43
  Requires-Dist: testcontainers[postgres]>=4.0.0; extra == 'dev'
44
44
  Requires-Dist: tsuno>=0.1.3; extra == 'dev'
45
+ Provides-Extra: graph
46
+ Requires-Dist: pydantic-graph>=0.1.0; extra == 'graph'
47
+ Provides-Extra: llamaindex
48
+ Requires-Dist: llama-index-core>=0.12.0; extra == 'llamaindex'
45
49
  Provides-Extra: mcp
46
50
  Requires-Dist: mcp>=1.22.0; extra == 'mcp'
47
51
  Provides-Extra: mirascope
@@ -97,6 +101,8 @@ For detailed documentation, visit [https://i2y.github.io/edda/](https://i2y.gith
97
101
  - ⚡ **Instant Notifications**: PostgreSQL LISTEN/NOTIFY for near-instant event delivery (optional)
98
102
  - 🤖 **MCP Integration**: Expose durable workflows as AI tools via Model Context Protocol
99
103
  - 🧠 **Mirascope Integration**: Durable LLM calls
104
+ - 🦙 **LlamaIndex Integration**: Make LlamaIndex Workflows durable with crash recovery
105
+ - 📊 **pydantic-graph Integration**: Durable graph-based workflows (experimental)
100
106
  - 🌍 **ASGI/WSGI Support**: Deploy with your preferred server (uvicorn, gunicorn, uWSGI)
101
107
 
102
108
  ## Use Cases
@@ -233,6 +239,12 @@ uv add edda-framework --extra viewer
233
239
  # With PostgreSQL instant notifications (LISTEN/NOTIFY)
234
240
  uv add edda-framework --extra postgres-notify
235
241
 
242
+ # With LlamaIndex Workflow integration
243
+ uv add edda-framework --extra llamaindex
244
+
245
+ # With pydantic-graph integration (experimental)
246
+ uv add edda-framework --extra graph
247
+
236
248
  # All extras (PostgreSQL, MySQL, Viewer UI)
237
249
  uv add edda-framework --extra postgresql --extra mysql --extra viewer
238
250
  ```
@@ -13,6 +13,15 @@ edda/retry.py,sha256=t4_E1skrhotA1XWHTLbKi-DOgCMasOUnhI9OT-O_eCE,6843
13
13
  edda/workflow.py,sha256=hfBZM0JrtK0IkvZSrva0VmYVyvKCdiJ5FWFmIVENfrM,8807
14
14
  edda/wsgi.py,sha256=1pGE5fhHpcsYnDR8S3NEFKWUs5P0JK4roTAzX9BsIj0,2391
15
15
  edda/integrations/__init__.py,sha256=F_CaTvlDEbldfOpPKq_U9ve1E573tS6XzqXnOtyHcXI,33
16
+ edda/integrations/graph/__init__.py,sha256=MwGgkTDsOH1eaLYzWFxBR2DHumiSAz-UUsqXSNU8aWw,1652
17
+ edda/integrations/graph/context.py,sha256=ZQaesBVDAmF01P1liX2BtxaprQDRUgLKL0e94P8Yrdc,2529
18
+ edda/integrations/graph/exceptions.py,sha256=FwDNUafYHzWlPeObLLTjyOVj8M94HgZ2a3DLR6VcOj4,286
19
+ edda/integrations/graph/graph.py,sha256=RY3BbCUO_rsqQ2wP7ghfstT59stC_KNyJ7LmvljJSFk,12957
20
+ edda/integrations/graph/nodes.py,sha256=JHoJYCEAGBKTYJ-458pMYvVxEu9hunAzzEuNVcEBbvM,4746
21
+ edda/integrations/llamaindex/__init__.py,sha256=YR1ikaf7AanaQztGZfHDZtoMqz3ieTGjBeIQ3sAkzP8,1604
22
+ edda/integrations/llamaindex/events.py,sha256=brs0UVu3kM5N9mWKo91FNBBh3FL9xI2WEzgHmp-j4a8,4751
23
+ edda/integrations/llamaindex/exceptions.py,sha256=41BefzhS1qd2L1U-JHBNIkTQS_Vz480OATqOtJNmDGU,376
24
+ edda/integrations/llamaindex/workflow.py,sha256=MXvPe8d7FLivCLoDlzQA-TgQi82nulUmW0omasq4QEk,10813
16
25
  edda/integrations/mcp/__init__.py,sha256=YK-8m0DIdP-RSqewlIX7xnWU7TD3NioCiW2_aZSgnn8,1232
17
26
  edda/integrations/mcp/decorators.py,sha256=31SmbDwmHEGvUNa3aaatW91hBkpnS5iN9uy47dID3J4,10037
18
27
  edda/integrations/mcp/server.py,sha256=Q5r4AbMn-9gBcy2CZocbgW7O0fn7Qb4e9CBJa1FEmzU,14507
@@ -47,8 +56,8 @@ edda/visualizer/mermaid_generator.py,sha256=XWa2egoOTNDfJEjPcwoxwQmblUqXf7YInWFj
47
56
  edda/migrations/mysql/20251217000000_initial_schema.sql,sha256=LpINasESRhadOeqABwDk4JZ0OZ4_zQw_opnhIR4Xe9U,12367
48
57
  edda/migrations/postgresql/20251217000000_initial_schema.sql,sha256=hCaGMWeptpzpnsjfNKVsMYuwPRe__fK9E0VZpClAumQ,11732
49
58
  edda/migrations/sqlite/20251217000000_initial_schema.sql,sha256=Wq9gCnQ0K9SOt0PY_8f1MG4va8rLVWIIcf2lnRzSK5g,11906
50
- edda_framework-0.14.1.dist-info/METADATA,sha256=3WamC1lB2LrLdUIbOPrYeoWsqW8leTXF8zRFB8rObpY,37567
51
- edda_framework-0.14.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
52
- edda_framework-0.14.1.dist-info/entry_points.txt,sha256=dPH47s6UoJgUZxHoeSMqZsQkLaSE-SGLi-gh88k2WrU,48
53
- edda_framework-0.14.1.dist-info/licenses/LICENSE,sha256=udxb-V7_cYKTHqW7lNm48rxJ-Zpf0WAY_PyGDK9BPCo,1069
54
- edda_framework-0.14.1.dist-info/RECORD,,
59
+ edda_framework-0.15.0.dist-info/METADATA,sha256=d7VXuP5MWTx0WvL4KzwmZVl0AUtTJzTxaLWL7Q0Ou8Q,38074
60
+ edda_framework-0.15.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
61
+ edda_framework-0.15.0.dist-info/entry_points.txt,sha256=dPH47s6UoJgUZxHoeSMqZsQkLaSE-SGLi-gh88k2WrU,48
62
+ edda_framework-0.15.0.dist-info/licenses/LICENSE,sha256=udxb-V7_cYKTHqW7lNm48rxJ-Zpf0WAY_PyGDK9BPCo,1069
63
+ edda_framework-0.15.0.dist-info/RECORD,,