edda-framework 0.3.1__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edda/app.py +16 -1
- edda/hooks.py +11 -11
- edda/integrations/mcp/decorators.py +3 -4
- edda/integrations/mcp/server.py +157 -5
- edda/integrations/opentelemetry/__init__.py +39 -0
- edda/integrations/opentelemetry/hooks.py +579 -0
- {edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/METADATA +53 -1
- {edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/RECORD +11 -9
- {edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/WHEEL +0 -0
- {edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/entry_points.txt +0 -0
- {edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/licenses/LICENSE +0 -0
edda/app.py
CHANGED
@@ -238,7 +238,9 @@ class EddaApp:
         Register a default CloudEvent handler for a workflow.
 
         The default handler extracts the CloudEvent data and passes it
-        as kwargs to workflow.start().
+        as kwargs to workflow.start(). If the CloudEvent contains
+        traceparent/tracestate extension attributes (for distributed tracing),
+        they are automatically injected into _trace_context.
 
         Args:
             event_type: CloudEvent type (same as workflow name)
@@ -250,11 +252,24 @@ class EddaApp:
             # Extract data from CloudEvent
             data = event.get_data()
 
+            # Extract trace context from CloudEvent extension attributes
+            # (W3C Trace Context: traceparent, tracestate)
+            trace_context: dict[str, str] = {}
+            attrs = event.get_attributes()
+            if "traceparent" in attrs:
+                trace_context["traceparent"] = str(attrs["traceparent"])
+            if "tracestate" in attrs:
+                trace_context["tracestate"] = str(attrs["tracestate"])
+
             # Start workflow with data as kwargs
             if isinstance(data, dict):
+                # Inject trace context if present
+                if trace_context:
+                    data = {**data, "_trace_context": trace_context}
                 await wf.start(**data)
             else:
                 # If data is not a dict, start without arguments
                 # (trace context cannot be injected)
                 await wf.start()
 
         # Register the handler
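A standalone sketch (plain Python, not Edda's API) of the extraction logic in this hunk shows what `wf.start(**data)` ends up receiving when a CloudEvent carries W3C Trace Context headers; the `traceparent` value below is the example from the W3C spec:

```python
# Standalone sketch of the extraction added above (no Edda imports).
def merge_trace_context(data: dict, attrs: dict) -> dict:
    trace_context = {
        key: str(attrs[key])
        for key in ("traceparent", "tracestate")
        if key in attrs
    }
    # Non-dict payloads are started without kwargs, so nothing can be injected there.
    return {**data, "_trace_context": trace_context} if trace_context else data

attrs = {"traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}
kwargs = merge_trace_context({"order_id": "ORD-123"}, attrs)
# kwargs == {"order_id": "ORD-123",
#            "_trace_context": {"traceparent": "00-4bf92f35...-01"}}
```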
edda/hooks.py
CHANGED
@@ -12,8 +12,8 @@ Example:
     ... async def on_workflow_start(self, instance_id, workflow_name, input_data):
     ...     print(f"Workflow {workflow_name} started: {instance_id}")
     ...
-    ... async def on_activity_complete(self, instance_id,
-    ...     print(f"Activity {activity_name} completed (cache_hit={cache_hit})")
+    ... async def on_activity_complete(self, instance_id, activity_id, activity_name, result, cache_hit):
+    ...     print(f"Activity {activity_name} ({activity_id}) completed (cache_hit={cache_hit})")
     >>>
     >>> app = EddaApp(service_name="my-service", db_url="...", hooks=MyHooks())
     """
@@ -86,7 +86,7 @@ class WorkflowHooks(Protocol):
     async def on_activity_start(
         self,
         instance_id: str,
-
+        activity_id: str,
         activity_name: str,
         is_replaying: bool,
     ) -> None:
@@ -95,7 +95,7 @@ class WorkflowHooks(Protocol):
 
         Args:
             instance_id: Unique workflow instance ID
-
+            activity_id: Activity ID (e.g., "reserve_inventory:1")
             activity_name: Name of the activity function
             is_replaying: True if this is a replay (cached result)
         """
@@ -104,7 +104,7 @@ class WorkflowHooks(Protocol):
     async def on_activity_complete(
         self,
         instance_id: str,
-
+        activity_id: str,
         activity_name: str,
         result: Any,
         cache_hit: bool,
@@ -114,7 +114,7 @@ class WorkflowHooks(Protocol):
 
         Args:
             instance_id: Unique workflow instance ID
-
+            activity_id: Activity ID (e.g., "reserve_inventory:1")
             activity_name: Name of the activity function
             result: Return value from the activity
             cache_hit: True if result was retrieved from cache (replay)
@@ -124,7 +124,7 @@ class WorkflowHooks(Protocol):
     async def on_activity_failed(
         self,
         instance_id: str,
-
+        activity_id: str,
         activity_name: str,
         error: Exception,
     ) -> None:
@@ -133,7 +133,7 @@ class WorkflowHooks(Protocol):
 
         Args:
             instance_id: Unique workflow instance ID
-
+            activity_id: Activity ID (e.g., "reserve_inventory:1")
            activity_name: Name of the activity function
            error: Exception that caused the failure
        """
@@ -231,7 +231,7 @@ class HooksBase(WorkflowHooks, ABC):
     async def on_activity_start(
         self,
         instance_id: str,
-
+        activity_id: str,
         activity_name: str,
         is_replaying: bool,
     ) -> None:
@@ -240,7 +240,7 @@ class HooksBase(WorkflowHooks, ABC):
     async def on_activity_complete(
         self,
         instance_id: str,
-
+        activity_id: str,
         activity_name: str,
         result: Any,
         cache_hit: bool,
@@ -250,7 +250,7 @@ class HooksBase(WorkflowHooks, ABC):
     async def on_activity_failed(
         self,
         instance_id: str,
-
+        activity_id: str,
         activity_name: str,
         error: Exception,
     ) -> None:
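With `activity_id` now threaded through every activity hook, custom hook classes need updated overrides. A minimal sketch against the new signatures, assuming (as the module docstring's example implies) that `HooksBase` supplies no-op defaults for hooks that are not overridden:

```python
from typing import Any

from edda.hooks import HooksBase


class LoggingHooks(HooksBase):
    """Print one line per activity transition, including the new activity_id."""

    async def on_activity_start(
        self, instance_id: str, activity_id: str, activity_name: str, is_replaying: bool
    ) -> None:
        print(f"[{instance_id}] start {activity_name} ({activity_id}) replay={is_replaying}")

    async def on_activity_complete(
        self, instance_id: str, activity_id: str, activity_name: str, result: Any, cache_hit: bool
    ) -> None:
        print(f"[{instance_id}] done {activity_name} ({activity_id}) cache_hit={cache_hit}")

    async def on_activity_failed(
        self, instance_id: str, activity_id: str, activity_name: str, error: Exception
    ) -> None:
        print(f"[{instance_id}] failed {activity_name} ({activity_id}): {error!r}")
```

It is wired up the same way as the docstring example: `EddaApp(service_name="my-service", db_url="...", hooks=LoggingHooks())`.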
edda/integrations/mcp/decorators.py
CHANGED

@@ -6,11 +6,10 @@ import inspect
 from collections.abc import Callable
 from typing import TYPE_CHECKING, Any, cast
 
-from edda.workflow import workflow
+from edda.workflow import Workflow, workflow
 
 if TYPE_CHECKING:
     from edda.integrations.mcp.server import EddaMCPServer
-    from edda.workflow import Workflow
 
 
 def create_durable_tool(
@@ -98,7 +97,7 @@ def create_durable_tool(
     async def status_tool(instance_id: str) -> dict[str, Any]:
         """Check workflow status."""
         try:
-            instance = await server.
+            instance = await server.storage.get_instance(instance_id)
             if instance is None:
                 return {
                     "content": [
@@ -142,7 +141,7 @@ def create_durable_tool(
     async def result_tool(instance_id: str) -> dict[str, Any]:
         """Get workflow result (if completed)."""
         try:
-            instance = await server.
+            instance = await server.storage.get_instance(instance_id)
             if instance is None:
                 return {
                     "content": [
edda/integrations/mcp/server.py
CHANGED
@@ -3,11 +3,14 @@
 from __future__ import annotations
 
 from collections.abc import Callable
-from typing import Any, cast
+from typing import TYPE_CHECKING, Any, cast
 
 from edda.app import EddaApp
 from edda.workflow import Workflow
 
+if TYPE_CHECKING:
+    from edda.storage.protocol import StorageProtocol
+
 try:
     from mcp.server.fastmcp import FastMCP  # type: ignore[import-not-found]
 except ImportError as e:
@@ -45,7 +48,13 @@ class EddaMCPServer:
 
         # Deploy with uvicorn (HTTP transport)
         if __name__ == "__main__":
+            import asyncio
             import uvicorn
+
+            async def startup():
+                await server.initialize()
+
+            asyncio.run(startup())
             uvicorn.run(server.asgi_app(), host="0.0.0.0", port=8000)
 
         # Or deploy with stdio (for MCP clients, e.g., Claude Desktop)
@@ -97,6 +106,22 @@ class EddaMCPServer:
         # Registry of durable tools (workflow_name -> Workflow instance)
         self._workflows: dict[str, Workflow] = {}
 
+    @property
+    def storage(self) -> StorageProtocol:
+        """
+        Access workflow storage for querying instances and history.
+
+        Returns:
+            StorageProtocol: Storage backend for workflow state
+
+        Example:
+            ```python
+            instance = await server.storage.get_instance(instance_id)
+            history = await server.storage.get_history(instance_id)
+            ```
+        """
+        return self._edda_app.storage
+
     def durable_tool(
         self,
         func: Callable[..., Any] | None = None,
@@ -135,6 +160,59 @@ class EddaMCPServer:
             return decorator
         return decorator(func)
 
+    def prompt(
+        self,
+        func: Callable[..., Any] | None = None,
+        *,
+        description: str = "",
+    ) -> Callable[..., Any]:
+        """
+        Decorator to define a prompt template.
+
+        Prompts can access workflow state to generate dynamic, context-aware
+        prompts for AI clients (Claude Desktop, etc.).
+
+        Args:
+            func: Prompt function (async or sync)
+            description: Prompt description for MCP clients
+
+        Returns:
+            Decorated function
+
+        Example:
+            ```python
+            from fastmcp.prompts.prompt import PromptMessage, TextContent
+
+            @server.prompt(description="Analyze workflow results")
+            async def analyze_workflow(instance_id: str) -> PromptMessage:
+                '''Generate a prompt to analyze a specific workflow execution.'''
+                instance = await server.storage.get_instance(instance_id)
+                history = await server.storage.get_history(instance_id)
+
+                text = f'''Analyze this workflow:
+
+                Instance ID: {instance_id}
+                Status: {instance['status']}
+                Activities: {len(history)}
+
+                Please identify any issues or optimization opportunities.'''
+
+                return PromptMessage(
+                    role="user",
+                    content=TextContent(type="text", text=text)
+                )
+            ```
+        """
+
+        def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
+            # Use FastMCP's native prompt decorator
+            prompt_desc = description or f.__doc__ or f"Prompt: {f.__name__}"
+            return cast(Callable[..., Any], self._mcp.prompt(description=prompt_desc)(f))
+
+        if func is None:
+            return decorator
+        return decorator(func)
+
     def asgi_app(self) -> Callable[..., Any]:
         """
         Create ASGI application with MCP + CloudEvents support.
@@ -221,11 +299,9 @@ class EddaMCPServer:
         """
         Initialize the EddaApp (setup replay engine, storage, etc.).
 
-        This method must be called before running the server in stdio mode.
-        For HTTP mode (asgi_app()), initialization happens automatically
-        when the ASGI app is deployed.
+        This method must be called before running the server in either stdio or HTTP mode.
 
-        Example:
+        Example (stdio mode):
            ```python
            async def main():
                await server.initialize()
@@ -235,9 +311,85 @@ class EddaMCPServer:
             import asyncio
             asyncio.run(main())
             ```
+
+        Example (HTTP mode):
+            ```python
+            import asyncio
+            import uvicorn
+
+            async def startup():
+                await server.initialize()
+
+            asyncio.run(startup())
+            uvicorn.run(server.asgi_app(), host="0.0.0.0", port=8000)
+            ```
         """
         await self._edda_app.initialize()
 
+    async def shutdown(self) -> None:
+        """
+        Shutdown the server and cleanup resources.
+
+        Stops background tasks (auto-resume, timer checks, event timeouts),
+        closes storage connections, and performs graceful shutdown.
+
+        This method should be called when the server is shutting down.
+
+        Example (stdio mode):
+            ```python
+            import signal
+            import asyncio
+
+            async def main():
+                server = EddaMCPServer(...)
+                await server.initialize()
+
+                # Setup signal handlers for graceful shutdown
+                loop = asyncio.get_running_loop()
+                shutdown_event = asyncio.Event()
+
+                def signal_handler():
+                    shutdown_event.set()
+
+                for sig in (signal.SIGTERM, signal.SIGINT):
+                    loop.add_signal_handler(sig, signal_handler)
+
+                # Run server
+                try:
+                    await server.run_stdio()
+                finally:
+                    await server.shutdown()
+
+            if __name__ == "__main__":
+                asyncio.run(main())
+            ```
+
+        Example (HTTP mode with uvicorn):
+            ```python
+            import asyncio
+            import uvicorn
+
+            async def startup():
+                await server.initialize()
+
+            async def shutdown_handler():
+                await server.shutdown()
+
+            # Use uvicorn lifecycle events
+            config = uvicorn.Config(
+                server.asgi_app(),
+                host="0.0.0.0",
+                port=8000,
+            )
+            server_instance = uvicorn.Server(config)
+
+            # Uvicorn handles SIGTERM/SIGINT automatically
+            await server_instance.serve()
+            await shutdown_handler()
+            ```
+        """
+        await self._edda_app.shutdown()
+
     async def run_stdio(self) -> None:
         """
         Run MCP server with stdio transport (for MCP clients, e.g., Claude Desktop).
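Since `initialize()` is now required before serving in either transport and `shutdown()` stops the background tasks, a condensed stdio entrypoint (a sketch assembled from the docstring examples above; constructor arguments are elided just as they are there) looks like:

```python
import asyncio

from edda.integrations.mcp.server import EddaMCPServer

server = EddaMCPServer(...)  # constructor arguments elided, as in the docstring examples


async def main() -> None:
    await server.initialize()      # required before stdio or HTTP serving
    try:
        await server.run_stdio()   # serve until the MCP client disconnects
    finally:
        await server.shutdown()    # stop auto-resume/timer tasks, close storage


if __name__ == "__main__":
    asyncio.run(main())
```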
edda/integrations/opentelemetry/__init__.py
ADDED

@@ -0,0 +1,39 @@
+"""
+Edda OpenTelemetry Integration.
+
+Provides OpenTelemetry tracing and optional metrics for Edda workflows.
+
+Example:
+    ```python
+    from edda import EddaApp
+    from edda.integrations.opentelemetry import OpenTelemetryHooks
+
+    hooks = OpenTelemetryHooks(
+        service_name="order-service",
+        otlp_endpoint="http://localhost:4317",  # Optional
+        enable_metrics=True,  # Optional
+    )
+
+    app = EddaApp(
+        service_name="order-service",
+        db_url="sqlite:///workflow.db",
+        hooks=hooks,
+    )
+    ```
+
+Installation:
+    ```bash
+    pip install edda-framework[opentelemetry]
+
+    # Or using uv
+    uv add edda-framework --extra opentelemetry
+    ```
+"""
+
+from edda.integrations.opentelemetry.hooks import (
+    OpenTelemetryHooks,
+    extract_trace_context,
+    inject_trace_context,
+)
+
+__all__ = ["OpenTelemetryHooks", "inject_trace_context", "extract_trace_context"]
edda/integrations/opentelemetry/hooks.py
ADDED

@@ -0,0 +1,579 @@
+"""
+OpenTelemetry hooks implementation for Edda workflows.
+
+This module provides the OpenTelemetryHooks class that integrates OpenTelemetry
+tracing and optional metrics with Edda's workflow execution.
+"""
+
+from __future__ import annotations
+
+import time
+from typing import TYPE_CHECKING, Any
+
+from edda.hooks import HooksBase
+
+if TYPE_CHECKING:
+    from opentelemetry.context import Context
+    from opentelemetry.trace import Span, Tracer
+
+# Check if OpenTelemetry is available
+try:
+    from opentelemetry import trace
+    from opentelemetry.context import Context
+    from opentelemetry.sdk.resources import Resource
+    from opentelemetry.sdk.trace import TracerProvider
+    from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
+    from opentelemetry.trace import Span, Status, StatusCode, Tracer
+    from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+
+    _OPENTELEMETRY_AVAILABLE = True
+except ImportError:
+    _OPENTELEMETRY_AVAILABLE = False
+
+
+class OpenTelemetryHooks(HooksBase):
+    """
+    OpenTelemetry tracing and metrics integration for Edda workflows.
+
+    Creates distributed traces with:
+    - Workflow spans as parent spans
+    - Activity spans as child spans
+    - Error recording and status propagation
+    - Retry event tracking
+    - Optional metrics (counters, histograms)
+
+    Span Hierarchy::
+
+        workflow:order_workflow (parent)
+        ├── activity:reserve_inventory (child)
+        │   └── [event: retry] (if retry occurs)
+        ├── activity:process_payment (child)
+        └── activity:ship_order (child)
+            └── [event: event_received] (if wait_event used)
+
+    Example::
+
+        from edda import EddaApp
+        from edda.integrations.opentelemetry import OpenTelemetryHooks
+
+        hooks = OpenTelemetryHooks(
+            service_name="order-service",
+            otlp_endpoint="http://localhost:4317",  # Optional
+            enable_metrics=True,  # Optional
+        )
+
+        app = EddaApp(
+            service_name="order-service",
+            db_url="sqlite:///workflow.db",
+            hooks=hooks,
+        )
+
+    Attributes:
+        service.name: Service name for resource identification
+        service.version: Service version (default: "1.0.0")
+        edda.framework: Always "true" to identify Edda workflows
+
+    Installation::
+
+        pip install edda-framework[opentelemetry]
+    """
+
+    def __init__(
+        self,
+        service_name: str = "edda",
+        otlp_endpoint: str | None = None,
+        enable_metrics: bool = False,
+    ) -> None:
+        """
+        Initialize OpenTelemetry hooks.
+
+        Args:
+            service_name: Service name for resource identification
+            otlp_endpoint: OTLP endpoint URL (e.g., "http://localhost:4317").
+                If None, uses ConsoleSpanExporter for local development.
+            enable_metrics: Enable OpenTelemetry metrics (counters, histograms)
+
+        Raises:
+            ImportError: If OpenTelemetry packages are not installed
+        """
+        if not _OPENTELEMETRY_AVAILABLE:
+            raise ImportError(
+                "OpenTelemetry packages are not installed. "
+                "Install them with: pip install edda-framework[opentelemetry]"
+            )
+
+        self._tracer = self._setup_tracing(service_name, otlp_endpoint)
+        self._propagator = TraceContextTextMapPropagator()
+
+        # Span lifecycle management
+        self._workflow_spans: dict[str, Span] = {}
+        self._activity_spans: dict[str, Span] = {}
+        self._workflow_start_times: dict[str, float] = {}
+        self._activity_start_times: dict[str, float] = {}
+
+        # Optional metrics
+        self._enable_metrics = enable_metrics
+        if enable_metrics:
+            self._setup_metrics(service_name, otlp_endpoint)
+
+    def _setup_tracing(self, service_name: str, otlp_endpoint: str | None) -> Tracer:
+        """Configure OpenTelemetry tracing.
+
+        If a TracerProvider is already configured (e.g., by ASGI/WSGI middleware),
+        it will be reused instead of creating a new one. This enables trace context
+        propagation from external sources.
+        """
+        from opentelemetry.trace import NoOpTracerProvider
+
+        # Check if a TracerProvider is already configured
+        existing_provider = trace.get_tracer_provider()
+
+        # Only create new provider if none exists (NoOpTracerProvider is the default)
+        if isinstance(existing_provider, NoOpTracerProvider):
+            resource = Resource.create(
+                {
+                    "service.name": service_name,
+                    "service.version": "1.0.0",
+                    "edda.framework": "true",
+                }
+            )
+
+            provider = TracerProvider(resource=resource)
+
+            if otlp_endpoint:
+                # Production: OTLP exporter
+                try:
+                    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
+                        OTLPSpanExporter,
+                    )
+
+                    exporter = OTLPSpanExporter(endpoint=otlp_endpoint, insecure=True)
+                    provider.add_span_processor(BatchSpanProcessor(exporter))
+                except ImportError:
+                    # Fallback to console if OTLP exporter not installed
+                    provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
+            else:
+                # Development: Console exporter
+                provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
+
+            trace.set_tracer_provider(provider)
+
+        # Always get tracer from current provider (whether new or existing)
+        return trace.get_tracer("edda.opentelemetry", "1.0.0")
+
+    def _setup_metrics(self, service_name: str, otlp_endpoint: str | None) -> None:
+        """Configure OpenTelemetry metrics (optional)."""
+        try:
+            from opentelemetry import metrics
+            from opentelemetry.sdk.metrics import MeterProvider
+            from opentelemetry.sdk.metrics.export import (
+                ConsoleMetricExporter,
+                MetricExporter,
+                PeriodicExportingMetricReader,
+            )
+
+            exporter: MetricExporter
+            if otlp_endpoint:
+                try:
+                    from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
+                        OTLPMetricExporter,
+                    )
+
+                    exporter = OTLPMetricExporter(endpoint=otlp_endpoint, insecure=True)
+                except ImportError:
+                    exporter = ConsoleMetricExporter()
+            else:
+                exporter = ConsoleMetricExporter()
+
+            reader = PeriodicExportingMetricReader(exporter, export_interval_millis=10000)
+            resource = Resource.create({"service.name": service_name})
+            provider = MeterProvider(resource=resource, metric_readers=[reader])
+            metrics.set_meter_provider(provider)
+
+            meter = metrics.get_meter("edda.opentelemetry", "1.0.0")
+
+            # Counters
+            self._workflow_started_counter = meter.create_counter(
+                "edda.workflow.started",
+                description="Number of workflows started",
+                unit="1",
+            )
+            self._workflow_completed_counter = meter.create_counter(
+                "edda.workflow.completed",
+                description="Number of workflows completed",
+                unit="1",
+            )
+            self._workflow_failed_counter = meter.create_counter(
+                "edda.workflow.failed",
+                description="Number of workflows failed",
+                unit="1",
+            )
+            self._activity_executed_counter = meter.create_counter(
+                "edda.activity.executed",
+                description="Number of activities executed (not cache hit)",
+                unit="1",
+            )
+            self._activity_cache_hit_counter = meter.create_counter(
+                "edda.activity.cache_hit",
+                description="Number of activity cache hits (replay)",
+                unit="1",
+            )
+
+            # Histograms
+            self._workflow_duration_histogram = meter.create_histogram(
+                "edda.workflow.duration",
+                description="Workflow execution duration",
+                unit="s",
+            )
+            self._activity_duration_histogram = meter.create_histogram(
+                "edda.activity.duration",
+                description="Activity execution duration",
+                unit="s",
+            )
+        except ImportError:
+            self._enable_metrics = False
+
+    # =========================================================================
+    # Workflow Hooks
+    # =========================================================================
+
+    async def on_workflow_start(
+        self, instance_id: str, workflow_name: str, input_data: dict[str, Any]
+    ) -> None:
+        """Start a workflow span (parent for all activities).
+
+        Trace context is inherited in the following priority:
+        1. Explicit _trace_context in input_data (e.g., from CloudEvents)
+        2. Current active span (e.g., from ASGI/WSGI middleware)
+        3. None (creates a new root span)
+        """
+        # Priority 1: Extract trace context from input_data (CloudEvents, manual)
+        parent_context = self._extract_trace_context(input_data)
+
+        # Priority 2: Inherit from current active span (ASGI/WSGI middleware)
+        if parent_context is None:
+            current_span = trace.get_current_span()
+            if current_span.is_recording():
+                parent_context = trace.set_span_in_context(current_span)
+
+        span = self._tracer.start_span(
+            name=f"workflow:{workflow_name}",
+            context=parent_context,
+            attributes={
+                "edda.workflow.instance_id": instance_id,
+                "edda.workflow.name": workflow_name,
+                "edda.workflow.input_keys": str(list(input_data.keys())),
+            },
+        )
+        self._workflow_spans[instance_id] = span
+        self._workflow_start_times[instance_id] = time.time()
+
+        # Metrics
+        if self._enable_metrics:
+            self._workflow_started_counter.add(1, {"workflow_name": workflow_name})
+
+    async def on_workflow_complete(
+        self, instance_id: str, workflow_name: str, result: Any  # noqa: ARG002
+    ) -> None:
+        """End workflow span with success status."""
+        span = self._workflow_spans.pop(instance_id, None)
+        if span:
+            span.set_status(Status(StatusCode.OK))
+            span.end()
+
+        # Always cleanup start time
+        start_time = self._workflow_start_times.pop(instance_id, None)
+
+        # Metrics
+        if self._enable_metrics:
+            self._workflow_completed_counter.add(1, {"workflow_name": workflow_name})
+            if start_time:
+                duration = time.time() - start_time
+                self._workflow_duration_histogram.record(
+                    duration, {"workflow_name": workflow_name, "status": "completed"}
+                )
+
+    async def on_workflow_failed(
+        self, instance_id: str, workflow_name: str, error: Exception
+    ) -> None:
+        """End workflow span with error status."""
+        span = self._workflow_spans.pop(instance_id, None)
+        if span:
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.record_exception(error)
+            span.end()
+
+        # Always cleanup start time
+        start_time = self._workflow_start_times.pop(instance_id, None)
+
+        # Metrics
+        if self._enable_metrics:
+            self._workflow_failed_counter.add(
+                1,
+                {"workflow_name": workflow_name, "error_type": type(error).__name__},
+            )
+            if start_time:
+                duration = time.time() - start_time
+                self._workflow_duration_histogram.record(
+                    duration, {"workflow_name": workflow_name, "status": "failed"}
+                )
+
+    async def on_workflow_cancelled(
+        self, instance_id: str, workflow_name: str  # noqa: ARG002
+    ) -> None:
+        """End workflow span with cancelled status."""
+        span = self._workflow_spans.pop(instance_id, None)
+        if span:
+            span.set_attribute("edda.workflow.cancelled", True)
+            span.set_status(Status(StatusCode.OK, "Cancelled"))
+            span.end()
+
+        self._workflow_start_times.pop(instance_id, None)
+
+    # =========================================================================
+    # Activity Hooks
+    # =========================================================================
+
+    async def on_activity_start(
+        self,
+        instance_id: str,
+        activity_id: str,
+        activity_name: str,
+        is_replaying: bool,
+    ) -> None:
+        """Start an activity span as child of workflow span."""
+        parent_span = self._workflow_spans.get(instance_id)
+
+        # Create activity span with parent context
+        if parent_span:
+            ctx = trace.set_span_in_context(parent_span)
+            span = self._tracer.start_span(
+                name=f"activity:{activity_name}",
+                context=ctx,
+                attributes={
+                    "edda.activity.id": activity_id,
+                    "edda.activity.name": activity_name,
+                    "edda.activity.is_replaying": is_replaying,
+                    "edda.workflow.instance_id": instance_id,
+                },
+            )
+        else:
+            # No parent workflow span (edge case)
+            span = self._tracer.start_span(
+                name=f"activity:{activity_name}",
+                attributes={
+                    "edda.activity.id": activity_id,
+                    "edda.activity.name": activity_name,
+                    "edda.activity.is_replaying": is_replaying,
+                    "edda.workflow.instance_id": instance_id,
+                },
+            )
+
+        key = f"{instance_id}:{activity_id}"
+        self._activity_spans[key] = span
+        self._activity_start_times[key] = time.time()
+
+    async def on_activity_complete(
+        self,
+        instance_id: str,
+        activity_id: str,
+        activity_name: str,
+        result: Any,  # noqa: ARG002
+        cache_hit: bool,
+    ) -> None:
+        """End activity span with success status."""
+        key = f"{instance_id}:{activity_id}"
+        span = self._activity_spans.pop(key, None)
+        if span:
+            span.set_attribute("edda.activity.cache_hit", cache_hit)
+            span.set_status(Status(StatusCode.OK))
+            span.end()
+
+        # Metrics
+        if self._enable_metrics:
+            if cache_hit:
+                self._activity_cache_hit_counter.add(1, {"activity_name": activity_name})
+            else:
+                self._activity_executed_counter.add(1, {"activity_name": activity_name})
+            start_time = self._activity_start_times.pop(key, None)
+            if start_time:
+                duration = time.time() - start_time
+                self._activity_duration_histogram.record(
+                    duration, {"activity_name": activity_name}
+                )
+
+    async def on_activity_failed(
+        self,
+        instance_id: str,
+        activity_id: str,
+        activity_name: str,  # noqa: ARG002
+        error: Exception,
+    ) -> None:
+        """End activity span with error status."""
+        key = f"{instance_id}:{activity_id}"
+        span = self._activity_spans.pop(key, None)
+        if span:
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.record_exception(error)
+            span.end()
+
+        self._activity_start_times.pop(key, None)
+
+    async def on_activity_retry(
+        self,
+        instance_id: str,
+        activity_id: str,
+        activity_name: str,  # noqa: ARG002
+        error: Exception,
+        attempt: int,
+        delay: float,
+    ) -> None:
+        """Record retry event on current activity span."""
+        key = f"{instance_id}:{activity_id}"
+        span = self._activity_spans.get(key)
+        if span:
+            span.add_event(
+                "retry",
+                attributes={
+                    "edda.retry.attempt": attempt,
+                    "edda.retry.delay_seconds": delay,
+                    "edda.retry.error": str(error),
+                    "edda.retry.error_type": type(error).__name__,
+                },
+            )
+
+    # =========================================================================
+    # Event Hooks
+    # =========================================================================
+
+    async def on_event_sent(
+        self,
+        event_type: str,
+        event_source: str,
+        event_data: dict[str, Any],  # noqa: ARG002
+    ) -> None:
+        """Record event sent as a short-lived span."""
+        with self._tracer.start_as_current_span(
+            name=f"event:send:{event_type}",
+            attributes={
+                "edda.event.type": event_type,
+                "edda.event.source": event_source,
+            },
+        ) as span:
+            span.set_status(Status(StatusCode.OK))
+
+    async def on_event_received(
+        self,
+        instance_id: str,
+        event_type: str,
+        event_data: dict[str, Any],  # noqa: ARG002
+    ) -> None:
+        """Record event received as an event on workflow span."""
+        parent_span = self._workflow_spans.get(instance_id)
+        if parent_span:
+            parent_span.add_event(
+                "event_received",
+                attributes={
+                    "edda.event.type": event_type,
+                },
+            )
+
+    # =========================================================================
+    # Trace Context Propagation
+    # =========================================================================
+
+    def _extract_trace_context(self, data: dict[str, Any]) -> Context | None:
+        """Extract W3C Trace Context from data dict."""
+        carrier: dict[str, str] = {}
+
+        # Check _trace_context nested dict (recommended)
+        if "_trace_context" in data:
+            tc = data["_trace_context"]
+            if isinstance(tc, dict):
+                carrier.update({k: v for k, v in tc.items() if k in ("traceparent", "tracestate")})
+
+        # Also check top-level keys
+        if "traceparent" in data:
+            carrier["traceparent"] = str(data["traceparent"])
+        if "tracestate" in data:
+            carrier["tracestate"] = str(data["tracestate"])
+
+        return self._propagator.extract(carrier) if carrier else None
+
+    def get_trace_context(self, instance_id: str) -> dict[str, str]:
+        """
+        Get W3C Trace Context for a workflow instance.
+
+        Use this to propagate trace context to external services or CloudEvents.
+
+        Args:
+            instance_id: Workflow instance ID
+
+        Returns:
+            dict with 'traceparent' and optionally 'tracestate' keys
+        """
+        carrier: dict[str, str] = {}
+        span = self._workflow_spans.get(instance_id)
+        if span:
+            ctx = trace.set_span_in_context(span)
+            self._propagator.inject(carrier, context=ctx)
+        return carrier
+
+
+# =============================================================================
+# Trace Context Propagation Helpers
+# =============================================================================
+
+
+def inject_trace_context(
+    hooks: OpenTelemetryHooks, instance_id: str, event_data: dict[str, Any]
+) -> dict[str, Any]:
+    """
+    Inject W3C Trace Context into event data for CloudEvents propagation.
+
+    Use this before calling send_event_transactional() to propagate trace
+    context across service boundaries.
+
+    Example::
+
+        from edda.integrations.opentelemetry import inject_trace_context
+        from edda.outbox.transactional import send_event_transactional
+
+        event_data = {"order_id": "ORD-123", "amount": 99.99}
+        event_data = inject_trace_context(hooks, ctx.instance_id, event_data)
+        await send_event_transactional(ctx, "payment.completed", "payment-service", event_data)
+
+    Args:
+        hooks: OpenTelemetryHooks instance
+        instance_id: Workflow instance ID
+        event_data: Event data dict to inject trace context into
+
+    Returns:
+        Updated event_data with _trace_context key
+    """
+    trace_context = hooks.get_trace_context(instance_id)
+    if trace_context:
+        event_data["_trace_context"] = trace_context
+    return event_data
+
+
+def extract_trace_context(event_data: dict[str, Any]) -> Context | None:
+    """
+    Extract W3C Trace Context from event data.
+
+    This is called automatically by OpenTelemetryHooks.on_workflow_start(),
+    but can also be used manually if needed.
+
+    Args:
+        event_data: Event data dict containing _trace_context
+
+    Returns:
+        OpenTelemetry Context or None if no trace context found
+    """
+    if not _OPENTELEMETRY_AVAILABLE:
+        return None
+
+    if "_trace_context" in event_data:
+        propagator = TraceContextTextMapPropagator()
+        return propagator.extract(event_data["_trace_context"])
+    return None
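Combined with the CloudEvent handler change in `edda/app.py`, these helpers give end-to-end propagation. A sending-side sketch, following the `inject_trace_context` docstring example (the wrapper function name and the `ctx` annotation are placeholders; the receiving service only needs `OpenTelemetryHooks` registered for the downstream workflow span to pick up the parent):

```python
from typing import Any

from edda.integrations.opentelemetry import OpenTelemetryHooks, inject_trace_context
from edda.outbox.transactional import send_event_transactional

hooks = OpenTelemetryHooks(service_name="payment-service")


async def publish_payment_completed(ctx: Any, order_id: str) -> None:
    # Attach the current workflow span's traceparent/tracestate to the outgoing event...
    event_data = inject_trace_context(hooks, ctx.instance_id, {"order_id": order_id})
    # ...so the consumer's default CloudEvent handler can lift it into _trace_context
    # and OpenTelemetryHooks.on_workflow_start() parents the downstream span on it.
    await send_event_transactional(ctx, "payment.completed", "payment-service", event_data)
```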
{edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: edda-framework
-Version: 0.3.1
+Version: 0.5.0
 Summary: Lightweight Durable Execution Framework
 Project-URL: Homepage, https://github.com/i2y/edda
 Project-URL: Documentation, https://github.com/i2y/edda#readme
@@ -42,6 +42,10 @@ Provides-Extra: mcp
 Requires-Dist: mcp>=1.22.0; extra == 'mcp'
 Provides-Extra: mysql
 Requires-Dist: aiomysql>=0.2.0; extra == 'mysql'
+Provides-Extra: opentelemetry
+Requires-Dist: opentelemetry-api>=1.20.0; extra == 'opentelemetry'
+Requires-Dist: opentelemetry-exporter-otlp>=1.20.0; extra == 'opentelemetry'
+Requires-Dist: opentelemetry-sdk>=1.20.0; extra == 'opentelemetry'
 Provides-Extra: postgresql
 Requires-Dist: asyncpg>=0.30.0; extra == 'postgresql'
 Provides-Extra: server
@@ -91,6 +95,28 @@ Edda excels at orchestrating **long-running workflows** that must survive failures
 - **🤖 AI Agent Workflows**: Orchestrate multi-step AI tasks (LLM calls, tool usage, long-running inference)
 - **📡 Event-Driven Workflows**: React to external events with guaranteed delivery and automatic retry
 
+### Business Process Automation
+
+Edda's waiting functions make it ideal for time-based and event-driven business processes:
+
+- **📧 User Onboarding**: Send reminders if users haven't completed setup after N days
+- **🎁 Campaign Processing**: Evaluate conditions and notify winners after campaign ends
+- **💳 Payment Reminders**: Send escalating reminders before payment deadlines
+- **📦 Scheduled Notifications**: Shipping updates, subscription renewals, appointment reminders
+
+**Waiting functions**:
+- `wait_timer(duration_seconds)`: Wait for a relative duration
+- `wait_until(until_time)`: Wait until an absolute datetime (e.g., campaign end date)
+- `wait_event(event_type)`: Wait for external events (near real-time response)
+
+```python
+@workflow
+async def onboarding_reminder(ctx: WorkflowContext, user_id: str):
+    await wait_timer(ctx, duration_seconds=3*24*60*60)  # Wait 3 days
+    if not await check_completed(ctx, user_id):
+        await send_reminder(ctx, user_id)
+```
+
 **Key benefit**: Workflows **never lose progress** - crashes and restarts are handled automatically through deterministic replay.
 
 ## Architecture
@@ -730,6 +756,32 @@ Each `@durable_tool` automatically generates **three MCP tools**:
 
 This enables AI assistants to work with workflows that take minutes, hours, or even days to complete.
 
+### MCP Prompts
+
+Define reusable prompt templates that can access workflow state:
+
+```python
+from mcp.server.fastmcp.prompts.base import UserMessage
+from mcp.types import TextContent
+
+@server.prompt(description="Analyze a workflow execution")
+async def analyze_workflow(instance_id: str) -> UserMessage:
+    """Generate analysis prompt for a specific workflow."""
+    instance = await server.storage.get_instance(instance_id)
+    history = await server.storage.get_history(instance_id)
+
+    text = f"""Analyze this workflow:
+**Status**: {instance['status']}
+**Activities**: {len(history)}
+**Result**: {instance.get('output_data')}
+
+Please provide insights and optimization suggestions."""
+
+    return UserMessage(content=TextContent(type="text", text=text))
+```
+
+AI clients can use these prompts to generate context-aware analysis of your workflows.
+
 **For detailed documentation**, see [MCP Integration Guide](docs/integrations/mcp.md).
 
 ## Observability Hooks
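The README snippet above demonstrates only `wait_timer`. A companion sketch for `wait_until`, under the assumption that it follows the same ctx-first calling convention (its import path is not shown in this diff, and `evaluate_winners`/`notify_winners` are hypothetical activities):

```python
from datetime import datetime, timezone

@workflow
async def campaign_close(ctx: WorkflowContext, campaign_id: str):
    # Sleep durably until the campaign's end date (survives restarts via replay).
    await wait_until(ctx, until_time=datetime(2025, 12, 31, 23, 59, tzinfo=timezone.utc))
    winners = await evaluate_winners(ctx, campaign_id)  # hypothetical activity
    await notify_winners(ctx, winners)                  # hypothetical activity
```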
{edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/RECORD
CHANGED

@@ -1,11 +1,11 @@
 edda/__init__.py,sha256=gmJd0ooVbGNMOLlSj-r6rEt3IkY-FZYCFjhWjIltqlk,1657
 edda/activity.py,sha256=nRm9eBrr0lFe4ZRQ2whyZ6mo5xd171ITIVhqytUhOpw,21025
-edda/app.py,sha256=
+edda/app.py,sha256=8FKx9Kspbm5Fz-QDz4DgncNkkgdZkij209LHllWkRw4,38288
 edda/compensation.py,sha256=CmnyJy4jAklVrtLJodNOcj6vxET6pdarxM1Yx2RHlL4,11898
 edda/context.py,sha256=YZKBNtblRcaFqte1Y9t2cIP3JHzK-5Tu40x5i5FHtnU,17789
 edda/events.py,sha256=KN06o-Umkwkg9-TwbN4jr1uBZrBrvVSc6m8mOlQGXkA,18043
 edda/exceptions.py,sha256=-ntBLGpVQgPFG5N1o8m_7weejAYkNrUdxTkOP38vsHk,1766
-edda/hooks.py,sha256=
+edda/hooks.py,sha256=HUZ6FTM__DZjwuomDfTDEroQ3mugEPuJHcGm7CTQNvg,8193
 edda/locking.py,sha256=l3YM7zdERizw27jQXfLN7EmcMcrJSVzd7LD8hhsXvIM,11003
 edda/pydantic_utils.py,sha256=dGVPNrrttDeq1k233PopCtjORYjZitsgASPfPnO6R10,9056
 edda/replay.py,sha256=5RIRd0q2ZrH9iiiy35eOUii2cipYg9dlua56OAXvIk4,32499
@@ -14,8 +14,10 @@ edda/workflow.py,sha256=daSppYAzgXkjY_9-HS93Zi7_tPR6srmchxY5YfwgU-4,7239
 edda/wsgi.py,sha256=1pGE5fhHpcsYnDR8S3NEFKWUs5P0JK4roTAzX9BsIj0,2391
 edda/integrations/__init__.py,sha256=F_CaTvlDEbldfOpPKq_U9ve1E573tS6XzqXnOtyHcXI,33
 edda/integrations/mcp/__init__.py,sha256=YK-8m0DIdP-RSqewlIX7xnWU7TD3NioCiW2_aZSgnn8,1232
-edda/integrations/mcp/decorators.py,sha256=
-edda/integrations/mcp/server.py,sha256=
+edda/integrations/mcp/decorators.py,sha256=UTBb-Un2JK938pDZmANOvfsdKOMI2AF9yGtfSuy8VrE,6284
+edda/integrations/mcp/server.py,sha256=pzCG46Zko9hHmQ3REbo1w3A23SjrGFLiZupwSkIPhOA,13942
+edda/integrations/opentelemetry/__init__.py,sha256=x1_PyyygGDW-rxQTwoIrGzyjKErXHOOKdquFAMlCOAo,906
+edda/integrations/opentelemetry/hooks.py,sha256=FBYnSZBh8_0vw9M1E2AbJrx1cTTsKeiHf5wspr0UnzU,21288
 edda/outbox/__init__.py,sha256=azXG1rtheJEjOyoWmMsBeR2jp8Bz02R3wDEd5tQnaWA,424
 edda/outbox/relayer.py,sha256=2tnN1aOQ8pKWfwEGIlYwYLLwyOKXBjZ4XZsIr1HjgK4,9454
 edda/outbox/transactional.py,sha256=LFfUjunqRlGibaINi-efGXFFivWGO7v3mhqrqyGW6Nw,3808
@@ -33,8 +35,8 @@ edda/viewer_ui/data_service.py,sha256=mXV6bL6REa_UKsk8xMGBIFbsbLpIxe91lX3wgn-FOj
 edda/visualizer/__init__.py,sha256=DOpDstNhR0VcXAs_eMKxaL30p_0u4PKZ4o2ndnYhiRo,343
 edda/visualizer/ast_analyzer.py,sha256=plmx7C9X_X35xLY80jxOL3ljg3afXxBePRZubqUIkxY,13663
 edda/visualizer/mermaid_generator.py,sha256=XWa2egoOTNDfJEjPcwoxwQmblUqXf7YInWFjFRI1QGo,12457
-edda_framework-0.
-edda_framework-0.
-edda_framework-0.
-edda_framework-0.
-edda_framework-0.
+edda_framework-0.5.0.dist-info/METADATA,sha256=GfZ0zYUy_g4zjV_OkYPPmNRTwIxZb77nh-aaS6Z_OYE,31503
+edda_framework-0.5.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+edda_framework-0.5.0.dist-info/entry_points.txt,sha256=dPH47s6UoJgUZxHoeSMqZsQkLaSE-SGLi-gh88k2WrU,48
+edda_framework-0.5.0.dist-info/licenses/LICENSE,sha256=udxb-V7_cYKTHqW7lNm48rxJ-Zpf0WAY_PyGDK9BPCo,1069
+edda_framework-0.5.0.dist-info/RECORD,,
{edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/WHEEL
File without changes

{edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/entry_points.txt
File without changes

{edda_framework-0.3.1.dist-info → edda_framework-0.5.0.dist-info}/licenses/LICENSE
File without changes