edda-framework 0.5.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff shows the content changes between package versions publicly released to one of the supported registries; it is provided for informational purposes only.
@@ -19,14 +19,15 @@ def create_durable_tool(
     description: str = "",
 ) -> Workflow:
     """
-    Create a durable workflow tool with auto-generated status/result tools.
+    Create a durable workflow tool with auto-generated status/result/cancel tools.
 
     This function:
     1. Wraps the function as an Edda @workflow
-    2. Registers three MCP tools:
+    2. Registers four MCP tools:
        - {name}: Start workflow, return instance_id
        - {name}_status: Check workflow status
        - {name}_result: Get workflow result
+       - {name}_cancel: Cancel workflow (if running or waiting)
 
     Args:
         server: EddaMCPServer instance
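
For orientation, this is the tool fan-out the updated docstring describes; a minimal sketch, with `process_order` as a hypothetical workflow name:

```python
# MCP tools registered for a durable tool named "process_order" (hypothetical):
GENERATED_TOOLS = {
    "process_order": "start the workflow, returns instance_id",
    "process_order_status": "check workflow status",
    "process_order_result": "get workflow result",
    "process_order_cancel": "cancel workflow (new in 0.7.0)",
}
```
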
@@ -93,9 +94,9 @@ def create_durable_tool(
     status_tool_name = f"{workflow_name}_status"
     status_tool_description = f"Check status of {workflow_name} workflow"
 
-    @server._mcp.tool(name=status_tool_name, description=status_tool_description)  # type: ignore[misc]
+    @server._mcp.tool(name=status_tool_name, description=status_tool_description)
     async def status_tool(instance_id: str) -> dict[str, Any]:
-        """Check workflow status."""
+        """Check workflow status with progress metadata."""
         try:
             instance = await server.storage.get_instance(instance_id)
             if instance is None:
@@ -112,9 +113,22 @@ def create_durable_tool(
             status = instance["status"]
             current_activity_id = instance.get("current_activity_id", "N/A")
 
+            # Get history to count completed activities
+            history = await server.storage.get_history(instance_id)
+            completed_activities = len(
+                [h for h in history if h["event_type"] == "ActivityCompleted"]
+            )
+
+            # Suggest poll interval based on status
+            # Running workflows need more frequent polling (5s)
+            # Waiting workflows need less frequent polling (10s)
+            suggested_poll_interval_ms = 5000 if status == "running" else 10000
+
             status_text = (
                 f"Workflow Status: {status}\n"
                 f"Current Activity: {current_activity_id}\n"
+                f"Completed Activities: {completed_activities}\n"
+                f"Suggested Poll Interval: {suggested_poll_interval_ms}ms\n"
                 f"Instance ID: {instance_id}"
             )
 
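
A client can honor the new poll-interval hint instead of polling blindly. A minimal sketch, assuming a hypothetical `call_tool(name, args)` coroutine on the MCP client side and the same hypothetical `process_order` tool; it parses the plain-text payload built above:

```python
import asyncio
import re

async def poll_until_terminal(call_tool, instance_id: str) -> str:
    """Poll the generated *_status tool until the workflow settles."""
    while True:
        result = await call_tool("process_order_status", {"instance_id": instance_id})
        text = result["content"][0]["text"]
        status = re.search(r"Workflow Status: (\S+)", text).group(1)
        if status not in ("running", "waiting"):
            return status  # e.g. "completed" or "failed"
        # Honor the server's hint: 5000 ms while running, 10000 ms while waiting.
        hint = re.search(r"Suggested Poll Interval: (\d+)ms", text)
        await asyncio.sleep(int(hint.group(1)) / 1000 if hint else 5.0)
```
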
@@ -137,7 +151,7 @@ def create_durable_tool(
     result_tool_name = f"{workflow_name}_result"
     result_tool_description = f"Get result of {workflow_name} workflow (if completed)"
 
-    @server._mcp.tool(name=result_tool_name, description=result_tool_description)  # type: ignore[misc]
+    @server._mcp.tool(name=result_tool_name, description=result_tool_description)
     async def result_tool(instance_id: str) -> dict[str, Any]:
         """Get workflow result (if completed)."""
         try:
@@ -184,4 +198,86 @@ def create_durable_tool(
                 "isError": True,
             }
 
+    # 5. Generate cancel tool
+    cancel_tool_name = f"{workflow_name}_cancel"
+    cancel_tool_description = f"Cancel {workflow_name} workflow (if running or waiting)"
+
+    @server._mcp.tool(name=cancel_tool_name, description=cancel_tool_description)
+    async def cancel_tool(instance_id: str) -> dict[str, Any]:
+        """Cancel a running or waiting workflow."""
+        try:
+            # Check if instance exists
+            instance = await server.storage.get_instance(instance_id)
+            if instance is None:
+                return {
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": f"Workflow instance not found: {instance_id}",
+                        }
+                    ],
+                    "isError": True,
+                }
+
+            current_status = instance["status"]
+
+            # Check if replay_engine is available
+            if server.replay_engine is None:
+                return {
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": "Server not initialized. Call server.initialize() first.",
+                        }
+                    ],
+                    "isError": True,
+                }
+
+            # Try to cancel
+            success = await server.replay_engine.cancel_workflow(
+                instance_id=instance_id,
+                cancelled_by="mcp_user",
+            )
+
+            if success:
+                return {
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": (
+                                f"Workflow '{workflow_name}' cancelled successfully.\n"
+                                f"Instance ID: {instance_id}\n"
+                                f"Compensations executed.\n\n"
+                                f"The workflow has been stopped and any side effects "
+                                f"have been rolled back."
+                            ),
+                        }
+                    ],
+                    "isError": False,
+                }
+            else:
+                return {
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": (
+                                f"Cannot cancel workflow: {instance_id}\n"
+                                f"Current status: {current_status}\n"
+                                f"Only running or waiting workflows can be cancelled."
+                            ),
+                        }
+                    ],
+                    "isError": True,
+                }
+        except Exception as e:
+            return {
+                "content": [
+                    {
+                        "type": "text",
+                        "text": f"Error cancelling workflow: {str(e)}",
+                    }
+                ],
+                "isError": True,
+            }
+
     return workflow_instance
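
The cancel tool reports outcomes through the same content/isError envelope as the other generated tools, so callers branch on `isError` rather than on exceptions. A minimal sketch with the same hypothetical `call_tool` helper:

```python
async def cancel_instance(call_tool, instance_id: str) -> bool:
    """Return True if the workflow was cancelled and compensations ran."""
    result = await call_tool("process_order_cancel", {"instance_id": instance_id})
    # isError is True for an unknown instance, an uninitialized server,
    # or a workflow that is no longer running/waiting.
    if result["isError"]:
        print(result["content"][0]["text"])
        return False
    return True
```
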
@@ -9,10 +9,11 @@ from edda.app import EddaApp
 from edda.workflow import Workflow
 
 if TYPE_CHECKING:
+    from edda.replay import ReplayEngine
     from edda.storage.protocol import StorageProtocol
 
 try:
-    from mcp.server.fastmcp import FastMCP  # type: ignore[import-not-found]
+    from mcp.server.fastmcp import FastMCP
 except ImportError as e:
     raise ImportError(
         "MCP Python SDK is required for MCP integration. "
@@ -68,10 +69,11 @@ class EddaMCPServer:
         asyncio.run(main())
         ```
 
-    The server automatically generates three MCP tools for each @durable_tool:
+    The server automatically generates four MCP tools for each @durable_tool:
     - `tool_name`: Start the workflow, returns instance_id
     - `tool_name_status`: Check workflow status
     - `tool_name_result`: Get workflow result (if completed)
+    - `tool_name_cancel`: Cancel workflow (if running or waiting)
     """
 
     def __init__(
@@ -122,6 +124,24 @@ class EddaMCPServer:
         """
         return self._edda_app.storage
 
+    @property
+    def replay_engine(self) -> ReplayEngine | None:
+        """
+        Access replay engine for workflow operations (cancel, resume, etc.).
+
+        Returns:
+            ReplayEngine or None if not initialized
+
+        Example:
+            ```python
+            # Cancel a running workflow
+            success = await server.replay_engine.cancel_workflow(
+                instance_id, "mcp_user"
+            )
+            ```
+        """
+        return self._edda_app.replay_engine
+
     def durable_tool(
         self,
         func: Callable[..., Any] | None = None,
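
Since the property returns `None` until the server is initialized (the generated cancel tool guards on exactly this), external callers should do the same. A minimal sketch; the `cancelled_by` label is illustrative:

```python
if server.replay_engine is None:
    # The generated cancel tool's error text says: "Call server.initialize() first."
    await server.initialize()

assert server.replay_engine is not None
success = await server.replay_engine.cancel_workflow(
    instance_id=instance_id,
    cancelled_by="admin_script",  # illustrative caller label
)
```
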
@@ -131,10 +151,11 @@ class EddaMCPServer:
         """
         Decorator to define a durable workflow tool.
 
-        Automatically generates three MCP tools:
+        Automatically generates four MCP tools:
         1. Main tool: Starts the workflow, returns instance_id
         2. Status tool: Checks workflow status
         3. Result tool: Gets workflow result (if completed)
+        4. Cancel tool: Cancels workflow (if running or waiting)
 
         Args:
             func: Workflow function (async)
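
Because `func` defaults to `None`, the decorator appears to support both the bare and the parametrized form; all four tools are generated either way. A minimal sketch (workflow signatures are illustrative, and forwarding of `description` to `create_durable_tool` is an assumption):

```python
@server.durable_tool  # bare form: the function is passed directly
async def send_invoice(invoice_id: str) -> dict:
    ...

@server.durable_tool(description="Provision a tenant durably")  # parametrized form
async def provision_tenant(tenant_id: str) -> dict:
    ...
```
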
@@ -207,7 +228,7 @@ class EddaMCPServer:
         def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
             # Use FastMCP's native prompt decorator
             prompt_desc = description or f.__doc__ or f"Prompt: {f.__name__}"
-            return cast(Callable[..., Any], self._mcp.prompt(description=prompt_desc)(f))
+            return self._mcp.prompt(description=prompt_desc)(f)
 
         if func is None:
             return decorator
@@ -228,8 +249,8 @@ class EddaMCPServer:
         Returns:
             ASGI callable (Starlette app)
         """
-        from starlette.requests import Request  # type: ignore[import-not-found]
-        from starlette.responses import Response  # type: ignore[import-not-found]
+        from starlette.requests import Request
+        from starlette.responses import Response
 
         # Get MCP's Starlette app (Issue #1367 workaround: use directly)
         app = self._mcp.streamable_http_app()
@@ -270,14 +291,13 @@ class EddaMCPServer:
         app.router.add_route("/cancel/{instance_id}", edda_cancel_handler, methods=["POST"])
 
         # Add authentication middleware if token_verifier provided (AFTER adding routes)
+        result_app: Any = app
         if self._token_verifier is not None:
-            from starlette.middleware.base import (  # type: ignore[import-not-found]
-                BaseHTTPMiddleware,
-            )
+            from starlette.middleware.base import BaseHTTPMiddleware
 
-            class AuthMiddleware(BaseHTTPMiddleware):  # type: ignore[misc]
-                def __init__(self, app: Any, token_verifier: Callable[[str], bool]):
-                    super().__init__(app)
+            class AuthMiddleware(BaseHTTPMiddleware):
+                def __init__(self, app_inner: Any, token_verifier: Callable[[str], bool]) -> None:
+                    super().__init__(app_inner)
                     self.token_verifier = token_verifier
 
                 async def dispatch(
@@ -288,12 +308,13 @@ class EddaMCPServer:
                     token = auth_header[7:]
                     if not self.token_verifier(token):
                         return Response("Unauthorized", status_code=401)
-                    return await call_next(request)
+                    response: Response = await call_next(request)
+                    return response
 
             # Wrap app with auth middleware
-            app = AuthMiddleware(app, self._token_verifier)
+            result_app = AuthMiddleware(app, self._token_verifier)
 
-        return cast(Callable[..., Any], app)
+        return cast(Callable[..., Any], result_app)
 
     async def initialize(self) -> None:
         """
edda/storage/protocol.py CHANGED
@@ -238,20 +238,34 @@ class StorageProtocol(Protocol):
     async def list_instances(
         self,
         limit: int = 50,
+        page_token: str | None = None,
         status_filter: str | None = None,
-    ) -> list[dict[str, Any]]:
+        workflow_name_filter: str | None = None,
+        instance_id_filter: str | None = None,
+        started_after: datetime | None = None,
+        started_before: datetime | None = None,
+    ) -> dict[str, Any]:
         """
-        List workflow instances with optional filtering.
+        List workflow instances with cursor-based pagination and filtering.
 
         This method JOINs workflow_instances with workflow_definitions to
         return instances along with their source code.
 
         Args:
-            limit: Maximum number of instances to return
+            limit: Maximum number of instances to return per page
+            page_token: Cursor for pagination (format: "ISO_DATETIME||INSTANCE_ID")
             status_filter: Optional status filter (e.g., "running", "completed", "failed")
+            workflow_name_filter: Optional workflow name filter (partial match, case-insensitive)
+            instance_id_filter: Optional instance ID filter (partial match, case-insensitive)
+            started_after: Filter instances started after this datetime (inclusive)
+            started_before: Filter instances started before this datetime (inclusive)
 
         Returns:
-            List of workflow instances, ordered by started_at DESC.
+            Dictionary containing:
+            - instances: List of workflow instances, ordered by started_at DESC
+            - next_page_token: Cursor for the next page, or None if no more pages
+            - has_more: Boolean indicating if there are more pages
+
             Each instance contains: instance_id, workflow_name, source_hash,
             owner_service, status, current_activity_id, started_at, updated_at,
             input_data, source_code, output_data, locked_by, locked_at
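
A minimal sketch of draining every page through the new return shape; the storage object and the seven-day window are illustrative:

```python
from datetime import datetime, timedelta, timezone
from typing import Any

async def all_running_instances(storage: Any) -> list[dict]:
    """Collect every running instance started in the last week, page by page."""
    out: list[dict] = []
    page_token: str | None = None
    while True:
        page = await storage.list_instances(
            limit=50,
            page_token=page_token,
            status_filter="running",
            started_after=datetime.now(timezone.utc) - timedelta(days=7),
        )
        out.extend(page["instances"])
        if not page["has_more"]:
            return out
        page_token = page["next_page_token"]
```
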
@@ -774,11 +774,17 @@ class SQLAlchemyStorage:
     async def list_instances(
         self,
         limit: int = 50,
+        page_token: str | None = None,
         status_filter: str | None = None,
-    ) -> list[dict[str, Any]]:
-        """List workflow instances with optional filtering."""
+        workflow_name_filter: str | None = None,
+        instance_id_filter: str | None = None,
+        started_after: datetime | None = None,
+        started_before: datetime | None = None,
+    ) -> dict[str, Any]:
+        """List workflow instances with cursor-based pagination and filtering."""
         session = self._get_session_for_operation()
         async with self._session_scope(session) as session:
+            # Base query with JOIN
             stmt = (
                 select(WorkflowInstance, WorkflowDefinition.source_code)
                 .join(
@@ -788,17 +794,105 @@ class SQLAlchemyStorage:
                         WorkflowInstance.source_hash == WorkflowDefinition.source_hash,
                     ),
                 )
-                .order_by(WorkflowInstance.started_at.desc())
-                .limit(limit)
+                .order_by(
+                    WorkflowInstance.started_at.desc(),
+                    WorkflowInstance.instance_id.desc(),
+                )
             )
 
+            # Apply cursor-based pagination (page_token format: "ISO_DATETIME||INSTANCE_ID")
+            if page_token:
+                # Parse page_token: || separates datetime and instance_id
+                separator = "||"
+                if separator in page_token:
+                    cursor_time_str, cursor_id = page_token.split(separator, 1)
+                    cursor_time = datetime.fromisoformat(cursor_time_str)
+                    # Use _make_datetime_comparable for SQLite compatibility
+                    started_at_comparable = self._make_datetime_comparable(
+                        WorkflowInstance.started_at
+                    )
+                    # For SQLite, also wrap the cursor_time in func.datetime()
+                    cursor_time_comparable: Any
+                    if self.engine.dialect.name == "sqlite":
+                        cursor_time_comparable = func.datetime(cursor_time_str)
+                    else:
+                        cursor_time_comparable = cursor_time
+                    # For DESC order, we want rows where (started_at, instance_id) < cursor
+                    stmt = stmt.where(
+                        or_(
+                            started_at_comparable < cursor_time_comparable,
+                            and_(
+                                started_at_comparable == cursor_time_comparable,
+                                WorkflowInstance.instance_id < cursor_id,
+                            ),
+                        )
+                    )
+
+            # Apply status filter
             if status_filter:
                 stmt = stmt.where(WorkflowInstance.status == status_filter)
 
+            # Apply workflow name and/or instance ID filter (partial match, case-insensitive)
+            # When both filters have the same value (unified search), use OR logic
+            if workflow_name_filter and instance_id_filter:
+                if workflow_name_filter == instance_id_filter:
+                    # Unified search: match either workflow name OR instance ID
+                    stmt = stmt.where(
+                        or_(
+                            WorkflowInstance.workflow_name.ilike(f"%{workflow_name_filter}%"),
+                            WorkflowInstance.instance_id.ilike(f"%{instance_id_filter}%"),
+                        )
+                    )
+                else:
+                    # Separate filters: match both (AND logic)
+                    stmt = stmt.where(
+                        WorkflowInstance.workflow_name.ilike(f"%{workflow_name_filter}%")
+                    )
+                    stmt = stmt.where(WorkflowInstance.instance_id.ilike(f"%{instance_id_filter}%"))
+            elif workflow_name_filter:
+                stmt = stmt.where(WorkflowInstance.workflow_name.ilike(f"%{workflow_name_filter}%"))
+            elif instance_id_filter:
+                stmt = stmt.where(WorkflowInstance.instance_id.ilike(f"%{instance_id_filter}%"))
+
+            # Apply date range filters (use _make_datetime_comparable for SQLite)
+            if started_after or started_before:
+                started_at_comparable = self._make_datetime_comparable(WorkflowInstance.started_at)
+                if started_after:
+                    started_after_comparable: Any
+                    if self.engine.dialect.name == "sqlite":
+                        started_after_comparable = func.datetime(started_after.isoformat())
+                    else:
+                        started_after_comparable = started_after
+                    stmt = stmt.where(started_at_comparable >= started_after_comparable)
+                if started_before:
+                    started_before_comparable: Any
+                    if self.engine.dialect.name == "sqlite":
+                        started_before_comparable = func.datetime(started_before.isoformat())
+                    else:
+                        started_before_comparable = started_before
+                    stmt = stmt.where(started_at_comparable <= started_before_comparable)
+
+            # Fetch limit+1 to determine if there are more pages
+            stmt = stmt.limit(limit + 1)
+
             result = await session.execute(stmt)
             rows = result.all()
 
-            return [
+            # Determine has_more and next_page_token
+            has_more = len(rows) > limit
+            if has_more:
+                rows = rows[:limit]  # Trim to actual limit
+
+            # Generate next_page_token from last row
+            next_page_token: str | None = None
+            if has_more and rows:
+                last_instance = rows[-1][0]
+                # Format: ISO_DATETIME||INSTANCE_ID (using || as separator)
+                next_page_token = (
+                    f"{last_instance.started_at.isoformat()}||{last_instance.instance_id}"
+                )
+
+            instances = [
                 {
                     "instance_id": instance.instance_id,
                     "workflow_name": instance.workflow_name,
@@ -820,6 +914,12 @@
                 for instance, source_code in rows
             ]
 
+            return {
+                "instances": instances,
+                "next_page_token": next_page_token,
+                "has_more": has_more,
+            }
+
     # -------------------------------------------------------------------------
     # Distributed Locking Methods (ALWAYS use separate session/transaction)
     # -------------------------------------------------------------------------
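
Worth noting about the pagination change above: the cursor is a keyset, not an offset. Ordering by `(started_at DESC, instance_id DESC)` gives a strict total order, so a page boundary stays stable under concurrent inserts. A minimal sketch of the token round-trip the implementation implies:

```python
from datetime import datetime

def encode_page_token(started_at: datetime, instance_id: str) -> str:
    # Mirrors the f-string in list_instances: "ISO_DATETIME||INSTANCE_ID"
    return f"{started_at.isoformat()}||{instance_id}"

def decode_page_token(token: str) -> tuple[datetime, str]:
    # Mirrors page_token.split("||", 1) plus datetime.fromisoformat()
    time_str, instance_id = token.split("||", 1)
    return datetime.fromisoformat(time_str), instance_id

tok = encode_page_token(datetime(2025, 1, 2, 3, 4, 5), "wf-0123")
assert decode_page_token(tok) == (datetime(2025, 1, 2, 3, 4, 5), "wf-0123")
# The WHERE clause keeps only rows strictly below this cursor in DESC order,
# so no row is skipped or duplicated between pages.
```
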