mcp-eregistrations-bpa 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mcp-eregistrations-bpa may have issues. See the linked advisory for details.
- mcp_eregistrations_bpa/__init__.py +121 -0
- mcp_eregistrations_bpa/__main__.py +6 -0
- mcp_eregistrations_bpa/arazzo/__init__.py +21 -0
- mcp_eregistrations_bpa/arazzo/expression.py +379 -0
- mcp_eregistrations_bpa/audit/__init__.py +56 -0
- mcp_eregistrations_bpa/audit/context.py +66 -0
- mcp_eregistrations_bpa/audit/logger.py +236 -0
- mcp_eregistrations_bpa/audit/models.py +131 -0
- mcp_eregistrations_bpa/auth/__init__.py +64 -0
- mcp_eregistrations_bpa/auth/callback.py +391 -0
- mcp_eregistrations_bpa/auth/cas.py +409 -0
- mcp_eregistrations_bpa/auth/oidc.py +252 -0
- mcp_eregistrations_bpa/auth/permissions.py +162 -0
- mcp_eregistrations_bpa/auth/token_manager.py +348 -0
- mcp_eregistrations_bpa/bpa_client/__init__.py +84 -0
- mcp_eregistrations_bpa/bpa_client/client.py +740 -0
- mcp_eregistrations_bpa/bpa_client/endpoints.py +193 -0
- mcp_eregistrations_bpa/bpa_client/errors.py +276 -0
- mcp_eregistrations_bpa/bpa_client/models.py +203 -0
- mcp_eregistrations_bpa/config.py +349 -0
- mcp_eregistrations_bpa/db/__init__.py +21 -0
- mcp_eregistrations_bpa/db/connection.py +64 -0
- mcp_eregistrations_bpa/db/migrations.py +168 -0
- mcp_eregistrations_bpa/exceptions.py +39 -0
- mcp_eregistrations_bpa/py.typed +0 -0
- mcp_eregistrations_bpa/rollback/__init__.py +19 -0
- mcp_eregistrations_bpa/rollback/manager.py +616 -0
- mcp_eregistrations_bpa/server.py +152 -0
- mcp_eregistrations_bpa/tools/__init__.py +372 -0
- mcp_eregistrations_bpa/tools/actions.py +155 -0
- mcp_eregistrations_bpa/tools/analysis.py +352 -0
- mcp_eregistrations_bpa/tools/audit.py +399 -0
- mcp_eregistrations_bpa/tools/behaviours.py +1042 -0
- mcp_eregistrations_bpa/tools/bots.py +627 -0
- mcp_eregistrations_bpa/tools/classifications.py +575 -0
- mcp_eregistrations_bpa/tools/costs.py +765 -0
- mcp_eregistrations_bpa/tools/debug_strategies.py +351 -0
- mcp_eregistrations_bpa/tools/debugger.py +1230 -0
- mcp_eregistrations_bpa/tools/determinants.py +2235 -0
- mcp_eregistrations_bpa/tools/document_requirements.py +670 -0
- mcp_eregistrations_bpa/tools/export.py +899 -0
- mcp_eregistrations_bpa/tools/fields.py +162 -0
- mcp_eregistrations_bpa/tools/form_errors.py +36 -0
- mcp_eregistrations_bpa/tools/formio_helpers.py +971 -0
- mcp_eregistrations_bpa/tools/forms.py +1269 -0
- mcp_eregistrations_bpa/tools/jsonlogic_builder.py +466 -0
- mcp_eregistrations_bpa/tools/large_response.py +163 -0
- mcp_eregistrations_bpa/tools/messages.py +523 -0
- mcp_eregistrations_bpa/tools/notifications.py +241 -0
- mcp_eregistrations_bpa/tools/registration_institutions.py +680 -0
- mcp_eregistrations_bpa/tools/registrations.py +897 -0
- mcp_eregistrations_bpa/tools/role_status.py +447 -0
- mcp_eregistrations_bpa/tools/role_units.py +400 -0
- mcp_eregistrations_bpa/tools/roles.py +1236 -0
- mcp_eregistrations_bpa/tools/rollback.py +335 -0
- mcp_eregistrations_bpa/tools/services.py +674 -0
- mcp_eregistrations_bpa/tools/workflows.py +2487 -0
- mcp_eregistrations_bpa/tools/yaml_transformer.py +991 -0
- mcp_eregistrations_bpa/workflows/__init__.py +28 -0
- mcp_eregistrations_bpa/workflows/loader.py +440 -0
- mcp_eregistrations_bpa/workflows/models.py +336 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/METADATA +965 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/RECORD +66 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/WHEEL +4 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/entry_points.txt +2 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/licenses/LICENSE +86 -0
|
@@ -0,0 +1,2487 @@
|
|
|
1
|
+
"""MCP tools for Arazzo workflow orchestration.
|
|
2
|
+
|
|
3
|
+
This module provides tools for:
|
|
4
|
+
- Story 5.1: Workflow Catalog & Discovery
|
|
5
|
+
- Story 5.4: Workflow Executor
|
|
6
|
+
- Story 5.5: Progress Reporting & Streaming
|
|
7
|
+
- Story 5.6: Error Recovery & Rollback
|
|
8
|
+
- Story 5.7: Workflow Chaining & Composition
|
|
9
|
+
- Story 5.8: Guided Interactive Mode
|
|
10
|
+
|
|
11
|
+
Note: Intent-to-Workflow Matching and Input Extraction are handled by the
|
|
12
|
+
calling AI agent, not by this MCP. The MCP provides catalog, schema, and
|
|
13
|
+
execution capabilities; the AI provides intent understanding and input extraction.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
from __future__ import annotations
|
|
17
|
+
|
|
18
|
+
import re
|
|
19
|
+
import time
|
|
20
|
+
import uuid
|
|
21
|
+
from collections.abc import Callable
|
|
22
|
+
from typing import Any
|
|
23
|
+
|
|
24
|
+
from mcp.server.fastmcp.exceptions import ToolError
|
|
25
|
+
|
|
26
|
+
from mcp_eregistrations_bpa.arazzo import resolve_string
|
|
27
|
+
from mcp_eregistrations_bpa.tools.large_response import large_response_handler
|
|
28
|
+
from mcp_eregistrations_bpa.workflows import (
|
|
29
|
+
get_workflow_catalog,
|
|
30
|
+
)
|
|
31
|
+
from mcp_eregistrations_bpa.workflows.models import OPERATION_TO_TOOL_MAP
|
|
32
|
+
|
|
33
|
+
# Public API of this module. Names listed here but not defined in this chunk
# (e.g. workflow_status, register_workflow_tools) are presumably defined later
# in the file -- confirm against the full module.
__all__ = [
    # Story 5.1: Workflow Catalog & Discovery
    "workflow_list",
    "workflow_describe",
    "workflow_search",
    # Story 5.4: Workflow Executor
    "workflow_execute",
    # Story 5.5: Progress Reporting & Streaming
    "workflow_status",
    "workflow_cancel",
    # Story 5.6: Error Recovery & Rollback
    "workflow_retry",
    "workflow_rollback",
    # Story 5.7: Workflow Chaining & Composition
    "workflow_chain",
    # Story 5.8: Guided Interactive Mode
    "workflow_start_interactive",
    "workflow_continue",
    "workflow_confirm",
    # Story 14.6: Workflow Validation
    "workflow_validate",
    # Registration
    "register_workflow_tools",
]
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
# =============================================================================
|
|
60
|
+
# Story 5.1: Workflow Catalog & Discovery
|
|
61
|
+
# =============================================================================
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
@large_response_handler(
    threshold_bytes=50 * 1024,  # responses above 50KB are persisted to file
    navigation={
        "list_all": "jq '.workflows'",
        "by_category": "jq '.workflows[] | select(.category==\"service-creation\")'",
        "by_id": "jq '.workflows[] | select(.id | contains(\"x\"))'",
    },
)
async def workflow_list(category: str | None = None) -> dict[str, Any]:
    """List available Arazzo workflows for BPA service design.

    Large responses (>50KB) are saved to file with navigation hints.

    Args:
        category: Filter by category (e.g., "service-creation", "roles-configuration").

    Returns:
        dict with workflows (list), total (int), categories (list).
    """
    registry = get_workflow_catalog()
    matching = registry.list_workflows(category=category)
    return {
        "workflows": matching,
        "total": len(matching),
        "categories": registry.categories,
    }
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
async def workflow_describe(workflow_id: str) -> dict[str, Any]:
    """Get detailed workflow specification including inputs, steps, and outputs.

    Args:
        workflow_id: Workflow identifier (e.g., "createMinimalService").

    Returns:
        dict with id, summary, description, category, inputs, steps, outputs.

    Raises:
        ToolError: If the workflow id is unknown.
    """
    catalog = get_workflow_catalog()
    found = catalog.get_workflow(workflow_id)
    if found is not None:
        return found.to_detail_dict()

    # Unknown id: build a short hint from the first few catalog entries.
    sample_ids = ", ".join(wf["id"] for wf in catalog.list_workflows()[:5])
    raise ToolError(
        f"Workflow '{workflow_id}' not found. "
        f"Available workflows include: {sample_ids}. "
        "Use 'workflow_list' to see all available workflows."
    )
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
async def workflow_search(query: str, limit: int = 10) -> dict[str, Any]:
    """Search workflows by keyword in IDs, summaries, and descriptions.

    Args:
        query: Search query (e.g., "role", "create service").
        limit: Max results (default 10).

    Returns:
        dict with query, matches (ranked by relevance), total.

    Raises:
        ToolError: If the query is empty or whitespace-only.
    """
    # Reject None, "", and whitespace-only queries up front.
    if not (query and query.strip()):
        raise ToolError(
            "Search query cannot be empty. Provide a keyword to search for."
        )

    hits = get_workflow_catalog().search_workflows(query.strip(), limit=limit)
    return {
        "query": query,
        "matches": hits,
        "total": len(hits),
    }
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
# =============================================================================
|
|
145
|
+
# Story 5.4: Workflow Executor
|
|
146
|
+
# =============================================================================
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
async def workflow_execute(
    workflow_id: str,
    inputs: dict[str, Any],
    dry_run: bool = False,
) -> dict[str, Any]:
    """Execute workflow steps sequentially, passing outputs between steps.

    Execution model: validates required inputs, then runs each step in
    declaration order. Each step's outputs are merged into a shared context
    so later steps can reference them via Arazzo expressions. The first
    failing step aborts the run and returns a failure payload.

    Args:
        workflow_id: Workflow to execute (e.g., "createMinimalService").
        inputs: Input values for the workflow.
        dry_run: Validate only, don't execute.

    Returns:
        dict with workflow_id, status, steps, outputs, message.
        On failure: failed_at_step, error, rollback_available.

    Raises:
        ToolError: If the workflow is unknown or required inputs are missing.
    """
    catalog = get_workflow_catalog()
    workflow = catalog.get_workflow(workflow_id)

    if workflow is None:
        raise ToolError(
            f"Workflow '{workflow_id}' not found. "
            "Use 'workflow_list' to see available workflows."
        )

    # Validate required inputs
    missing_inputs = []
    for inp in workflow.inputs:
        if inp.required and inp.name not in inputs:
            missing_inputs.append(inp.name)

    if missing_inputs:
        raise ToolError(
            f"Missing required inputs: {', '.join(missing_inputs)}. "
            f"Use 'workflow_describe {workflow_id}' to see required inputs."
        )

    # Initialize execution context shared across steps:
    #   inputs  - caller-provided values (copied, so caller's dict is untouched)
    #   steps   - per-step outputs keyed by step_id
    #   outputs - flat union of all step outputs (later steps overwrite earlier
    #             ones on key collision)
    context: dict[str, Any] = {
        "inputs": inputs.copy(),
        "steps": {},
        "outputs": {},
    }

    step_results: list[dict[str, Any]] = []
    completed_steps: list[str] = []

    # Dry run mode - return execution plan without calling any tools
    if dry_run:
        plan_steps = []
        for step in workflow.steps:
            plan_steps.append(
                {
                    "step_id": step.step_id,
                    "description": step.description,
                    "tool": step.mcp_tool,
                    # preview=True keeps unresolved expressions as placeholders
                    "inputs": _resolve_step_inputs(step, context, preview=True),
                    "status": "planned",
                }
            )

        return {
            "workflow_id": workflow_id,
            "status": "dry_run",
            "steps": plan_steps,
            "message": f"Execution plan for '{workflow.summary}'",
            "total_steps": len(plan_steps),
        }

    # Execute each step
    for step in workflow.steps:
        # Check conditional execution: a falsy condition skips the step but
        # records why, so the caller can see the full trace.
        if step.condition:
            condition_met = _evaluate_condition(step.condition, context)
            if not condition_met:
                step_results.append(
                    {
                        "step_id": step.step_id,
                        "status": "skipped",
                        "reason": f"Condition not met: {step.condition}",
                    }
                )
                continue

        # Resolve step inputs from context (may reference earlier step outputs)
        step_inputs = _resolve_step_inputs(step, context)

        # Execute the step
        try:
            step_output = await _execute_step(step, step_inputs)

            # Store outputs in context for later steps to reference
            context["steps"][step.step_id] = step_output
            for key, value in step_output.items():
                context["outputs"][key] = value

            step_results.append(
                {
                    "step_id": step.step_id,
                    "tool": step.mcp_tool,
                    "inputs": step_inputs,
                    "status": "success",
                    "outputs": step_output,
                }
            )
            completed_steps.append(step.step_id)

        except Exception as e:
            # Step failed - stop execution. Broad catch is deliberate: any
            # tool failure must be surfaced as a structured result, not a
            # raised exception.
            step_results.append(
                {
                    "step_id": step.step_id,
                    "tool": step.mcp_tool,
                    "inputs": step_inputs,
                    "status": "failed",
                    "error": str(e),
                }
            )

            return {
                "workflow_id": workflow_id,
                "status": "failed",
                "failed_at_step": step.step_id,
                "error": str(e),
                "steps": step_results,
                "completed_steps": completed_steps,
                # Rollback only makes sense if at least one step succeeded
                "rollback_available": len(completed_steps) > 0,
                "suggestion": _generate_failure_suggestion(step, str(e)),
            }

    # Collect final outputs declared by the workflow definition
    final_outputs = _collect_workflow_outputs(workflow, context)

    return {
        "workflow_id": workflow_id,
        "status": "completed",
        "steps": step_results,
        "outputs": final_outputs,
        "message": _generate_success_message(workflow, final_outputs),
    }
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def _resolve_step_inputs(
    step: Any,  # WorkflowStep
    context: dict[str, Any],
    preview: bool = False,
) -> dict[str, Any]:
    """Resolve step inputs from context using Arazzo expressions.

    Args:
        step: The workflow step.
        context: The execution context with inputs, steps, outputs.
        preview: If True, show placeholders instead of resolving.

    Returns:
        Dictionary of resolved inputs for the step.
    """
    resolved: dict[str, Any] = {}

    # Process request body
    if isinstance(step.request_body, dict):
        for key, value in step.request_body.items():
            # Convert camelCase keys to snake_case for Python tool parameters
            snake_key = _camel_to_snake(key)
            resolved[snake_key] = _resolve_expression(value, context, preview)
    elif isinstance(step.request_body, str) and step.request_body:
        # Handle string request body (e.g., for APIs that take raw string bodies)
        # Extract parameter name from the expression and convert to snake_case
        body_value = _resolve_expression(step.request_body, context, preview)
        param_name = _extract_param_name_from_expression(step.request_body)
        if param_name:
            resolved[param_name] = body_value
        else:
            # Fallback: use a generic key
            resolved["body"] = body_value

    # Process parameters
    for param in step.parameters:
        name = param.get("name")
        value = param.get("value")
        # BUG FIX: the previous check `if name and value:` silently dropped
        # parameters whose value is falsy but valid (0, False, ""). Only skip
        # when the value is genuinely absent (None) or the name is empty.
        if name and value is not None:
            resolved[name] = _resolve_expression(value, context, preview)

    return resolved
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
def _camel_to_snake(name: str) -> str:
|
|
336
|
+
"""Convert camelCase to snake_case.
|
|
337
|
+
|
|
338
|
+
Examples:
|
|
339
|
+
"shortName" -> "short_name"
|
|
340
|
+
"registrationId" -> "registration_id"
|
|
341
|
+
"name" -> "name"
|
|
342
|
+
"currencyCode" -> "currency_code"
|
|
343
|
+
|
|
344
|
+
Args:
|
|
345
|
+
name: The camelCase string.
|
|
346
|
+
|
|
347
|
+
Returns:
|
|
348
|
+
The snake_case string.
|
|
349
|
+
"""
|
|
350
|
+
import re
|
|
351
|
+
|
|
352
|
+
# Insert underscore before uppercase letters and convert to lowercase
|
|
353
|
+
snake_name = re.sub(r"([A-Z])", r"_\1", name).lower()
|
|
354
|
+
return snake_name.lstrip("_")
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
def _extract_param_name_from_expression(expr: str) -> str | None:
|
|
358
|
+
"""Extract parameter name from an Arazzo expression.
|
|
359
|
+
|
|
360
|
+
Converts camelCase to snake_case for Python function parameters.
|
|
361
|
+
|
|
362
|
+
Examples:
|
|
363
|
+
"$inputs.institutionId" -> "institution_id"
|
|
364
|
+
"{$inputs.institutionId}" -> "institution_id"
|
|
365
|
+
"$steps.foo.outputs.registrationId" -> "registration_id"
|
|
366
|
+
|
|
367
|
+
Args:
|
|
368
|
+
expr: The expression string.
|
|
369
|
+
|
|
370
|
+
Returns:
|
|
371
|
+
The extracted parameter name in snake_case, or None if not found.
|
|
372
|
+
"""
|
|
373
|
+
import re
|
|
374
|
+
|
|
375
|
+
# Match patterns like $inputs.fieldName or $steps.x.outputs.fieldName
|
|
376
|
+
match = re.search(r"\$(?:inputs|steps\.\w+\.outputs)\.(\w+)", expr)
|
|
377
|
+
if match:
|
|
378
|
+
camel_name = match.group(1)
|
|
379
|
+
return _camel_to_snake(camel_name)
|
|
380
|
+
|
|
381
|
+
return None
|
|
382
|
+
|
|
383
|
+
|
|
384
|
+
def _resolve_expression(
    value: Any,
    context: dict[str, Any],
    preview: bool = False,
) -> Any:
    """Resolve an Arazzo runtime expression.

    Delegates to the arazzo.expression module which implements proper
    Arazzo specification expression parsing and resolution.

    Supports:
    - Bare expressions: $inputs.fieldName
    - Embedded expressions: {$inputs.fieldName}
    - Mixed strings: "prefix-{$inputs.id}-suffix"
    - Step references: $steps.stepId.outputs.fieldName

    Args:
        value: The value to resolve (may be expression or literal).
        context: The execution context with 'inputs', 'steps', 'outputs' keys.
        preview: If True, return placeholder instead of resolving.

    Returns:
        Resolved value. Non-expression literals pass through unchanged
        (behavior of resolve_string -- see arazzo.expression).
    """
    # Thin wrapper kept so callers in this module have a single local
    # resolution entry point.
    return resolve_string(value, context, preview)
|
|
409
|
+
|
|
410
|
+
|
|
411
|
+
def _evaluate_condition(condition: str, context: dict[str, Any]) -> bool:
    """Evaluate a step condition expression.

    Supports simple comparisons ("$inputs.x == value", "$inputs.x != value",
    compared case-insensitively as strings) and bare truthiness checks on a
    resolved expression.

    Args:
        condition: The condition expression.
        context: The execution context.

    Returns:
        True if condition is met, False otherwise.
    """
    # Comparison forms, checked in order: equality first, then inequality.
    for op in ("==", "!="):
        if op not in condition:
            continue
        lhs, rhs = condition.split(op, 1)
        lhs_val = str(_resolve_expression(lhs.strip(), context)).lower()
        rhs_val = rhs.strip().strip("'\"").lower()
        return (lhs_val == rhs_val) if op == "==" else (lhs_val != rhs_val)

    # No comparison operator: resolve and treat the result as a truthy check.
    resolved = _resolve_expression(condition, context)
    if isinstance(resolved, bool):
        return resolved
    if isinstance(resolved, str):
        return resolved.lower() in ("true", "yes", "1")

    return bool(resolved)
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
# Pseudo-operations that don't require MCP tool calls.
# Steps whose operation_id is in this set are handled in-process by
# _handle_pseudo_operation instead of being dispatched to a tool.
PSEUDO_OPERATIONS = {
    "evaluateCondition",  # Conditional branching - evaluated by workflow engine
    "complete",  # Workflow completion marker
}
|
|
451
|
+
|
|
452
|
+
|
|
453
|
+
def _handle_pseudo_operation(
|
|
454
|
+
step: Any, # WorkflowStep
|
|
455
|
+
inputs: dict[str, Any],
|
|
456
|
+
) -> dict[str, Any]:
|
|
457
|
+
"""Handle pseudo-operations that don't require MCP tool calls.
|
|
458
|
+
|
|
459
|
+
Args:
|
|
460
|
+
step: The workflow step with pseudo-operation.
|
|
461
|
+
inputs: Resolved inputs for the step.
|
|
462
|
+
|
|
463
|
+
Returns:
|
|
464
|
+
Dictionary of outputs (typically empty or with status).
|
|
465
|
+
"""
|
|
466
|
+
operation_id = step.operation_id
|
|
467
|
+
|
|
468
|
+
if operation_id == "complete":
|
|
469
|
+
# Workflow completion marker - just return success
|
|
470
|
+
return {"status": "completed"}
|
|
471
|
+
|
|
472
|
+
if operation_id == "evaluateCondition":
|
|
473
|
+
# Conditional branching - the actual condition evaluation is done
|
|
474
|
+
# by the workflow engine based on successCriteria. We just return
|
|
475
|
+
# the inputs for the condition to be evaluated.
|
|
476
|
+
return {"evaluated": True, **inputs}
|
|
477
|
+
|
|
478
|
+
# Unknown pseudo-operation - return empty outputs
|
|
479
|
+
return {}
|
|
480
|
+
|
|
481
|
+
|
|
482
|
+
async def _execute_step(
    step: Any,  # WorkflowStep
    inputs: dict[str, Any],
) -> dict[str, Any]:
    """Execute a single workflow step by calling the mapped MCP tool.

    Args:
        step: The workflow step to execute.
        inputs: Resolved inputs for the step.

    Returns:
        Dictionary of outputs from the step. Non-dict tool results yield
        an empty outputs dict.

    Raises:
        ToolError: If the step has no mapped tool or the tool is unknown.
    """
    # Handle pseudo-operations that don't call MCP tools
    if step.operation_id in PSEUDO_OPERATIONS:
        return _handle_pseudo_operation(step, inputs)

    tool_name = step.mcp_tool
    if not tool_name:
        raise ToolError(
            f"Step '{step.step_id}' has no mapped MCP tool. "
            f"operationId: {step.operation_id}"
        )

    # Import tools dynamically to avoid circular imports
    tool_func = _get_tool_function(tool_name)
    if tool_func is None:
        raise ToolError(f"MCP tool '{tool_name}' not found for step '{step.step_id}'.")

    # Execute the tool (exceptions propagate to workflow_execute's handler)
    result = await tool_func(**inputs)

    # Extract outputs based on step output mappings
    outputs: dict[str, Any] = {}
    if isinstance(result, dict):
        # Use step output mappings if defined
        for output_name, output_expr in step.outputs.items():
            if isinstance(output_expr, str) and output_expr.startswith("$"):
                # Extract from result using Arazzo response expression
                # Format: $response.body.fieldName or $response.body#/json/pointer
                # NOTE(review): only dotted single-field access is implemented
                # below; the #/json/pointer form and deeper paths like
                # $response.body.a.b are NOT handled -- confirm whether any
                # workflow definition relies on them.
                parts = output_expr.split(".")
                if len(parts) >= 3 and parts[0] == "$response" and parts[1] == "body":
                    # $response.body.fieldName -> extract fieldName from result
                    field = parts[2]
                    if field in result:
                        outputs[output_name] = result[field]
                elif len(parts) >= 2 and parts[0] == "$response":
                    # $response.fieldName -> extract from result directly
                    field = parts[1]
                    if field in result:
                        outputs[output_name] = result[field]
            else:
                # Direct mapping: the expression itself names a result key
                if output_expr in result:
                    outputs[output_name] = result[output_expr]

        # If no explicit mappings matched, fall back to common output fields
        if not outputs:
            common_fields = ["id", "serviceId", "registrationId", "name", "key"]
            for field in common_fields:
                if field in result:
                    outputs[field] = result[field]

    return outputs
|
|
549
|
+
|
|
550
|
+
|
|
551
|
+
def _get_tool_function(tool_name: str) -> Any:
    """Get the tool function by name.

    Dynamically imports and returns the tool function from the appropriate module.
    Tool names follow the pattern: {entity}_{action} (e.g., service_create, role_list).

    Resolution strategy: split the tool name on underscores, then try the
    longest underscore-joined prefix first; if a matching module does not
    expose the function, fall back to the first segment alone.

    Args:
        tool_name: The MCP tool name.

    Returns:
        The tool function or None if not found.
    """
    # Import all tool modules (function-local to avoid circular imports
    # at module load time)
    from mcp_eregistrations_bpa.tools import (
        actions,
        analysis,
        audit,
        behaviours,
        bots,
        classifications,
        costs,
        determinants,
        document_requirements,
        fields,
        forms,
        messages,
        notifications,
        registration_institutions,
        registrations,
        role_status,
        role_units,
        roles,
        rollback,
        services,
    )

    # Map tool prefixes to modules
    # Order matters for compound names - longer prefixes first
    # NOTE(review): keys contain no underscores, so prefixes built with
    # "_".join only ever match on single-segment prefixes -- confirm
    # multi-word entities (e.g. "serviceregistration") really appear as one
    # token in tool names.
    module_map = {
        # Service tools
        "service": services,
        "serviceregistration": registrations,  # serviceregistration_link
        # Registration tools
        "registration": registrations,
        "registrationinstitution": registration_institutions,
        # Role tools
        "role": roles,
        "roleinstitution": roles,
        "roleregistration": roles,
        "rolestatus": role_status,
        "roleunit": role_units,
        # Bot tools
        "bot": bots,
        # Cost tools
        "cost": costs,
        # Determinant tools (all types)
        "textdeterminant": determinants,
        "selectdeterminant": determinants,
        "numericdeterminant": determinants,
        "booleandeterminant": determinants,
        "datedeterminant": determinants,
        "classificationdeterminant": determinants,
        "griddeterminant": determinants,
        "determinant": determinants,
        # Document tools
        "documentrequirement": document_requirements,
        "requirement": document_requirements,
        # Form tools
        "form": forms,
        "field": fields,
        # Behaviour tools
        "componentbehaviour": behaviours,
        "componentaction": actions,
        "effect": behaviours,
        # Classification tools
        "classification": classifications,
        # Message & notification tools
        "message": messages,
        "notification": notifications,
        # Institution tools
        "institution": registration_institutions,
        # Analysis tools
        "analyze": analysis,
        # Audit & rollback tools
        "audit": audit,
        "rollback": rollback,
    }

    # Parse tool name to find module and function
    # Handle compound names like "cost_create_fixed" or "textdeterminant_create"
    parts = tool_name.split("_")
    if not parts:
        return None

    # Try to find matching module, longest prefix first
    for prefix_len in range(len(parts), 0, -1):
        prefix = "_".join(parts[:prefix_len])
        if prefix in module_map:
            module = module_map[prefix]
            if module is not None:
                func = getattr(module, tool_name, None)
                if func is not None:
                    return func
            # A prefix matched but the module lacks the function;
            # stop prefix shortening and try the fallback below.
            break

    # Fallback: try first part as module prefix
    prefix = parts[0]
    module = module_map.get(prefix)
    if module is not None:
        return getattr(module, tool_name, None)

    return None
|
|
663
|
+
|
|
664
|
+
|
|
665
|
+
def _collect_workflow_outputs(
    workflow: Any,  # WorkflowDefinition
    context: dict[str, Any],
) -> dict[str, Any]:
    """Collect final workflow outputs from context.

    Args:
        workflow: The workflow definition.
        context: The execution context.

    Returns:
        Dictionary of workflow outputs. If no declared output resolves,
        falls back to a copy of everything the steps produced.
    """
    accumulated = context["outputs"]
    collected: dict[str, Any] = {}

    for name, expr in workflow.outputs.items():
        value = _resolve_expression(expr, context)
        if value != expr:
            # The expression resolved to something concrete - use it.
            collected[name] = value
        elif name in accumulated:
            # Expression stayed unresolved, but a step produced this key directly.
            collected[name] = accumulated[name]

    # Nothing declared resolved: expose all step outputs instead.
    return collected if collected else accumulated.copy()
|
|
692
|
+
|
|
693
|
+
|
|
694
|
+
def _generate_success_message(
|
|
695
|
+
workflow: Any, # WorkflowDefinition
|
|
696
|
+
outputs: dict[str, Any],
|
|
697
|
+
) -> str:
|
|
698
|
+
"""Generate a success message for the workflow.
|
|
699
|
+
|
|
700
|
+
Args:
|
|
701
|
+
workflow: The workflow definition.
|
|
702
|
+
outputs: The workflow outputs.
|
|
703
|
+
|
|
704
|
+
Returns:
|
|
705
|
+
Human-readable success message.
|
|
706
|
+
"""
|
|
707
|
+
# Extract key values for message
|
|
708
|
+
service_name = outputs.get("serviceName", outputs.get("name", ""))
|
|
709
|
+
service_id = outputs.get("serviceId", outputs.get("id", ""))
|
|
710
|
+
|
|
711
|
+
if service_name:
|
|
712
|
+
return f"Workflow '{workflow.summary}' completed. Created '{service_name}'."
|
|
713
|
+
if service_id:
|
|
714
|
+
return f"Workflow '{workflow.summary}' completed. ID: {service_id}"
|
|
715
|
+
|
|
716
|
+
return f"Workflow '{workflow.summary}' completed successfully."
|
|
717
|
+
|
|
718
|
+
|
|
719
|
+
def _generate_failure_suggestion(
|
|
720
|
+
step: Any, # WorkflowStep
|
|
721
|
+
error: str,
|
|
722
|
+
) -> str:
|
|
723
|
+
"""Generate a suggestion for recovering from a failure.
|
|
724
|
+
|
|
725
|
+
Args:
|
|
726
|
+
step: The failed step.
|
|
727
|
+
error: The error message.
|
|
728
|
+
|
|
729
|
+
Returns:
|
|
730
|
+
Suggestion for recovery.
|
|
731
|
+
"""
|
|
732
|
+
error_lower = error.lower()
|
|
733
|
+
|
|
734
|
+
if "already exists" in error_lower:
|
|
735
|
+
return "Use a different name/key or delete the existing resource."
|
|
736
|
+
if "not found" in error_lower:
|
|
737
|
+
return "Verify the referenced resource exists. Check IDs and names."
|
|
738
|
+
if "permission" in error_lower or "unauthorized" in error_lower:
|
|
739
|
+
return "Check your permissions. You may need a different role."
|
|
740
|
+
if "validation" in error_lower or "invalid" in error_lower:
|
|
741
|
+
return "Check input values match the required format."
|
|
742
|
+
|
|
743
|
+
return "Review the error message and try again with corrected inputs."
|
|
744
|
+
|
|
745
|
+
|
|
746
|
+
# =============================================================================
|
|
747
|
+
# Story 5.5: Progress Reporting & Streaming
|
|
748
|
+
# =============================================================================
|
|
749
|
+
|
|
750
|
+
# Track running workflow executions for status/cancel.
# Keyed by execution_id (e.g. "exec-1a2b3c4d"); values are mutable state dicts
# carrying workflow_id/status/current_step/completed_steps/cancelled.
# NOTE(review): module-level mutable state - presumably a single-process
# server; confirm this is not shared across workers.
_running_executions: dict[str, dict[str, Any]] = {}

# Track completed/failed executions for retry/rollback.
_execution_history: dict[str, dict[str, Any]] = {}
|
|
755
|
+
|
|
756
|
+
|
|
757
|
+
async def workflow_execute_with_progress(
    workflow_id: str,
    inputs: dict[str, Any],
    on_progress: Callable[[dict[str, Any]], None] | None = None,
) -> dict[str, Any]:
    """Execute workflow with progress callbacks after each step.

    Args:
        workflow_id: Workflow to execute.
        inputs: Input values for the workflow.
        on_progress: Callback receiving {type, execution_id, step, total, percent,
            message, elapsed_ms, current_step}.

    Returns:
        dict with same structure as workflow_execute.

    Raises:
        ToolError: If the workflow does not exist or required inputs are missing.
    """
    catalog = get_workflow_catalog()
    workflow = catalog.get_workflow(workflow_id)

    if workflow is None:
        raise ToolError(
            f"Workflow '{workflow_id}' not found. "
            "Use 'workflow_list' to see available workflows."
        )

    # Validate required inputs up front so we fail before any side effects.
    missing_inputs = [
        inp.name for inp in workflow.inputs if inp.required and inp.name not in inputs
    ]

    if missing_inputs:
        raise ToolError(
            f"Missing required inputs: {', '.join(missing_inputs)}. "
            f"Use 'workflow_describe {workflow_id}' to see required inputs."
        )

    # Generate execution ID
    execution_id = f"exec-{uuid.uuid4().hex[:8]}"
    start_time = time.time()

    # Register execution for tracking (consumed by workflow_status/workflow_cancel).
    execution_state: dict[str, Any] = {
        "execution_id": execution_id,
        "workflow_id": workflow_id,
        "status": "running",
        "current_step": None,
        "completed_steps": [],
        "start_time": start_time,
        "cancelled": False,
    }
    _running_executions[execution_id] = execution_state

    # Initialize execution context shared across steps: later steps resolve
    # their inputs from "steps" (per-step outputs) and "outputs" (flattened).
    context: dict[str, Any] = {
        "inputs": inputs.copy(),
        "steps": {},
        "outputs": {},
    }

    step_results: list[dict[str, Any]] = []
    total_steps = len(workflow.steps)

    try:
        # Execute each step
        for step_index, step in enumerate(workflow.steps, 1):
            # Check for cooperative cancellation (flag set by workflow_cancel).
            if execution_state["cancelled"]:
                # BUGFIX: record the terminal status so the history snapshot
                # written in the `finally` block does not stay "running"
                # forever (the retry gate depends on an accurate status).
                execution_state["status"] = "cancelled"
                return {
                    "workflow_id": workflow_id,
                    "execution_id": execution_id,
                    "status": "cancelled",
                    "cancelled_at_step": step.step_id,
                    "completed_steps": execution_state["completed_steps"],
                    "remaining_steps": total_steps - step_index + 1,
                    "rollback_available": len(execution_state["completed_steps"]) > 0,
                }

            execution_state["current_step"] = step.step_id

            # Conditional execution: skip the step when its condition is false.
            if step.condition:
                condition_met = _evaluate_condition(step.condition, context)
                if not condition_met:
                    step_results.append(
                        {
                            "step_id": step.step_id,
                            "status": "skipped",
                            "reason": f"Condition not met: {step.condition}",
                        }
                    )
                    continue

            # Resolve step inputs from context
            step_inputs = _resolve_step_inputs(step, context)

            # Execute the step
            try:
                step_output = await _execute_step(step, step_inputs)

                # Store outputs in context so later steps can reference them.
                context["steps"][step.step_id] = step_output
                for key, value in step_output.items():
                    context["outputs"][key] = value

                step_results.append(
                    {
                        "step_id": step.step_id,
                        "tool": step.mcp_tool,
                        "inputs": step_inputs,
                        "status": "success",
                        "outputs": step_output,
                    }
                )
                execution_state["completed_steps"].append(step.step_id)

                # Report progress
                elapsed_ms = int((time.time() - start_time) * 1000)
                percent = int((step_index / total_steps) * 100)

                progress_info = {
                    "type": "progress",
                    "execution_id": execution_id,
                    "workflow_id": workflow_id,
                    "step": step_index,
                    "total": total_steps,
                    "percent": percent,
                    "message": _generate_step_progress_message(step, step_output),
                    "elapsed_ms": elapsed_ms,
                    "current_step": step.step_id,
                }

                if on_progress:
                    on_progress(progress_info)

            except Exception as e:
                # Step failed - stop execution and report partial results.
                step_results.append(
                    {
                        "step_id": step.step_id,
                        "tool": step.mcp_tool,
                        "inputs": step_inputs,
                        "status": "failed",
                        "error": str(e),
                    }
                )

                # BUGFIX: persist the terminal status for the history snapshot.
                execution_state["status"] = "failed"
                elapsed_ms = int((time.time() - start_time) * 1000)
                return {
                    "workflow_id": workflow_id,
                    "execution_id": execution_id,
                    "status": "failed",
                    "failed_at_step": step.step_id,
                    "error": str(e),
                    "steps": step_results,
                    "completed_steps": execution_state["completed_steps"],
                    "rollback_available": len(execution_state["completed_steps"]) > 0,
                    "suggestion": _generate_failure_suggestion(step, str(e)),
                    "duration_ms": elapsed_ms,
                }

        # Collect final outputs
        final_outputs = _collect_workflow_outputs(workflow, context)
        elapsed_ms = int((time.time() - start_time) * 1000)

        # BUGFIX: persist the terminal status for the history snapshot.
        execution_state["status"] = "completed"

        # Report completion
        if on_progress:
            on_progress(
                {
                    "type": "complete",
                    "execution_id": execution_id,
                    "workflow_id": workflow_id,
                    "summary": _generate_success_message(workflow, final_outputs),
                    "duration_ms": elapsed_ms,
                }
            )

        return {
            "workflow_id": workflow_id,
            "execution_id": execution_id,
            "status": "completed",
            "steps": step_results,
            "outputs": final_outputs,
            "message": _generate_success_message(workflow, final_outputs),
            "duration_ms": elapsed_ms,
        }

    finally:
        # Save execution history for retry/rollback before cleanup. The
        # snapshot carries the terminal status assigned above; executions that
        # exit via an unexpected (propagating) exception keep status "running".
        if execution_id in _running_executions:
            _execution_history[execution_id] = {
                **_running_executions[execution_id],
                "inputs": inputs.copy(),
                "context": context.copy(),
                "step_results": step_results,
                "total_steps": total_steps,
            }
            del _running_executions[execution_id]
|
|
955
|
+
|
|
956
|
+
|
|
957
|
+
def _generate_step_progress_message(
|
|
958
|
+
step: Any, # WorkflowStep
|
|
959
|
+
outputs: dict[str, Any],
|
|
960
|
+
) -> str:
|
|
961
|
+
"""Generate a progress message for a completed step.
|
|
962
|
+
|
|
963
|
+
Args:
|
|
964
|
+
step: The completed step.
|
|
965
|
+
outputs: The step outputs.
|
|
966
|
+
|
|
967
|
+
Returns:
|
|
968
|
+
Human-readable progress message.
|
|
969
|
+
"""
|
|
970
|
+
name = outputs.get("name", outputs.get("serviceName", ""))
|
|
971
|
+
if name:
|
|
972
|
+
return f"{step.description}: '{name}'"
|
|
973
|
+
return str(step.description)
|
|
974
|
+
|
|
975
|
+
|
|
976
|
+
async def workflow_status(execution_id: str) -> dict[str, Any]:
    """Get status of a running workflow execution.

    Args:
        execution_id: Execution ID from workflow_execute_with_progress.

    Returns:
        dict with execution_id, workflow_id, status, current_step, completed_steps,
        elapsed_ms; or a "not_found" stub when the execution is unknown.
    """
    state = _running_executions.get(execution_id)
    if state is None:
        # Finished executions are removed from the running table, so an
        # unknown ID may simply mean the workflow already completed.
        return {
            "execution_id": execution_id,
            "status": "not_found",
            "message": "Execution not found or already completed.",
        }

    elapsed_ms = int((time.time() - state["start_time"]) * 1000)
    current_status = "cancelling" if state["cancelled"] else "running"

    return {
        "execution_id": execution_id,
        "workflow_id": state["workflow_id"],
        "status": current_status,
        "current_step": state["current_step"],
        "completed_steps": state["completed_steps"],
        "elapsed_ms": elapsed_ms,
    }
|
|
1004
|
+
|
|
1005
|
+
|
|
1006
|
+
async def workflow_cancel(execution_id: str) -> dict[str, Any]:
    """Cancel a running workflow. Stops after current step completes.

    Args:
        execution_id: Execution ID from workflow_execute_with_progress.

    Returns:
        dict with cancelled, execution_id, message. Completed steps can be rolled back.

    Raises:
        ToolError: If the execution is unknown or has already finished.
    """
    state = _running_executions.get(execution_id)
    if state is None:
        raise ToolError(
            f"Execution '{execution_id}' not found or already completed. "
            "Use 'workflow_status' to check execution status."
        )

    # Cooperative cancellation: the executor checks this flag between steps,
    # so the step currently in flight is allowed to finish.
    state["cancelled"] = True

    return {
        "cancelled": True,
        "execution_id": execution_id,
        "workflow_id": state["workflow_id"],
        "current_step": state["current_step"],
        "completed_steps": state["completed_steps"],
        "message": "Cancellation requested. Workflow will stop after current step.",
    }
|
|
1032
|
+
|
|
1033
|
+
|
|
1034
|
+
# =============================================================================
|
|
1035
|
+
# Story 5.6: Error Recovery & Rollback
|
|
1036
|
+
# =============================================================================
|
|
1037
|
+
|
|
1038
|
+
|
|
1039
|
+
async def workflow_retry(
    execution_id: str,
    updated_inputs: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Retry failed workflow from point of failure with optional input updates.

    Args:
        execution_id: Failed workflow's execution ID.
        updated_inputs: Updated input values to fix failure cause.

    Returns:
        dict with execution_id, original_execution_id, resumed_from_step, status,
        outputs.

    Raises:
        ToolError: If the execution is unknown, not retryable, the workflow no
            longer exists, or the resume step cannot be located.
    """
    if execution_id not in _execution_history:
        raise ToolError(
            f"Execution '{execution_id}' not found in history. "
            "Only failed or cancelled executions can be retried."
        )

    history = _execution_history[execution_id]

    # Check that execution failed or was cancelled
    # NOTE(review): history["status"] is snapshotted from the executor's state;
    # verify the executor actually records a terminal status ("failed" /
    # "cancelled") before completion, otherwise this gate may reject retries.
    if history.get("status") not in (None, "failed", "cancelled"):
        raise ToolError(
            f"Execution '{execution_id}' cannot be retried. "
            f"Status is '{history.get('status')}'. "
            "Only failed/cancelled executions can be retried."
        )

    workflow_id = history["workflow_id"]
    catalog = get_workflow_catalog()
    workflow = catalog.get_workflow(workflow_id)

    if workflow is None:
        raise ToolError(f"Workflow '{workflow_id}' no longer exists. Cannot retry.")

    # Prepare inputs - merge original with updates
    # (updated values override the originals key-by-key).
    inputs = history["inputs"].copy()
    if updated_inputs:
        inputs.update(updated_inputs)

    # Find the failed step to resume from
    completed_steps = history.get("completed_steps", [])
    failed_step = None
    for step_result in history.get("step_results", []):
        if step_result.get("status") == "failed":
            failed_step = step_result.get("step_id")
            break

    # If no failed step found (cancelled), find first incomplete step
    if not failed_step:
        for step in workflow.steps:
            if step.step_id not in completed_steps:
                failed_step = step.step_id
                break

    # Nothing left to run: every step in the workflow already completed.
    if not failed_step:
        return {
            "execution_id": execution_id,
            "status": "already_completed",
            "message": "All steps were already completed. Nothing to retry.",
        }

    # Generate new execution ID for retry
    new_execution_id = f"exec-{uuid.uuid4().hex[:8]}"
    start_time = time.time()

    # Restore context from history
    # so outputs of previously completed steps remain resolvable.
    context = history.get("context", {"inputs": inputs, "steps": {}, "outputs": {}})
    context["inputs"] = inputs  # Use updated inputs

    # Register new execution
    # (tracked under the NEW id; the original history entry is left intact).
    execution_state: dict[str, Any] = {
        "execution_id": new_execution_id,
        "workflow_id": workflow_id,
        "status": "running",
        "current_step": None,
        "completed_steps": completed_steps.copy(),
        "start_time": start_time,
        "cancelled": False,
    }
    _running_executions[new_execution_id] = execution_state

    step_results: list[dict[str, Any]] = []
    total_steps = len(workflow.steps)
    resume_from_index = None

    # Find the index to resume from
    for i, step in enumerate(workflow.steps):
        if step.step_id == failed_step:
            resume_from_index = i
            break

    if resume_from_index is None:
        raise ToolError(f"Cannot find step '{failed_step}' in workflow.")

    try:
        # Execute remaining steps
        # step_index is 1-based relative to the FULL workflow, so progress
        # numbers line up with a fresh execution.
        for step_index, step in enumerate(
            workflow.steps[resume_from_index:], resume_from_index + 1
        ):
            # Cooperative cancellation between steps (set by workflow_cancel).
            if execution_state["cancelled"]:
                return {
                    "execution_id": new_execution_id,
                    "original_execution_id": execution_id,
                    "status": "cancelled",
                    "resumed_from_step": failed_step,
                    "completed_steps": execution_state["completed_steps"],
                }

            execution_state["current_step"] = step.step_id

            # Check conditional execution
            if step.condition:
                condition_met = _evaluate_condition(step.condition, context)
                if not condition_met:
                    step_results.append(
                        {
                            "step_id": step.step_id,
                            "status": "skipped",
                            "reason": f"Condition not met: {step.condition}",
                        }
                    )
                    continue

            # Resolve step inputs from context
            step_inputs = _resolve_step_inputs(step, context)

            try:
                step_output = await _execute_step(step, step_inputs)

                # Publish this step's outputs for downstream steps.
                context["steps"][step.step_id] = step_output
                for key, value in step_output.items():
                    context["outputs"][key] = value

                step_results.append(
                    {
                        "step_id": step.step_id,
                        "tool": step.mcp_tool,
                        "inputs": step_inputs,
                        "status": "success",
                        "outputs": step_output,
                    }
                )
                execution_state["completed_steps"].append(step.step_id)

            except Exception as e:
                # The retry itself failed: report and stop; rollback stays
                # available because earlier steps completed.
                step_results.append(
                    {
                        "step_id": step.step_id,
                        "tool": step.mcp_tool,
                        "inputs": step_inputs,
                        "status": "failed",
                        "error": str(e),
                    }
                )

                elapsed_ms = int((time.time() - start_time) * 1000)
                return {
                    "execution_id": new_execution_id,
                    "original_execution_id": execution_id,
                    "status": "failed",
                    "resumed_from_step": failed_step,
                    "failed_at_step": step.step_id,
                    "error": str(e),
                    "steps": step_results,
                    "completed_steps": execution_state["completed_steps"],
                    "rollback_available": True,
                    "suggestion": _generate_failure_suggestion(step, str(e)),
                    "duration_ms": elapsed_ms,
                }

        # Collect final outputs
        final_outputs = _collect_workflow_outputs(workflow, context)
        elapsed_ms = int((time.time() - start_time) * 1000)

        return {
            "execution_id": new_execution_id,
            "original_execution_id": execution_id,
            "status": "completed",
            "resumed_from_step": failed_step,
            "steps": step_results,
            "outputs": final_outputs,
            "message": f"Workflow completed after retry from '{failed_step}'",
            "duration_ms": elapsed_ms,
        }

    finally:
        # Snapshot the retry execution into history (under the new id) and
        # remove it from the running table, mirroring the main executor.
        if new_execution_id in _running_executions:
            _execution_history[new_execution_id] = {
                **_running_executions[new_execution_id],
                "inputs": inputs.copy(),
                "context": context.copy(),
                "step_results": step_results,
                "total_steps": total_steps,
            }
            del _running_executions[new_execution_id]
|
|
1237
|
+
|
|
1238
|
+
|
|
1239
|
+
async def workflow_rollback(
    execution_id: str,
    keep_steps: list[str] | None = None,
) -> dict[str, Any]:
    """Roll back completed workflow steps using audit system.

    Args:
        execution_id: Execution ID to roll back.
        keep_steps: Step IDs to keep (not roll back). None = rollback all.

    Returns:
        dict with execution_id, rollback_status, rolled_back_steps, kept_steps, message.

    Raises:
        ToolError: If the execution has no recorded history.
    """
    if execution_id not in _execution_history:
        raise ToolError(
            f"Execution '{execution_id}' not found in history. "
            "Cannot roll back executions that have no history."
        )

    history = _execution_history[execution_id]
    completed_steps = history.get("completed_steps", [])

    # Nothing ever completed, so there is nothing to undo.
    if not completed_steps:
        return {
            "execution_id": execution_id,
            "rollback_status": "nothing_to_rollback",
            "message": "No completed steps to roll back.",
        }

    # Determine which steps to roll back (reverse order)
    # — undoing newest-first so later steps are removed before the steps
    # they depended on.
    keep_steps = keep_steps or []
    steps_to_rollback = [s for s in reversed(completed_steps) if s not in keep_steps]
    kept_steps = [s for s in completed_steps if s in keep_steps]

    if not steps_to_rollback:
        return {
            "execution_id": execution_id,
            "rollback_status": "nothing_to_rollback",
            "kept_steps": kept_steps,
            "message": "All steps were in keep_steps. Nothing to roll back.",
        }

    # Get step results for audit info
    # (only successful steps have outputs worth undoing).
    step_results = history.get("step_results", [])
    step_outputs: dict[str, dict[str, Any]] = {}
    for result in step_results:
        if result.get("status") == "success":
            step_outputs[result["step_id"]] = result.get("outputs", {})

    rollback_details: list[dict[str, Any]] = []
    rollback_errors: list[str] = []

    # Roll back each step in reverse order
    # A failure on one step is recorded but does not stop the loop.
    for step_id in steps_to_rollback:
        outputs = step_outputs.get(step_id, {})

        # Determine what to roll back based on outputs
        # This integrates with the existing rollback system
        rollback_result = await _rollback_step_outputs(step_id, outputs)

        if rollback_result.get("success"):
            rollback_details.append(
                {
                    "step": step_id,
                    "action": rollback_result.get("action", "rolled back"),
                    "details": rollback_result.get("details"),
                }
            )
        else:
            rollback_errors.append(
                f"Failed to rollback {step_id}: {rollback_result.get('error')}"
            )

    rollback_status = "completed" if not rollback_errors else "partial"

    # Clean up execution history after rollback
    # — only when everything was undone; partial rollbacks (or kept steps)
    # keep the history entry so a follow-up rollback remains possible.
    if rollback_status == "completed" and not kept_steps:
        del _execution_history[execution_id]

    return {
        "execution_id": execution_id,
        "rollback_status": rollback_status,
        "rolled_back_steps": steps_to_rollback,
        "kept_steps": kept_steps,
        "rollback_details": rollback_details,
        "errors": rollback_errors if rollback_errors else None,
        "message": _generate_rollback_message(
            steps_to_rollback, kept_steps, rollback_errors
        ),
    }
|
|
1329
|
+
|
|
1330
|
+
|
|
1331
|
+
async def _rollback_step_outputs(
|
|
1332
|
+
step_id: str,
|
|
1333
|
+
outputs: dict[str, Any],
|
|
1334
|
+
) -> dict[str, Any]:
|
|
1335
|
+
"""Roll back a step's outputs using the appropriate rollback method.
|
|
1336
|
+
|
|
1337
|
+
Args:
|
|
1338
|
+
step_id: The step ID being rolled back.
|
|
1339
|
+
outputs: The outputs from the step to roll back.
|
|
1340
|
+
|
|
1341
|
+
Returns:
|
|
1342
|
+
dict with success status and details.
|
|
1343
|
+
"""
|
|
1344
|
+
# For now, log the rollback action
|
|
1345
|
+
# Full integration with audit/rollback system would require audit_id
|
|
1346
|
+
action = f"Marked for rollback: {step_id}"
|
|
1347
|
+
details = {"outputs_to_rollback": outputs}
|
|
1348
|
+
|
|
1349
|
+
# If we have an audit_id in outputs, use the real rollback
|
|
1350
|
+
audit_id = outputs.get("audit_id")
|
|
1351
|
+
if audit_id:
|
|
1352
|
+
try:
|
|
1353
|
+
from mcp_eregistrations_bpa.tools.rollback import rollback
|
|
1354
|
+
|
|
1355
|
+
result = await rollback(audit_id)
|
|
1356
|
+
return {
|
|
1357
|
+
"success": True,
|
|
1358
|
+
"action": f"Rolled back via audit {audit_id}",
|
|
1359
|
+
"details": result,
|
|
1360
|
+
}
|
|
1361
|
+
except Exception as e:
|
|
1362
|
+
return {
|
|
1363
|
+
"success": False,
|
|
1364
|
+
"error": str(e),
|
|
1365
|
+
}
|
|
1366
|
+
|
|
1367
|
+
# Without audit_id, we record the intent but can't auto-rollback
|
|
1368
|
+
return {
|
|
1369
|
+
"success": True,
|
|
1370
|
+
"action": action,
|
|
1371
|
+
"details": details,
|
|
1372
|
+
"note": "Manual rollback may be required (no audit_id)",
|
|
1373
|
+
}
|
|
1374
|
+
|
|
1375
|
+
|
|
1376
|
+
def _generate_rollback_message(
|
|
1377
|
+
rolled_back: list[str],
|
|
1378
|
+
kept: list[str],
|
|
1379
|
+
errors: list[str],
|
|
1380
|
+
) -> str:
|
|
1381
|
+
"""Generate a human-readable rollback message.
|
|
1382
|
+
|
|
1383
|
+
Args:
|
|
1384
|
+
rolled_back: Steps that were rolled back.
|
|
1385
|
+
kept: Steps that were kept.
|
|
1386
|
+
errors: Any errors that occurred.
|
|
1387
|
+
|
|
1388
|
+
Returns:
|
|
1389
|
+
Rollback summary message.
|
|
1390
|
+
"""
|
|
1391
|
+
if errors:
|
|
1392
|
+
return f"Partial rollback completed with {len(errors)} error(s)."
|
|
1393
|
+
|
|
1394
|
+
if kept:
|
|
1395
|
+
return f"Rolled back {len(rolled_back)} step(s), kept {len(kept)} step(s)."
|
|
1396
|
+
|
|
1397
|
+
return "All workflow changes have been rolled back."
|
|
1398
|
+
|
|
1399
|
+
|
|
1400
|
+
# =============================================================================
|
|
1401
|
+
# Story 5.7: Workflow Chaining & Composition
|
|
1402
|
+
# =============================================================================
|
|
1403
|
+
|
|
1404
|
+
|
|
1405
|
+
async def workflow_chain(
    workflows: list[dict[str, Any]],
    rollback_on_failure: bool = True,
) -> dict[str, Any]:
    """Execute workflows sequentially, passing outputs via $chain[i].outputs.field.

    Args:
        workflows: List of {workflow_id, inputs}. Inputs can reference outputs.
        rollback_on_failure: Auto-rollback on failure (default True).

    Returns:
        dict with chain_id, status, results, failed_at (if failed), rolled_back,
        summary.

    Raises:
        ToolError: If the chain is empty or an entry lacks 'workflow_id'.
    """
    if not workflows:
        raise ToolError("Workflow chain must contain at least one workflow.")

    chain_id = f"chain-{uuid.uuid4().hex[:8]}"
    start_time = time.time()

    results: list[dict[str, Any]] = []
    # Execution IDs collected so far; used to roll back earlier workflows
    # when a later one fails.
    execution_ids: list[str] = []

    for i, wf_spec in enumerate(workflows):
        workflow_id = wf_spec.get("workflow_id")
        if not workflow_id:
            raise ToolError(f"Workflow at index {i} is missing 'workflow_id'.")

        # Resolve inputs that reference previous chain outputs
        # ($chain[i].outputs.field expressions are replaced with real values).
        raw_inputs = wf_spec.get("inputs", {})
        resolved_inputs = _resolve_chain_inputs(raw_inputs, results)

        try:
            # Execute the workflow
            result = await workflow_execute_with_progress(
                workflow_id=workflow_id,
                inputs=resolved_inputs,
            )

            results.append(
                {
                    "workflow_id": workflow_id,
                    "index": i,
                    "status": result.get("status", "unknown"),
                    "outputs": result.get("outputs", {}),
                    "execution_id": result.get("execution_id"),
                }
            )

            if result.get("execution_id"):
                execution_ids.append(result["execution_id"])

            # Check for failure
            if result.get("status") == "failed":
                if rollback_on_failure and execution_ids:
                    # [:-1] excludes the just-failed execution: its own
                    # internal rollback is reported separately; only the
                    # earlier, successful workflows are undone here.
                    rollback_results = await _rollback_chain(execution_ids[:-1])
                    elapsed_ms = int((time.time() - start_time) * 1000)

                    return {
                        "chain_id": chain_id,
                        "status": "rolled_back",
                        "failed_at": i,
                        "failed_workflow": workflow_id,
                        "error": result.get("error"),
                        "results": results,
                        "rolled_back": [r["workflow_id"] for r in results[:-1]],
                        "rollback_details": rollback_results,
                        "summary": (
                            f"Chain failed at '{workflow_id}'. "
                            "Previous workflows rolled back."
                        ),
                        "duration_ms": elapsed_ms,
                    }
                else:
                    # Rollback disabled (or nothing to undo): report the
                    # failure and leave completed workflows in place.
                    elapsed_ms = int((time.time() - start_time) * 1000)
                    return {
                        "chain_id": chain_id,
                        "status": "failed",
                        "failed_at": i,
                        "failed_workflow": workflow_id,
                        "error": result.get("error"),
                        "results": results,
                        "summary": f"Chain failed at '{workflow_id}'.",
                        "duration_ms": elapsed_ms,
                    }

        except Exception as e:
            # Handle unexpected execution errors
            # (the workflow raised instead of returning a failed result, so
            # no execution_id was recorded for this entry).
            results.append(
                {
                    "workflow_id": workflow_id,
                    "index": i,
                    "status": "error",
                    "error": str(e),
                }
            )

            if rollback_on_failure and execution_ids:
                # Unlike the failed-result path, ALL recorded executions are
                # rolled back: the erroring workflow never produced an id.
                rollback_results = await _rollback_chain(execution_ids)
                elapsed_ms = int((time.time() - start_time) * 1000)

                return {
                    "chain_id": chain_id,
                    "status": "rolled_back",
                    "failed_at": i,
                    "failed_workflow": workflow_id,
                    "error": str(e),
                    "results": results,
                    "rolled_back": [
                        r["workflow_id"] for r in results[:-1] if r.get("execution_id")
                    ],
                    "rollback_details": rollback_results,
                    "summary": (
                        f"Chain error at '{workflow_id}'. "
                        "Previous workflows rolled back."
                    ),
                    "duration_ms": elapsed_ms,
                }
            else:
                elapsed_ms = int((time.time() - start_time) * 1000)
                return {
                    "chain_id": chain_id,
                    "status": "failed",
                    "failed_at": i,
                    "failed_workflow": workflow_id,
                    "error": str(e),
                    "results": results,
                    "summary": f"Chain error at '{workflow_id}'.",
                    "duration_ms": elapsed_ms,
                }

    # All workflows completed successfully
    elapsed_ms = int((time.time() - start_time) * 1000)

    # Collect all outputs for easy access
    # (later workflows overwrite earlier ones on key collisions).
    all_outputs: dict[str, Any] = {}
    for r in results:
        for key, value in r.get("outputs", {}).items():
            all_outputs[key] = value

    return {
        "chain_id": chain_id,
        "status": "completed",
        "results": results,
        "outputs": all_outputs,
        "summary": f"Executed {len(workflows)} workflow(s) successfully.",
        "duration_ms": elapsed_ms,
    }
|
|
1553
|
+
|
|
1554
|
+
|
|
1555
|
+
def _resolve_chain_inputs(
|
|
1556
|
+
raw_inputs: dict[str, Any],
|
|
1557
|
+
chain_results: list[dict[str, Any]],
|
|
1558
|
+
) -> dict[str, Any]:
|
|
1559
|
+
"""Resolve chain references in input values.
|
|
1560
|
+
|
|
1561
|
+
Replaces $chain[i].outputs.fieldName with actual values from
|
|
1562
|
+
earlier workflow results.
|
|
1563
|
+
|
|
1564
|
+
Args:
|
|
1565
|
+
raw_inputs: Input dict that may contain chain references.
|
|
1566
|
+
chain_results: Results from workflows executed so far.
|
|
1567
|
+
|
|
1568
|
+
Returns:
|
|
1569
|
+
Resolved inputs with chain references replaced.
|
|
1570
|
+
"""
|
|
1571
|
+
resolved: dict[str, Any] = {}
|
|
1572
|
+
|
|
1573
|
+
for key, value in raw_inputs.items():
|
|
1574
|
+
if isinstance(value, str) and value.startswith("$chain["):
|
|
1575
|
+
resolved[key] = _resolve_chain_expression(value, chain_results)
|
|
1576
|
+
elif isinstance(value, dict):
|
|
1577
|
+
resolved[key] = _resolve_chain_inputs(value, chain_results)
|
|
1578
|
+
else:
|
|
1579
|
+
resolved[key] = value
|
|
1580
|
+
|
|
1581
|
+
return resolved
|
|
1582
|
+
|
|
1583
|
+
|
|
1584
|
+
def _resolve_chain_expression(
|
|
1585
|
+
expression: str,
|
|
1586
|
+
chain_results: list[dict[str, Any]],
|
|
1587
|
+
) -> Any:
|
|
1588
|
+
"""Resolve a single chain expression.
|
|
1589
|
+
|
|
1590
|
+
Parses expressions like $chain[0].outputs.serviceId and
|
|
1591
|
+
retrieves the value from chain results.
|
|
1592
|
+
|
|
1593
|
+
Args:
|
|
1594
|
+
expression: Expression in format $chain[i].outputs.fieldName
|
|
1595
|
+
chain_results: Results from workflows executed so far.
|
|
1596
|
+
|
|
1597
|
+
Returns:
|
|
1598
|
+
The resolved value, or the expression if not resolvable.
|
|
1599
|
+
"""
|
|
1600
|
+
# Pattern: $chain[0].outputs.fieldName
|
|
1601
|
+
match = re.match(r"\$chain\[(\d+)\]\.outputs\.(\w+)", expression)
|
|
1602
|
+
if not match:
|
|
1603
|
+
return expression
|
|
1604
|
+
|
|
1605
|
+
index = int(match.group(1))
|
|
1606
|
+
field_name = match.group(2)
|
|
1607
|
+
|
|
1608
|
+
if index >= len(chain_results):
|
|
1609
|
+
return expression
|
|
1610
|
+
|
|
1611
|
+
outputs = chain_results[index].get("outputs", {})
|
|
1612
|
+
return outputs.get(field_name, expression)
|
|
1613
|
+
|
|
1614
|
+
|
|
1615
|
+
async def _rollback_chain(execution_ids: list[str]) -> list[dict[str, Any]]:
    """Roll back multiple workflow executions in reverse order.

    Each execution is rolled back independently; a failure to roll back one
    execution is recorded in its result entry and does not stop the
    remaining rollbacks.

    Args:
        execution_ids: List of execution IDs to roll back.

    Returns:
        List of rollback results, one entry per execution ID.
    """
    outcomes: list[dict[str, Any]] = []

    # Undo in reverse order so later executions are unwound before the
    # earlier ones they may depend on.
    for execution_id in reversed(execution_ids):
        try:
            details = await workflow_rollback(execution_id)
        except Exception as exc:  # record the failure, keep going
            outcomes.append(
                {
                    "execution_id": execution_id,
                    "status": "rollback_failed",
                    "error": str(exc),
                }
            )
        else:
            outcomes.append(
                {
                    "execution_id": execution_id,
                    "status": "rolled_back",
                    "details": details,
                }
            )

    return outcomes
|
|
1647
|
+
|
|
1648
|
+
|
|
1649
|
+
# =============================================================================
|
|
1650
|
+
# Story 5.8: Guided Interactive Mode
|
|
1651
|
+
# =============================================================================
|
|
1652
|
+
|
|
1653
|
+
# Track interactive workflow sessions
|
|
1654
|
+
# Maps session_id -> mutable session state dict (workflow, ordered inputs,
# collected/inferred values, progress cursor). Module-level, in-memory:
# sessions are per-process and do not survive a restart.
_interactive_sessions: dict[str, dict[str, Any]] = {}
|
|
1655
|
+
|
|
1656
|
+
|
|
1657
|
+
async def workflow_start_interactive(workflow_id: str) -> dict[str, Any]:
    """Start interactive workflow session with step-by-step input prompts.

    Looks up the workflow, creates an in-memory session that tracks which
    inputs have been collected, and returns the prompt for the first input.

    Args:
        workflow_id: Workflow to configure interactively.

    Returns:
        dict with mode, session_id, workflow_id, current_prompt, progress.

    Raises:
        ToolError: If the workflow does not exist or has no inputs.
    """
    workflow = get_workflow_catalog().get_workflow(workflow_id)
    if workflow is None:
        raise ToolError(
            f"Workflow '{workflow_id}' not found. "
            "Use 'workflow_list' to see available workflows."
        )

    # Decide the order in which the user will be prompted for inputs.
    ordered_inputs = _order_inputs_for_interactive(workflow)
    if not ordered_inputs:
        raise ToolError(
            f"Workflow '{workflow_id}' has no inputs to configure. "
            "Use 'workflow_execute' to run it directly."
        )

    # Short random identifier for this interactive session.
    session_id = f"sess-{uuid.uuid4().hex[:8]}"

    session: dict[str, Any] = {
        "session_id": session_id,
        "workflow_id": workflow_id,
        "workflow": workflow,
        "ordered_inputs": ordered_inputs,
        "current_index": 0,
        "collected_inputs": {},
        "inferred_inputs": {},
        "started_at": time.time(),
    }
    _interactive_sessions[session_id] = session

    # Prompt for the first input straight away.
    return {
        "mode": "interactive",
        "session_id": session_id,
        "workflow_id": workflow_id,
        "current_prompt": _generate_interactive_prompt(ordered_inputs[0], session),
        "progress": {
            "answered": 0,
            "total": len(ordered_inputs),
        },
    }
|
|
1714
|
+
|
|
1715
|
+
|
|
1716
|
+
async def workflow_continue(
    session_id: str,
    value: Any,
) -> dict[str, Any]:
    """Provide value for current input and advance to next prompt.

    Validates the value against the current input's constraints; on failure
    the same prompt is returned with an "error" entry and the session state
    is unchanged. On success the value is stored, related inputs may be
    auto-inferred, and the cursor advances to the next not-yet-collected
    input (inferred inputs are skipped).

    Args:
        session_id: Interactive session ID.
        value: Value for current input.

    Returns:
        dict with mode, accepted, current_prompt (or preview if done), progress.

    Raises:
        ToolError: If the session does not exist or has already collected
            all inputs.
    """
    if session_id not in _interactive_sessions:
        raise ToolError(
            f"Session '{session_id}' not found. "
            "Start a new session with 'workflow_start_interactive'."
        )

    session = _interactive_sessions[session_id]
    ordered_inputs = session["ordered_inputs"]
    current_index = session["current_index"]

    if current_index >= len(ordered_inputs):
        raise ToolError(
            f"Session '{session_id}' has already collected all inputs. "
            "Use 'workflow_confirm' to execute the workflow."
        )

    current_input = ordered_inputs[current_index]

    # Validate the provided value; reject without mutating session state so
    # the caller can simply retry with a corrected value.
    validation_error = _validate_input_value(current_input, value)
    if validation_error:
        return {
            "mode": "interactive",
            "session_id": session_id,
            "error": validation_error,
            "current_prompt": _generate_interactive_prompt(current_input, session),
            "progress": {
                "answered": len(session["collected_inputs"]),
                "total": len(ordered_inputs),
            },
        }

    # Store the value
    session["collected_inputs"][current_input.name] = value

    # Infer related inputs (e.g. keys/short names derived from a name);
    # must happen before advancing so inferred inputs can be skipped below.
    _infer_related_inputs(current_input, value, session)

    # Move to next input (skip already inferred ones)
    session["current_index"] = current_index + 1
    while (
        session["current_index"] < len(ordered_inputs)
        and ordered_inputs[session["current_index"]].name in session["collected_inputs"]
    ):
        session["current_index"] += 1

    # Check if all inputs collected
    if session["current_index"] >= len(ordered_inputs):
        # All inputs collected - return preview
        return _generate_preview(session)

    # Generate next prompt
    next_input = ordered_inputs[session["current_index"]]
    current_prompt = _generate_interactive_prompt(next_input, session)

    return {
        "mode": "interactive",
        "session_id": session_id,
        # Copies so callers cannot mutate live session state.
        "accepted": session["collected_inputs"].copy(),
        "inferred": session["inferred_inputs"].copy(),
        "current_prompt": current_prompt,
        "progress": {
            "answered": len(session["collected_inputs"]),
            "total": len(ordered_inputs),
        },
    }
|
|
1795
|
+
|
|
1796
|
+
|
|
1797
|
+
async def workflow_confirm(
    session_id: str,
    execute: bool = True,
) -> dict[str, Any]:
    """Confirm and execute workflow with collected inputs.

    On successful execution the session is deleted; on failure the session
    is kept so inputs can be revised and the confirm retried.

    Args:
        session_id: Interactive session ID.
        execute: Execute workflow (default True) or just return preview.

    Returns:
        dict with status, workflow_id, inputs, outputs, steps, message.

    Raises:
        ToolError: If the session does not exist or required inputs are
            still missing.
    """
    if session_id not in _interactive_sessions:
        raise ToolError(
            f"Session '{session_id}' not found. "
            "Start a new session with 'workflow_start_interactive'."
        )

    session = _interactive_sessions[session_id]
    ordered_inputs = session["ordered_inputs"]
    collected_inputs = session["collected_inputs"]

    # Check all required inputs are present
    missing_required = []
    for inp in ordered_inputs:
        if inp.required and inp.name not in collected_inputs:
            missing_required.append(inp.name)

    if missing_required:
        missing = ", ".join(missing_required)
        raise ToolError(
            f"Cannot confirm session. Missing required inputs: {missing}. "
            "Use 'workflow_continue' to provide values."
        )

    if not execute:
        # Return preview without executing
        return _generate_preview(session)

    # Execute the workflow
    try:
        result = await workflow_execute_with_progress(
            workflow_id=session["workflow_id"],
            inputs=collected_inputs,
        )

        # Clean up session on success
        del _interactive_sessions[session_id]

        return {
            "status": result.get("status", "unknown"),
            "session_id": session_id,
            "workflow_id": session["workflow_id"],
            "inputs": collected_inputs,
            "outputs": result.get("outputs", {}),
            "steps": result.get("steps", []),
            "message": result.get("message", "Workflow executed"),
            "duration_ms": result.get("duration_ms"),
        }

    except Exception as e:
        # Keep session for retry — a failed execution must not lose the
        # inputs the user already provided.
        return {
            "status": "failed",
            "session_id": session_id,
            "workflow_id": session["workflow_id"],
            "inputs": collected_inputs,
            "error": str(e),
            "message": f"Workflow execution failed: {e}",
            "suggestion": "Review inputs and use 'workflow_continue' to update them, "
            "then try 'workflow_confirm' again.",
        }
|
|
1870
|
+
|
|
1871
|
+
|
|
1872
|
+
def _order_inputs_for_interactive(workflow: Any) -> list[Any]:
|
|
1873
|
+
"""Order workflow inputs for interactive prompting.
|
|
1874
|
+
|
|
1875
|
+
Puts primary inputs first (service name, etc.) followed by
|
|
1876
|
+
derived inputs that can be auto-inferred.
|
|
1877
|
+
|
|
1878
|
+
Args:
|
|
1879
|
+
workflow: The workflow definition.
|
|
1880
|
+
|
|
1881
|
+
Returns:
|
|
1882
|
+
Ordered list of WorkflowInput objects.
|
|
1883
|
+
"""
|
|
1884
|
+
primary: list[Any] = []
|
|
1885
|
+
secondary: list[Any] = []
|
|
1886
|
+
derived: list[Any] = []
|
|
1887
|
+
|
|
1888
|
+
for inp in workflow.inputs:
|
|
1889
|
+
name = inp.name.lower()
|
|
1890
|
+
if "name" in name and "service" in name:
|
|
1891
|
+
primary.insert(0, inp)
|
|
1892
|
+
elif inp.required and "name" in name:
|
|
1893
|
+
primary.append(inp)
|
|
1894
|
+
elif inp.required:
|
|
1895
|
+
secondary.append(inp)
|
|
1896
|
+
else:
|
|
1897
|
+
derived.append(inp)
|
|
1898
|
+
|
|
1899
|
+
return primary + secondary + derived
|
|
1900
|
+
|
|
1901
|
+
|
|
1902
|
+
def _generate_interactive_prompt(
    inp: Any,  # WorkflowInput
    session: dict[str, Any],
) -> dict[str, Any]:
    """Generate an interactive prompt for an input.

    Args:
        inp: The WorkflowInput to prompt for.
        session: The current session state.

    Returns:
        Prompt dictionary with question, type, examples, etc.
    """
    # Enum-style input types expose .value; fall back to str() otherwise.
    type_label = (
        inp.input_type.value
        if hasattr(inp.input_type, "value")
        else str(inp.input_type)
    )

    prompt: dict[str, Any] = {
        "input": inp.name,
        "type": type_label,
        "required": inp.required,
        "question": _generate_input_question(inp, session),
    }

    examples = _get_input_examples(inp)
    if examples:
        prompt["examples"] = examples

    if inp.default is not None:
        prompt["default"] = inp.default

    # Offer a value derived from answers already collected, when possible.
    suggestion = _suggest_value(inp, session)
    if suggestion:
        prompt["suggestion"] = suggestion

    # Surface validation constraints so the caller can prompt accurately.
    if inp.pattern:
        prompt["format"] = inp.pattern
    if inp.min_length is not None:
        prompt["min_length"] = inp.min_length
    if inp.max_length is not None:
        prompt["max_length"] = inp.max_length
    if inp.enum_values:
        prompt["options"] = inp.enum_values

    return prompt
|
|
1951
|
+
|
|
1952
|
+
|
|
1953
|
+
def _generate_input_question(
|
|
1954
|
+
inp: Any, # WorkflowInput
|
|
1955
|
+
session: dict[str, Any],
|
|
1956
|
+
) -> str:
|
|
1957
|
+
"""Generate a human-readable question for an input.
|
|
1958
|
+
|
|
1959
|
+
Args:
|
|
1960
|
+
inp: The input to generate a question for.
|
|
1961
|
+
session: The current session.
|
|
1962
|
+
|
|
1963
|
+
Returns:
|
|
1964
|
+
A question string with (required) indicator if applicable.
|
|
1965
|
+
"""
|
|
1966
|
+
question: str
|
|
1967
|
+
|
|
1968
|
+
# Use description if available
|
|
1969
|
+
if inp.description:
|
|
1970
|
+
# Convert description to question form
|
|
1971
|
+
desc = str(inp.description).rstrip(".")
|
|
1972
|
+
if not desc.endswith("?"):
|
|
1973
|
+
question = f"What is the {desc.lower()}?"
|
|
1974
|
+
else:
|
|
1975
|
+
question = desc
|
|
1976
|
+
else:
|
|
1977
|
+
# Generate based on name
|
|
1978
|
+
name = inp.name
|
|
1979
|
+
# Convert camelCase to words
|
|
1980
|
+
words = re.sub(r"([A-Z])", r" \1", name).strip().lower()
|
|
1981
|
+
|
|
1982
|
+
# Special cases
|
|
1983
|
+
if "name" in words:
|
|
1984
|
+
question = f"What would you like to name the {words.replace(' name', '')}?"
|
|
1985
|
+
elif "key" in words:
|
|
1986
|
+
question = f"What should the {words} be? (URL-safe identifier)"
|
|
1987
|
+
elif "id" in words:
|
|
1988
|
+
question = f"What is the {words}?"
|
|
1989
|
+
else:
|
|
1990
|
+
question = f"What is the {words}?"
|
|
1991
|
+
|
|
1992
|
+
# Add required indicator (Story 10-3 AC3)
|
|
1993
|
+
if inp.required:
|
|
1994
|
+
question = f"{question} (required)"
|
|
1995
|
+
|
|
1996
|
+
return question
|
|
1997
|
+
|
|
1998
|
+
|
|
1999
|
+
def _validate_input_value(
    inp: Any,  # WorkflowInput
    value: Any,
) -> dict[str, Any] | None:
    """Validate a single input value against its constraints.

    Args:
        inp: The WorkflowInput definition.
        value: The value to validate.

    Returns:
        Error dict if invalid, None if valid. The dict carries the violated
        "constraint", a human-readable "message", and sometimes a
        "suggestion" with an auto-corrected value.
    """
    # Imported inside the function rather than at module top — presumably
    # to avoid an import cycle with the workflow models package; confirm.
    from mcp_eregistrations_bpa.workflows.models import InputType

    # String type validations
    # NOTE(review): a non-string value for a STRING input skips these
    # checks entirely (no type-mismatch error is reported) — confirm this
    # is intentional.
    if inp.input_type == InputType.STRING and isinstance(value, str):
        # Check pattern
        if inp.pattern:
            if not re.match(inp.pattern, value):
                return {
                    "constraint": f"pattern: {inp.pattern}",
                    "message": f"{inp.name} must match pattern {inp.pattern}",
                    "suggestion": _suggest_pattern_fix(value, inp.pattern),
                }

        # Check length
        if inp.min_length is not None and len(value) < inp.min_length:
            return {
                "constraint": f"minLength: {inp.min_length}",
                "message": f"{inp.name} must be at least {inp.min_length} characters",
            }
        if inp.max_length is not None and len(value) > inp.max_length:
            return {
                "constraint": f"maxLength: {inp.max_length}",
                "message": f"{inp.name} must be at most {inp.max_length} characters",
                # Suggest the truncated value as an auto-fix.
                "suggestion": value[: inp.max_length],
            }

    # Check enum
    if inp.enum_values and value not in inp.enum_values:
        return {
            "constraint": f"enum: {inp.enum_values}",
            "message": f"{inp.name} must be one of: {', '.join(inp.enum_values)}",
        }

    # Numeric validations
    # NOTE(review): bool is a subclass of int in Python, so True/False pass
    # the isinstance check here — confirm that is acceptable.
    if inp.input_type in (InputType.INTEGER, InputType.NUMBER):
        if isinstance(value, int | float):
            if inp.minimum is not None and value < inp.minimum:
                return {
                    "constraint": f"minimum: {inp.minimum}",
                    "message": f"{inp.name} must be at least {inp.minimum}",
                }
            if inp.maximum is not None and value > inp.maximum:
                return {
                    "constraint": f"maximum: {inp.maximum}",
                    "message": f"{inp.name} must be at most {inp.maximum}",
                }

    return None
|
|
2060
|
+
|
|
2061
|
+
|
|
2062
|
+
def _suggest_pattern_fix(value: str, pattern: str) -> str | None:
|
|
2063
|
+
"""Suggest a fix for a pattern violation.
|
|
2064
|
+
|
|
2065
|
+
Args:
|
|
2066
|
+
value: The invalid value.
|
|
2067
|
+
pattern: The regex pattern.
|
|
2068
|
+
|
|
2069
|
+
Returns:
|
|
2070
|
+
Suggested fixed value or None.
|
|
2071
|
+
"""
|
|
2072
|
+
# Handle common patterns
|
|
2073
|
+
if pattern == "^[a-z0-9-]+$":
|
|
2074
|
+
# Kebab-case pattern
|
|
2075
|
+
fixed = re.sub(r"[^a-z0-9-]", "", value.lower().replace(" ", "-"))
|
|
2076
|
+
return fixed if fixed else None
|
|
2077
|
+
|
|
2078
|
+
return None
|
|
2079
|
+
|
|
2080
|
+
|
|
2081
|
+
def _to_short_name(name: str) -> str:
|
|
2082
|
+
"""Convert a name to a short name.
|
|
2083
|
+
|
|
2084
|
+
Args:
|
|
2085
|
+
name: The full name.
|
|
2086
|
+
|
|
2087
|
+
Returns:
|
|
2088
|
+
A short name (max 50 chars).
|
|
2089
|
+
"""
|
|
2090
|
+
return name[:50] if len(name) > 50 else name
|
|
2091
|
+
|
|
2092
|
+
|
|
2093
|
+
def _to_key(name: str) -> str:
|
|
2094
|
+
"""Convert a name to a URL-safe key.
|
|
2095
|
+
|
|
2096
|
+
Args:
|
|
2097
|
+
name: The full name.
|
|
2098
|
+
|
|
2099
|
+
Returns:
|
|
2100
|
+
A kebab-case key.
|
|
2101
|
+
"""
|
|
2102
|
+
# Remove non-alphanumeric, lowercase, replace spaces with hyphens
|
|
2103
|
+
key = re.sub(r"[^a-zA-Z0-9\s-]", "", name.lower())
|
|
2104
|
+
key = re.sub(r"\s+", "-", key.strip())
|
|
2105
|
+
return key[:100]
|
|
2106
|
+
|
|
2107
|
+
|
|
2108
|
+
def _get_input_examples(inp: Any) -> list[str]:
|
|
2109
|
+
"""Get example values for an input.
|
|
2110
|
+
|
|
2111
|
+
Args:
|
|
2112
|
+
inp: The input definition.
|
|
2113
|
+
|
|
2114
|
+
Returns:
|
|
2115
|
+
List of example strings.
|
|
2116
|
+
"""
|
|
2117
|
+
name = inp.name.lower()
|
|
2118
|
+
|
|
2119
|
+
# Known examples for common input types
|
|
2120
|
+
if "servicename" in name:
|
|
2121
|
+
return ["Business Registration", "Vehicle Permits", "Construction Licenses"]
|
|
2122
|
+
if "registrationname" in name:
|
|
2123
|
+
return ["New Registration", "Renewal Application", "License Request"]
|
|
2124
|
+
if "rolename" in name or name == "name":
|
|
2125
|
+
return ["Reviewer", "Approver", "Inspector"]
|
|
2126
|
+
if "key" in name:
|
|
2127
|
+
return ["business-reg", "vehicle-permit", "construction-license"]
|
|
2128
|
+
|
|
2129
|
+
# Use enum values as examples
|
|
2130
|
+
if inp.enum_values:
|
|
2131
|
+
return [str(v) for v in inp.enum_values[:3]]
|
|
2132
|
+
|
|
2133
|
+
return []
|
|
2134
|
+
|
|
2135
|
+
|
|
2136
|
+
def _suggest_value(
|
|
2137
|
+
inp: Any, # WorkflowInput
|
|
2138
|
+
session: dict[str, Any],
|
|
2139
|
+
) -> str | None:
|
|
2140
|
+
"""Suggest a value based on previously collected inputs.
|
|
2141
|
+
|
|
2142
|
+
Args:
|
|
2143
|
+
inp: The input to suggest for.
|
|
2144
|
+
session: The current session.
|
|
2145
|
+
|
|
2146
|
+
Returns:
|
|
2147
|
+
Suggested value or None.
|
|
2148
|
+
"""
|
|
2149
|
+
collected = session["collected_inputs"]
|
|
2150
|
+
name = inp.name.lower()
|
|
2151
|
+
|
|
2152
|
+
# Derive from serviceName
|
|
2153
|
+
service_name = collected.get("serviceName")
|
|
2154
|
+
if service_name:
|
|
2155
|
+
if "registrationname" in name:
|
|
2156
|
+
return str(service_name)
|
|
2157
|
+
if "registrationshortname" in name or "serviceshortname" in name:
|
|
2158
|
+
return _to_short_name(str(service_name))
|
|
2159
|
+
if "registrationkey" in name or "servicekey" in name:
|
|
2160
|
+
return _to_key(str(service_name))
|
|
2161
|
+
|
|
2162
|
+
# Derive from registrationName
|
|
2163
|
+
reg_name = collected.get("registrationName")
|
|
2164
|
+
if reg_name:
|
|
2165
|
+
if "shortname" in name:
|
|
2166
|
+
return _to_short_name(str(reg_name))
|
|
2167
|
+
if "key" in name:
|
|
2168
|
+
return _to_key(str(reg_name))
|
|
2169
|
+
|
|
2170
|
+
return None
|
|
2171
|
+
|
|
2172
|
+
|
|
2173
|
+
def _infer_related_inputs(
    inp: Any,  # WorkflowInput
    value: Any,
    session: dict[str, Any],
) -> None:
    """Infer related inputs from a provided value.

    Updates session with inferred values: a service name seeds the
    registration name/short name/key and the service short name, and a
    registration name seeds its own short name and key.

    Args:
        inp: The input that was just provided.
        value: The value that was provided.
        session: The session to update.
    """
    # Only string answers can seed derived names/keys.
    if not isinstance(value, str):
        return

    workflow = session["workflow"]
    provided = inp.name.lower()

    if "servicename" in provided:
        for field, derived, reason in (
            ("registrationName", value, "Derived from serviceName"),
            ("registrationShortName", _to_short_name(value), "Generated from serviceName"),
            ("registrationKey", _to_key(value), "Generated from serviceName"),
            ("serviceShortName", _to_short_name(value), "Generated from serviceName"),
        ):
            _try_infer(workflow, session, field, derived, reason)

    if "registrationname" in provided:
        for field, derived, reason in (
            ("registrationShortName", _to_short_name(value), "Generated from registrationName"),
            ("registrationKey", _to_key(value), "Generated from registrationName"),
        ):
            _try_infer(workflow, session, field, derived, reason)
|
|
2233
|
+
|
|
2234
|
+
|
|
2235
|
+
def _try_infer(
|
|
2236
|
+
workflow: Any,
|
|
2237
|
+
session: dict[str, Any],
|
|
2238
|
+
field_name: str,
|
|
2239
|
+
value: Any,
|
|
2240
|
+
reason: str,
|
|
2241
|
+
) -> None:
|
|
2242
|
+
"""Try to infer a field value if it exists and isn't already set.
|
|
2243
|
+
|
|
2244
|
+
Args:
|
|
2245
|
+
workflow: The workflow definition.
|
|
2246
|
+
session: The session state.
|
|
2247
|
+
field_name: The field to infer.
|
|
2248
|
+
value: The inferred value.
|
|
2249
|
+
reason: Why this value was inferred.
|
|
2250
|
+
"""
|
|
2251
|
+
if field_name in session["collected_inputs"]:
|
|
2252
|
+
return # Already set
|
|
2253
|
+
|
|
2254
|
+
inp = workflow.get_input(field_name)
|
|
2255
|
+
if inp is None:
|
|
2256
|
+
return # Field doesn't exist in workflow
|
|
2257
|
+
|
|
2258
|
+
session["collected_inputs"][field_name] = value
|
|
2259
|
+
session["inferred_inputs"][field_name] = reason
|
|
2260
|
+
|
|
2261
|
+
|
|
2262
|
+
def _generate_preview(session: dict[str, Any]) -> dict[str, Any]:
|
|
2263
|
+
"""Generate a preview of the workflow configuration.
|
|
2264
|
+
|
|
2265
|
+
Args:
|
|
2266
|
+
session: The session state.
|
|
2267
|
+
|
|
2268
|
+
Returns:
|
|
2269
|
+
Preview dictionary.
|
|
2270
|
+
"""
|
|
2271
|
+
workflow = session["workflow"]
|
|
2272
|
+
inputs = session["collected_inputs"]
|
|
2273
|
+
|
|
2274
|
+
# Generate step preview
|
|
2275
|
+
steps_preview = []
|
|
2276
|
+
for i, step in enumerate(workflow.steps, 1):
|
|
2277
|
+
desc = step.description
|
|
2278
|
+
# Replace placeholders with actual values
|
|
2279
|
+
for key, value in inputs.items():
|
|
2280
|
+
if isinstance(value, str):
|
|
2281
|
+
desc = desc.replace(f"${{{key}}}", value)
|
|
2282
|
+
desc = desc.replace(f"$inputs.{key}", value)
|
|
2283
|
+
steps_preview.append(f"{i}. {desc}")
|
|
2284
|
+
|
|
2285
|
+
return {
|
|
2286
|
+
"mode": "preview",
|
|
2287
|
+
"session_id": session["session_id"],
|
|
2288
|
+
"workflow_id": session["workflow_id"],
|
|
2289
|
+
"inputs": inputs.copy(),
|
|
2290
|
+
"inferred": session["inferred_inputs"].copy(),
|
|
2291
|
+
"steps_preview": steps_preview,
|
|
2292
|
+
"confirm_prompt": "Ready to execute? Use workflow_confirm to proceed.",
|
|
2293
|
+
}
|
|
2294
|
+
|
|
2295
|
+
|
|
2296
|
+
# =============================================================================
|
|
2297
|
+
# Story 14.6: Workflow Validation
|
|
2298
|
+
# =============================================================================
|
|
2299
|
+
|
|
2300
|
+
|
|
2301
|
+
async def workflow_validate(workflow_id: str | None = None) -> dict[str, Any]:
    """Validate workflow definitions for issues.

    Checks:
    1. All operationIds have MCP tool mappings
    2. Required inputs are defined in properties
    3. Step references ($steps.X.outputs.Y) are valid
    4. No duplicate workflow IDs across files

    Args:
        workflow_id: Optional specific workflow to validate. If None, validates all.

    Returns:
        dict with valid_count, invalid_count, issues list, and summary.
    """
    catalog = get_workflow_catalog()
    issues: list[dict[str, Any]] = []
    validated_count = 0
    seen_workflow_ids: dict[str, str] = {}  # workflow_id -> source_file

    # Get all workflows
    workflows = catalog.list_workflows()

    for wf_entry in workflows:
        wf_id = wf_entry["id"]

        # If specific workflow requested, skip others
        if workflow_id is not None and wf_id != workflow_id:
            continue

        workflow = catalog.get_workflow(wf_id)
        if workflow is None:
            continue

        validated_count += 1
        source = workflow.source_file

        # Check 1: Duplicate workflow IDs
        # NOTE(review): this only fires if list_workflows() can return the
        # same ID twice; if the catalog de-duplicates on load, duplicates
        # would need to be detected there instead — confirm.
        if wf_id in seen_workflow_ids:
            issues.append(
                {
                    "workflow_id": wf_id,
                    "issue": "duplicate_id",
                    "severity": "error",
                    "detail": (
                        f"Duplicate workflow ID. First: {seen_workflow_ids[wf_id]}, "
                        f"Second: {source}"
                    ),
                }
            )
        seen_workflow_ids[wf_id] = source

        # Check 2: operationIds have MCP tool mappings
        for step in workflow.steps:
            if step.operation_id and step.mcp_tool is None:
                # Check if it's in the map
                if step.operation_id not in OPERATION_TO_TOOL_MAP:
                    issues.append(
                        {
                            "workflow_id": wf_id,
                            "issue": "unmapped_operation",
                            "severity": "warning",
                            "detail": (
                                f"Step '{step.step_id}' has operationId "
                                f"'{step.operation_id}' with no MCP tool mapping"
                            ),
                        }
                    )

        # Check 3: Required inputs defined in properties
        input_names = {inp.name for inp in workflow.inputs}
        required_inputs = [inp.name for inp in workflow.inputs if inp.required]

        # Check if required inputs are actually in the properties
        # NOTE(review): required_inputs is derived from workflow.inputs, so
        # membership in input_names appears to always hold here; this check
        # can only fire if the model ever reports required names that are
        # not inputs — confirm against the workflow loader.
        for req_input in required_inputs:
            if req_input not in input_names:
                issues.append(
                    {
                        "workflow_id": wf_id,
                        "issue": "missing_required_input",
                        "severity": "error",
                        "detail": f"Required input '{req_input}' not in properties",
                    }
                )

        # Check 4: Step references are valid
        step_ids = {step.step_id for step in workflow.steps}
        step_output_pattern = re.compile(r"\$steps\.(\w+)\.outputs\.(\w+)")

        for step in workflow.steps:
            # Check request body for step references
            request_body_str = str(step.request_body)
            for match in step_output_pattern.finditer(request_body_str):
                ref_step_id = match.group(1)
                if ref_step_id not in step_ids:
                    issues.append(
                        {
                            "workflow_id": wf_id,
                            "issue": "invalid_step_reference",
                            "severity": "error",
                            "detail": (
                                f"Step '{step.step_id}' references unknown step "
                                f"'{ref_step_id}'"
                            ),
                        }
                    )

            # Check onSuccess/onFailure goto targets
            for action in step.on_success + step.on_failure:
                if isinstance(action, dict):
                    goto_step = action.get("stepId")
                    if goto_step and goto_step not in step_ids:
                        issues.append(
                            {
                                "workflow_id": wf_id,
                                "issue": "invalid_goto_step",
                                "severity": "error",
                                "detail": (
                                    f"Step '{step.step_id}' has goto to unknown "
                                    f"step '{goto_step}'"
                                ),
                            }
                        )

    # Count issues by severity
    error_count = sum(1 for i in issues if i.get("severity") == "error")
    warning_count = sum(1 for i in issues if i.get("severity") == "warning")
    # A workflow is "valid" only if it produced no issues at all.
    valid_count = validated_count - len({i["workflow_id"] for i in issues})

    # Build summary
    if not issues:
        summary = f"All {validated_count} workflows validated successfully."
    else:
        summary = (
            f"Validated {validated_count} workflows. "
            f"Found {error_count} errors, {warning_count} warnings "
            f"in {len({i['workflow_id'] for i in issues})} workflows."
        )

    return {
        "validated_count": validated_count,
        "valid_count": valid_count,
        "invalid_count": validated_count - valid_count,
        "error_count": error_count,
        "warning_count": warning_count,
        "issues": issues,
        "summary": summary,
    }
|
|
2449
|
+
|
|
2450
|
+
|
|
2451
|
+
# =============================================================================
|
|
2452
|
+
# Registration
|
|
2453
|
+
# =============================================================================
|
|
2454
|
+
|
|
2455
|
+
|
|
2456
|
+
def register_workflow_tools(mcp: Any) -> None:
    """Register workflow orchestration tools with the MCP server.

    Each public async tool function in this module is registered via the
    server's ``tool()`` decorator factory, grouped by the story that
    introduced it.

    Args:
        mcp: The FastMCP server instance.
    """
    # Story 5.1: Workflow Catalog & Discovery
    mcp.tool()(workflow_list)
    mcp.tool()(workflow_describe)
    mcp.tool()(workflow_search)

    # Story 5.4: Workflow Executor
    mcp.tool()(workflow_execute)

    # Story 5.5: Progress Reporting & Streaming
    mcp.tool()(workflow_status)
    mcp.tool()(workflow_cancel)

    # Story 5.6: Error Recovery & Rollback
    mcp.tool()(workflow_retry)
    mcp.tool()(workflow_rollback)

    # Story 5.7: Workflow Chaining & Composition
    mcp.tool()(workflow_chain)

    # Story 5.8: Guided Interactive Mode
    mcp.tool()(workflow_start_interactive)
    mcp.tool()(workflow_continue)
    mcp.tool()(workflow_confirm)

    # Story 14.6: Workflow Validation
    mcp.tool()(workflow_validate)