mcp-eregistrations-bpa 0.8.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-eregistrations-bpa might be problematic; see the registry's advisory page for more details.
- mcp_eregistrations_bpa/__init__.py +121 -0
- mcp_eregistrations_bpa/__main__.py +6 -0
- mcp_eregistrations_bpa/arazzo/__init__.py +21 -0
- mcp_eregistrations_bpa/arazzo/expression.py +379 -0
- mcp_eregistrations_bpa/audit/__init__.py +56 -0
- mcp_eregistrations_bpa/audit/context.py +66 -0
- mcp_eregistrations_bpa/audit/logger.py +236 -0
- mcp_eregistrations_bpa/audit/models.py +131 -0
- mcp_eregistrations_bpa/auth/__init__.py +64 -0
- mcp_eregistrations_bpa/auth/callback.py +391 -0
- mcp_eregistrations_bpa/auth/cas.py +409 -0
- mcp_eregistrations_bpa/auth/oidc.py +252 -0
- mcp_eregistrations_bpa/auth/permissions.py +162 -0
- mcp_eregistrations_bpa/auth/token_manager.py +348 -0
- mcp_eregistrations_bpa/bpa_client/__init__.py +84 -0
- mcp_eregistrations_bpa/bpa_client/client.py +740 -0
- mcp_eregistrations_bpa/bpa_client/endpoints.py +193 -0
- mcp_eregistrations_bpa/bpa_client/errors.py +276 -0
- mcp_eregistrations_bpa/bpa_client/models.py +203 -0
- mcp_eregistrations_bpa/config.py +349 -0
- mcp_eregistrations_bpa/db/__init__.py +21 -0
- mcp_eregistrations_bpa/db/connection.py +64 -0
- mcp_eregistrations_bpa/db/migrations.py +168 -0
- mcp_eregistrations_bpa/exceptions.py +39 -0
- mcp_eregistrations_bpa/py.typed +0 -0
- mcp_eregistrations_bpa/rollback/__init__.py +19 -0
- mcp_eregistrations_bpa/rollback/manager.py +616 -0
- mcp_eregistrations_bpa/server.py +152 -0
- mcp_eregistrations_bpa/tools/__init__.py +372 -0
- mcp_eregistrations_bpa/tools/actions.py +155 -0
- mcp_eregistrations_bpa/tools/analysis.py +352 -0
- mcp_eregistrations_bpa/tools/audit.py +399 -0
- mcp_eregistrations_bpa/tools/behaviours.py +1042 -0
- mcp_eregistrations_bpa/tools/bots.py +627 -0
- mcp_eregistrations_bpa/tools/classifications.py +575 -0
- mcp_eregistrations_bpa/tools/costs.py +765 -0
- mcp_eregistrations_bpa/tools/debug_strategies.py +351 -0
- mcp_eregistrations_bpa/tools/debugger.py +1230 -0
- mcp_eregistrations_bpa/tools/determinants.py +2235 -0
- mcp_eregistrations_bpa/tools/document_requirements.py +670 -0
- mcp_eregistrations_bpa/tools/export.py +899 -0
- mcp_eregistrations_bpa/tools/fields.py +162 -0
- mcp_eregistrations_bpa/tools/form_errors.py +36 -0
- mcp_eregistrations_bpa/tools/formio_helpers.py +971 -0
- mcp_eregistrations_bpa/tools/forms.py +1269 -0
- mcp_eregistrations_bpa/tools/jsonlogic_builder.py +466 -0
- mcp_eregistrations_bpa/tools/large_response.py +163 -0
- mcp_eregistrations_bpa/tools/messages.py +523 -0
- mcp_eregistrations_bpa/tools/notifications.py +241 -0
- mcp_eregistrations_bpa/tools/registration_institutions.py +680 -0
- mcp_eregistrations_bpa/tools/registrations.py +897 -0
- mcp_eregistrations_bpa/tools/role_status.py +447 -0
- mcp_eregistrations_bpa/tools/role_units.py +400 -0
- mcp_eregistrations_bpa/tools/roles.py +1236 -0
- mcp_eregistrations_bpa/tools/rollback.py +335 -0
- mcp_eregistrations_bpa/tools/services.py +674 -0
- mcp_eregistrations_bpa/tools/workflows.py +2487 -0
- mcp_eregistrations_bpa/tools/yaml_transformer.py +991 -0
- mcp_eregistrations_bpa/workflows/__init__.py +28 -0
- mcp_eregistrations_bpa/workflows/loader.py +440 -0
- mcp_eregistrations_bpa/workflows/models.py +336 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/METADATA +965 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/RECORD +66 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/WHEEL +4 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/entry_points.txt +2 -0
- mcp_eregistrations_bpa-0.8.5.dist-info/licenses/LICENSE +86 -0
|
@@ -0,0 +1,1230 @@
|
|
|
1
|
+
"""Service debugger tools for BPA configuration issues.
|
|
2
|
+
|
|
3
|
+
This module provides MCP tools for detecting, investigating, and fixing
|
|
4
|
+
orphaned references and configuration issues in BPA services.
|
|
5
|
+
|
|
6
|
+
The workflow is collaborative:
|
|
7
|
+
1. debug_scan - Scan for issues, group by type
|
|
8
|
+
2. debug_investigate - Investigate root cause of specific issue
|
|
9
|
+
3. debug_fix - Execute fix after user approval
|
|
10
|
+
|
|
11
|
+
Write operations follow the audit-before-write pattern.
|
|
12
|
+
|
|
13
|
+
API Endpoint used:
|
|
14
|
+
- POST /service/{service_id}/recover-orphan-config - Scan for issues
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
from mcp.server.fastmcp.exceptions import ToolError
|
|
22
|
+
|
|
23
|
+
from mcp_eregistrations_bpa.audit.context import (
|
|
24
|
+
NotAuthenticatedError,
|
|
25
|
+
get_current_user_email,
|
|
26
|
+
)
|
|
27
|
+
from mcp_eregistrations_bpa.audit.logger import AuditLogger
|
|
28
|
+
from mcp_eregistrations_bpa.bpa_client import BPAClient
|
|
29
|
+
from mcp_eregistrations_bpa.bpa_client.errors import (
|
|
30
|
+
BPAClientError,
|
|
31
|
+
BPANotFoundError,
|
|
32
|
+
translate_error,
|
|
33
|
+
)
|
|
34
|
+
from mcp_eregistrations_bpa.tools.debug_strategies import (
|
|
35
|
+
get_fix_summary,
|
|
36
|
+
get_issue_info,
|
|
37
|
+
group_issues_by_severity,
|
|
38
|
+
group_issues_by_type,
|
|
39
|
+
prioritize_issues,
|
|
40
|
+
)
|
|
41
|
+
from mcp_eregistrations_bpa.tools.large_response import large_response_handler
|
|
42
|
+
|
|
43
|
+
__all__ = [
|
|
44
|
+
"debug_scan",
|
|
45
|
+
"debug_investigate",
|
|
46
|
+
"debug_fix",
|
|
47
|
+
"debug_fix_batch",
|
|
48
|
+
"debug_group_issues",
|
|
49
|
+
"debug_plan",
|
|
50
|
+
"debug_verify",
|
|
51
|
+
"register_debug_tools",
|
|
52
|
+
]
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@large_response_handler(
    navigation={
        "list_issues": "jq '.all_issues'",
        "by_severity": "jq '.by_severity'",
        "high_severity": "jq '.all_issues[] | select(.severity == \"high\")'",
        "by_type": "jq '.by_type'",
        "batch_fixable": "jq '.by_type[] | select(.batch_fixable == true)'",
    }
)
async def debug_scan(service_id: str | int) -> dict[str, Any]:
    """Scan service for configuration issues and orphaned references.

    Calls BPA debug endpoint and returns grouped issues with fix recommendations.
    Large responses (>100KB) are saved to file with navigation hints.

    Args:
        service_id: BPA service UUID.

    Returns:
        dict with service_id, total_issues, by_severity, by_type, summary,
        prioritized_issues.
    """
    if not service_id:
        raise ToolError(
            "Cannot scan service: 'service_id' is required. "
            "Use 'service_list' to find valid IDs."
        )

    found: list[dict[str, Any]] = []
    try:
        async with BPAClient() as client:
            try:
                # Debug endpoint returns either a bare list of issues or a
                # dict wrapping the list under "issues".
                payload = await client.post(
                    "/service/{service_id}/recover-orphan-config",
                    path_params={"service_id": service_id},
                    json={},
                    resource_type="debug",
                )
            except BPANotFoundError:
                raise ToolError(
                    f"Service '{service_id}' not found. "
                    "Use 'service_list' to see available services."
                )
            if isinstance(payload, list):
                found = payload
            elif isinstance(payload, dict) and "issues" in payload:
                found = payload["issues"]
    except ToolError:
        raise
    except BPAClientError as e:
        raise translate_error(e, resource_type="service", resource_id=str(service_id))

    # Grouped views of the same issue list.
    grouped_types = group_issues_by_type(found)
    grouped_sev = group_issues_by_severity(found)

    def _type_entry(kind: str, members: list[dict[str, Any]]) -> dict[str, Any]:
        # One row of the per-type breakdown, enriched with strategy metadata
        # when the type is known to the strategy registry.
        meta = get_issue_info(kind)
        return {
            "type": kind,
            "count": len(members),
            "severity": meta.severity.value if meta else "unknown",
            "fix_strategy": meta.fix_strategy.value if meta else "manual_review",
            "batch_fixable": meta.batch_fixable if meta else False,
            "fix_tool": meta.fix_tool if meta else None,
            "sample_issue": members[0] if members else None,
        }

    # Largest type groups first.
    breakdown = sorted(
        (_type_entry(kind, members) for kind, members in grouped_types.items()),
        key=lambda entry: int(entry["count"]),
        reverse=True,
    )

    return {
        "service_id": str(service_id),
        "total_issues": len(found),
        "by_severity": {
            level: len(grouped_sev[level])
            for level in ("high", "medium", "low", "unknown")
        },
        "by_type": breakdown,
        "summary": get_fix_summary(found),
        "prioritized_issues": prioritize_issues(found)[:20],  # top 20 for context
        "all_issues": found,  # full list for batch operations
    }
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
async def debug_investigate(
    service_id: str | int,
    issue: dict[str, Any],
) -> dict[str, Any]:
    """Investigate root cause of a specific configuration issue.

    Fetches context using existing tools to determine why the issue exists
    and what fix options are available.

    Args:
        service_id: BPA service UUID.
        issue: Issue dict from debug_scan (needs objectType, componentKey, etc.).

    Returns:
        dict with issue, root_cause, context, fix_options, recommended_fix.

    Raises:
        ToolError: If service_id or issue is missing.
    """
    if not service_id:
        raise ToolError("'service_id' is required.")
    if not issue:
        raise ToolError("'issue' dict is required (from debug_scan).")

    object_type = issue.get("objectType", "unknown")
    component_key = issue.get("componentKey")
    parent_id = issue.get("parentId")
    conflicting_value = issue.get("conflictingValue")

    info = get_issue_info(object_type)
    if not info:
        # Unknown type: nothing automated we can safely suggest.
        return {
            "issue": issue,
            "root_cause": f"Unknown issue type: {object_type}",
            "context": None,
            "fix_options": [
                {
                    "id": "manual",
                    "description": "Manual review required - unknown issue type",
                    "confidence": "low",
                    "action": None,
                }
            ],
            "recommended_fix": "manual",
        }

    context: dict[str, Any] = {}
    root_cause = ""
    fix_options: list[dict[str, Any]] = []

    try:
        async with BPAClient() as client:
            # Investigation depends on issue type
            if object_type == "effects_determinant":
                # Behaviour references non-existent determinant
                root_cause = (
                    f"Component '{component_key}' has a behaviour that references "
                    f"determinant '{conflicting_value}' which no longer exists."
                )

                # Best-effort: fetch the behaviour for extra context.
                if component_key:
                    try:
                        behaviour = await client.get(
                            "/service/{service_id}/componentactions/{component_key}",
                            path_params={
                                "service_id": service_id,
                                "component_key": component_key,
                            },
                            resource_type="behaviour",
                        )
                        context["behaviour"] = behaviour
                    except BPAClientError:
                        context["behaviour"] = "Could not fetch behaviour details"

                fix_options = [
                    {
                        "id": "delete_effect",
                        "description": (
                            "Delete the behaviour/effect from component "
                            f"'{component_key}'"
                        ),
                        "confidence": "high",
                        "action": {
                            "tool": "effect_delete",
                            "params": {"behaviour_id": parent_id},
                        },
                    },
                ]

            elif object_type == "determinant":
                # Orphaned determinant
                root_cause = (
                    f"Determinant '{conflicting_value}' references a field or "
                    "component that no longer exists."
                )

                # Best-effort: fetch the determinant for extra context.
                if conflicting_value:
                    try:
                        det = await client.get(
                            "/determinant/{id}",
                            path_params={"id": conflicting_value},
                            resource_type="determinant",
                            resource_id=conflicting_value,
                        )
                        context["determinant"] = det
                    except BPAClientError:
                        context["determinant"] = "Could not fetch determinant details"

                fix_options = [
                    {
                        "id": "delete_determinant",
                        "description": (
                            f"Delete orphaned determinant '{conflicting_value}'"
                        ),
                        "confidence": "high",
                        "action": {
                            "tool": "determinant_delete",
                            "params": {
                                "service_id": service_id,
                                "determinant_id": conflicting_value,
                            },
                        },
                    },
                ]

            elif object_type == "missing_determinants_in_component_behaviours":
                # Behaviour with empty determinant list
                root_cause = (
                    f"Component '{component_key}' has a behaviour with no determinants "
                    "- it will never trigger."
                )

                fix_options = [
                    {
                        "id": "delete_behaviour",
                        "description": (
                            "Delete the empty behaviour from component "
                            f"'{component_key}'"
                        ),
                        "confidence": "high",
                        "action": {
                            "tool": "effect_delete",
                            "params": {"behaviour_id": parent_id},
                        },
                    },
                ]

            elif object_type == "catalog":
                # Component references non-existent catalog
                root_cause = (
                    f"Component '{component_key}' references catalog "
                    f"'{conflicting_value}' which does not exist."
                )

                fix_options = [
                    {
                        "id": "clear_catalog",
                        "description": (
                            f"Clear catalog reference from component '{component_key}'"
                        ),
                        "confidence": "medium",
                        "action": {
                            "tool": "form_component_update",
                            "params": {
                                "service_id": service_id,
                                "component_key": component_key,
                                "updates": {
                                    "data": {"dataSrc": "values", "values": []},
                                },
                            },
                        },
                    },
                    {
                        "id": "manual_reassign",
                        "description": "Manually assign a different catalog",
                        "confidence": "low",
                        "action": None,
                    },
                ]

            elif object_type in (
                "translation_moustache",
                "component_content_moustache",
                "component_html_moustache",
                "component_label_missing_moustache",
                "message_moustache",
            ):
                # Template references missing field.
                # BUG FIX: the original used four braces around the variable name
                # (f"'{{{{conflicting_value}}}}'"), which renders the literal text
                # "{{conflicting_value}}" instead of the actual value. Five braces
                # on each side render as '{{<value>}}'.
                root_cause = (
                    f"Template in '{issue.get('parentName', 'unknown')}' uses "
                    f"variable '{{{{{conflicting_value}}}}}' which references a "
                    "field that no longer exists."
                )

                fix_options = [
                    {
                        "id": "manual_review",
                        "description": (
                            "Review and update the template to remove or fix "
                            "the reference"
                        ),
                        "confidence": "medium",
                        "action": None,
                    },
                ]

            else:
                # Default for other types
                root_cause = issue.get("message", f"Configuration issue: {object_type}")
                fix_options = [
                    {
                        "id": "manual_review",
                        "description": f"Manual review required for {object_type}",
                        "confidence": "low",
                        "action": None,
                    },
                ]

    except BPAClientError as e:
        # Investigation is best-effort: surface the error in context instead
        # of failing the whole call.
        context["error"] = str(e)

    # Recommend the first option only when it is confident enough to act on.
    recommended = (
        fix_options[0]["id"]
        if fix_options and fix_options[0].get("confidence") in ("high", "medium")
        else "manual_review"
    )

    return {
        "issue": issue,
        "issue_type_info": info.to_dict() if info else None,
        "root_cause": root_cause,
        "context": context,
        "fix_options": fix_options,
        "recommended_fix": recommended,
    }
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
async def debug_fix(
    service_id: str | int,
    issue: dict[str, Any],
    fix_option: str,
) -> dict[str, Any]:
    """Execute a fix for a configuration issue. Audited write operation.

    This tool executes the specified fix using existing MCP tools.
    Audit trail is created for rollback support.

    Supported fix options: "delete_effect", "delete_determinant",
    "delete_behaviour", "clear_catalog", "remove_translation". Any other
    value marks the audit record as failed and raises ToolError.

    Args:
        service_id: BPA service UUID.
        issue: Issue dict from debug_scan.
        fix_option: Fix option ID from debug_investigate (e.g., "delete_effect").

    Returns:
        dict with success, issue, fix_applied, result, audit_id.

    Raises:
        ToolError: On missing arguments, unauthenticated user, or an
            unknown/unsupported fix option.
    """
    if not service_id:
        raise ToolError("'service_id' is required.")
    if not issue:
        raise ToolError("'issue' dict is required (from debug_scan).")
    if not fix_option:
        raise ToolError("'fix_option' is required (from debug_investigate).")

    # Get authenticated user for audit
    try:
        user_email = get_current_user_email()
    except NotAuthenticatedError as e:
        raise ToolError(str(e))

    object_type = issue.get("objectType", "unknown")
    component_key = issue.get("componentKey")
    parent_id = issue.get("parentId")
    conflicting_value = issue.get("conflictingValue")

    audit_logger = AuditLogger()

    # Create audit record (audit-before-write pattern: the pending record
    # exists before any BPA mutation is attempted).
    audit_id = await audit_logger.record_pending(
        user_email=user_email,
        operation_type="debug_fix",
        object_type=f"debug_{object_type}",
        # First non-empty identifier wins; which one is meaningful depends
        # on the issue type.
        object_id=parent_id or conflicting_value or component_key,
        params={
            "service_id": str(service_id),
            "issue": issue,
            "fix_option": fix_option,
        },
    )

    try:
        async with BPAClient() as client:
            result: dict[str, Any] = {}

            if fix_option == "delete_effect" and parent_id:
                # Delete the behaviour/effect
                await client.delete(
                    "/componentbehaviour/{behaviour_id}",
                    path_params={"behaviour_id": parent_id},
                    resource_type="behaviour",
                    resource_id=parent_id,
                )
                result = {
                    "action": "deleted_behaviour",
                    "behaviour_id": parent_id,
                    "component_key": component_key,
                }

            elif fix_option == "delete_determinant" and conflicting_value:
                # Delete the orphaned determinant
                await client.delete(
                    "/service/{service_id}/determinant/{determinant_id}",
                    path_params={
                        "service_id": service_id,
                        "determinant_id": conflicting_value,
                    },
                    resource_type="determinant",
                    resource_id=conflicting_value,
                )
                result = {
                    "action": "deleted_determinant",
                    "determinant_id": conflicting_value,
                }

            elif fix_option == "delete_behaviour" and parent_id:
                # Same as delete_effect
                await client.delete(
                    "/componentbehaviour/{behaviour_id}",
                    path_params={"behaviour_id": parent_id},
                    resource_type="behaviour",
                    resource_id=parent_id,
                )
                result = {
                    "action": "deleted_behaviour",
                    "behaviour_id": parent_id,
                    "component_key": component_key,
                }

            elif fix_option == "clear_catalog" and component_key:
                # Clear catalog reference from form component
                # Get current component and update to clear dataSrc
                # NOTE(review): form_type is hard-coded to "applicant" here —
                # components on other form types would not be reachable; confirm.
                try:
                    comp = await client.get(
                        "/service/{service_id}/form/{form_type}/component/{component_key}",
                        path_params={
                            "service_id": service_id,
                            "form_type": "applicant",
                            "component_key": component_key,
                        },
                    )
                    # Prepare update to clear catalog reference
                    updates = {
                        "dataSrc": "",
                        "data": {"values": []},
                    }
                    await client.put(
                        "/service/{service_id}/form/{form_type}/component/{component_key}",
                        path_params={
                            "service_id": service_id,
                            "form_type": "applicant",
                            "component_key": component_key,
                        },
                        json=updates,
                    )
                    result = {
                        "action": "cleared_catalog",
                        "component_key": component_key,
                        # Keep the old dataSrc so the change can be undone by hand.
                        "previous_dataSrc": comp.get("dataSrc"),
                    }
                except BPAClientError:
                    # Fallback to guidance if direct update fails
                    result = {
                        "action": "manual_required",
                        "message": (
                            f"Use form_component_update to clear catalog from "
                            f"'{component_key}'"
                        ),
                    }

            elif fix_option == "remove_translation":
                # Sync translations to clean up orphaned references
                try:
                    await client.put(
                        "/translations/sync",
                        json={},
                    )
                    result = {
                        "action": "synced_translations",
                        "message": (
                            "Translation sync completed. Orphaned references may be "
                            "cleaned. Re-scan to verify."
                        ),
                        "component_key": component_key,
                        "conflicting_value": conflicting_value,
                    }
                except BPAClientError:
                    # Sync not available, provide guidance
                    result = {
                        "action": "manual_required",
                        "message": (
                            f"Translation sync unavailable. Manually edit translation "
                            f"to remove reference to '{conflicting_value}'"
                        ),
                        "component_key": component_key,
                    }

            else:
                # Unknown option (or a known one missing its required issue
                # field): close out the audit record before failing.
                await audit_logger.mark_failed(
                    audit_id=audit_id,
                    error_message=f"Unknown fix option: {fix_option}",
                )
                raise ToolError(
                    f"Unknown or unsupported fix option: {fix_option}. "
                    "Use debug_investigate to get valid fix options."
                )

            # Mark audit as success
            await audit_logger.mark_success(audit_id=audit_id, result=result)

            return {
                "success": True,
                "issue": issue,
                "fix_applied": fix_option,
                "result": result,
                "audit_id": audit_id,
            }

    except ToolError:
        # Already audited above; propagate unchanged.
        raise
    except BPAClientError as e:
        # Record the failure on the audit trail before translating the error.
        await audit_logger.mark_failed(audit_id=audit_id, error_message=str(e))
        raise translate_error(e, resource_type="debug_fix", resource_id=str(service_id))
|
|
580
|
+
|
|
581
|
+
|
|
582
|
+
async def debug_group_issues(
|
|
583
|
+
service_id: str | int,
|
|
584
|
+
issues: list[dict[str, Any]],
|
|
585
|
+
group_by: str = "type",
|
|
586
|
+
) -> dict[str, Any]:
|
|
587
|
+
"""Group configuration issues by specified criteria.
|
|
588
|
+
|
|
589
|
+
Read-only analysis tool - does not call BPA API.
|
|
590
|
+
|
|
591
|
+
Args:
|
|
592
|
+
service_id: BPA service UUID (for context).
|
|
593
|
+
issues: Issue list from debug_scan.
|
|
594
|
+
group_by: type, severity, parent, fix_strategy, or batch_fixable.
|
|
595
|
+
|
|
596
|
+
Returns:
|
|
597
|
+
dict with service_id, group_by, groups (list with name, count, issues,
|
|
598
|
+
sample_issue, aggregate_impact), total_groups.
|
|
599
|
+
"""
|
|
600
|
+
valid_group_by = {"type", "severity", "parent", "fix_strategy", "batch_fixable"}
|
|
601
|
+
if group_by not in valid_group_by:
|
|
602
|
+
valid_opts = ", ".join(sorted(valid_group_by))
|
|
603
|
+
raise ToolError(f"Invalid group_by '{group_by}'. Must be one of: {valid_opts}")
|
|
604
|
+
|
|
605
|
+
if not issues:
|
|
606
|
+
return {
|
|
607
|
+
"service_id": str(service_id),
|
|
608
|
+
"group_by": group_by,
|
|
609
|
+
"groups": [],
|
|
610
|
+
"total_groups": 0,
|
|
611
|
+
}
|
|
612
|
+
|
|
613
|
+
# Group issues based on criterion
|
|
614
|
+
grouped: dict[str, list[dict[str, Any]]] = {}
|
|
615
|
+
|
|
616
|
+
if group_by == "type":
|
|
617
|
+
grouped = group_issues_by_type(issues)
|
|
618
|
+
|
|
619
|
+
elif group_by == "severity":
|
|
620
|
+
by_severity = group_issues_by_severity(issues)
|
|
621
|
+
# Filter out empty severity groups
|
|
622
|
+
grouped = {k: v for k, v in by_severity.items() if v}
|
|
623
|
+
|
|
624
|
+
elif group_by == "parent":
|
|
625
|
+
for issue in issues:
|
|
626
|
+
parent_id = issue.get("parentId", "unknown")
|
|
627
|
+
parent_name = issue.get("parentName", "unknown")
|
|
628
|
+
if parent_id != "unknown":
|
|
629
|
+
key = f"{parent_name} ({parent_id})"
|
|
630
|
+
else:
|
|
631
|
+
key = parent_name
|
|
632
|
+
if key not in grouped:
|
|
633
|
+
grouped[key] = []
|
|
634
|
+
grouped[key].append(issue)
|
|
635
|
+
|
|
636
|
+
elif group_by == "fix_strategy":
|
|
637
|
+
for issue in issues:
|
|
638
|
+
obj_type = issue.get("objectType", "unknown")
|
|
639
|
+
info = get_issue_info(obj_type)
|
|
640
|
+
strategy = info.fix_strategy.value if info else "manual_review"
|
|
641
|
+
if strategy not in grouped:
|
|
642
|
+
grouped[strategy] = []
|
|
643
|
+
grouped[strategy].append(issue)
|
|
644
|
+
|
|
645
|
+
elif group_by == "batch_fixable":
|
|
646
|
+
grouped = {"batch_fixable": [], "manual_required": []}
|
|
647
|
+
for issue in issues:
|
|
648
|
+
obj_type = issue.get("objectType", "unknown")
|
|
649
|
+
info = get_issue_info(obj_type)
|
|
650
|
+
if info and info.batch_fixable:
|
|
651
|
+
grouped["batch_fixable"].append(issue)
|
|
652
|
+
else:
|
|
653
|
+
grouped["manual_required"].append(issue)
|
|
654
|
+
# Filter out empty groups
|
|
655
|
+
grouped = {k: v for k, v in grouped.items() if v}
|
|
656
|
+
|
|
657
|
+
# Build groups with aggregate impact
|
|
658
|
+
groups: list[dict[str, Any]] = []
|
|
659
|
+
for name, group_issues in grouped.items():
|
|
660
|
+
# Calculate aggregate impact
|
|
661
|
+
severity_breakdown: dict[str, int] = {
|
|
662
|
+
"high": 0,
|
|
663
|
+
"medium": 0,
|
|
664
|
+
"low": 0,
|
|
665
|
+
"unknown": 0,
|
|
666
|
+
}
|
|
667
|
+
batch_fixable_count = 0
|
|
668
|
+
manual_count = 0
|
|
669
|
+
fix_strategies: set[str] = set()
|
|
670
|
+
|
|
671
|
+
for issue in group_issues:
|
|
672
|
+
obj_type = issue.get("objectType", "unknown")
|
|
673
|
+
info = get_issue_info(obj_type)
|
|
674
|
+
if info:
|
|
675
|
+
severity_breakdown[info.severity.value] += 1
|
|
676
|
+
if info.batch_fixable:
|
|
677
|
+
batch_fixable_count += 1
|
|
678
|
+
else:
|
|
679
|
+
manual_count += 1
|
|
680
|
+
fix_strategies.add(info.fix_strategy.value)
|
|
681
|
+
else:
|
|
682
|
+
severity_breakdown["unknown"] += 1
|
|
683
|
+
manual_count += 1
|
|
684
|
+
|
|
685
|
+
# Filter out zero-count severities
|
|
686
|
+
filtered_severity = {k: v for k, v in severity_breakdown.items() if v > 0}
|
|
687
|
+
|
|
688
|
+
groups.append(
|
|
689
|
+
{
|
|
690
|
+
"name": name,
|
|
691
|
+
"count": len(group_issues),
|
|
692
|
+
"issues": group_issues,
|
|
693
|
+
"sample_issue": group_issues[0] if group_issues else None,
|
|
694
|
+
"aggregate_impact": {
|
|
695
|
+
"severity_breakdown": filtered_severity,
|
|
696
|
+
"batch_fixable_count": batch_fixable_count,
|
|
697
|
+
"manual_count": manual_count,
|
|
698
|
+
"fix_strategies": sorted(fix_strategies),
|
|
699
|
+
},
|
|
700
|
+
}
|
|
701
|
+
)
|
|
702
|
+
|
|
703
|
+
# Sort groups by priority: high severity batch-fixable first
|
|
704
|
+
def group_priority(g: dict[str, Any]) -> tuple[int, int, int]:
|
|
705
|
+
impact = g["aggregate_impact"]
|
|
706
|
+
high_count = impact["severity_breakdown"].get("high", 0)
|
|
707
|
+
batch_count = impact["batch_fixable_count"]
|
|
708
|
+
total = g["count"]
|
|
709
|
+
# Higher high severity = lower priority number (comes first)
|
|
710
|
+
# Higher batch fixable = lower priority number (comes first)
|
|
711
|
+
return (-high_count, -batch_count, -total)
|
|
712
|
+
|
|
713
|
+
groups.sort(key=group_priority)
|
|
714
|
+
|
|
715
|
+
return {
|
|
716
|
+
"service_id": str(service_id),
|
|
717
|
+
"group_by": group_by,
|
|
718
|
+
"groups": groups,
|
|
719
|
+
"total_groups": len(groups),
|
|
720
|
+
}
|
|
721
|
+
|
|
722
|
+
|
|
723
|
+
async def debug_fix_batch(
|
|
724
|
+
service_id: str | int,
|
|
725
|
+
issues: list[dict[str, Any]],
|
|
726
|
+
fix_option: str,
|
|
727
|
+
) -> dict[str, Any]:
|
|
728
|
+
"""Execute fixes for multiple issues in a batch. Audited write operation.
|
|
729
|
+
|
|
730
|
+
Executes fixes sequentially with single audit trail. Stops on first
|
|
731
|
+
failure and returns rollback info for completed fixes.
|
|
732
|
+
|
|
733
|
+
Args:
|
|
734
|
+
service_id: BPA service UUID.
|
|
735
|
+
issues: List of issue dicts from debug_scan.
|
|
736
|
+
fix_option: Fix option ID (e.g., "delete_effect", "delete_determinant").
|
|
737
|
+
|
|
738
|
+
Returns:
|
|
739
|
+
dict with success, total, success_count, failed_count, results,
|
|
740
|
+
failed_at (if failed), rollback_info, audit_id.
|
|
741
|
+
"""
|
|
742
|
+
if not service_id:
|
|
743
|
+
raise ToolError("'service_id' is required.")
|
|
744
|
+
if not fix_option:
|
|
745
|
+
raise ToolError("'fix_option' is required.")
|
|
746
|
+
|
|
747
|
+
# Handle empty issues list
|
|
748
|
+
if not issues:
|
|
749
|
+
return {
|
|
750
|
+
"success": True,
|
|
751
|
+
"total": 0,
|
|
752
|
+
"success_count": 0,
|
|
753
|
+
"failed_count": 0,
|
|
754
|
+
"results": [],
|
|
755
|
+
"failed_at": None,
|
|
756
|
+
"rollback_info": None,
|
|
757
|
+
"audit_id": None,
|
|
758
|
+
}
|
|
759
|
+
|
|
760
|
+
# Get authenticated user for audit
|
|
761
|
+
try:
|
|
762
|
+
user_email = get_current_user_email()
|
|
763
|
+
except NotAuthenticatedError as e:
|
|
764
|
+
raise ToolError(str(e))
|
|
765
|
+
|
|
766
|
+
audit_logger = AuditLogger()
|
|
767
|
+
|
|
768
|
+
# Create single audit record for the batch
|
|
769
|
+
audit_id = await audit_logger.record_pending(
|
|
770
|
+
user_email=user_email,
|
|
771
|
+
operation_type="debug_fix_batch",
|
|
772
|
+
object_type="debug_batch",
|
|
773
|
+
object_id=str(service_id),
|
|
774
|
+
params={
|
|
775
|
+
"service_id": str(service_id),
|
|
776
|
+
"fix_option": fix_option,
|
|
777
|
+
"issue_count": len(issues),
|
|
778
|
+
},
|
|
779
|
+
)
|
|
780
|
+
|
|
781
|
+
results: list[dict[str, Any]] = []
|
|
782
|
+
success_count = 0
|
|
783
|
+
failed_count = 0
|
|
784
|
+
failed_at: int | None = None
|
|
785
|
+
|
|
786
|
+
try:
|
|
787
|
+
async with BPAClient() as client:
|
|
788
|
+
for idx, issue in enumerate(issues):
|
|
789
|
+
object_type = issue.get("objectType", "unknown")
|
|
790
|
+
component_key = issue.get("componentKey")
|
|
791
|
+
parent_id = issue.get("parentId")
|
|
792
|
+
conflicting_value = issue.get("conflictingValue")
|
|
793
|
+
|
|
794
|
+
try:
|
|
795
|
+
result: dict[str, Any] = {}
|
|
796
|
+
|
|
797
|
+
if fix_option == "delete_effect" and parent_id:
|
|
798
|
+
await client.delete(
|
|
799
|
+
"/componentbehaviour/{behaviour_id}",
|
|
800
|
+
path_params={"behaviour_id": parent_id},
|
|
801
|
+
resource_type="behaviour",
|
|
802
|
+
resource_id=parent_id,
|
|
803
|
+
)
|
|
804
|
+
result = {
|
|
805
|
+
"action": "deleted_behaviour",
|
|
806
|
+
"behaviour_id": parent_id,
|
|
807
|
+
"component_key": component_key,
|
|
808
|
+
}
|
|
809
|
+
|
|
810
|
+
elif fix_option == "delete_determinant" and conflicting_value:
|
|
811
|
+
await client.delete(
|
|
812
|
+
"/service/{service_id}/determinant/{determinant_id}",
|
|
813
|
+
path_params={
|
|
814
|
+
"service_id": service_id,
|
|
815
|
+
"determinant_id": conflicting_value,
|
|
816
|
+
},
|
|
817
|
+
resource_type="determinant",
|
|
818
|
+
resource_id=conflicting_value,
|
|
819
|
+
)
|
|
820
|
+
result = {
|
|
821
|
+
"action": "deleted_determinant",
|
|
822
|
+
"determinant_id": conflicting_value,
|
|
823
|
+
}
|
|
824
|
+
|
|
825
|
+
elif fix_option == "delete_behaviour" and parent_id:
|
|
826
|
+
await client.delete(
|
|
827
|
+
"/componentbehaviour/{behaviour_id}",
|
|
828
|
+
path_params={"behaviour_id": parent_id},
|
|
829
|
+
resource_type="behaviour",
|
|
830
|
+
resource_id=parent_id,
|
|
831
|
+
)
|
|
832
|
+
result = {
|
|
833
|
+
"action": "deleted_behaviour",
|
|
834
|
+
"behaviour_id": parent_id,
|
|
835
|
+
"component_key": component_key,
|
|
836
|
+
}
|
|
837
|
+
|
|
838
|
+
else:
|
|
839
|
+
# Unsupported fix option for this issue
|
|
840
|
+
raise ToolError(
|
|
841
|
+
f"Fix option '{fix_option}' not applicable for "
|
|
842
|
+
f"issue type '{object_type}'"
|
|
843
|
+
)
|
|
844
|
+
|
|
845
|
+
# Success for this issue
|
|
846
|
+
results.append(
|
|
847
|
+
{
|
|
848
|
+
"index": idx,
|
|
849
|
+
"issue": issue,
|
|
850
|
+
"success": True,
|
|
851
|
+
"result": result,
|
|
852
|
+
}
|
|
853
|
+
)
|
|
854
|
+
success_count += 1
|
|
855
|
+
|
|
856
|
+
except (BPAClientError, ToolError) as e:
|
|
857
|
+
# Failure - stop batch execution
|
|
858
|
+
failed_count = 1
|
|
859
|
+
failed_at = idx
|
|
860
|
+
results.append(
|
|
861
|
+
{
|
|
862
|
+
"index": idx,
|
|
863
|
+
"issue": issue,
|
|
864
|
+
"success": False,
|
|
865
|
+
"error": str(e),
|
|
866
|
+
}
|
|
867
|
+
)
|
|
868
|
+
break
|
|
869
|
+
|
|
870
|
+
except BPAClientError as e:
|
|
871
|
+
# Connection-level failure
|
|
872
|
+
await audit_logger.mark_failed(audit_id=audit_id, error_message=str(e))
|
|
873
|
+
raise translate_error(
|
|
874
|
+
e, resource_type="debug_fix_batch", resource_id=str(service_id)
|
|
875
|
+
)
|
|
876
|
+
|
|
877
|
+
# Determine overall success
|
|
878
|
+
overall_success = failed_at is None
|
|
879
|
+
|
|
880
|
+
# Build rollback info
|
|
881
|
+
rollback_info = None
|
|
882
|
+
if success_count > 0:
|
|
883
|
+
rollback_info = {
|
|
884
|
+
"audit_id": audit_id,
|
|
885
|
+
"completed_count": success_count,
|
|
886
|
+
"can_rollback": True,
|
|
887
|
+
}
|
|
888
|
+
|
|
889
|
+
# Mark audit based on outcome
|
|
890
|
+
if overall_success:
|
|
891
|
+
await audit_logger.mark_success(
|
|
892
|
+
audit_id=audit_id,
|
|
893
|
+
result={
|
|
894
|
+
"total": len(issues),
|
|
895
|
+
"success_count": success_count,
|
|
896
|
+
"results": results,
|
|
897
|
+
},
|
|
898
|
+
)
|
|
899
|
+
else:
|
|
900
|
+
await audit_logger.mark_failed(
|
|
901
|
+
audit_id=audit_id,
|
|
902
|
+
error_message=f"Batch failed at index {failed_at}",
|
|
903
|
+
)
|
|
904
|
+
|
|
905
|
+
return {
|
|
906
|
+
"success": overall_success,
|
|
907
|
+
"total": len(issues),
|
|
908
|
+
"success_count": success_count,
|
|
909
|
+
"failed_count": failed_count,
|
|
910
|
+
"results": results,
|
|
911
|
+
"failed_at": failed_at,
|
|
912
|
+
"rollback_info": rollback_info,
|
|
913
|
+
"audit_id": audit_id,
|
|
914
|
+
}
|
|
915
|
+
|
|
916
|
+
|
|
917
|
+
# Phase ordering for fix dependencies.
# Effects reference determinants, so delete effects first; empty behaviours
# come next, then determinants nothing references any longer.
# Each entry is (fix_strategy_id, human-readable phase name); debug_plan
# iterates this list in order when building its ordered phases, and treats
# any strategy not listed here as requiring manual handling.
PHASE_ORDER = [
    ("delete_effect", "Delete Orphaned Effects"),
    ("delete_behaviour", "Delete Empty Behaviours"),
    ("delete_determinant", "Delete Orphaned Determinants"),
    ("clear_catalog_reference", "Clear Catalog References"),
    ("remove_translation", "Fix Translation References"),
    ("manual_review", "Manual Review Required"),
]
|
|
927
|
+
|
|
928
|
+
|
|
929
|
+
async def debug_plan(service_id: str | int) -> dict[str, Any]:
    """Generate a fix plan for all issues in a service.

    Performs a full scan and generates ordered phases respecting fix
    dependencies. Effects must be fixed before orphaned determinants
    (the ordering is defined by PHASE_ORDER).

    Args:
        service_id: BPA service UUID.

    Returns:
        dict with service_id, total_issues, phases (ordered list with
        phase_id, name, description, fix_strategy, issues, batch_fixable,
        approval_required, estimated_impact), summary.

    Raises:
        ToolError: If service_id is missing.
    """
    if not service_id:
        raise ToolError(
            "Cannot generate plan: 'service_id' is required. "
            "Use 'service_list' to find valid IDs."
        )

    # Perform the scan
    scan_result = await debug_scan(service_id)
    all_issues = scan_result.get("all_issues", [])

    if not all_issues:
        # Nothing to fix: return an empty but fully-shaped plan.
        return {
            "service_id": str(service_id),
            "total_issues": 0,
            "phases": [],
            "summary": {
                "total_phases": 0,
                "batch_fixable_count": 0,
                "manual_count": 0,
                "estimated_time": "No issues to fix",
            },
        }

    # Group issues by fix strategy; unknown issue types fall back to
    # manual review.
    by_strategy: dict[str, list[dict[str, Any]]] = {}
    for issue in all_issues:
        obj_type = issue.get("objectType", "unknown")
        info = get_issue_info(obj_type)
        strategy = info.fix_strategy.value if info else "manual_review"
        by_strategy.setdefault(strategy, []).append(issue)

    # Lower number = higher severity. Hoisted out of the phase loop so it
    # is not rebuilt on every iteration.
    severity_priority = {"high": 0, "medium": 1, "low": 2}

    # Build phases in dependency order
    phases: list[dict[str, Any]] = []
    phase_num = 1
    total_batch_fixable = 0
    total_manual = 0

    for strategy, phase_name in PHASE_ORDER:
        phase_issues = by_strategy.get(strategy, [])
        if not phase_issues:
            continue

        # A phase is batch-fixable only if every issue type in it is
        # (a missing issue-info record counts as not batch-fixable).
        batch_fixable = all(
            (info := get_issue_info(i.get("objectType", ""))) and info.batch_fixable
            for i in phase_issues
        )

        # Component keys touched by this phase (issues without a key are
        # skipped).
        affected_components = {
            comp_key
            for issue in phase_issues
            if (comp_key := issue.get("componentKey"))
        }

        # Phase severity is the highest severity among its issues.
        phase_severity = "low"
        for issue in phase_issues:
            info = get_issue_info(issue.get("objectType", ""))
            if info and severity_priority.get(
                info.severity.value, 3
            ) < severity_priority.get(phase_severity, 3):
                phase_severity = info.severity.value

        # Describe the phase using the first issue's type as representative.
        sample_issue = phase_issues[0]
        obj_type = sample_issue.get("objectType", "unknown")
        info = get_issue_info(obj_type)
        description = info.description if info else f"Fix {obj_type} issues"

        phases.append(
            {
                "phase_id": f"phase-{phase_num}",
                "name": phase_name,
                "description": description,
                "fix_strategy": strategy,
                "issues": phase_issues,
                "issue_count": len(phase_issues),
                "batch_fixable": batch_fixable,
                "approval_required": True,  # All phases need approval
                "estimated_impact": {
                    "affected_components": sorted(affected_components)[:10],
                    "total_affected": len(affected_components),
                    "severity": phase_severity,
                },
            }
        )
        phase_num += 1

        if batch_fixable:
            total_batch_fixable += len(phase_issues)
        else:
            total_manual += len(phase_issues)

    # Add any remaining strategies not in PHASE_ORDER (always manual).
    # Precompute the known strategies once instead of scanning PHASE_ORDER
    # for every entry.
    known_strategies = {s for s, _ in PHASE_ORDER}
    for strategy, phase_issues in by_strategy.items():
        if strategy in known_strategies:
            continue  # Already processed
        if not phase_issues:
            continue

        phases.append(
            {
                "phase_id": f"phase-{phase_num}",
                "name": f"Fix {strategy.replace('_', ' ').title()}",
                "description": f"Resolve issues using {strategy} strategy",
                "fix_strategy": strategy,
                "issues": phase_issues,
                "issue_count": len(phase_issues),
                "batch_fixable": False,
                "approval_required": True,
                "estimated_impact": {
                    "affected_components": [],
                    "total_affected": 0,
                    "severity": "unknown",
                },
            }
        )
        total_manual += len(phase_issues)
        phase_num += 1

    # Generate summary
    summary = {
        "total_phases": len(phases),
        "batch_fixable_count": total_batch_fixable,
        "manual_count": total_manual,
        "phase_order": [p["phase_id"] for p in phases],
        "estimated_workflow": (
            f"{len(phases)} approval steps: "
            f"{total_batch_fixable} auto-fixable, "
            f"{total_manual} require manual review"
        ),
    }

    return {
        "service_id": str(service_id),
        "total_issues": len(all_issues),
        "phases": phases,
        "summary": summary,
    }
|
|
1089
|
+
|
|
1090
|
+
|
|
1091
|
+
def _issue_key(issue: dict[str, Any]) -> str:
|
|
1092
|
+
"""Create unique key for an issue for comparison."""
|
|
1093
|
+
obj_type = issue.get("objectType", "")
|
|
1094
|
+
parent_id = issue.get("parentId", "")
|
|
1095
|
+
component_key = issue.get("componentKey", "")
|
|
1096
|
+
conflicting_value = issue.get("conflictingValue", "")
|
|
1097
|
+
return f"{obj_type}:{parent_id}:{component_key}:{conflicting_value}"
|
|
1098
|
+
|
|
1099
|
+
|
|
1100
|
+
async def debug_verify(
    service_id: str | int,
    fix_result: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Verify fixes were applied successfully by rescanning the service.

    Re-runs debug_scan and, when *fix_result* is supplied, diffs the fresh
    issue set against the issues the fix operation targeted.

    Args:
        service_id: BPA service UUID.
        fix_result: Optional result from debug_fix or debug_fix_batch.

    Returns:
        dict with service_id, current_issues, total_current, verification
        (resolved_count, remaining_count, new_count, improvement_percentage),
        status (verified|partial|failed|baseline).
    """
    if not service_id:
        raise ToolError(
            "Cannot verify: 'service_id' is required. "
            "Use 'service_list' to find valid IDs."
        )

    # Fresh scan of the service's current state.
    rescan = await debug_scan(service_id)
    issues_now = rescan.get("all_issues", [])
    total_now = len(issues_now)

    # Identity keys of the issues present right now, for set comparison.
    keys_now = {_issue_key(item) for item in issues_now}

    # Without a prior fix result there is nothing to diff against:
    # report the scan as a baseline.
    if not fix_result:
        return {
            "service_id": str(service_id),
            "current_issues": rescan,
            "total_current": total_now,
            "verification": {
                "resolved_count": 0,
                "remaining_count": total_now,
                "new_count": 0,
                "improvement_percentage": 0.0,
                "comparison_available": False,
            },
            "status": "baseline",
            "summary": f"Baseline scan: {total_now} issues found",
        }

    # Pull the targeted issues out of either result shape:
    # debug_fix carries a single "issue", debug_fix_batch a "results" list.
    prior_issues: list[dict[str, Any]] = []
    if "issue" in fix_result:
        prior_issues = [fix_result["issue"]]
    elif "results" in fix_result:
        prior_issues = [
            entry["issue"]
            for entry in fix_result.get("results", [])
            if "issue" in entry
        ]

    # Even when no per-issue records exist we can still compare using the
    # batch's reported total.
    prior_keys = {_issue_key(item) for item in prior_issues}
    prior_total = len(prior_issues) or fix_result.get("total", 0)

    # Set arithmetic gives the verification metrics directly.
    resolved_count = len(prior_keys - keys_now)
    remaining_count = len(prior_keys & keys_now)
    new_count = len(keys_now - prior_keys)

    # Improvement as a percentage of the issues that were targeted.
    if prior_total > 0:
        improvement = (resolved_count / prior_total) * 100
    else:
        improvement = 100.0 if total_now == 0 else 0.0

    # Classify the outcome.
    if resolved_count > 0 and remaining_count == 0 and new_count == 0:
        status = "verified"
        summary = f"All {resolved_count} targeted issues resolved successfully"
    elif resolved_count > 0:
        status = "partial"
        summary = (
            f"Resolved {resolved_count} of {prior_total} issues "
            f"({improvement:.1f}% improvement)"
        )
        if new_count > 0:
            summary += f", but {new_count} new issues detected"
    elif new_count > 0:
        status = "failed"
        summary = f"No issues resolved, {new_count} new issues detected"
    else:
        status = "failed"
        summary = "No improvement detected"

    return {
        "service_id": str(service_id),
        "current_issues": rescan,
        "total_current": total_now,
        "verification": {
            "resolved_count": resolved_count,
            "remaining_count": remaining_count,
            "new_count": new_count,
            "improvement_percentage": round(improvement, 1),
            "comparison_available": True,
            "previous_total": prior_total,
        },
        "status": status,
        "summary": summary,
    }
|
|
1216
|
+
|
|
1217
|
+
|
|
1218
|
+
def register_debug_tools(mcp: Any) -> None:
    """Register debug tools with the MCP server.

    Args:
        mcp: The FastMCP server instance.
    """
    # Register every debug tool with one decorator call each.
    for tool_fn in (
        debug_scan,
        debug_investigate,
        debug_fix,
        debug_fix_batch,
        debug_group_issues,
        debug_plan,
        debug_verify,
    ):
        mcp.tool()(tool_fn)
|