mcp-ticketer 0.1.30__py3-none-any.whl → 1.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-ticketer might be problematic. Click here for more details.
- mcp_ticketer/__init__.py +10 -10
- mcp_ticketer/__version__.py +3 -3
- mcp_ticketer/adapters/__init__.py +2 -0
- mcp_ticketer/adapters/aitrackdown.py +796 -46
- mcp_ticketer/adapters/asana/__init__.py +15 -0
- mcp_ticketer/adapters/asana/adapter.py +1416 -0
- mcp_ticketer/adapters/asana/client.py +292 -0
- mcp_ticketer/adapters/asana/mappers.py +348 -0
- mcp_ticketer/adapters/asana/types.py +146 -0
- mcp_ticketer/adapters/github.py +879 -129
- mcp_ticketer/adapters/hybrid.py +11 -11
- mcp_ticketer/adapters/jira.py +973 -73
- mcp_ticketer/adapters/linear/__init__.py +24 -0
- mcp_ticketer/adapters/linear/adapter.py +2732 -0
- mcp_ticketer/adapters/linear/client.py +344 -0
- mcp_ticketer/adapters/linear/mappers.py +420 -0
- mcp_ticketer/adapters/linear/queries.py +479 -0
- mcp_ticketer/adapters/linear/types.py +360 -0
- mcp_ticketer/adapters/linear.py +10 -2315
- mcp_ticketer/analysis/__init__.py +23 -0
- mcp_ticketer/analysis/orphaned.py +218 -0
- mcp_ticketer/analysis/similarity.py +224 -0
- mcp_ticketer/analysis/staleness.py +266 -0
- mcp_ticketer/cache/memory.py +9 -8
- mcp_ticketer/cli/adapter_diagnostics.py +421 -0
- mcp_ticketer/cli/auggie_configure.py +116 -15
- mcp_ticketer/cli/codex_configure.py +274 -82
- mcp_ticketer/cli/configure.py +888 -151
- mcp_ticketer/cli/diagnostics.py +400 -157
- mcp_ticketer/cli/discover.py +297 -26
- mcp_ticketer/cli/gemini_configure.py +119 -26
- mcp_ticketer/cli/init_command.py +880 -0
- mcp_ticketer/cli/instruction_commands.py +435 -0
- mcp_ticketer/cli/linear_commands.py +616 -0
- mcp_ticketer/cli/main.py +203 -1165
- mcp_ticketer/cli/mcp_configure.py +474 -90
- mcp_ticketer/cli/mcp_server_commands.py +415 -0
- mcp_ticketer/cli/migrate_config.py +12 -8
- mcp_ticketer/cli/platform_commands.py +123 -0
- mcp_ticketer/cli/platform_detection.py +418 -0
- mcp_ticketer/cli/platform_installer.py +513 -0
- mcp_ticketer/cli/python_detection.py +126 -0
- mcp_ticketer/cli/queue_commands.py +15 -15
- mcp_ticketer/cli/setup_command.py +639 -0
- mcp_ticketer/cli/simple_health.py +90 -65
- mcp_ticketer/cli/ticket_commands.py +1013 -0
- mcp_ticketer/cli/update_checker.py +313 -0
- mcp_ticketer/cli/utils.py +114 -66
- mcp_ticketer/core/__init__.py +24 -1
- mcp_ticketer/core/adapter.py +250 -16
- mcp_ticketer/core/config.py +145 -37
- mcp_ticketer/core/env_discovery.py +101 -22
- mcp_ticketer/core/env_loader.py +349 -0
- mcp_ticketer/core/exceptions.py +160 -0
- mcp_ticketer/core/http_client.py +26 -26
- mcp_ticketer/core/instructions.py +405 -0
- mcp_ticketer/core/label_manager.py +732 -0
- mcp_ticketer/core/mappers.py +42 -30
- mcp_ticketer/core/models.py +280 -28
- mcp_ticketer/core/onepassword_secrets.py +379 -0
- mcp_ticketer/core/project_config.py +183 -49
- mcp_ticketer/core/registry.py +3 -3
- mcp_ticketer/core/session_state.py +171 -0
- mcp_ticketer/core/state_matcher.py +592 -0
- mcp_ticketer/core/url_parser.py +425 -0
- mcp_ticketer/core/validators.py +69 -0
- mcp_ticketer/defaults/ticket_instructions.md +644 -0
- mcp_ticketer/mcp/__init__.py +29 -1
- mcp_ticketer/mcp/__main__.py +60 -0
- mcp_ticketer/mcp/server/__init__.py +25 -0
- mcp_ticketer/mcp/server/__main__.py +60 -0
- mcp_ticketer/mcp/server/constants.py +58 -0
- mcp_ticketer/mcp/server/diagnostic_helper.py +175 -0
- mcp_ticketer/mcp/server/dto.py +195 -0
- mcp_ticketer/mcp/server/main.py +1343 -0
- mcp_ticketer/mcp/server/response_builder.py +206 -0
- mcp_ticketer/mcp/server/routing.py +655 -0
- mcp_ticketer/mcp/server/server_sdk.py +151 -0
- mcp_ticketer/mcp/server/tools/__init__.py +56 -0
- mcp_ticketer/mcp/server/tools/analysis_tools.py +495 -0
- mcp_ticketer/mcp/server/tools/attachment_tools.py +226 -0
- mcp_ticketer/mcp/server/tools/bulk_tools.py +273 -0
- mcp_ticketer/mcp/server/tools/comment_tools.py +152 -0
- mcp_ticketer/mcp/server/tools/config_tools.py +1439 -0
- mcp_ticketer/mcp/server/tools/diagnostic_tools.py +211 -0
- mcp_ticketer/mcp/server/tools/hierarchy_tools.py +921 -0
- mcp_ticketer/mcp/server/tools/instruction_tools.py +300 -0
- mcp_ticketer/mcp/server/tools/label_tools.py +948 -0
- mcp_ticketer/mcp/server/tools/pr_tools.py +152 -0
- mcp_ticketer/mcp/server/tools/search_tools.py +215 -0
- mcp_ticketer/mcp/server/tools/session_tools.py +170 -0
- mcp_ticketer/mcp/server/tools/ticket_tools.py +1268 -0
- mcp_ticketer/mcp/server/tools/user_ticket_tools.py +547 -0
- mcp_ticketer/queue/__init__.py +1 -0
- mcp_ticketer/queue/health_monitor.py +168 -136
- mcp_ticketer/queue/manager.py +95 -25
- mcp_ticketer/queue/queue.py +40 -21
- mcp_ticketer/queue/run_worker.py +6 -1
- mcp_ticketer/queue/ticket_registry.py +213 -155
- mcp_ticketer/queue/worker.py +109 -49
- mcp_ticketer-1.2.11.dist-info/METADATA +792 -0
- mcp_ticketer-1.2.11.dist-info/RECORD +110 -0
- mcp_ticketer/mcp/server.py +0 -1895
- mcp_ticketer-0.1.30.dist-info/METADATA +0 -413
- mcp_ticketer-0.1.30.dist-info/RECORD +0 -49
- {mcp_ticketer-0.1.30.dist-info → mcp_ticketer-1.2.11.dist-info}/WHEEL +0 -0
- {mcp_ticketer-0.1.30.dist-info → mcp_ticketer-1.2.11.dist-info}/entry_points.txt +0 -0
- {mcp_ticketer-0.1.30.dist-info → mcp_ticketer-1.2.11.dist-info}/licenses/LICENSE +0 -0
- {mcp_ticketer-0.1.30.dist-info → mcp_ticketer-1.2.11.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,948 @@
|
|
|
1
|
+
"""Label management tools for MCP.
|
|
2
|
+
|
|
3
|
+
This module provides MCP tools for label normalization, deduplication, merging,
|
|
4
|
+
and cleanup operations across ticket systems.
|
|
5
|
+
|
|
6
|
+
Features:
|
|
7
|
+
- List labels from adapters with usage statistics
|
|
8
|
+
- Normalize label names with configurable casing
|
|
9
|
+
- Find duplicate labels with similarity scoring
|
|
10
|
+
- Merge and rename labels across tickets
|
|
11
|
+
- Generate comprehensive cleanup reports
|
|
12
|
+
|
|
13
|
+
All tools follow the MCP response pattern:
|
|
14
|
+
{
|
|
15
|
+
"status": "completed" | "error",
|
|
16
|
+
"adapter": "adapter_type",
|
|
17
|
+
"adapter_name": "Adapter Display Name",
|
|
18
|
+
... tool-specific data ...
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
import logging
|
|
24
|
+
from typing import Any
|
|
25
|
+
|
|
26
|
+
from ....core.label_manager import CasingStrategy, LabelDeduplicator, LabelNormalizer
|
|
27
|
+
from ....core.models import SearchQuery
|
|
28
|
+
from ..server_sdk import get_adapter, get_router, has_router, mcp
|
|
29
|
+
|
|
30
|
+
logger = logging.getLogger(__name__)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _build_adapter_metadata(adapter: Any) -> dict[str, Any]:
|
|
34
|
+
"""Build adapter metadata for MCP responses.
|
|
35
|
+
|
|
36
|
+
Args:
|
|
37
|
+
----
|
|
38
|
+
adapter: The adapter that handled the operation
|
|
39
|
+
|
|
40
|
+
Returns:
|
|
41
|
+
-------
|
|
42
|
+
Dictionary with adapter metadata fields
|
|
43
|
+
|
|
44
|
+
"""
|
|
45
|
+
return {
|
|
46
|
+
"adapter": adapter.adapter_type,
|
|
47
|
+
"adapter_name": adapter.adapter_display_name,
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
@mcp.tool()
async def label_list(
    adapter_name: str | None = None,
    include_usage_count: bool = False,
) -> dict[str, Any]:
    """List every label/tag known to the ticket system.

    Queries the default configured adapter, or a named adapter when
    multi-adapter routing is configured. When ``include_usage_count`` is
    set, each label is annotated with the number of tickets carrying it.

    Args:
    ----
        adapter_name: Optional adapter to query (e.g., "linear", "github", "jira")
        include_usage_count: Include usage statistics for each label (default: False)

    Returns:
    -------
        Dictionary containing:
        - status: "completed" or "error"
        - adapter / adapter_name: Adapter identification
        - labels: List of label objects (with usage_count when requested)
        - total_labels: Total number of labels available
        - error: Error message (if failed)

    """
    try:
        # Resolve which adapter services this request.
        if not adapter_name:
            adapter = get_adapter()
        else:
            if not has_router():
                return {
                    "status": "error",
                    "error": f"Cannot use adapter_name='{adapter_name}' - multi-adapter routing not configured",
                }
            adapter = get_router()._get_adapter(adapter_name)

        # Label listing is an optional adapter capability.
        if not hasattr(adapter, "list_labels"):
            return {
                "status": "error",
                **_build_adapter_metadata(adapter),
                "error": f"Adapter {adapter.adapter_type} does not support label listing",
            }

        labels = await adapter.list_labels()

        if include_usage_count:
            # Best effort: count per-label ticket usage; failures here must
            # not prevent the label list itself from being returned.
            try:
                # Large limit to get all tickets
                all_tickets = await adapter.list(limit=1000)
                usage: dict[str, int] = {}
                for item in all_tickets:
                    for tag in item.tags or []:
                        usage[tag] = usage.get(tag, 0) + 1
                for entry in labels:
                    entry["usage_count"] = usage.get(entry.get("name", ""), 0)
            except Exception as e:
                # Continue without usage counts rather than failing.
                logger.warning(f"Failed to calculate usage counts: {e}")

        return {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "labels": labels,
            "total_labels": len(labels),
        }

    except Exception as e:
        error_response = {
            "status": "error",
            "error": f"Failed to list labels: {str(e)}",
        }
        # Attach adapter identity to the error when the adapter is reachable.
        try:
            error_response.update(_build_adapter_metadata(get_adapter()))
        except Exception:
            pass
        return error_response
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
@mcp.tool()
async def label_normalize(
    label_name: str,
    casing: str = "lowercase",
) -> dict[str, Any]:
    """Apply a casing strategy to a single label name.

    Standardizes a label according to one of the supported strategies:
    lowercase, titlecase, uppercase, kebab-case, or snake_case
    (e.g., "Bug Report" -> "bug-report" under kebab-case).

    Args:
    ----
        label_name: Label name to normalize (required)
        casing: Casing strategy to apply (default: "lowercase")

    Returns:
    -------
        Dictionary containing:
        - status: "completed" or "error"
        - original: Original label name
        - normalized: Normalized label name
        - casing: Casing strategy applied
        - changed: Whether normalization changed the label
        - error: Error message (if failed)

    """
    try:
        # Reject unknown strategies up front, listing the valid choices.
        try:
            CasingStrategy(casing)
        except ValueError:
            valid_options = ", ".join(c.value for c in CasingStrategy)
            return {
                "status": "error",
                "error": f"Invalid casing strategy '{casing}'. Valid options: {valid_options}",
            }

        normalized_name = LabelNormalizer(casing=casing).normalize(label_name)

        return {
            "status": "completed",
            "original": label_name,
            "normalized": normalized_name,
            "casing": casing,
            "changed": normalized_name != label_name,
        }

    except Exception as e:
        return {
            "status": "error",
            "error": f"Failed to normalize label: {str(e)}",
        }
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
@mcp.tool()
async def label_find_duplicates(
    threshold: float = 0.85,
    limit: int = 50,
) -> dict[str, Any]:
    """Detect likely-duplicate labels via fuzzy matching.

    Flags label pairs that differ only by case, spelling, plural form, or
    similar wording. Each pair is scored 0.0-1.0: 1.0 is an exact
    case-insensitive match, >=0.95 a probable typo or synonym, and lower
    scores are fuzzy string-similarity matches.

    Args:
    ----
        threshold: Minimum similarity threshold (0.0-1.0, default: 0.85)
        limit: Maximum number of duplicate pairs to return (default: 50)

    Returns:
    -------
        Dictionary containing:
        - status: "completed" or "error"
        - adapter / adapter_name: Adapter identification
        - duplicates: Duplicate pairs with similarity scores and recommendations
        - total_duplicates: Total number of duplicate pairs found
        - threshold: The threshold that was applied
        - error: Error message (if failed)

    """

    def _advice(first: str, second: str, score: float) -> str:
        # Translate a similarity score into an actionable suggestion.
        if score == 1.0:
            return f"Merge '{second}' into '{first}' (exact match, case difference)"
        if score >= 0.95:
            return f"Merge '{second}' into '{first}' (likely typo or synonym)"
        if score >= 0.85:
            return f"Review: '{first}' and '{second}' are very similar"
        return f"Review: '{first}' and '{second}' may be duplicates"

    try:
        adapter = get_adapter()

        # Label listing is an optional adapter capability.
        if not hasattr(adapter, "list_labels"):
            return {
                "status": "error",
                **_build_adapter_metadata(adapter),
                "error": f"Adapter {adapter.adapter_type} does not support label listing",
            }

        raw_labels = await adapter.list_labels()
        # Labels may arrive as dicts or plain strings depending on adapter.
        names = [
            entry.get("name", "") if isinstance(entry, dict) else str(entry)
            for entry in raw_labels
        ]

        pairs = LabelDeduplicator().find_duplicates(names, threshold=threshold)

        reported = [
            {
                "label1": first,
                "label2": second,
                "similarity": round(score, 3),
                "recommendation": _advice(first, second, score),
            }
            for first, second, score in pairs[:limit]
        ]

        return {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "duplicates": reported,
            "total_duplicates": len(pairs),
            "threshold": threshold,
        }

    except Exception as e:
        error_response = {
            "status": "error",
            "error": f"Failed to find duplicates: {str(e)}",
        }
        try:
            error_response.update(_build_adapter_metadata(get_adapter()))
        except Exception:
            pass
        return error_response
|
|
368
|
+
|
|
369
|
+
|
|
370
|
+
@mcp.tool()
async def label_suggest_merge(
    source_label: str,
    target_label: str,
) -> dict[str, Any]:
    """Preview a label merge operation without executing it.

    Reports how many tickets would change if ``source_label`` were merged
    into ``target_label``, plus a preview of up to ten affected ticket
    IDs. Nothing is modified, and the source label is NOT deleted from
    the system.

    Args:
    ----
        source_label: Label to merge from (will be replaced on tickets)
        target_label: Label to merge into (replacement label)

    Returns:
    -------
        Dictionary containing:
        - status: "completed" or "error"
        - adapter / adapter_name: Adapter identification
        - source_label / target_label: The labels involved
        - affected_tickets: Number of tickets that would be updated
        - preview: Up to 10 affected ticket IDs
        - warning: Any warnings about the operation (or None)
        - error: Error message (if failed)

    """
    try:
        adapter = get_adapter()

        # Locate tickets carrying the source label; fall back to a manual
        # scan when the adapter's search rejects the "label:" query syntax.
        try:
            matching = await adapter.search(
                SearchQuery(
                    query=f"label:{source_label}",
                    limit=1000,
                    state=None,
                    priority=None,
                    tags=None,
                    assignee=None,
                    project=None,
                    offset=0,
                )
            )
        except Exception:
            everything = await adapter.list(limit=1000)
            matching = [t for t in everything if source_label in (t.tags or [])]

        # Surface obvious problems with the proposed merge.
        warning = None
        if not matching:
            warning = f"No tickets found with label '{source_label}'"
        elif source_label == target_label:
            warning = "Source and target labels are identical - no changes needed"

        return {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "source_label": source_label,
            "target_label": target_label,
            "affected_tickets": len(matching),
            "preview": [t.id for t in matching[:10]],
            "warning": warning,
        }

    except Exception as e:
        error_response = {
            "status": "error",
            "error": f"Failed to preview merge: {str(e)}",
        }
        try:
            error_response.update(_build_adapter_metadata(get_adapter()))
        except Exception:
            pass
        return error_response
|
|
472
|
+
|
|
473
|
+
|
|
474
|
+
@mcp.tool()
async def label_merge(
    source_label: str,
    target_label: str,
    update_tickets: bool = True,
    dry_run: bool = False,
) -> dict[str, Any]:
    """Merge source label into target label across all tickets.

    Replaces all occurrences of source_label with target_label on affected tickets.
    This operation updates tickets but does NOT delete the source label definition.

    Merge Behavior:
    - Tickets with source_label: Label replaced with target_label
    - Tickets with both labels: Keep only target_label (remove duplicate)
    - Tickets with neither: No changes
    - Source label definition: Remains in system (use adapter's label delete API separately)

    Safety Features:
    - dry_run mode: Preview changes without applying them
    - update_tickets=False: Only show what would change, don't modify anything

    Args:
    ----
        source_label: Label to merge from (will be replaced on tickets)
        target_label: Label to merge into (replacement label)
        update_tickets: Actually update tickets (default: True)
        dry_run: Preview mode - show changes without applying (default: False)

    Returns:
    -------
        Dictionary containing:
        - status: "completed" or "error"
        - adapter: Adapter type
        - adapter_name: Human-readable adapter name
        - tickets_updated: Number of tickets modified
        - tickets_skipped: Number of tickets skipped (already had target)
        - dry_run: Whether this was a dry run
        - changes: List of changes made (up to 20)
        - error: Error message (if failed)

    Example:
    -------
        >>> # Dry run first
        >>> result = await label_merge("Bug", "bug", dry_run=True)
        >>> print(result)
        {
            "status": "completed",
            "dry_run": True,
            "tickets_updated": 0,
            "tickets_would_update": 15,
            "changes": [
                {"ticket_id": "PROJ-123", "action": "Replace 'Bug' with 'bug'"}
            ]
        }

        >>> # Execute merge
        >>> result = await label_merge("Bug", "bug", update_tickets=True)
        >>> print(result)
        {
            "status": "completed",
            "tickets_updated": 15,
            "tickets_skipped": 3,
            "changes": [...]
        }

    """
    try:
        adapter = get_adapter()

        # Validate inputs: merging a label into itself is a no-op.
        if source_label == target_label:
            return {
                "status": "error",
                "error": "Source and target labels are identical - no merge needed",
            }

        # Find all tickets with source label via the adapter's search API.
        # NOTE(review): assumes at most 1000 tickets carry the label - confirm.
        try:
            tickets = await adapter.search(
                SearchQuery(
                    query=f"label:{source_label}",
                    limit=1000,
                    state=None,
                    priority=None,
                    tags=None,
                    assignee=None,
                    project=None,
                    offset=0,
                )
            )
        except Exception:
            # Fallback: list all tickets and filter manually when the
            # adapter's search rejects the "label:" query syntax.
            all_tickets = await adapter.list(limit=1000)
            tickets = [t for t in all_tickets if source_label in (t.tags or [])]

        changes = []        # per-ticket change records (built below)
        updated_count = 0   # tickets actually written back
        skipped_count = 0   # tickets that needed no change

        for ticket in tickets:
            ticket_tags = list(ticket.tags or [])

            # Skip if already has target and not source - nothing to merge.
            if target_label in ticket_tags and source_label not in ticket_tags:
                skipped_count += 1
                continue

            # Build new tag list: replace source with target, preserving
            # order and dropping duplicates as we go.
            new_tags = []
            replaced = False

            for tag in ticket_tags:
                if tag == source_label:
                    # Only add target once, even if source appears after it.
                    if target_label not in new_tags:
                        new_tags.append(target_label)
                    replaced = True
                elif tag not in new_tags:
                    new_tags.append(tag)

            # Source label was never present on this ticket - skip it.
            if not replaced:
                skipped_count += 1
                continue

            # Record the change before (optionally) applying it.
            change_entry = {
                "ticket_id": ticket.id,
                "action": f"Replace '{source_label}' with '{target_label}'",
                "old_tags": ticket_tags,
                "new_tags": new_tags,
            }

            # Apply update only when neither preview mode is active;
            # a per-ticket failure is recorded but does not abort the merge.
            if update_tickets and not dry_run:
                try:
                    await adapter.update(ticket.id, {"tags": new_tags})
                    change_entry["status"] = "updated"
                    updated_count += 1
                except Exception as e:
                    change_entry["status"] = "failed"
                    change_entry["error"] = str(e)
            else:
                change_entry["status"] = "would_update"

            changes.append(change_entry)

        result = {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "source_label": source_label,
            "target_label": target_label,
            "dry_run": dry_run,
            "tickets_skipped": skipped_count,
        }

        # Preview modes report would-update counts instead of applied ones.
        if dry_run or not update_tickets:
            result["tickets_would_update"] = len(changes)
            result["tickets_updated"] = 0
        else:
            result["tickets_updated"] = updated_count

        # Limit changes to first 20 for response size.
        result["changes"] = changes[:20]
        if len(changes) > 20:
            result["changes_truncated"] = True
            result["total_changes"] = len(changes)

        return result

    except Exception as e:
        error_response = {
            "status": "error",
            "error": f"Failed to merge labels: {str(e)}",
        }
        # Attach adapter identity to the error when the adapter is reachable.
        try:
            adapter = get_adapter()
            error_response.update(_build_adapter_metadata(adapter))
        except Exception:
            pass
        return error_response
|
|
654
|
+
|
|
655
|
+
|
|
656
|
+
@mcp.tool()
async def label_rename(
    old_name: str,
    new_name: str,
    update_tickets: bool = True,
) -> dict[str, Any]:
    """Rename a label across all tickets.

    Thin semantic alias over label_merge: every ticket carrying
    ``old_name`` is updated to carry ``new_name`` instead. Use it for
    fixing typos, standardizing naming conventions, or rebranding labels.

    Args:
    ----
        old_name: Current label name to rename
        new_name: New label name to use
        update_tickets: Actually update tickets (default: True)

    Returns:
    -------
        Dictionary containing:
        - status: "completed" or "error"
        - adapter / adapter_name: Adapter identification
        - old_name / new_name: The rename that was performed
        - tickets_updated: Number of tickets modified
        - error: Error message (if failed)

    """
    # A rename is just a merge of the old name into the new one.
    outcome: dict[str, Any] = await label_merge(
        source_label=old_name,
        target_label=new_name,
        update_tickets=update_tickets,
        dry_run=False,
    )

    if outcome["status"] == "completed":
        # Re-express the merge result using rename terminology.
        outcome["old_name"] = old_name
        outcome["new_name"] = new_name
        for merge_key in ("source_label", "target_label"):
            outcome.pop(merge_key, None)

    return outcome
|
|
719
|
+
|
|
720
|
+
|
|
721
|
+
@mcp.tool()
async def label_cleanup_report(
    include_spelling: bool = True,
    include_duplicates: bool = True,
    include_unused: bool = True,
) -> dict[str, Any]:
    """Generate comprehensive label cleanup report with actionable recommendations.

    Analyzes all labels in the ticket system and identifies:
    - Spelling errors and typos (using spelling dictionary)
    - Duplicate or similar labels (using fuzzy matching)
    - Unused labels (labels with zero tickets)

    Report Sections:
    1. Spelling Issues: Labels that match known misspellings
    2. Duplicate Labels: Similar labels that should be consolidated
    3. Unused Labels: Labels not assigned to any tickets

    Each issue includes actionable recommendations and severity ratings.

    Args:
    ----
        include_spelling: Include spelling error analysis (default: True)
        include_duplicates: Include duplicate detection (default: True)
        include_unused: Include unused label detection (default: True)

    Returns:
    -------
        Dictionary containing:
        - status: "completed" or "error"
        - adapter: Adapter type
        - adapter_name: Human-readable adapter name
        - summary: High-level statistics
        - spelling_issues: List of spelling problems (if enabled)
        - duplicate_groups: List of duplicate label groups (if enabled)
        - unused_labels: List of unused labels (if enabled)
        - recommendations: Prioritized list of actions to take
        - error: Error message (if failed)

    Example:
    -------
        >>> result = await label_cleanup_report()
        >>> print(result["summary"])
        {
            "total_labels": 45,
            "spelling_issues": 3,
            "duplicate_groups": 5,
            "unused_labels": 8,
            "estimated_cleanup_savings": "16 labels can be consolidated"
        }

        >>> print(result["recommendations"][0])
        {
            "priority": "high",
            "category": "spelling",
            "action": "Rename 'feture' to 'feature'",
            "affected_tickets": 12,
            "command": "label_rename(old_name='feture', new_name='feature')"
        }

    """
    try:
        adapter = get_adapter()

        # Check if adapter supports list_labels
        if not hasattr(adapter, "list_labels"):
            return {
                "status": "error",
                **_build_adapter_metadata(adapter),
                "error": f"Adapter {adapter.adapter_type} does not support label listing",
            }

        # Get all labels and tickets
        labels = await adapter.list_labels()
        label_names = [
            label.get("name", "") if isinstance(label, dict) else str(label)
            for label in labels
        ]

        # Get tickets for usage analysis.
        # NOTE(review): capped at 1000 — usage counts are approximate for
        # larger projects; confirm whether pagination is needed.
        tickets = await adapter.list(limit=1000)

        # Precompute per-label ticket counts in a single O(tickets) pass,
        # instead of rescanning every ticket for every label
        # (O(labels * tickets)).  set() preserves the original
        # membership-count semantics even if a ticket repeats a tag.
        tag_counts: dict[str, int] = {}
        for ticket in tickets:
            for tag in set(ticket.tags or []):
                tag_counts[tag] = tag_counts.get(tag, 0) + 1

        # Initialize report sections
        spelling_issues: list[dict[str, Any]] = []
        duplicate_groups: list[dict[str, Any]] = []
        unused_labels: list[dict[str, Any]] = []
        recommendations: list[dict[str, Any]] = []

        # 1. Spelling Issues Analysis
        if include_spelling:
            normalizer = LabelNormalizer()
            for label_name in label_names:
                # Check if label has known spelling correction.
                # HACK: relies on a private LabelNormalizer method — consider
                # exposing a public spelling-correction API instead.
                slug = label_name.lower().replace(" ", "-")
                normalized = normalizer._apply_spelling_correction(slug)
                if normalized != slug:
                    # Count affected tickets (precomputed lookup)
                    affected = tag_counts.get(label_name, 0)

                    spelling_issues.append(
                        {
                            "current": label_name,
                            "suggested": normalized,
                            "affected_tickets": affected,
                        }
                    )

                    recommendations.append(
                        {
                            "priority": "high" if affected > 5 else "medium",
                            "category": "spelling",
                            "action": f"Rename '{label_name}' to '{normalized}' (spelling correction)",
                            "affected_tickets": affected,
                            "command": f"label_rename(old_name='{label_name}', new_name='{normalized}')",
                        }
                    )

        # 2. Duplicate Labels Analysis
        if include_duplicates:
            deduplicator = LabelDeduplicator()
            consolidations = deduplicator.suggest_consolidation(
                label_names, threshold=0.85
            )

            for canonical, variants in consolidations.items():
                # Usage counts for canonical and each variant (precomputed)
                canonical_count = tag_counts.get(canonical, 0)
                variant_counts = {v: tag_counts.get(v, 0) for v in variants}

                duplicate_groups.append(
                    {
                        "canonical": canonical,
                        "variants": variants,
                        "canonical_usage": canonical_count,
                        "variant_usage": variant_counts,
                    }
                )

                # Add recommendations for each variant
                for variant in variants:
                    affected = variant_counts[variant]
                    recommendations.append(
                        {
                            "priority": "high" if affected > 3 else "low",
                            "category": "duplicate",
                            "action": f"Merge '{variant}' into '{canonical}'",
                            "affected_tickets": affected,
                            "command": f"label_merge(source_label='{variant}', target_label='{canonical}')",
                        }
                    )

        # 3. Unused Labels Analysis
        if include_unused:
            unused_labels = [
                {"name": name, "usage_count": 0}
                for name in label_names
                if tag_counts.get(name, 0) == 0
            ]

            if unused_labels:
                recommendations.append(
                    {
                        "priority": "low",
                        "category": "unused",
                        "action": f"Review {len(unused_labels)} unused labels for deletion",
                        "affected_tickets": 0,
                        "labels": [str(lbl["name"]) for lbl in unused_labels[:10]],
                    }
                )

        # Sort recommendations by priority (high -> medium -> low -> unknown)
        priority_order: dict[str, int] = {"high": 0, "medium": 1, "low": 2}
        recommendations.sort(key=lambda x: priority_order.get(str(x["priority"]), 3))

        # Build summary
        summary: dict[str, Any] = {
            "total_labels": len(label_names),
            "spelling_issues": len(spelling_issues),
            "duplicate_groups": len(duplicate_groups),
            "unused_labels": len(unused_labels),
            "total_recommendations": len(recommendations),
        }

        # Calculate potential consolidation: every duplicate variant plus
        # every misspelled label could be folded into an existing label.
        consolidation_potential = sum(
            (
                len(list(grp["variants"]))
                if isinstance(grp["variants"], (list, tuple))
                else 0
            )
            for grp in duplicate_groups
        ) + len(spelling_issues)

        if consolidation_potential > 0:
            summary["estimated_cleanup_savings"] = (
                f"{consolidation_potential} labels can be consolidated"
            )

        return {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "summary": summary,
            "spelling_issues": spelling_issues if include_spelling else None,
            "duplicate_groups": duplicate_groups if include_duplicates else None,
            "unused_labels": unused_labels if include_unused else None,
            "recommendations": recommendations,
        }

    except Exception as e:
        error_response = {
            "status": "error",
            "error": f"Failed to generate cleanup report: {str(e)}",
        }
        # Best-effort: attach adapter metadata if the adapter is reachable;
        # swallow secondary failures so the original error is reported.
        try:
            adapter = get_adapter()
            error_response.update(_build_adapter_metadata(adapter))
        except Exception:
            pass
        return error_response
|