mcp-ticketer 0.4.11__py3-none-any.whl → 2.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-ticketer might be problematic; see the registry's security advisory for more details.
- mcp_ticketer/__init__.py +10 -10
- mcp_ticketer/__version__.py +3 -3
- mcp_ticketer/adapters/__init__.py +2 -0
- mcp_ticketer/adapters/aitrackdown.py +394 -9
- mcp_ticketer/adapters/asana/__init__.py +15 -0
- mcp_ticketer/adapters/asana/adapter.py +1416 -0
- mcp_ticketer/adapters/asana/client.py +292 -0
- mcp_ticketer/adapters/asana/mappers.py +348 -0
- mcp_ticketer/adapters/asana/types.py +146 -0
- mcp_ticketer/adapters/github.py +836 -105
- mcp_ticketer/adapters/hybrid.py +47 -5
- mcp_ticketer/adapters/jira.py +772 -1
- mcp_ticketer/adapters/linear/adapter.py +2293 -108
- mcp_ticketer/adapters/linear/client.py +146 -12
- mcp_ticketer/adapters/linear/mappers.py +105 -11
- mcp_ticketer/adapters/linear/queries.py +168 -1
- mcp_ticketer/adapters/linear/types.py +80 -4
- mcp_ticketer/analysis/__init__.py +56 -0
- mcp_ticketer/analysis/dependency_graph.py +255 -0
- mcp_ticketer/analysis/health_assessment.py +304 -0
- mcp_ticketer/analysis/orphaned.py +218 -0
- mcp_ticketer/analysis/project_status.py +594 -0
- mcp_ticketer/analysis/similarity.py +224 -0
- mcp_ticketer/analysis/staleness.py +266 -0
- mcp_ticketer/automation/__init__.py +11 -0
- mcp_ticketer/automation/project_updates.py +378 -0
- mcp_ticketer/cache/memory.py +3 -3
- mcp_ticketer/cli/adapter_diagnostics.py +4 -2
- mcp_ticketer/cli/auggie_configure.py +18 -6
- mcp_ticketer/cli/codex_configure.py +175 -60
- mcp_ticketer/cli/configure.py +884 -146
- mcp_ticketer/cli/cursor_configure.py +314 -0
- mcp_ticketer/cli/diagnostics.py +31 -28
- mcp_ticketer/cli/discover.py +293 -21
- mcp_ticketer/cli/gemini_configure.py +18 -6
- mcp_ticketer/cli/init_command.py +880 -0
- mcp_ticketer/cli/instruction_commands.py +435 -0
- mcp_ticketer/cli/linear_commands.py +99 -15
- mcp_ticketer/cli/main.py +109 -2055
- mcp_ticketer/cli/mcp_configure.py +673 -99
- mcp_ticketer/cli/mcp_server_commands.py +415 -0
- mcp_ticketer/cli/migrate_config.py +12 -8
- mcp_ticketer/cli/platform_commands.py +6 -6
- mcp_ticketer/cli/platform_detection.py +477 -0
- mcp_ticketer/cli/platform_installer.py +536 -0
- mcp_ticketer/cli/project_update_commands.py +350 -0
- mcp_ticketer/cli/queue_commands.py +15 -15
- mcp_ticketer/cli/setup_command.py +639 -0
- mcp_ticketer/cli/simple_health.py +13 -11
- mcp_ticketer/cli/ticket_commands.py +277 -36
- mcp_ticketer/cli/update_checker.py +313 -0
- mcp_ticketer/cli/utils.py +45 -41
- mcp_ticketer/core/__init__.py +35 -1
- mcp_ticketer/core/adapter.py +170 -5
- mcp_ticketer/core/config.py +38 -31
- mcp_ticketer/core/env_discovery.py +33 -3
- mcp_ticketer/core/env_loader.py +7 -6
- mcp_ticketer/core/exceptions.py +10 -4
- mcp_ticketer/core/http_client.py +10 -10
- mcp_ticketer/core/instructions.py +405 -0
- mcp_ticketer/core/label_manager.py +732 -0
- mcp_ticketer/core/mappers.py +32 -20
- mcp_ticketer/core/models.py +136 -1
- mcp_ticketer/core/onepassword_secrets.py +379 -0
- mcp_ticketer/core/priority_matcher.py +463 -0
- mcp_ticketer/core/project_config.py +148 -14
- mcp_ticketer/core/registry.py +1 -1
- mcp_ticketer/core/session_state.py +171 -0
- mcp_ticketer/core/state_matcher.py +592 -0
- mcp_ticketer/core/url_parser.py +425 -0
- mcp_ticketer/core/validators.py +69 -0
- mcp_ticketer/defaults/ticket_instructions.md +644 -0
- mcp_ticketer/mcp/__init__.py +2 -2
- mcp_ticketer/mcp/server/__init__.py +2 -2
- mcp_ticketer/mcp/server/diagnostic_helper.py +175 -0
- mcp_ticketer/mcp/server/main.py +187 -93
- mcp_ticketer/mcp/server/routing.py +655 -0
- mcp_ticketer/mcp/server/server_sdk.py +58 -0
- mcp_ticketer/mcp/server/tools/__init__.py +37 -9
- mcp_ticketer/mcp/server/tools/analysis_tools.py +854 -0
- mcp_ticketer/mcp/server/tools/attachment_tools.py +65 -20
- mcp_ticketer/mcp/server/tools/bulk_tools.py +259 -202
- mcp_ticketer/mcp/server/tools/comment_tools.py +74 -12
- mcp_ticketer/mcp/server/tools/config_tools.py +1429 -0
- mcp_ticketer/mcp/server/tools/diagnostic_tools.py +211 -0
- mcp_ticketer/mcp/server/tools/hierarchy_tools.py +878 -319
- mcp_ticketer/mcp/server/tools/instruction_tools.py +295 -0
- mcp_ticketer/mcp/server/tools/label_tools.py +942 -0
- mcp_ticketer/mcp/server/tools/pr_tools.py +3 -7
- mcp_ticketer/mcp/server/tools/project_status_tools.py +158 -0
- mcp_ticketer/mcp/server/tools/project_update_tools.py +473 -0
- mcp_ticketer/mcp/server/tools/search_tools.py +180 -97
- mcp_ticketer/mcp/server/tools/session_tools.py +308 -0
- mcp_ticketer/mcp/server/tools/ticket_tools.py +1182 -82
- mcp_ticketer/mcp/server/tools/user_ticket_tools.py +364 -0
- mcp_ticketer/queue/health_monitor.py +1 -0
- mcp_ticketer/queue/manager.py +4 -4
- mcp_ticketer/queue/queue.py +3 -3
- mcp_ticketer/queue/run_worker.py +1 -1
- mcp_ticketer/queue/ticket_registry.py +2 -2
- mcp_ticketer/queue/worker.py +15 -13
- mcp_ticketer/utils/__init__.py +5 -0
- mcp_ticketer/utils/token_utils.py +246 -0
- mcp_ticketer-2.0.1.dist-info/METADATA +1366 -0
- mcp_ticketer-2.0.1.dist-info/RECORD +122 -0
- mcp_ticketer-0.4.11.dist-info/METADATA +0 -496
- mcp_ticketer-0.4.11.dist-info/RECORD +0 -77
- {mcp_ticketer-0.4.11.dist-info → mcp_ticketer-2.0.1.dist-info}/WHEEL +0 -0
- {mcp_ticketer-0.4.11.dist-info → mcp_ticketer-2.0.1.dist-info}/entry_points.txt +0 -0
- {mcp_ticketer-0.4.11.dist-info → mcp_ticketer-2.0.1.dist-info}/licenses/LICENSE +0 -0
- {mcp_ticketer-0.4.11.dist-info → mcp_ticketer-2.0.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,942 @@
|
|
|
1
|
+
"""Label management tools for MCP.
|
|
2
|
+
|
|
3
|
+
This module provides MCP tools for label normalization, deduplication, merging,
|
|
4
|
+
and cleanup operations across ticket systems.
|
|
5
|
+
|
|
6
|
+
Features:
|
|
7
|
+
- label: Unified interface for all label operations (list, normalize, merge, etc.)
|
|
8
|
+
- label_list: List labels from adapters (deprecated, use label(action="list"))
|
|
9
|
+
- label_normalize: Normalize label names (deprecated, use label(action="normalize"))
|
|
10
|
+
- label_find_duplicates: Find duplicate labels (deprecated, use label(action="find_duplicates"))
|
|
11
|
+
- label_suggest_merge: Preview merge operation (deprecated, use label(action="suggest_merge"))
|
|
12
|
+
- label_merge: Merge labels (deprecated, use label(action="merge"))
|
|
13
|
+
- label_rename: Rename labels (deprecated, use label(action="rename"))
|
|
14
|
+
- label_cleanup_report: Generate cleanup report (deprecated, use label(action="cleanup_report"))
|
|
15
|
+
|
|
16
|
+
All tools follow the MCP response pattern:
|
|
17
|
+
{
|
|
18
|
+
"status": "completed" | "error",
|
|
19
|
+
"adapter": "adapter_type",
|
|
20
|
+
"adapter_name": "Adapter Display Name",
|
|
21
|
+
... tool-specific data ...
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
"""
|
|
25
|
+
|
|
26
|
+
import logging
|
|
27
|
+
import warnings
|
|
28
|
+
from typing import Any
|
|
29
|
+
|
|
30
|
+
from ....core.label_manager import CasingStrategy, LabelDeduplicator, LabelNormalizer
|
|
31
|
+
from ....core.models import SearchQuery
|
|
32
|
+
from ....utils.token_utils import estimate_json_tokens
|
|
33
|
+
from ..server_sdk import get_adapter, get_router, has_router, mcp
|
|
34
|
+
|
|
35
|
+
logger = logging.getLogger(__name__)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _build_adapter_metadata(adapter: Any) -> dict[str, Any]:
|
|
39
|
+
"""Build adapter metadata for MCP responses.
|
|
40
|
+
|
|
41
|
+
Args:
|
|
42
|
+
----
|
|
43
|
+
adapter: The adapter that handled the operation
|
|
44
|
+
|
|
45
|
+
Returns:
|
|
46
|
+
-------
|
|
47
|
+
Dictionary with adapter metadata fields
|
|
48
|
+
|
|
49
|
+
"""
|
|
50
|
+
return {
|
|
51
|
+
"adapter": adapter.adapter_type,
|
|
52
|
+
"adapter_name": adapter.adapter_display_name,
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@mcp.tool()
|
|
57
|
+
async def label(
|
|
58
|
+
action: str,
|
|
59
|
+
adapter_name: str | None = None,
|
|
60
|
+
include_usage_count: bool = False,
|
|
61
|
+
limit: int = 100,
|
|
62
|
+
offset: int = 0,
|
|
63
|
+
label_name: str | None = None,
|
|
64
|
+
casing: str = "lowercase",
|
|
65
|
+
threshold: float = 0.85,
|
|
66
|
+
source_label: str | None = None,
|
|
67
|
+
target_label: str | None = None,
|
|
68
|
+
update_tickets: bool = True,
|
|
69
|
+
dry_run: bool = False,
|
|
70
|
+
old_name: str | None = None,
|
|
71
|
+
new_name: str | None = None,
|
|
72
|
+
include_spelling: bool = True,
|
|
73
|
+
include_duplicates: bool = True,
|
|
74
|
+
include_unused: bool = True,
|
|
75
|
+
) -> dict[str, Any]:
|
|
76
|
+
"""Unified label management tool with action-based routing.
|
|
77
|
+
|
|
78
|
+
This tool consolidates all label operations into a single interface:
|
|
79
|
+
- list: List all available labels
|
|
80
|
+
- normalize: Normalize label name with casing strategy
|
|
81
|
+
- find_duplicates: Find duplicate/similar labels
|
|
82
|
+
- suggest_merge: Preview label merge operation
|
|
83
|
+
- merge: Merge source label into target
|
|
84
|
+
- rename: Rename label (alias for merge)
|
|
85
|
+
- cleanup_report: Generate comprehensive cleanup report
|
|
86
|
+
|
|
87
|
+
Args:
|
|
88
|
+
action: Operation to perform. Valid values:
|
|
89
|
+
- "list": List labels from adapter
|
|
90
|
+
- "normalize": Normalize label name
|
|
91
|
+
- "find_duplicates": Find duplicate/similar labels
|
|
92
|
+
- "suggest_merge": Preview merge operation
|
|
93
|
+
- "merge": Merge labels across tickets
|
|
94
|
+
- "rename": Rename label (semantic alias for merge)
|
|
95
|
+
- "cleanup_report": Generate cleanup report
|
|
96
|
+
|
|
97
|
+
# Parameters for "list" action
|
|
98
|
+
adapter_name: Optional adapter name (for multi-adapter setups)
|
|
99
|
+
include_usage_count: Include usage statistics (default: False)
|
|
100
|
+
limit: Maximum results (default: 100, max: 500)
|
|
101
|
+
offset: Pagination offset (default: 0)
|
|
102
|
+
|
|
103
|
+
# Parameters for "normalize" action
|
|
104
|
+
label_name: Label name to normalize (required for normalize)
|
|
105
|
+
casing: Casing strategy - lowercase, titlecase, uppercase, kebab-case, snake_case (default: "lowercase")
|
|
106
|
+
|
|
107
|
+
# Parameters for "find_duplicates" action
|
|
108
|
+
threshold: Similarity threshold 0.0-1.0 (default: 0.85)
|
|
109
|
+
# limit also used here (default: 50)
|
|
110
|
+
|
|
111
|
+
# Parameters for "suggest_merge" action
|
|
112
|
+
source_label: Source label to merge from (required for suggest_merge, merge)
|
|
113
|
+
target_label: Target label to merge into (required for suggest_merge, merge)
|
|
114
|
+
|
|
115
|
+
# Parameters for "merge" action
|
|
116
|
+
# source_label, target_label (required)
|
|
117
|
+
update_tickets: Actually update tickets (default: True)
|
|
118
|
+
dry_run: Preview changes without applying (default: False)
|
|
119
|
+
|
|
120
|
+
# Parameters for "rename" action
|
|
121
|
+
old_name: Current label name (required for rename)
|
|
122
|
+
new_name: New label name (required for rename)
|
|
123
|
+
# update_tickets also used here
|
|
124
|
+
|
|
125
|
+
# Parameters for "cleanup_report" action
|
|
126
|
+
include_spelling: Include spelling analysis (default: True)
|
|
127
|
+
include_duplicates: Include duplicate analysis (default: True)
|
|
128
|
+
include_unused: Include unused label analysis (default: True)
|
|
129
|
+
|
|
130
|
+
Returns:
|
|
131
|
+
Results specific to action with status and relevant data
|
|
132
|
+
|
|
133
|
+
Examples:
|
|
134
|
+
# List labels
|
|
135
|
+
label(action="list", limit=50)
|
|
136
|
+
|
|
137
|
+
# Normalize label
|
|
138
|
+
label(action="normalize", label_name="Bug Report", casing="kebab-case")
|
|
139
|
+
|
|
140
|
+
# Find duplicates
|
|
141
|
+
label(action="find_duplicates", threshold=0.9, limit=20)
|
|
142
|
+
|
|
143
|
+
# Preview merge
|
|
144
|
+
label(action="suggest_merge", source_label="bug", target_label="bugfix")
|
|
145
|
+
|
|
146
|
+
# Merge labels
|
|
147
|
+
label(action="merge", source_label="bug", target_label="bugfix")
|
|
148
|
+
|
|
149
|
+
# Rename label
|
|
150
|
+
label(action="rename", old_name="feture", new_name="feature")
|
|
151
|
+
|
|
152
|
+
# Generate cleanup report
|
|
153
|
+
label(action="cleanup_report", include_spelling=True)
|
|
154
|
+
|
|
155
|
+
Migration from old tools:
|
|
156
|
+
- label_list(...) → label(action="list", ...)
|
|
157
|
+
- label_normalize(...) → label(action="normalize", ...)
|
|
158
|
+
- label_find_duplicates(...) → label(action="find_duplicates", ...)
|
|
159
|
+
- label_suggest_merge(...) → label(action="suggest_merge", ...)
|
|
160
|
+
- label_merge(...) → label(action="merge", ...)
|
|
161
|
+
- label_rename(...) → label(action="rename", ...)
|
|
162
|
+
- label_cleanup_report(...) → label(action="cleanup_report", ...)
|
|
163
|
+
|
|
164
|
+
See: docs/mcp-api-reference.md for detailed response formats
|
|
165
|
+
"""
|
|
166
|
+
action_lower = action.lower()
|
|
167
|
+
|
|
168
|
+
# Route to appropriate handler based on action
|
|
169
|
+
if action_lower == "list":
|
|
170
|
+
return await label_list(
|
|
171
|
+
adapter_name=adapter_name,
|
|
172
|
+
include_usage_count=include_usage_count,
|
|
173
|
+
limit=limit,
|
|
174
|
+
offset=offset,
|
|
175
|
+
)
|
|
176
|
+
elif action_lower == "normalize":
|
|
177
|
+
if label_name is None:
|
|
178
|
+
return {
|
|
179
|
+
"status": "error",
|
|
180
|
+
"error": "label_name is required for normalize action",
|
|
181
|
+
}
|
|
182
|
+
return await label_normalize(label_name=label_name, casing=casing)
|
|
183
|
+
elif action_lower == "find_duplicates":
|
|
184
|
+
return await label_find_duplicates(threshold=threshold, limit=limit)
|
|
185
|
+
elif action_lower == "suggest_merge":
|
|
186
|
+
if source_label is None or target_label is None:
|
|
187
|
+
return {
|
|
188
|
+
"status": "error",
|
|
189
|
+
"error": "source_label and target_label are required for suggest_merge action",
|
|
190
|
+
}
|
|
191
|
+
return await label_suggest_merge(
|
|
192
|
+
source_label=source_label, target_label=target_label
|
|
193
|
+
)
|
|
194
|
+
elif action_lower == "merge":
|
|
195
|
+
if source_label is None or target_label is None:
|
|
196
|
+
return {
|
|
197
|
+
"status": "error",
|
|
198
|
+
"error": "source_label and target_label are required for merge action",
|
|
199
|
+
}
|
|
200
|
+
return await label_merge(
|
|
201
|
+
source_label=source_label,
|
|
202
|
+
target_label=target_label,
|
|
203
|
+
update_tickets=update_tickets,
|
|
204
|
+
dry_run=dry_run,
|
|
205
|
+
)
|
|
206
|
+
elif action_lower == "rename":
|
|
207
|
+
if old_name is None or new_name is None:
|
|
208
|
+
return {
|
|
209
|
+
"status": "error",
|
|
210
|
+
"error": "old_name and new_name are required for rename action",
|
|
211
|
+
}
|
|
212
|
+
return await label_rename(
|
|
213
|
+
old_name=old_name, new_name=new_name, update_tickets=update_tickets
|
|
214
|
+
)
|
|
215
|
+
elif action_lower == "cleanup_report":
|
|
216
|
+
return await label_cleanup_report(
|
|
217
|
+
include_spelling=include_spelling,
|
|
218
|
+
include_duplicates=include_duplicates,
|
|
219
|
+
include_unused=include_unused,
|
|
220
|
+
)
|
|
221
|
+
else:
|
|
222
|
+
valid_actions = [
|
|
223
|
+
"list",
|
|
224
|
+
"normalize",
|
|
225
|
+
"find_duplicates",
|
|
226
|
+
"suggest_merge",
|
|
227
|
+
"merge",
|
|
228
|
+
"rename",
|
|
229
|
+
"cleanup_report",
|
|
230
|
+
]
|
|
231
|
+
return {
|
|
232
|
+
"status": "error",
|
|
233
|
+
"error": f"Invalid action '{action}'. Must be one of: {', '.join(valid_actions)}",
|
|
234
|
+
"valid_actions": valid_actions,
|
|
235
|
+
"hint": "Use label(action='list'|'normalize'|'find_duplicates'|'suggest_merge'|'merge'|'rename'|'cleanup_report', ...)",
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
async def label_list(
|
|
240
|
+
adapter_name: str | None = None,
|
|
241
|
+
include_usage_count: bool = False,
|
|
242
|
+
limit: int = 100,
|
|
243
|
+
offset: int = 0,
|
|
244
|
+
) -> dict[str, Any]:
|
|
245
|
+
"""List all available labels/tags from the ticket system.
|
|
246
|
+
|
|
247
|
+
.. deprecated::
|
|
248
|
+
Use label(action="list", ...) instead.
|
|
249
|
+
This tool will be removed in a future version.
|
|
250
|
+
|
|
251
|
+
Args: adapter_name (optional adapter), include_usage_count (default: False), limit (max: 500), offset (pagination)
|
|
252
|
+
Returns: LabelListResponse with labels array, count, pagination, estimated_tokens
|
|
253
|
+
See: docs/mcp-api-reference.md#label-response-format
|
|
254
|
+
"""
|
|
255
|
+
warnings.warn(
|
|
256
|
+
"label_list is deprecated. Use label(action='list', ...) instead.",
|
|
257
|
+
DeprecationWarning,
|
|
258
|
+
stacklevel=2,
|
|
259
|
+
)
|
|
260
|
+
try:
|
|
261
|
+
# Validate and cap limits
|
|
262
|
+
if limit > 500:
|
|
263
|
+
logger.warning(f"Limit {limit} exceeds maximum 500, using 500")
|
|
264
|
+
limit = 500
|
|
265
|
+
|
|
266
|
+
if offset < 0:
|
|
267
|
+
logger.warning(f"Invalid offset {offset}, using 0")
|
|
268
|
+
offset = 0
|
|
269
|
+
|
|
270
|
+
# Warn about usage_count with large limits
|
|
271
|
+
if include_usage_count and limit > 100:
|
|
272
|
+
logger.warning(
|
|
273
|
+
f"Calculating usage counts for {limit} labels may be slow and use significant tokens. "
|
|
274
|
+
f"Consider reducing limit or omitting usage_count."
|
|
275
|
+
)
|
|
276
|
+
|
|
277
|
+
# Get adapter (default or specified)
|
|
278
|
+
if adapter_name:
|
|
279
|
+
if not has_router():
|
|
280
|
+
return {
|
|
281
|
+
"status": "error",
|
|
282
|
+
"error": f"Cannot use adapter_name='{adapter_name}' - multi-adapter routing not configured",
|
|
283
|
+
}
|
|
284
|
+
router = get_router()
|
|
285
|
+
adapter = router._get_adapter(adapter_name)
|
|
286
|
+
else:
|
|
287
|
+
adapter = get_adapter()
|
|
288
|
+
|
|
289
|
+
# Check if adapter supports list_labels
|
|
290
|
+
if not hasattr(adapter, "list_labels"):
|
|
291
|
+
return {
|
|
292
|
+
"status": "error",
|
|
293
|
+
**_build_adapter_metadata(adapter),
|
|
294
|
+
"error": f"Adapter {adapter.adapter_type} does not support label listing",
|
|
295
|
+
}
|
|
296
|
+
|
|
297
|
+
# Get ALL labels from adapter (adapters don't support pagination for labels)
|
|
298
|
+
all_labels = await adapter.list_labels()
|
|
299
|
+
total_labels = len(all_labels)
|
|
300
|
+
|
|
301
|
+
# Add usage counts if requested (before pagination)
|
|
302
|
+
if include_usage_count:
|
|
303
|
+
# Count label usage across all tickets
|
|
304
|
+
try:
|
|
305
|
+
tickets = await adapter.list(
|
|
306
|
+
limit=1000
|
|
307
|
+
) # Large limit to get all tickets
|
|
308
|
+
label_counts: dict[str, int] = {}
|
|
309
|
+
|
|
310
|
+
for ticket in tickets:
|
|
311
|
+
ticket_labels = ticket.tags or []
|
|
312
|
+
for label_name in ticket_labels:
|
|
313
|
+
label_counts[label_name] = label_counts.get(label_name, 0) + 1
|
|
314
|
+
|
|
315
|
+
# Enrich labels with usage counts
|
|
316
|
+
for label in all_labels:
|
|
317
|
+
label_name = label.get("name", "")
|
|
318
|
+
label["usage_count"] = label_counts.get(label_name, 0)
|
|
319
|
+
|
|
320
|
+
except Exception as e:
|
|
321
|
+
logger.warning(f"Failed to calculate usage counts: {e}")
|
|
322
|
+
# Continue without usage counts rather than failing
|
|
323
|
+
|
|
324
|
+
# Apply manual pagination to labels
|
|
325
|
+
start_idx = offset
|
|
326
|
+
end_idx = offset + limit
|
|
327
|
+
paginated_labels = all_labels[start_idx:end_idx]
|
|
328
|
+
has_more = end_idx < total_labels
|
|
329
|
+
|
|
330
|
+
# Build response
|
|
331
|
+
response = {
|
|
332
|
+
"status": "completed",
|
|
333
|
+
**_build_adapter_metadata(adapter),
|
|
334
|
+
"labels": paginated_labels,
|
|
335
|
+
"total_labels": total_labels,
|
|
336
|
+
"count": len(paginated_labels),
|
|
337
|
+
"limit": limit,
|
|
338
|
+
"offset": offset,
|
|
339
|
+
"has_more": has_more,
|
|
340
|
+
}
|
|
341
|
+
|
|
342
|
+
# Estimate tokens and warn if approaching limit
|
|
343
|
+
estimated_tokens = estimate_json_tokens(response)
|
|
344
|
+
response["estimated_tokens"] = estimated_tokens
|
|
345
|
+
|
|
346
|
+
if estimated_tokens > 15_000:
|
|
347
|
+
logger.warning(
|
|
348
|
+
f"Label list response contains ~{estimated_tokens} tokens. "
|
|
349
|
+
f"Consider reducing limit or omitting usage_count."
|
|
350
|
+
)
|
|
351
|
+
response["token_warning"] = (
|
|
352
|
+
f"Response approaching token limit ({estimated_tokens} tokens). "
|
|
353
|
+
f"Use smaller limit or omit usage_count."
|
|
354
|
+
)
|
|
355
|
+
|
|
356
|
+
return response
|
|
357
|
+
|
|
358
|
+
except Exception as e:
|
|
359
|
+
error_response = {
|
|
360
|
+
"status": "error",
|
|
361
|
+
"error": f"Failed to list labels: {str(e)}",
|
|
362
|
+
}
|
|
363
|
+
try:
|
|
364
|
+
adapter = get_adapter()
|
|
365
|
+
error_response.update(_build_adapter_metadata(adapter))
|
|
366
|
+
except Exception:
|
|
367
|
+
pass
|
|
368
|
+
return error_response
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
async def label_normalize(
    label_name: str,
    casing: str = "lowercase",
) -> dict[str, Any]:
    """Normalize label name using casing strategy (lowercase, titlecase, uppercase, kebab-case, snake_case).

    .. deprecated::
        Use label(action="normalize", ...) instead.
        This tool will be removed in a future version.

    Args: label_name (required), casing (default: "lowercase")
    Returns: NormalizationResponse with original, normalized, casing, changed
    See: docs/mcp-api-reference.md#label-normalization
    """
    warnings.warn(
        "label_normalize is deprecated. Use label(action='normalize', ...) instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    try:
        # Guard: reject unknown casing strategies before doing any work.
        try:
            CasingStrategy(casing)
        except ValueError:
            valid_options = ", ".join(c.value for c in CasingStrategy)
            return {
                "status": "error",
                "error": f"Invalid casing strategy '{casing}'. Valid options: {valid_options}",
            }

        # Apply the requested strategy.
        normalized = LabelNormalizer(casing=casing).normalize(label_name)

        return {
            "status": "completed",
            "original": label_name,
            "normalized": normalized,
            "casing": casing,
            "changed": normalized != label_name,
        }

    except Exception as e:
        return {
            "status": "error",
            "error": f"Failed to normalize label: {str(e)}",
        }
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
async def label_find_duplicates(
    threshold: float = 0.85,
    limit: int = 50,
) -> dict[str, Any]:
    """Find duplicate/similar labels using fuzzy matching (case, spelling, plurals).

    .. deprecated::
        Use label(action="find_duplicates", ...) instead.
        This tool will be removed in a future version.

    Args: threshold (0.0-1.0, default: 0.85), limit (default: 50)
    Returns: DuplicateResponse with duplicates array (similarity scores, recommendations), total_duplicates
    See: docs/mcp-api-reference.md#label-similarity-scoring
    """
    warnings.warn(
        "label_find_duplicates is deprecated. Use label(action='find_duplicates', ...) instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    try:
        adapter = get_adapter()

        # Bail out early for adapters without label support.
        if not hasattr(adapter, "list_labels"):
            return {
                "status": "error",
                **_build_adapter_metadata(adapter),
                "error": f"Adapter {adapter.adapter_type} does not support label listing",
            }

        # Collect plain label names (entries may be dicts or raw strings).
        raw_labels = await adapter.list_labels()
        label_names = [
            entry.get("name", "") if isinstance(entry, dict) else str(entry)
            for entry in raw_labels
        ]

        # Run fuzzy deduplication over the full name list.
        duplicates = LabelDeduplicator().find_duplicates(
            label_names, threshold=threshold
        )

        # Attach a human-readable recommendation to each pair, capped at
        # `limit` entries for response size.
        formatted_duplicates = []
        for label1, label2, similarity in duplicates[:limit]:
            if similarity == 1.0:
                recommendation = (
                    f"Merge '{label2}' into '{label1}' (exact match, case difference)"
                )
            elif similarity >= 0.95:
                recommendation = (
                    f"Merge '{label2}' into '{label1}' (likely typo or synonym)"
                )
            elif similarity >= 0.85:
                recommendation = f"Review: '{label1}' and '{label2}' are very similar"
            else:
                recommendation = f"Review: '{label1}' and '{label2}' may be duplicates"

            formatted_duplicates.append(
                {
                    "label1": label1,
                    "label2": label2,
                    "similarity": round(similarity, 3),
                    "recommendation": recommendation,
                }
            )

        return {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "duplicates": formatted_duplicates,
            "total_duplicates": len(duplicates),
            "threshold": threshold,
        }

    except Exception as e:
        error_response = {
            "status": "error",
            "error": f"Failed to find duplicates: {str(e)}",
        }
        try:
            error_response.update(_build_adapter_metadata(get_adapter()))
        except Exception:
            pass
        return error_response
|
|
506
|
+
|
|
507
|
+
|
|
508
|
+
async def label_suggest_merge(
    source_label: str,
    target_label: str,
) -> dict[str, Any]:
    """Preview label merge operation (dry run, shows affected tickets).

    .. deprecated::
        Use label(action="suggest_merge", ...) instead.
        This tool will be removed in a future version.

    Args: source_label (from), target_label (to)
    Returns: MergePreviewResponse with affected_tickets count, preview IDs (up to 10), warnings
    See: docs/mcp-api-reference.md#label-merge-preview
    """
    warnings.warn(
        "label_suggest_merge is deprecated. Use label(action='suggest_merge', ...) instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    try:
        adapter = get_adapter()

        # Locate every ticket carrying the source label. Prefer the adapter's
        # search API; fall back to filtering a full listing if search fails.
        try:
            matching = await adapter.search(
                SearchQuery(
                    query=f"label:{source_label}",
                    limit=1000,
                    state=None,
                    priority=None,
                    tags=None,
                    assignee=None,
                    project=None,
                    offset=0,
                )
            )
        except Exception:
            everything = await adapter.list(limit=1000)
            matching = [t for t in everything if source_label in (t.tags or [])]

        # Surface potential issues without failing the preview.
        warning = None
        if not matching:
            warning = f"No tickets found with label '{source_label}'"
        elif source_label == target_label:
            warning = "Source and target labels are identical - no changes needed"

        return {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "source_label": source_label,
            "target_label": target_label,
            "affected_tickets": len(matching),
            # Only the first 10 ticket IDs are included as a preview.
            "preview": [t.id for t in matching[:10]],
            "warning": warning,
        }

    except Exception as e:
        error_response = {
            "status": "error",
            "error": f"Failed to preview merge: {str(e)}",
        }
        try:
            error_response.update(_build_adapter_metadata(get_adapter()))
        except Exception:
            pass
        return error_response
|
|
580
|
+
|
|
581
|
+
|
|
582
|
+
async def label_merge(
    source_label: str,
    target_label: str,
    update_tickets: bool = True,
    dry_run: bool = False,
) -> dict[str, Any]:
    """Merge source label into target across all tickets (does NOT delete source definition).

    .. deprecated::
        Use label(action="merge", ...) instead.
        This tool will be removed in a future version.

    Args: source_label (from), target_label (to), update_tickets (default: True), dry_run (default: False)
    Returns: MergeResponse with tickets_updated, tickets_skipped, changes array (up to 20)
    See: docs/mcp-api-reference.md#label-merge-behavior
    """
    warnings.warn(
        "label_merge is deprecated. Use label(action='merge', ...) instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    try:
        adapter = get_adapter()

        # Validate inputs.
        if source_label == target_label:
            return {
                "status": "error",
                "error": "Source and target labels are identical - no merge needed",
            }

        # Find all tickets with the source label. Prefer the search API;
        # fall back to filtering a full listing if search is unavailable.
        try:
            tickets = await adapter.search(
                SearchQuery(
                    query=f"label:{source_label}",
                    limit=1000,
                    state=None,
                    priority=None,
                    tags=None,
                    assignee=None,
                    project=None,
                    offset=0,
                )
            )
        except Exception:
            all_tickets = await adapter.list(limit=1000)
            tickets = [t for t in all_tickets if source_label in (t.tags or [])]

        changes = []
        updated_count = 0
        skipped_count = 0

        for ticket in tickets:
            ticket_tags = list(ticket.tags or [])

            # Skip if the ticket already has the target and not the source.
            if target_label in ticket_tags and source_label not in ticket_tags:
                skipped_count += 1
                continue

            # Build new tag list: replace source with target (at most once)
            # and drop duplicate tags while preserving order.
            new_tags: list[str] = []
            replaced = False

            for tag in ticket_tags:
                if tag == source_label:
                    if target_label not in new_tags:
                        new_tags.append(target_label)
                    # Mark the source as handled even when the target is
                    # already present in new_tags; otherwise tickets carrying
                    # BOTH labels would be skipped below and keep the source.
                    replaced = True
                elif tag not in new_tags:
                    new_tags.append(tag)

            if not replaced:
                skipped_count += 1
                continue

            # Record the change.
            change_entry = {
                "ticket_id": ticket.id,
                "action": f"Replace '{source_label}' with '{target_label}'",
                "old_tags": ticket_tags,
                "new_tags": new_tags,
            }

            # Apply the update unless this is a preview.
            if update_tickets and not dry_run:
                try:
                    await adapter.update(ticket.id, {"tags": new_tags})
                    change_entry["status"] = "updated"
                    updated_count += 1
                except Exception as e:
                    change_entry["status"] = "failed"
                    change_entry["error"] = str(e)
            else:
                change_entry["status"] = "would_update"

            changes.append(change_entry)

        result = {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "source_label": source_label,
            "target_label": target_label,
            "dry_run": dry_run,
            "tickets_skipped": skipped_count,
        }

        if dry_run or not update_tickets:
            result["tickets_would_update"] = len(changes)
            result["tickets_updated"] = 0
        else:
            result["tickets_updated"] = updated_count

        # Limit changes to the first 20 to keep the response small.
        result["changes"] = changes[:20]
        if len(changes) > 20:
            result["changes_truncated"] = True
            result["total_changes"] = len(changes)

        return result

    except Exception as e:
        error_response = {
            "status": "error",
            "error": f"Failed to merge labels: {str(e)}",
        }
        try:
            adapter = get_adapter()
            error_response.update(_build_adapter_metadata(adapter))
        except Exception:
            pass
        return error_response
|
|
716
|
+
|
|
717
|
+
|
|
718
|
+
async def label_rename(
    old_name: str,
    new_name: str,
    update_tickets: bool = True,
) -> dict[str, Any]:
    """Rename a label everywhere it appears (thin semantic wrapper over label_merge).

    .. deprecated::
        Use label(action="rename", ...) instead.
        This tool will be removed in a future version.

    Args: old_name (current), new_name (replacement), update_tickets (default: True)
    Returns: RenameResponse with tickets_updated, old_name, new_name
    See: docs/mcp-api-reference.md#label-merge-behavior
    """
    warnings.warn(
        "label_rename is deprecated. Use label(action='rename', ...) instead.",
        DeprecationWarning,
        stacklevel=2,
    )

    # A rename is just a merge of the old label into the new one; never a dry run.
    merged: dict[str, Any] = await label_merge(
        source_label=old_name,
        target_label=new_name,
        update_tickets=update_tickets,
        dry_run=False,
    )

    # On success, swap the merge-flavored keys for rename terminology.
    if merged["status"] == "completed":
        merged.pop("source_label", None)
        merged.pop("target_label", None)
        merged["old_name"] = old_name
        merged["new_name"] = new_name

    return merged
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
async def label_cleanup_report(
    include_spelling: bool = True,
    include_duplicates: bool = True,
    include_unused: bool = True,
) -> dict[str, Any]:
    """Generate label cleanup report (spelling errors, duplicates, unused labels with recommendations).

    .. deprecated::
        Use label(action="cleanup_report", ...) instead.
        This tool will be removed in a future version.

    Args: include_spelling (default: True), include_duplicates (default: True), include_unused (default: True)
    Returns: CleanupReportResponse with summary, spelling_issues, duplicate_groups, unused_labels, recommendations (prioritized)
    See: docs/mcp-api-reference.md#label-cleanup-report
    """
    warnings.warn(
        "label_cleanup_report is deprecated. Use label(action='cleanup_report', ...) instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    try:
        adapter = get_adapter()

        # Check if adapter supports list_labels
        if not hasattr(adapter, "list_labels"):
            return {
                "status": "error",
                **_build_adapter_metadata(adapter),
                "error": f"Adapter {adapter.adapter_type} does not support label listing",
            }

        # Get all labels and tickets
        labels = await adapter.list_labels()
        # Labels may come back as dicts (use their "name") or plain values
        # (stringified); missing dict names degrade to "".
        label_names = [
            label.get("name", "") if isinstance(label, dict) else str(label)
            for label in labels
        ]

        # Get tickets for usage analysis
        # NOTE(review): usage counts are computed from at most 1000 tickets, so
        # "unused"/"affected" figures are approximate on larger workspaces.
        tickets = await adapter.list(limit=1000)

        # Initialize report sections
        spelling_issues: list[dict[str, Any]] = []
        duplicate_groups: list[dict[str, Any]] = []
        unused_labels: list[dict[str, Any]] = []
        recommendations: list[dict[str, Any]] = []

        # 1. Spelling Issues Analysis
        if include_spelling:
            normalizer = LabelNormalizer()
            for label_name in label_names:
                # Check if label has known spelling correction.
                # NOTE(review): reaches into a private method of LabelNormalizer;
                # labels are slugified (lowercase, spaces -> hyphens) before the
                # lookup, and a change in the slug signals a known misspelling.
                normalized = normalizer._apply_spelling_correction(
                    label_name.lower().replace(" ", "-")
                )
                if normalized != label_name.lower().replace(" ", "-"):
                    # Count affected tickets (exact-match against ticket tags)
                    affected = sum(1 for t in tickets if label_name in (t.tags or []))

                    spelling_issues.append(
                        {
                            "current": label_name,
                            "suggested": normalized,
                            "affected_tickets": affected,
                        }
                    )

                    # >5 affected tickets promotes the fix to high priority
                    recommendations.append(
                        {
                            "priority": "high" if affected > 5 else "medium",
                            "category": "spelling",
                            "action": f"Rename '{label_name}' to '{normalized}' (spelling correction)",
                            "affected_tickets": affected,
                            "command": f"label_rename(old_name='{label_name}', new_name='{normalized}')",
                        }
                    )

        # 2. Duplicate Labels Analysis
        if include_duplicates:
            deduplicator = LabelDeduplicator()
            # NOTE(review): presumably maps canonical label -> list of near-
            # duplicate variants at >= 0.85 similarity — confirm against
            # LabelDeduplicator.suggest_consolidation.
            consolidations = deduplicator.suggest_consolidation(
                label_names, threshold=0.85
            )

            for canonical, variants in consolidations.items():
                # Count tickets for each variant
                canonical_count = sum(1 for t in tickets if canonical in (t.tags or []))
                variant_counts = {
                    v: sum(1 for t in tickets if v in (t.tags or [])) for v in variants
                }

                duplicate_groups.append(
                    {
                        "canonical": canonical,
                        "variants": variants,
                        "canonical_usage": canonical_count,
                        "variant_usage": variant_counts,
                    }
                )

                # Add recommendations for each variant (>3 tickets -> high priority)
                for variant in variants:
                    affected = variant_counts[variant]
                    recommendations.append(
                        {
                            "priority": "high" if affected > 3 else "low",
                            "category": "duplicate",
                            "action": f"Merge '{variant}' into '{canonical}'",
                            "affected_tickets": affected,
                            "command": f"label_merge(source_label='{variant}', target_label='{canonical}')",
                        }
                    )

        # 3. Unused Labels Analysis
        if include_unused:
            # Tally exact-match tag usage over the sampled tickets.
            label_usage: dict[str, int] = dict.fromkeys(label_names, 0)
            for ticket in tickets:
                for tag in ticket.tags or []:
                    if tag in label_usage:
                        label_usage[tag] += 1

            unused_labels = [
                {"name": name, "usage_count": 0}
                for name, count in label_usage.items()
                if count == 0
            ]

            if unused_labels:
                # Single aggregate recommendation; preview capped at 10 names.
                recommendations.append(
                    {
                        "priority": "low",
                        "category": "unused",
                        "action": f"Review {len(unused_labels)} unused labels for deletion",
                        "affected_tickets": 0,
                        "labels": [str(lbl["name"]) for lbl in unused_labels[:10]],
                    }
                )

        # Sort recommendations by priority (unknown priorities sort last;
        # list.sort is stable, so append order is preserved within a tier).
        priority_order: dict[str, int] = {"high": 0, "medium": 1, "low": 2}
        recommendations.sort(key=lambda x: priority_order.get(str(x["priority"]), 3))

        # Build summary
        summary: dict[str, Any] = {
            "total_labels": len(label_names),
            "spelling_issues": len(spelling_issues),
            "duplicate_groups": len(duplicate_groups),
            "unused_labels": len(unused_labels),
            "total_recommendations": len(recommendations),
        }

        # Calculate potential consolidation: one label removable per duplicate
        # variant plus one per spelling fix.
        consolidation_potential = sum(
            (
                len(list(grp["variants"]))
                if isinstance(grp["variants"], list | tuple)
                else 0
            )
            for grp in duplicate_groups
        ) + len(spelling_issues)

        if consolidation_potential > 0:
            summary["estimated_cleanup_savings"] = (
                f"{consolidation_potential} labels can be consolidated"
            )

        # Sections that were not requested come back as None (not empty lists)
        # so callers can distinguish "skipped" from "analyzed, nothing found".
        return {
            "status": "completed",
            **_build_adapter_metadata(adapter),
            "summary": summary,
            "spelling_issues": spelling_issues if include_spelling else None,
            "duplicate_groups": duplicate_groups if include_duplicates else None,
            "unused_labels": unused_labels if include_unused else None,
            "recommendations": recommendations,
        }

    except Exception as e:
        # Report failure as a structured response instead of raising.
        error_response = {
            "status": "error",
            "error": f"Failed to generate cleanup report: {str(e)}",
        }
        # Best-effort: attach adapter metadata if an adapter is still reachable.
        try:
            adapter = get_adapter()
            error_response.update(_build_adapter_metadata(adapter))
        except Exception:
            pass
        return error_response
|