mcp-ticketer 0.2.0__py3-none-any.whl → 2.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (160)
  1. mcp_ticketer/__init__.py +10 -10
  2. mcp_ticketer/__version__.py +3 -3
  3. mcp_ticketer/_version_scm.py +1 -0
  4. mcp_ticketer/adapters/__init__.py +2 -0
  5. mcp_ticketer/adapters/aitrackdown.py +930 -52
  6. mcp_ticketer/adapters/asana/__init__.py +15 -0
  7. mcp_ticketer/adapters/asana/adapter.py +1537 -0
  8. mcp_ticketer/adapters/asana/client.py +292 -0
  9. mcp_ticketer/adapters/asana/mappers.py +348 -0
  10. mcp_ticketer/adapters/asana/types.py +146 -0
  11. mcp_ticketer/adapters/github/__init__.py +26 -0
  12. mcp_ticketer/adapters/github/adapter.py +3229 -0
  13. mcp_ticketer/adapters/github/client.py +335 -0
  14. mcp_ticketer/adapters/github/mappers.py +797 -0
  15. mcp_ticketer/adapters/github/queries.py +692 -0
  16. mcp_ticketer/adapters/github/types.py +460 -0
  17. mcp_ticketer/adapters/hybrid.py +58 -16
  18. mcp_ticketer/adapters/jira/__init__.py +35 -0
  19. mcp_ticketer/adapters/jira/adapter.py +1351 -0
  20. mcp_ticketer/adapters/jira/client.py +271 -0
  21. mcp_ticketer/adapters/jira/mappers.py +246 -0
  22. mcp_ticketer/adapters/jira/queries.py +216 -0
  23. mcp_ticketer/adapters/jira/types.py +304 -0
  24. mcp_ticketer/adapters/linear/__init__.py +1 -1
  25. mcp_ticketer/adapters/linear/adapter.py +3810 -462
  26. mcp_ticketer/adapters/linear/client.py +312 -69
  27. mcp_ticketer/adapters/linear/mappers.py +305 -85
  28. mcp_ticketer/adapters/linear/queries.py +317 -17
  29. mcp_ticketer/adapters/linear/types.py +187 -64
  30. mcp_ticketer/adapters/linear.py +2 -2
  31. mcp_ticketer/analysis/__init__.py +56 -0
  32. mcp_ticketer/analysis/dependency_graph.py +255 -0
  33. mcp_ticketer/analysis/health_assessment.py +304 -0
  34. mcp_ticketer/analysis/orphaned.py +218 -0
  35. mcp_ticketer/analysis/project_status.py +594 -0
  36. mcp_ticketer/analysis/similarity.py +224 -0
  37. mcp_ticketer/analysis/staleness.py +266 -0
  38. mcp_ticketer/automation/__init__.py +11 -0
  39. mcp_ticketer/automation/project_updates.py +378 -0
  40. mcp_ticketer/cache/memory.py +9 -8
  41. mcp_ticketer/cli/adapter_diagnostics.py +421 -0
  42. mcp_ticketer/cli/auggie_configure.py +116 -15
  43. mcp_ticketer/cli/codex_configure.py +274 -82
  44. mcp_ticketer/cli/configure.py +1323 -151
  45. mcp_ticketer/cli/cursor_configure.py +314 -0
  46. mcp_ticketer/cli/diagnostics.py +209 -114
  47. mcp_ticketer/cli/discover.py +297 -26
  48. mcp_ticketer/cli/gemini_configure.py +119 -26
  49. mcp_ticketer/cli/init_command.py +880 -0
  50. mcp_ticketer/cli/install_mcp_server.py +418 -0
  51. mcp_ticketer/cli/instruction_commands.py +435 -0
  52. mcp_ticketer/cli/linear_commands.py +256 -130
  53. mcp_ticketer/cli/main.py +140 -1284
  54. mcp_ticketer/cli/mcp_configure.py +1013 -100
  55. mcp_ticketer/cli/mcp_server_commands.py +415 -0
  56. mcp_ticketer/cli/migrate_config.py +12 -8
  57. mcp_ticketer/cli/platform_commands.py +123 -0
  58. mcp_ticketer/cli/platform_detection.py +477 -0
  59. mcp_ticketer/cli/platform_installer.py +545 -0
  60. mcp_ticketer/cli/project_update_commands.py +350 -0
  61. mcp_ticketer/cli/python_detection.py +126 -0
  62. mcp_ticketer/cli/queue_commands.py +15 -15
  63. mcp_ticketer/cli/setup_command.py +794 -0
  64. mcp_ticketer/cli/simple_health.py +84 -59
  65. mcp_ticketer/cli/ticket_commands.py +1375 -0
  66. mcp_ticketer/cli/update_checker.py +313 -0
  67. mcp_ticketer/cli/utils.py +195 -72
  68. mcp_ticketer/core/__init__.py +64 -1
  69. mcp_ticketer/core/adapter.py +618 -18
  70. mcp_ticketer/core/config.py +77 -68
  71. mcp_ticketer/core/env_discovery.py +75 -16
  72. mcp_ticketer/core/env_loader.py +121 -97
  73. mcp_ticketer/core/exceptions.py +32 -24
  74. mcp_ticketer/core/http_client.py +26 -26
  75. mcp_ticketer/core/instructions.py +405 -0
  76. mcp_ticketer/core/label_manager.py +732 -0
  77. mcp_ticketer/core/mappers.py +42 -30
  78. mcp_ticketer/core/milestone_manager.py +252 -0
  79. mcp_ticketer/core/models.py +566 -19
  80. mcp_ticketer/core/onepassword_secrets.py +379 -0
  81. mcp_ticketer/core/priority_matcher.py +463 -0
  82. mcp_ticketer/core/project_config.py +189 -49
  83. mcp_ticketer/core/project_utils.py +281 -0
  84. mcp_ticketer/core/project_validator.py +376 -0
  85. mcp_ticketer/core/registry.py +3 -3
  86. mcp_ticketer/core/session_state.py +176 -0
  87. mcp_ticketer/core/state_matcher.py +592 -0
  88. mcp_ticketer/core/url_parser.py +425 -0
  89. mcp_ticketer/core/validators.py +69 -0
  90. mcp_ticketer/defaults/ticket_instructions.md +644 -0
  91. mcp_ticketer/mcp/__init__.py +29 -1
  92. mcp_ticketer/mcp/__main__.py +60 -0
  93. mcp_ticketer/mcp/server/__init__.py +25 -0
  94. mcp_ticketer/mcp/server/__main__.py +60 -0
  95. mcp_ticketer/mcp/server/constants.py +58 -0
  96. mcp_ticketer/mcp/server/diagnostic_helper.py +175 -0
  97. mcp_ticketer/mcp/server/dto.py +195 -0
  98. mcp_ticketer/mcp/server/main.py +1343 -0
  99. mcp_ticketer/mcp/server/response_builder.py +206 -0
  100. mcp_ticketer/mcp/server/routing.py +723 -0
  101. mcp_ticketer/mcp/server/server_sdk.py +151 -0
  102. mcp_ticketer/mcp/server/tools/__init__.py +69 -0
  103. mcp_ticketer/mcp/server/tools/analysis_tools.py +854 -0
  104. mcp_ticketer/mcp/server/tools/attachment_tools.py +224 -0
  105. mcp_ticketer/mcp/server/tools/bulk_tools.py +330 -0
  106. mcp_ticketer/mcp/server/tools/comment_tools.py +152 -0
  107. mcp_ticketer/mcp/server/tools/config_tools.py +1564 -0
  108. mcp_ticketer/mcp/server/tools/diagnostic_tools.py +211 -0
  109. mcp_ticketer/mcp/server/tools/hierarchy_tools.py +942 -0
  110. mcp_ticketer/mcp/server/tools/instruction_tools.py +295 -0
  111. mcp_ticketer/mcp/server/tools/label_tools.py +942 -0
  112. mcp_ticketer/mcp/server/tools/milestone_tools.py +338 -0
  113. mcp_ticketer/mcp/server/tools/pr_tools.py +150 -0
  114. mcp_ticketer/mcp/server/tools/project_status_tools.py +158 -0
  115. mcp_ticketer/mcp/server/tools/project_update_tools.py +473 -0
  116. mcp_ticketer/mcp/server/tools/search_tools.py +318 -0
  117. mcp_ticketer/mcp/server/tools/session_tools.py +308 -0
  118. mcp_ticketer/mcp/server/tools/ticket_tools.py +1413 -0
  119. mcp_ticketer/mcp/server/tools/user_ticket_tools.py +364 -0
  120. mcp_ticketer/queue/__init__.py +1 -0
  121. mcp_ticketer/queue/health_monitor.py +168 -136
  122. mcp_ticketer/queue/manager.py +78 -63
  123. mcp_ticketer/queue/queue.py +108 -21
  124. mcp_ticketer/queue/run_worker.py +2 -2
  125. mcp_ticketer/queue/ticket_registry.py +213 -155
  126. mcp_ticketer/queue/worker.py +96 -58
  127. mcp_ticketer/utils/__init__.py +5 -0
  128. mcp_ticketer/utils/token_utils.py +246 -0
  129. mcp_ticketer-2.2.9.dist-info/METADATA +1396 -0
  130. mcp_ticketer-2.2.9.dist-info/RECORD +158 -0
  131. mcp_ticketer-2.2.9.dist-info/top_level.txt +2 -0
  132. py_mcp_installer/examples/phase3_demo.py +178 -0
  133. py_mcp_installer/scripts/manage_version.py +54 -0
  134. py_mcp_installer/setup.py +6 -0
  135. py_mcp_installer/src/py_mcp_installer/__init__.py +153 -0
  136. py_mcp_installer/src/py_mcp_installer/command_builder.py +445 -0
  137. py_mcp_installer/src/py_mcp_installer/config_manager.py +541 -0
  138. py_mcp_installer/src/py_mcp_installer/exceptions.py +243 -0
  139. py_mcp_installer/src/py_mcp_installer/installation_strategy.py +617 -0
  140. py_mcp_installer/src/py_mcp_installer/installer.py +656 -0
  141. py_mcp_installer/src/py_mcp_installer/mcp_inspector.py +750 -0
  142. py_mcp_installer/src/py_mcp_installer/platform_detector.py +451 -0
  143. py_mcp_installer/src/py_mcp_installer/platforms/__init__.py +26 -0
  144. py_mcp_installer/src/py_mcp_installer/platforms/claude_code.py +225 -0
  145. py_mcp_installer/src/py_mcp_installer/platforms/codex.py +181 -0
  146. py_mcp_installer/src/py_mcp_installer/platforms/cursor.py +191 -0
  147. py_mcp_installer/src/py_mcp_installer/types.py +222 -0
  148. py_mcp_installer/src/py_mcp_installer/utils.py +463 -0
  149. py_mcp_installer/tests/__init__.py +0 -0
  150. py_mcp_installer/tests/platforms/__init__.py +0 -0
  151. py_mcp_installer/tests/test_platform_detector.py +17 -0
  152. mcp_ticketer/adapters/github.py +0 -1354
  153. mcp_ticketer/adapters/jira.py +0 -1011
  154. mcp_ticketer/mcp/server.py +0 -1895
  155. mcp_ticketer-0.2.0.dist-info/METADATA +0 -414
  156. mcp_ticketer-0.2.0.dist-info/RECORD +0 -58
  157. mcp_ticketer-0.2.0.dist-info/top_level.txt +0 -1
  158. {mcp_ticketer-0.2.0.dist-info → mcp_ticketer-2.2.9.dist-info}/WHEEL +0 -0
  159. {mcp_ticketer-0.2.0.dist-info → mcp_ticketer-2.2.9.dist-info}/entry_points.txt +0 -0
  160. {mcp_ticketer-0.2.0.dist-info → mcp_ticketer-2.2.9.dist-info}/licenses/LICENSE +0 -0
mcp_ticketer/queue/worker.py
@@ -7,17 +7,17 @@ import threading
 import time
 from datetime import datetime
 from pathlib import Path
-from typing import Any, Optional
+from typing import Any
 
 from dotenv import load_dotenv
 
+# Import adapters module to trigger registration
+import mcp_ticketer.adapters  # noqa: F401
+
 from ..core import AdapterRegistry, Task
 from .queue import Queue, QueueItem, QueueStatus
 from .ticket_registry import TicketRegistry
 
-# Import adapters module to trigger registration
-import mcp_ticketer.adapters  # noqa: F401
-
 # Load environment variables from .env.local
 env_path = Path.cwd() / ".env.local"
 if env_path.exists():
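The two moved lines above do real work: importing mcp_ticketer.adapters registers the concrete adapter classes with AdapterRegistry as an import side effect, which is why the import survives with a noqa: F401 even though nothing references the module directly. A minimal sketch of the register-on-import pattern, assuming a decorator-based registry (only AdapterRegistry.get_adapter appears in this diff; the register() decorator and class names are illustrative):

    # Illustrative sketch; register() is an assumed API, not the package's.
    from collections.abc import Callable
    from typing import Any


    class AdapterRegistry:
        _adapters: dict[str, type] = {}

        @classmethod
        def register(cls, name: str) -> Callable[[type], type]:
            def decorator(adapter_cls: type) -> type:
                cls._adapters[name] = adapter_cls  # runs when the module is imported
                return adapter_cls

            return decorator

        @classmethod
        def get_adapter(cls, name: str, config: dict[str, Any]) -> Any:
            return cls._adapters[name](config)  # KeyError if registration never ran


    @AdapterRegistry.register("linear")
    class LinearAdapter:
        def __init__(self, config: dict[str, Any]) -> None:
            self.config = config

If the registering import is dropped, get_adapter("linear", ...) fails at runtime, so the worker keeps the import purely for its side effect.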
@@ -58,7 +58,7 @@ class Worker:
 
     def __init__(
         self,
-        queue: Optional[Queue] = None,
+        queue: Queue | None = None,
         batch_size: int = DEFAULT_BATCH_SIZE,
         max_concurrent: int = DEFAULT_MAX_CONCURRENT,
     ):
@@ -97,12 +97,12 @@
             f"Worker initialized with batch_size={batch_size}, max_concurrent={max_concurrent}"
         )
 
-    def _signal_handler(self, signum, frame):
+    def _signal_handler(self, signum: int, frame: Any) -> None:
         """Handle shutdown signals."""
         logger.info(f"Received signal {signum}, shutting down...")
         self.stop()
 
-    def start(self, daemon: bool = True):
+    def start(self, daemon: bool = True) -> None:
         """Start the worker.
 
         Args:
@@ -126,14 +126,14 @@
             # Run in main thread
             self._run_loop()
 
-    def stop(self):
+    def stop(self) -> None:
         """Stop the worker."""
         logger.info("Stopping worker...")
         self.running = False
         self.stop_event.set()
 
-    def _run_loop(self):
-        """Main worker loop with batch processing."""
+    def _run_loop(self) -> None:
+        """Run main worker loop with batch processing."""
         logger.info("Worker loop started")
 
         # Reset any stuck items on startup
@@ -174,7 +174,7 @@
                 break
         return batch
 
-    async def _process_batch(self, batch: list[QueueItem]):
+    async def _process_batch(self, batch: list[QueueItem]) -> None:
         """Process a batch of queue items with concurrency control.
 
         Args:
@@ -184,7 +184,7 @@
         logger.info(f"Processing batch of {len(batch)} items")
 
         # Group items by adapter for concurrent processing
-        adapter_groups = {}
+        adapter_groups: dict[str, list[Any]] = {}
         for item in batch:
             if item.adapter not in adapter_groups:
                 adapter_groups[item.adapter] = []
@@ -199,7 +199,9 @@
         # Wait for all adapter groups to complete
         await asyncio.gather(*tasks, return_exceptions=True)
 
-    async def _process_adapter_group(self, adapter: str, items: list[QueueItem]):
+    async def _process_adapter_group(
+        self, adapter: str, items: list[QueueItem]
+    ) -> None:
         """Process items for a specific adapter with concurrency control.
 
         Args:
@@ -216,7 +218,7 @@
         semaphore = self.adapter_semaphores[adapter]
 
         # Process items with concurrency control
-        async def process_with_semaphore(item):
+        async def process_with_semaphore(item: QueueItem) -> None:
            async with semaphore:
                await self._process_item(item)
 
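The annotated inner function is the worker's throttle: each adapter group shares one asyncio.Semaphore, so at most max_concurrent items per adapter run at a time while different adapters proceed in parallel. A self-contained sketch of the same pattern:

    # Sketch: bound concurrency per group with one semaphore per group.
    import asyncio


    async def process_group(name: str, items: list[str], max_concurrent: int = 3) -> None:
        semaphore = asyncio.Semaphore(max_concurrent)

        async def run_one(item: str) -> None:
            async with semaphore:  # at most max_concurrent in flight
                await asyncio.sleep(0.1)  # stand-in for the real adapter call
                print(f"{name}: processed {item}")

        # return_exceptions=True keeps one failure from cancelling the batch,
        # mirroring the worker's asyncio.gather call above
        await asyncio.gather(*(run_one(i) for i in items), return_exceptions=True)


    asyncio.run(process_group("linear", [f"item-{n}" for n in range(10)]))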
@@ -226,7 +228,7 @@
         # Process with concurrency control
         await asyncio.gather(*tasks, return_exceptions=True)
 
-    async def _process_item(self, item: QueueItem):
+    async def _process_item(self, item: QueueItem) -> None:
         """Process a single queue item.
 
         Args:
@@ -263,15 +265,19 @@
 
             # Mark as completed in both queue and registry (atomic)
             success = self.queue.update_status(
-                item.id, QueueStatus.COMPLETED, result=result,
-                expected_status=QueueStatus.PROCESSING
+                item.id,
+                QueueStatus.COMPLETED,
+                result=result,
+                expected_status=QueueStatus.PROCESSING,
             )
             if success:
                 self.ticket_registry.update_ticket_status(
                     item.id, "completed", ticket_id=ticket_id, result_data=result
                 )
             else:
-                logger.warning(f"Failed to update status for {item.id} - item may have been processed by another worker")
+                logger.warning(
+                    f"Failed to update status for {item.id} - item may have been processed by another worker"
+                )
 
             self.stats["items_processed"] += 1
             logger.info(f"Successfully processed {item.id}, ticket ID: {ticket_id}")
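The call reformatted above also carries this release's concurrency guard: expected_status=QueueStatus.PROCESSING turns the transition into a compare-and-swap, so a worker can only complete an item it still owns, and a False return routes into the warning branch. A toy illustration of that guard over an in-memory SQLite table (the schema and function are hypothetical; this diff does not show the package's actual queue storage):

    # Sketch: status transition that only succeeds from the expected state.
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE queue (id TEXT PRIMARY KEY, status TEXT)")
    conn.execute("INSERT INTO queue VALUES ('item-1', 'processing')")


    def update_status(item_id: str, new_status: str, expected_status: str) -> bool:
        cur = conn.execute(
            "UPDATE queue SET status = ? WHERE id = ? AND status = ?",
            (new_status, item_id, expected_status),
        )
        conn.commit()
        return cur.rowcount == 1  # False: another worker got there first


    assert update_status("item-1", "completed", expected_status="processing")
    assert not update_status("item-1", "failed", expected_status="processing")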
@@ -301,26 +307,35 @@
                     item.id, "queued", retry_count=new_retry_count
                 )
             else:
-                logger.warning(f"Failed to increment retry for {item.id} - item may have been processed by another worker")
+                logger.warning(
+                    f"Failed to increment retry for {item.id} - item may have been processed by another worker"
+                )
 
             # Wait before retry
             await asyncio.sleep(retry_delay)
         else:
             # Max retries exceeded, mark as failed (atomic)
             success = self.queue.update_status(
-                item.id, QueueStatus.FAILED, error_message=str(e),
-                expected_status=QueueStatus.PROCESSING
+                item.id,
+                QueueStatus.FAILED,
+                error_message=str(e),
+                expected_status=QueueStatus.PROCESSING,
             )
             if success:
                 self.ticket_registry.update_ticket_status(
-                    item.id, "failed", error_message=str(e), retry_count=item.retry_count
+                    item.id,
+                    "failed",
+                    error_message=str(e),
+                    retry_count=item.retry_count,
                 )
             else:
-                logger.warning(f"Failed to mark {item.id} as failed - item may have been processed by another worker")
+                logger.warning(
+                    f"Failed to mark {item.id} as failed - item may have been processed by another worker"
+                )
             self.stats["items_failed"] += 1
             logger.error(f"Max retries exceeded for {item.id}, marking as failed")
 
-    async def _check_rate_limit(self, adapter: str):
+    async def _check_rate_limit(self, adapter: str) -> None:
         """Check and enforce rate limits.
 
         Args:
@@ -344,7 +359,7 @@
 
         self.last_request_times[adapter] = datetime.now()
 
-    def _get_adapter(self, item: QueueItem):
+    def _get_adapter(self, item: QueueItem) -> Any:
         """Get adapter instance for item.
 
         Args:
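_check_rate_limit, whose closing line is visible above, timestamps each adapter's most recent request in self.last_request_times; enforcing the limit then amounts to sleeping off whatever remains of a minimum interval before the next call. A sketch of that approach (the one-second interval is an assumed value, not taken from the package):

    # Sketch: minimum spacing between requests to the same adapter.
    import asyncio
    from datetime import datetime, timedelta

    last_request_times: dict[str, datetime] = {}
    MIN_INTERVAL = timedelta(seconds=1.0)  # assumed for illustration


    async def check_rate_limit(adapter: str) -> None:
        last = last_request_times.get(adapter)
        if last is not None:
            elapsed = datetime.now() - last
            if elapsed < MIN_INTERVAL:
                # Sleep off the remainder of the interval
                await asyncio.sleep((MIN_INTERVAL - elapsed).total_seconds())
        last_request_times[adapter] = datetime.now()


    asyncio.run(check_rate_limit("linear"))  # first call returns immediately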
@@ -360,24 +375,31 @@
 
         from ..cli.main import load_config
 
-        # Use item's project_dir if available, otherwise use current directory
-        project_path = Path(item.project_dir) if item.project_dir else None
-
-        # Load environment variables from project directory's .env.local if it exists
-        if project_path:
-            env_file = project_path / ".env.local"
-            if env_file.exists():
-                logger.info(f"Worker loading environment from {env_file}")
-                load_dotenv(env_file)
-
-        logger.info(f"Worker project_path: {project_path}")
-        logger.info(f"Worker current working directory: {os.getcwd()}")
-
-        config = load_config(project_dir=project_path)
-        logger.info(f"Worker loaded config: {config}")
-        adapters_config = config.get("adapters", {})
-        adapter_config = adapters_config.get(item.adapter, {})
-        logger.info(f"Worker adapter config for {item.adapter}: {adapter_config}")
+        # PRIORITY 1: Use adapter_config from queue item if available (explicit config)
+        if item.adapter_config:
+            logger.info("Worker using explicit adapter_config from queue item")
+            adapter_config = item.adapter_config
+            logger.info(f"Worker adapter config for {item.adapter}: {adapter_config}")
+        else:
+            # PRIORITY 2: Load from project config file
+            # Use item's project_dir if available, otherwise use current directory
+            project_path = Path(item.project_dir) if item.project_dir else None
+
+            # Load environment variables from project directory's .env.local if it exists
+            if project_path:
+                env_file = project_path / ".env.local"
+                if env_file.exists():
+                    logger.info(f"Worker loading environment from {env_file}")
+                    load_dotenv(env_file)
+
+            logger.info(f"Worker project_path: {project_path}")
+            logger.info(f"Worker current working directory: {os.getcwd()}")
+
+            config = load_config(project_dir=project_path)
+            logger.info(f"Worker loaded config: {config}")
+            adapters_config = config.get("adapters", {})
+            adapter_config = adapters_config.get(item.adapter, {})
+            logger.info(f"Worker adapter config for {item.adapter}: {adapter_config}")
 
         # Add environment variables for authentication
         if item.adapter == "linear":
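The restructured block above establishes a two-tier lookup: an adapter_config snapshotted onto the queue item at enqueue time wins outright, and only in its absence does the worker fall back to loading .env.local and the project config from item.project_dir. That ordering matters because the worker may run with a different working directory and environment than the process that enqueued the item. The resolution logic reduced to its essentials (a hypothetical helper written for illustration):

    # Sketch: explicit per-item config beats config discovered on disk.
    from typing import Any


    def resolve_adapter_config(
        item_config: dict[str, Any] | None,
        project_config: dict[str, Any],
        adapter: str,
    ) -> dict[str, Any]:
        if item_config:  # PRIORITY 1: snapshotted at enqueue time
            return item_config
        # PRIORITY 2: whatever the worker can discover on disk
        return project_config.get("adapters", {}).get(adapter, {})


    assert resolve_adapter_config({"api_key": "k"}, {}, "linear") == {"api_key": "k"}
    assert resolve_adapter_config(
        None, {"adapters": {"linear": {"team": "T"}}}, "linear"
    ) == {"team": "T"}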
@@ -397,21 +419,32 @@
         # Add debugging for Linear adapter specifically
         if item.adapter == "linear":
             import os
+
             linear_api_key = os.getenv("LINEAR_API_KEY", "Not set")
-            logger.info(f"Worker LINEAR_API_KEY: {linear_api_key[:20] if linear_api_key != 'Not set' else 'Not set'}...")
-            logger.info(f"Worker adapter_config api_key: {adapter_config.get('api_key', 'Not set')[:20] if adapter_config.get('api_key') else 'Not set'}...")
+            logger.info(
+                f"Worker LINEAR_API_KEY: {linear_api_key[:20] if linear_api_key != 'Not set' else 'Not set'}..."
+            )
+            logger.info(
+                f"Worker adapter_config api_key: {adapter_config.get('api_key', 'Not set')[:20] if adapter_config.get('api_key') else 'Not set'}..."
+            )
 
         adapter = AdapterRegistry.get_adapter(item.adapter, adapter_config)
-        logger.info(f"Worker created adapter: {type(adapter)} with team_id: {getattr(adapter, 'team_id_config', 'Not set')}")
+        logger.info(
+            f"Worker created adapter: {type(adapter)} with team_id: {getattr(adapter, 'team_id_config', 'Not set')}"
+        )
 
         # Add more debugging for Linear adapter
         if item.adapter == "linear":
-            logger.info(f"Worker Linear adapter api_key: {getattr(adapter, 'api_key', 'Not set')[:20] if getattr(adapter, 'api_key', None) else 'Not set'}...")
-            logger.info(f"Worker Linear adapter team_key: {getattr(adapter, 'team_key', 'Not set')}")
+            logger.info(
+                f"Worker Linear adapter api_key: {getattr(adapter, 'api_key', 'Not set')[:20] if getattr(adapter, 'api_key', None) else 'Not set'}..."
+            )
+            logger.info(
+                f"Worker Linear adapter team_key: {getattr(adapter, 'team_key', 'Not set')}"
+            )
 
         return adapter
 
-    async def _execute_operation(self, adapter, item: QueueItem) -> dict[str, Any]:
+    async def _execute_operation(self, adapter: Any, item: QueueItem) -> dict[str, Any]:
         """Execute the queued operation.
 
         Args:
@@ -461,14 +494,13 @@
             result = await adapter.create_epic(
                 title=data["title"],
                 description=data.get("description"),
-                **{k: v for k, v in data.items()
-                   if k not in ["title", "description"]}
+                **{k: v for k, v in data.items() if k not in ["title", "description"]},
             )
             return {
                 "id": result.id if result else None,
                 "title": result.title if result else None,
                 "type": "epic",
-                "success": bool(result)
+                "success": bool(result),
             }
 
         elif operation == "create_issue":
@@ -476,15 +508,18 @@
                 title=data["title"],
                 description=data.get("description"),
                 epic_id=data.get("epic_id"),
-                **{k: v for k, v in data.items()
-                   if k not in ["title", "description", "epic_id"]}
+                **{
+                    k: v
+                    for k, v in data.items()
+                    if k not in ["title", "description", "epic_id"]
+                },
             )
             return {
                 "id": result.id if result else None,
                 "title": result.title if result else None,
                 "type": "issue",
                 "epic_id": data.get("epic_id"),
-                "success": bool(result)
+                "success": bool(result),
             }
 
         elif operation == "create_task":
@@ -492,15 +527,18 @@
                 title=data["title"],
                 parent_id=data["parent_id"],
                 description=data.get("description"),
-                **{k: v for k, v in data.items()
-                   if k not in ["title", "parent_id", "description"]}
+                **{
+                    k: v
+                    for k, v in data.items()
+                    if k not in ["title", "parent_id", "description"]
+                },
            )
            return {
                "id": result.id if result else None,
                "title": result.title if result else None,
                "type": "task",
                "parent_id": data["parent_id"],
-                "success": bool(result)
+                "success": bool(result),
            }
 
         else:
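Each create_* branch above uses the same forwarding idiom: the named fields are passed explicitly, and every remaining payload key is splatted through as a keyword argument via a filtered dict comprehension. The idiom in isolation:

    # Sketch: forward leftover payload keys as kwargs, excluding known fields.
    from typing import Any


    def create_task(
        title: str, parent_id: str, description: str | None = None, **kwargs: Any
    ) -> dict[str, Any]:
        return {"title": title, "parent_id": parent_id, "description": description, **kwargs}


    data = {"title": "T", "parent_id": "P-1", "description": "d", "priority": "high"}
    result = create_task(
        title=data["title"],
        parent_id=data["parent_id"],
        description=data.get("description"),
        **{k: v for k, v in data.items() if k not in ["title", "parent_id", "description"]},
    )
    assert result["priority"] == "high"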
mcp_ticketer/utils/__init__.py (new file)
@@ -0,0 +1,5 @@
+"""Utility modules for mcp-ticketer."""
+
+from .token_utils import estimate_json_tokens, estimate_tokens, paginate_response
+
+__all__ = ["estimate_tokens", "estimate_json_tokens", "paginate_response"]
mcp_ticketer/utils/token_utils.py (new file)
@@ -0,0 +1,246 @@
+"""Token counting and pagination utilities for MCP tool responses.
+
+This module provides utilities for estimating token counts and implementing
+token-aware pagination to ensure responses stay under 20k token limits.
+
+Design Decision: Token estimation vs. exact counting
+- Uses 4-chars-per-token heuristic (conservative)
+- Rationale: Actual tokenization requires tiktoken library and GPT-specific
+  tokenizer, which adds dependency and runtime overhead
+- Trade-off: Approximate (±10%) vs. exact, but fast and dependency-free
+- Extension Point: Can add tiktoken support via optional dependency later
+
+Performance: O(1) for token estimation (string length only)
+Memory: O(1) auxiliary space (no allocations beyond JSON serialization)
+"""
+
+import json
+import logging
+from collections.abc import Callable
+from typing import Any, TypeVar
+
+# Type variable for generic list items
+T = TypeVar("T")
+
+# Conservative token estimation: 1 token ≈ 4 characters
+# Based on OpenAI/Anthropic averages for English text + JSON structure
+CHARS_PER_TOKEN = 4
+
+# Default maximum tokens per MCP response
+DEFAULT_MAX_TOKENS = 20_000
+
+# Overhead estimation for response metadata (status, adapter info, etc.)
+BASE_RESPONSE_OVERHEAD = 100
+
+
+def estimate_tokens(text: str) -> int:
+    """Estimate token count for a text string.
+
+    Uses conservative heuristic: 1 token ≈ 4 characters.
+    This works reasonably well for English text and JSON structures.
+
+    Design Trade-off:
+    - Fast: O(1) length check (len() runs in constant time)
+    - Approximate: ±10% accuracy vs. exact tokenization
+    - Zero dependencies: No tiktoken or model-specific tokenizers needed
+
+    Performance:
+    - Time Complexity: O(1) (constant-time len() lookup; the text is not scanned)
+    - Space Complexity: O(1)
+
+    Args:
+        text: Input text to estimate token count for
+
+    Returns:
+        Estimated token count (conservative, may overestimate slightly)
+
+    Example:
+        >>> estimate_tokens("Hello world")
+        2  # "Hello world" = 11 chars // 4 = 2 (floor division)
+        >>> estimate_tokens(json.dumps({"id": "123", "title": "Test"}))
+        7  # JSON structure increases char count
+    """
+    if not text:
+        return 0
+    return max(1, len(text) // CHARS_PER_TOKEN)
+
+
+def estimate_json_tokens(data: dict | list | Any) -> int:
+    """Estimate token count for JSON-serializable data.
+
+    Serializes data to JSON string then estimates tokens.
+    Accounts for JSON structure overhead (brackets, quotes, commas).
+
+    Performance:
+    - Time Complexity: O(n) where n is serialized JSON size
+    - Space Complexity: O(n) for JSON string (temporary)
+
+    Args:
+        data: Any JSON-serializable data (dict, list, primitives)
+
+    Returns:
+        Estimated token count for serialized representation
+
+    Example:
+        >>> estimate_json_tokens({"id": "123", "title": "Test"})
+        7
+        >>> estimate_json_tokens([1, 2, 3])
+        2
+    """
+    try:
+        json_str = json.dumps(data, default=str)  # default=str for non-serializable
+        return estimate_tokens(json_str)
+    except (TypeError, ValueError) as e:
+        logging.warning(f"Failed to serialize data for token estimation: {e}")
+        # Fallback: estimate based on string representation
+        return estimate_tokens(str(data))
+
+
+def paginate_response(
+    items: list[T],
+    limit: int = 20,
+    offset: int = 0,
+    max_tokens: int = DEFAULT_MAX_TOKENS,
+    serialize_fn: Callable[[T], dict] | None = None,
+    compact_fn: Callable[[dict], dict] | None = None,
+    compact: bool = True,
+) -> dict[str, Any]:
+    """Paginate a list of items with token-aware limiting.
+
+    This function implements automatic pagination that:
+    1. Respects explicit limit/offset parameters
+    2. Stops adding items if response would exceed max_tokens
+    3. Optionally applies compact transformation to reduce token usage
+    4. Returns pagination metadata for client-side handling
+
+    Design Decision: Token-aware vs. count-based pagination
+    - Hybrid approach: Uses both item count AND token limits
+    - Rationale: Prevents oversized responses even with small item counts
+    - Example: 10 tickets with huge descriptions could exceed 20k tokens
+    - Trade-off: Slightly more complex but safer for production use
+
+    Performance:
+    - Time Complexity: O(n) where n is min(limit, items until token limit)
+    - Space Complexity: O(n) for result items list
+    - Early termination: Stops as soon as token limit would be exceeded
+
+    Args:
+        items: List of items to paginate
+        limit: Maximum number of items to return (default: 20)
+        offset: Number of items to skip (default: 0)
+        max_tokens: Maximum tokens allowed in response (default: 20,000)
+        serialize_fn: Optional function to convert item to dict (e.g., model.model_dump)
+        compact_fn: Optional function to create compact representation
+        compact: Whether to apply compact_fn if provided (default: True)
+
+    Returns:
+        Dictionary containing:
+        - items: List of paginated items (serialized)
+        - count: Number of items returned
+        - total: Total items available (before pagination)
+        - offset: Offset used for this page
+        - limit: Limit requested
+        - has_more: Boolean indicating if more items exist
+        - truncated_by_tokens: Boolean indicating if token limit caused truncation
+        - estimated_tokens: Approximate token count for response
+
+    Error Conditions:
+    - Invalid limit (<= 0): Logs a warning and falls back to limit=20
+    - Invalid offset (< 0): Uses offset=0
+    - serialize_fn fails: Logs warning and skips item
+
+    Example:
+        >>> tickets = [Ticket(...), Ticket(...), ...]  # 100 tickets
+        >>> result = paginate_response(
+        ...     tickets,
+        ...     limit=20,
+        ...     offset=0,
+        ...     serialize_fn=lambda t: t.model_dump(),
+        ...     compact_fn=_compact_ticket,
+        ... )
+        >>> result["count"]  # 20 (or less if token limit hit)
+        >>> result["has_more"]  # True
+        >>> result["estimated_tokens"]  # ~2500
+    """
+    # Validate parameters
+    if limit <= 0:
+        logging.warning(f"Invalid limit {limit}, using default 20")
+        limit = 20
+
+    if offset < 0:
+        logging.warning(f"Invalid offset {offset}, using 0")
+        offset = 0
+
+    total_items = len(items)
+
+    # Apply offset
+    items_after_offset = items[offset:]
+
+    # Track token usage
+    estimated_tokens = BASE_RESPONSE_OVERHEAD  # Base response overhead
+    result_items: list[dict] = []
+    truncated_by_tokens = False
+
+    # Process items up to limit or token threshold
+    for idx, item in enumerate(items_after_offset):
+        # Check if we've hit the limit
+        if idx >= limit:
+            break
+
+        # Serialize item
+        try:
+            if serialize_fn:
+                item_dict = serialize_fn(item)
+            elif hasattr(item, "model_dump"):
+                item_dict = item.model_dump()
+            elif isinstance(item, dict):
+                item_dict = item
+            else:
+                item_dict = {"data": str(item)}
+
+            # Apply compact mode if requested and function provided
+            if compact and compact_fn:
+                item_dict = compact_fn(item_dict)
+
+            # Estimate tokens for this item
+            item_tokens = estimate_json_tokens(item_dict)
+
+            # Check if adding this item would exceed token limit
+            if estimated_tokens + item_tokens > max_tokens:
+                logging.info(
+                    f"Token limit reached: {estimated_tokens + item_tokens} > {max_tokens}. "
+                    f"Returning {len(result_items)} items instead of requested {limit}."
+                )
+                truncated_by_tokens = True
+                break
+
+            # Add item to results
+            result_items.append(item_dict)
+            estimated_tokens += item_tokens
+
+        except Exception as e:
+            logging.warning(f"Failed to serialize item at index {idx + offset}: {e}")
+            # Skip this item and continue
+            continue
+
+    # Calculate pagination metadata
+    items_returned = len(result_items)
+    has_more = (offset + items_returned) < total_items
+
+    # Warn if approaching token limit
+    if estimated_tokens > max_tokens * 0.8:
+        logging.warning(
+            f"Response approaching token limit: {estimated_tokens}/{max_tokens} tokens "
+            f"({estimated_tokens/max_tokens*100:.1f}%). Consider using compact mode or reducing limit."
+        )
+
+    return {
+        "items": result_items,
+        "count": items_returned,
+        "total": total_items,
+        "offset": offset,
+        "limit": limit,
+        "has_more": has_more,
+        "truncated_by_tokens": truncated_by_tokens,
+        "estimated_tokens": estimated_tokens,
+    }
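Putting the new helpers together, a brief usage sketch for the functions defined above; the ticket dicts and compact_ticket function are invented for the example, and the expected values follow from the 4-chars-per-token heuristic:

    # Usage sketch for the helpers above (sample data is invented).
    from mcp_ticketer.utils import estimate_tokens, paginate_response

    tickets = [
        {"id": f"TICK-{n}", "title": f"Ticket {n}", "description": "x" * 500}
        for n in range(100)
    ]


    def compact_ticket(t: dict) -> dict:
        # Drop the heavy description field to save tokens
        return {"id": t["id"], "title": t["title"]}


    page = paginate_response(tickets, limit=20, offset=0, compact_fn=compact_ticket)
    print(page["count"], page["total"], page["has_more"])  # 20 100 True
    print(page["truncated_by_tokens"])                     # False: compact items are tiny
    print(estimate_tokens("Hello world"))                  # 2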