mcp-ticketer 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mcp-ticketer might be problematic.

Files changed (37)
  1. mcp_ticketer/__version__.py +1 -1
  2. mcp_ticketer/adapters/aitrackdown.py +12 -15
  3. mcp_ticketer/adapters/github.py +7 -4
  4. mcp_ticketer/adapters/jira.py +23 -22
  5. mcp_ticketer/adapters/linear/__init__.py +1 -1
  6. mcp_ticketer/adapters/linear/adapter.py +88 -89
  7. mcp_ticketer/adapters/linear/client.py +71 -52
  8. mcp_ticketer/adapters/linear/mappers.py +88 -68
  9. mcp_ticketer/adapters/linear/queries.py +28 -7
  10. mcp_ticketer/adapters/linear/types.py +57 -50
  11. mcp_ticketer/adapters/linear.py +2 -2
  12. mcp_ticketer/cli/adapter_diagnostics.py +86 -51
  13. mcp_ticketer/cli/diagnostics.py +165 -72
  14. mcp_ticketer/cli/linear_commands.py +156 -113
  15. mcp_ticketer/cli/main.py +153 -82
  16. mcp_ticketer/cli/simple_health.py +73 -45
  17. mcp_ticketer/cli/utils.py +15 -10
  18. mcp_ticketer/core/config.py +23 -19
  19. mcp_ticketer/core/env_discovery.py +5 -4
  20. mcp_ticketer/core/env_loader.py +109 -86
  21. mcp_ticketer/core/exceptions.py +20 -18
  22. mcp_ticketer/core/models.py +9 -0
  23. mcp_ticketer/core/project_config.py +1 -1
  24. mcp_ticketer/mcp/server.py +294 -139
  25. mcp_ticketer/queue/health_monitor.py +152 -121
  26. mcp_ticketer/queue/manager.py +11 -4
  27. mcp_ticketer/queue/queue.py +15 -3
  28. mcp_ticketer/queue/run_worker.py +1 -1
  29. mcp_ticketer/queue/ticket_registry.py +190 -132
  30. mcp_ticketer/queue/worker.py +54 -25
  31. {mcp_ticketer-0.3.1.dist-info → mcp_ticketer-0.3.2.dist-info}/METADATA +1 -1
  32. mcp_ticketer-0.3.2.dist-info/RECORD +59 -0
  33. mcp_ticketer-0.3.1.dist-info/RECORD +0 -59
  34. {mcp_ticketer-0.3.1.dist-info → mcp_ticketer-0.3.2.dist-info}/WHEEL +0 -0
  35. {mcp_ticketer-0.3.1.dist-info → mcp_ticketer-0.3.2.dist-info}/entry_points.txt +0 -0
  36. {mcp_ticketer-0.3.1.dist-info → mcp_ticketer-0.3.2.dist-info}/licenses/LICENSE +0 -0
  37. {mcp_ticketer-0.3.1.dist-info → mcp_ticketer-0.3.2.dist-info}/top_level.txt +0 -0
mcp_ticketer/queue/health_monitor.py

@@ -1,11 +1,9 @@
 """Queue health monitoring and alerting system."""
 
-import asyncio
 import logging
 import time
 from datetime import datetime, timedelta
 from enum import Enum
-from pathlib import Path
 from typing import Any, Dict, List, Optional
 
 import psutil
@@ -18,6 +16,7 @@ logger = logging.getLogger(__name__)
 
 class HealthStatus(str, Enum):
     """Health status levels."""
+
     HEALTHY = "healthy"
     WARNING = "warning"
     CRITICAL = "critical"
@@ -26,108 +25,121 @@ class HealthStatus(str, Enum):
 
 class HealthAlert:
     """Health alert with severity and details."""
-
+
     def __init__(
         self,
         level: HealthStatus,
         message: str,
         details: Optional[Dict[str, Any]] = None,
-        timestamp: Optional[datetime] = None
+        timestamp: Optional[datetime] = None,
     ):
         self.level = level
         self.message = message
         self.details = details or {}
         self.timestamp = timestamp or datetime.now()
-
+
     def __str__(self) -> str:
         return f"[{self.level.upper()}] {self.message}"
 
 
 class QueueHealthMonitor:
     """Monitors queue health and provides immediate alerts."""
-
+
     # Health check thresholds
     WORKER_TIMEOUT_SECONDS = 30  # Worker should process items within 30s
-    STUCK_ITEM_THRESHOLD = 300  # 5 minutes for stuck items
-    HIGH_FAILURE_RATE = 0.3  # 30% failure rate is concerning
-    QUEUE_BACKLOG_WARNING = 10  # Warn if more than 10 pending items
+    STUCK_ITEM_THRESHOLD = 300  # 5 minutes for stuck items
+    HIGH_FAILURE_RATE = 0.3  # 30% failure rate is concerning
+    QUEUE_BACKLOG_WARNING = 10  # Warn if more than 10 pending items
     QUEUE_BACKLOG_CRITICAL = 50  # Critical if more than 50 pending items
-
+
     def __init__(self, queue: Optional[Queue] = None):
         """Initialize health monitor.
-
+
         Args:
            queue: Queue instance to monitor. Creates new if None.
+
         """
         self.queue = queue or Queue()
         self.manager = WorkerManager()
         self.last_check = datetime.now()
         self.alerts: List[HealthAlert] = []
-
+
     def check_health(self) -> Dict[str, Any]:
         """Perform comprehensive health check.
-
+
         Returns:
            Health status with alerts and metrics
+
         """
         self.alerts.clear()
-
+
         # Check worker status
         worker_health = self._check_worker_health()
-
+
         # Check queue status
         queue_health = self._check_queue_health()
-
+
         # Check for stuck items
         stuck_health = self._check_stuck_items()
-
+
         # Check failure rates
         failure_health = self._check_failure_rates()
-
+
         # Determine overall health
         overall_status = self._determine_overall_status()
-
+
         health_report = {
             "status": overall_status,
             "timestamp": datetime.now().isoformat(),
-            "alerts": [{"level": alert.level, "message": alert.message, "details": alert.details} for alert in self.alerts],
+            "alerts": [
+                {
+                    "level": alert.level,
+                    "message": alert.message,
+                    "details": alert.details,
+                }
+                for alert in self.alerts
+            ],
             "metrics": {
                 "worker": worker_health,
                 "queue": queue_health,
                 "stuck_items": stuck_health,
-                "failure_rate": failure_health
-            }
+                "failure_rate": failure_health,
+            },
         }
-
+
         self.last_check = datetime.now()
         return health_report
-
+
     def _check_worker_health(self) -> Dict[str, Any]:
         """Check worker process health."""
         worker_status = self.manager.get_status()
-
+
         metrics = {
             "running": worker_status["running"],
             "pid": worker_status.get("pid"),
             "cpu_percent": worker_status.get("cpu_percent", 0),
-            "memory_mb": worker_status.get("memory_mb", 0)
+            "memory_mb": worker_status.get("memory_mb", 0),
         }
-
+
         if not worker_status["running"]:
             # Check if we have pending items but no worker
             pending_count = self.queue.get_pending_count()
             if pending_count > 0:
-                self.alerts.append(HealthAlert(
-                    HealthStatus.CRITICAL,
-                    f"Worker not running but {pending_count} items pending",
-                    {"pending_count": pending_count, "action": "start_worker"}
-                ))
+                self.alerts.append(
+                    HealthAlert(
+                        HealthStatus.CRITICAL,
+                        f"Worker not running but {pending_count} items pending",
+                        {"pending_count": pending_count, "action": "start_worker"},
+                    )
+                )
             else:
-                self.alerts.append(HealthAlert(
-                    HealthStatus.WARNING,
-                    "Worker not running (no pending items)",
-                    {"action": "worker_idle"}
-                ))
+                self.alerts.append(
+                    HealthAlert(
+                        HealthStatus.WARNING,
+                        "Worker not running (no pending items)",
+                        {"action": "worker_idle"},
+                    )
+                )
         else:
             # Worker is running, check if it's responsive
             pid = worker_status.get("pid")
@@ -140,168 +152,185 @@ class QueueHealthMonitor:
                     # Check for items that have been pending too long
                     old_pending = self._get_old_pending_items()
                     if old_pending:
-                        self.alerts.append(HealthAlert(
-                            HealthStatus.WARNING,
-                            f"Worker running but {len(old_pending)} items pending for >30s",
-                            {"old_pending_count": len(old_pending), "worker_pid": pid}
-                        ))
+                        self.alerts.append(
+                            HealthAlert(
+                                HealthStatus.WARNING,
+                                f"Worker running but {len(old_pending)} items pending for >30s",
+                                {
+                                    "old_pending_count": len(old_pending),
+                                    "worker_pid": pid,
+                                },
+                            )
+                        )
             except (psutil.NoSuchProcess, psutil.AccessDenied):
-                self.alerts.append(HealthAlert(
-                    HealthStatus.CRITICAL,
-                    "Worker PID exists but process not accessible",
-                    {"pid": pid, "action": "restart_worker"}
-                ))
-
+                self.alerts.append(
+                    HealthAlert(
+                        HealthStatus.CRITICAL,
+                        "Worker PID exists but process not accessible",
+                        {"pid": pid, "action": "restart_worker"},
+                    )
+                )
+
         return metrics
-
+
     def _check_queue_health(self) -> Dict[str, Any]:
         """Check queue status and backlog."""
         stats = self.queue.get_stats()
-
+
         pending = stats.get("pending", 0)
         processing = stats.get("processing", 0)
         failed = stats.get("failed", 0)
         completed = stats.get("completed", 0)
-
+
         metrics = {
             "pending": pending,
             "processing": processing,
             "failed": failed,
             "completed": completed,
-            "total": pending + processing + failed + completed
+            "total": pending + processing + failed + completed,
         }
-
+
         # Check backlog levels
         if pending >= self.QUEUE_BACKLOG_CRITICAL:
-            self.alerts.append(HealthAlert(
-                HealthStatus.CRITICAL,
-                f"Critical queue backlog: {pending} pending items",
-                {"pending_count": pending, "action": "scale_workers"}
-            ))
+            self.alerts.append(
+                HealthAlert(
+                    HealthStatus.CRITICAL,
+                    f"Critical queue backlog: {pending} pending items",
+                    {"pending_count": pending, "action": "scale_workers"},
+                )
+            )
         elif pending >= self.QUEUE_BACKLOG_WARNING:
-            self.alerts.append(HealthAlert(
-                HealthStatus.WARNING,
-                f"High queue backlog: {pending} pending items",
-                {"pending_count": pending}
-            ))
-
+            self.alerts.append(
+                HealthAlert(
+                    HealthStatus.WARNING,
+                    f"High queue backlog: {pending} pending items",
+                    {"pending_count": pending},
+                )
+            )
+
         # Check for too many processing items (might indicate stuck workers)
         if processing > 5:  # Should rarely have more than a few processing
-            self.alerts.append(HealthAlert(
-                HealthStatus.WARNING,
-                f"Many items in processing state: {processing}",
-                {"processing_count": processing, "action": "check_stuck_items"}
-            ))
-
+            self.alerts.append(
+                HealthAlert(
+                    HealthStatus.WARNING,
+                    f"Many items in processing state: {processing}",
+                    {"processing_count": processing, "action": "check_stuck_items"},
+                )
+            )
+
         return metrics
-
+
     def _check_stuck_items(self) -> Dict[str, Any]:
         """Check for items stuck in processing state."""
         # Reset stuck items first
         self.queue.reset_stuck_items(timeout_minutes=5)  # 5 minute timeout
-
+
         # Get current stuck items
         stuck_items = self._get_stuck_processing_items()
-
+
         metrics = {
             "stuck_count": len(stuck_items),
-            "stuck_items": [item.id for item in stuck_items]
+            "stuck_items": [item.id for item in stuck_items],
         }
-
+
         if stuck_items:
-            self.alerts.append(HealthAlert(
-                HealthStatus.WARNING,
-                f"Found {len(stuck_items)} stuck items, auto-reset applied",
-                {"stuck_items": [item.id for item in stuck_items], "action": "items_reset"}
-            ))
-
+            self.alerts.append(
+                HealthAlert(
+                    HealthStatus.WARNING,
+                    f"Found {len(stuck_items)} stuck items, auto-reset applied",
+                    {
+                        "stuck_items": [item.id for item in stuck_items],
+                        "action": "items_reset",
+                    },
+                )
+            )
+
         return metrics
-
+
     def _check_failure_rates(self) -> Dict[str, Any]:
         """Check recent failure rates."""
         stats = self.queue.get_stats()
-
+
         total_items = sum(stats.values())
         failed_items = stats.get("failed", 0)
-
+
         failure_rate = failed_items / total_items if total_items > 0 else 0
-
+
         metrics = {
             "failure_rate": failure_rate,
             "failed_count": failed_items,
-            "total_count": total_items
+            "total_count": total_items,
         }
-
+
         if failure_rate >= self.HIGH_FAILURE_RATE and total_items >= 10:
-            self.alerts.append(HealthAlert(
-                HealthStatus.CRITICAL,
-                f"High failure rate: {failure_rate:.1%} ({failed_items}/{total_items})",
-                {"failure_rate": failure_rate, "action": "investigate_failures"}
-            ))
-
+            self.alerts.append(
+                HealthAlert(
+                    HealthStatus.CRITICAL,
+                    f"High failure rate: {failure_rate:.1%} ({failed_items}/{total_items})",
+                    {"failure_rate": failure_rate, "action": "investigate_failures"},
+                )
+            )
+
         return metrics
-
+
     def _determine_overall_status(self) -> HealthStatus:
         """Determine overall health status from alerts."""
         if not self.alerts:
             return HealthStatus.HEALTHY
-
+
         # Check for critical alerts
         if any(alert.level == HealthStatus.CRITICAL for alert in self.alerts):
             return HealthStatus.CRITICAL
-
+
         # Check for warnings
         if any(alert.level == HealthStatus.WARNING for alert in self.alerts):
             return HealthStatus.WARNING
-
+
         return HealthStatus.HEALTHY
-
+
     def _get_old_pending_items(self) -> List:
         """Get items that have been pending for too long."""
         cutoff_time = datetime.now() - timedelta(seconds=self.WORKER_TIMEOUT_SECONDS)
-
+
         items = self.queue.list_items(status=QueueStatus.PENDING, limit=100)
-        return [
-            item for item in items
-            if item.created_at < cutoff_time
-        ]
-
+        return [item for item in items if item.created_at < cutoff_time]
+
     def _get_stuck_processing_items(self) -> List:
         """Get items stuck in processing state."""
         cutoff_time = datetime.now() - timedelta(seconds=self.STUCK_ITEM_THRESHOLD)
-
+
         items = self.queue.list_items(status=QueueStatus.PROCESSING, limit=100)
-        return [
-            item for item in items
-            if item.created_at < cutoff_time
-        ]
-
+        return [item for item in items if item.created_at < cutoff_time]
+
     def get_immediate_alerts(self) -> List[HealthAlert]:
         """Get alerts that require immediate attention."""
         return [
-            alert for alert in self.alerts
+            alert
+            for alert in self.alerts
             if alert.level in [HealthStatus.CRITICAL, HealthStatus.FAILED]
         ]
-
+
     def auto_repair(self) -> Dict[str, Any]:
         """Attempt automatic repair of detected issues."""
         repair_actions = []
-
+
         # Check health first
         health = self.check_health()
-
+
         for alert in self.alerts:
             action = alert.details.get("action")
-
+
             if action == "start_worker":
                 try:
                     if self.manager.start():
-                        repair_actions.append(f"Started worker for {alert.details.get('pending_count')} pending items")
+                        repair_actions.append(
+                            f"Started worker for {alert.details.get('pending_count')} pending items"
+                        )
                     else:
                         repair_actions.append("Failed to start worker")
                 except Exception as e:
                     repair_actions.append(f"Error starting worker: {e}")
-
+
             elif action == "restart_worker":
                 try:
                     self.manager.stop()
@@ -312,11 +341,13 @@ class QueueHealthMonitor:
                         repair_actions.append("Failed to restart worker")
                 except Exception as e:
                     repair_actions.append(f"Error restarting worker: {e}")
-
+
             elif action == "items_reset":
-                repair_actions.append(f"Reset {alert.details.get('stuck_items', [])} stuck items")
-
+                repair_actions.append(
+                    f"Reset {alert.details.get('stuck_items', [])} stuck items"
+                )
+
         return {
             "actions_taken": repair_actions,
-            "timestamp": datetime.now().isoformat()
+            "timestamp": datetime.now().isoformat(),
         }
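
The health_monitor.py changes above are essentially Black-style reformatting (trailing commas, HealthAlert constructions expanded across lines, docstring spacing) plus dropping the unused asyncio and pathlib.Path imports; the monitoring logic itself is unchanged. For orientation, here is a minimal, hypothetical usage sketch built only from the methods visible in these hunks; the import path is inferred from the wheel's file list and nothing here is taken from the package's documented API.

# Illustrative sketch only; the import path is assumed from the file list above.
from mcp_ticketer.queue.health_monitor import HealthStatus, QueueHealthMonitor

monitor = QueueHealthMonitor()      # builds a default Queue when none is passed
report = monitor.check_health()     # fills monitor.alerts and returns status + metrics

if report["status"] != HealthStatus.HEALTHY:
    for alert in monitor.get_immediate_alerts():
        print(alert)                # e.g. "[CRITICAL] Worker not running but 3 items pending"
    result = monitor.auto_repair()  # may start/restart the worker or report reset stuck items
    print(result["actions_taken"])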

mcp_ticketer/queue/manager.py

@@ -113,9 +113,12 @@ class WorkerManager:
         if env_file.exists():
             logger.debug(f"Loading environment from {env_file} for subprocess")
             from dotenv import dotenv_values
+
             env_vars = dotenv_values(env_file)
             subprocess_env.update(env_vars)
-            logger.debug(f"Added {len(env_vars)} environment variables from .env.local")
+            logger.debug(
+                f"Added {len(env_vars)} environment variables from .env.local"
+            )
 
         # Start as background process
         process = subprocess.Popen(
@@ -281,6 +284,7 @@ class WorkerManager:
 
         Returns:
            Path to Python executable
+
         """
         # First, try to detect if we're running in a pipx environment
         # by checking if the current executable is in a pipx venv
@@ -293,18 +297,21 @@
 
         # Check if we can find the mcp-ticketer executable and extract its Python
         import shutil
+
         mcp_ticketer_path = shutil.which("mcp-ticketer")
         if mcp_ticketer_path:
             try:
                 # Read the shebang line to get the Python executable
-                with open(mcp_ticketer_path, 'r') as f:
+                with open(mcp_ticketer_path) as f:
                     first_line = f.readline().strip()
                     if first_line.startswith("#!") and "python" in first_line:
                         python_path = first_line[2:].strip()
                         if os.path.exists(python_path):
-                            logger.debug(f"Using Python from mcp-ticketer shebang: {python_path}")
+                            logger.debug(
+                                f"Using Python from mcp-ticketer shebang: {python_path}"
+                            )
                             return python_path
-            except (OSError, IOError):
+            except OSError:
                 pass
 
         # Fallback to sys.executable
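
The manager.py hunks are likewise formatting-only, with one semantic-looking tweak: `except (OSError, IOError)` becomes `except OSError`. On Python 3 this changes nothing, because IOError has been an alias of OSError since Python 3.3, so the narrower-looking clause still catches the same errors. A quick standalone illustration, not part of the package:

# IOError is just another name for OSError on Python 3, so the diff's change is cosmetic.
assert IOError is OSError

try:
    open("/nonexistent/path")
except OSError as exc:           # also catches anything historically raised as IOError
    print(type(exc).__name__)    # FileNotFoundError, an OSError subclass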

mcp_ticketer/queue/queue.py

@@ -211,7 +211,12 @@ class Queue:
                     SET status = ?, processed_at = ?
                     WHERE id = ? AND status = ?
                     """,
-                    (QueueStatus.PROCESSING.value, datetime.now().isoformat(), row[0], QueueStatus.PENDING.value),
+                    (
+                        QueueStatus.PROCESSING.value,
+                        datetime.now().isoformat(),
+                        row[0],
+                        QueueStatus.PENDING.value,
+                    ),
                 )
 
                 # Check if update was successful (prevents race conditions)
@@ -254,6 +259,7 @@ class Queue:
 
         Returns:
            True if update was successful, False if item was in unexpected state
+
         """
         with self._lock:
             with sqlite3.connect(self.db_path) as conn:
@@ -314,7 +320,9 @@
                     conn.rollback()
                     raise
 
-    def increment_retry(self, queue_id: str, expected_status: Optional[QueueStatus] = None) -> int:
+    def increment_retry(
+        self, queue_id: str, expected_status: Optional[QueueStatus] = None
+    ) -> int:
         """Increment retry count and reset to pending atomically.
 
         Args:
@@ -340,7 +348,11 @@
                             WHERE id = ? AND status = ?
                             RETURNING retry_count
                             """,
-                            (QueueStatus.PENDING.value, queue_id, expected_status.value),
+                            (
+                                QueueStatus.PENDING.value,
+                                queue_id,
+                                expected_status.value,
+                            ),
                         )
                     else:
                         # Regular increment
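
The queue.py hunks only re-wrap a long signature and two parameter tuples; the compare-and-set idiom they feed (`WHERE id = ? AND status = ?`, noted in the code as preventing race conditions, plus `RETURNING retry_count` in increment_retry, which requires SQLite 3.35+) is untouched. A self-contained sketch of that idiom, using a made-up table rather than the package's actual schema:

import sqlite3

# Hypothetical table; the real schema lives in mcp_ticketer/queue/queue.py.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE queue (id TEXT PRIMARY KEY, status TEXT)")
conn.execute("INSERT INTO queue VALUES ('item-1', 'pending')")

# The UPDATE fires only if the row is still in the expected state, so two
# workers racing for the same item cannot both claim it.
cur = conn.execute(
    "UPDATE queue SET status = ? WHERE id = ? AND status = ?",
    ("processing", "item-1", "pending"),
)
print(cur.rowcount == 1)  # True for the first claimer; a later attempt sees 0 rows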

mcp_ticketer/queue/run_worker.py

@@ -15,8 +15,8 @@ logger = logging.getLogger(__name__)
 
 def main():
     """Run the worker process."""
-    import sys
     import os
+
     logger.info("Starting standalone worker process")
     logger.info(f"Worker Python executable: {sys.executable}")
     logger.info(f"Worker working directory: {os.getcwd()}")