mcp-ticketer 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-ticketer might be problematic.
- mcp_ticketer/__init__.py +27 -0
- mcp_ticketer/__version__.py +40 -0
- mcp_ticketer/adapters/__init__.py +8 -0
- mcp_ticketer/adapters/aitrackdown.py +396 -0
- mcp_ticketer/adapters/github.py +974 -0
- mcp_ticketer/adapters/jira.py +831 -0
- mcp_ticketer/adapters/linear.py +1355 -0
- mcp_ticketer/cache/__init__.py +5 -0
- mcp_ticketer/cache/memory.py +193 -0
- mcp_ticketer/cli/__init__.py +5 -0
- mcp_ticketer/cli/main.py +812 -0
- mcp_ticketer/cli/queue_commands.py +285 -0
- mcp_ticketer/cli/utils.py +523 -0
- mcp_ticketer/core/__init__.py +15 -0
- mcp_ticketer/core/adapter.py +211 -0
- mcp_ticketer/core/config.py +403 -0
- mcp_ticketer/core/http_client.py +430 -0
- mcp_ticketer/core/mappers.py +492 -0
- mcp_ticketer/core/models.py +111 -0
- mcp_ticketer/core/registry.py +128 -0
- mcp_ticketer/mcp/__init__.py +5 -0
- mcp_ticketer/mcp/server.py +459 -0
- mcp_ticketer/py.typed +0 -0
- mcp_ticketer/queue/__init__.py +7 -0
- mcp_ticketer/queue/__main__.py +6 -0
- mcp_ticketer/queue/manager.py +261 -0
- mcp_ticketer/queue/queue.py +357 -0
- mcp_ticketer/queue/run_worker.py +38 -0
- mcp_ticketer/queue/worker.py +425 -0
- mcp_ticketer-0.1.1.dist-info/METADATA +362 -0
- mcp_ticketer-0.1.1.dist-info/RECORD +35 -0
- mcp_ticketer-0.1.1.dist-info/WHEEL +5 -0
- mcp_ticketer-0.1.1.dist-info/entry_points.txt +3 -0
- mcp_ticketer-0.1.1.dist-info/licenses/LICENSE +21 -0
- mcp_ticketer-0.1.1.dist-info/top_level.txt +1 -0
mcp_ticketer/queue/worker.py
@@ -0,0 +1,425 @@
"""Background worker for processing queued ticket operations."""

import asyncio
import json
import logging
import signal
import sys
import time
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional, Dict, Any, List
import threading
from dotenv import load_dotenv

from .queue import Queue, QueueItem, QueueStatus
from ..core import AdapterRegistry, Task

# Load environment variables from .env.local
env_path = Path.cwd() / ".env.local"
if env_path.exists():
    load_dotenv(env_path)


# Configure logging
LOG_DIR = Path.home() / ".mcp-ticketer" / "logs"
LOG_DIR.mkdir(parents=True, exist_ok=True)
LOG_FILE = LOG_DIR / "worker.log"

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(LOG_FILE),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class Worker:
    """Background worker for processing queue items with batch processing and concurrency."""

    # Rate limits per adapter (requests per minute)
    RATE_LIMITS = {
        "linear": 60,
        "jira": 30,
        "github": 60,
        "aitrackdown": 1000  # Local, no rate limit
    }
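    # With these limits, _check_rate_limit spaces requests at least
    # 60/limit seconds apart -- e.g. 2s between Jira calls at 30 rpm.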

    # Retry configuration
    MAX_RETRIES = 3
    BASE_RETRY_DELAY = 5  # seconds

    # Batch processing configuration
    DEFAULT_BATCH_SIZE = 10
    DEFAULT_MAX_CONCURRENT = 5

    def __init__(
        self,
        queue: Optional[Queue] = None,
        batch_size: int = DEFAULT_BATCH_SIZE,
        max_concurrent: int = DEFAULT_MAX_CONCURRENT
    ):
        """Initialize worker.

        Args:
            queue: Queue instance (creates default if not provided)
            batch_size: Number of items to process in a batch
            max_concurrent: Maximum concurrent operations per adapter
        """
        self.queue = queue or Queue()
        self.running = False
        self.stop_event = threading.Event()
        self.batch_size = batch_size
        self.max_concurrent = max_concurrent

        # Track rate limits per adapter
        self.last_request_times: Dict[str, datetime] = {}
        self.adapter_semaphores: Dict[str, asyncio.Semaphore] = {}

        # Statistics
        self.stats = {
            "items_processed": 0,
            "items_failed": 0,
            "batches_processed": 0,
            "start_time": None,
        }

        # Set up signal handlers
        signal.signal(signal.SIGTERM, self._signal_handler)
        signal.signal(signal.SIGINT, self._signal_handler)

        logger.info(f"Worker initialized with batch_size={batch_size}, max_concurrent={max_concurrent}")

    def _signal_handler(self, signum, frame):
        """Handle shutdown signals."""
        logger.info(f"Received signal {signum}, shutting down...")
        self.stop()

    def start(self, daemon: bool = True):
        """Start the worker.

        Args:
            daemon: Run as daemon process
        """
        if self.running:
            logger.warning("Worker already running")
            return

        self.running = True
        self.stats["start_time"] = datetime.now()
        logger.info("Starting worker...")

        if daemon:
            # Run in separate thread for daemon mode
            thread = threading.Thread(target=self._run_loop)
            thread.daemon = True
            thread.start()
        else:
            # Run in main thread
            self._run_loop()

    def stop(self):
        """Stop the worker."""
        logger.info("Stopping worker...")
        self.running = False
        self.stop_event.set()

    def _run_loop(self):
        """Main worker loop with batch processing."""
        logger.info("Worker loop started")

        # Reset any stuck items on startup
        self.queue.reset_stuck_items()

        while self.running:
            try:
                # Get batch of pending items
                batch = self._get_batch()

                if batch:
                    # Process batch
                    asyncio.run(self._process_batch(batch))
                    self.stats["batches_processed"] += 1
                else:
                    # No items, wait a bit
                    self.stop_event.wait(timeout=1)

            except Exception as e:
                logger.error(f"Unexpected error in worker loop: {e}", exc_info=True)
                time.sleep(5)  # Prevent tight error loop

        logger.info("Worker loop stopped")

    def _get_batch(self) -> List[QueueItem]:
        """Get a batch of pending items from the queue.

        Returns:
            List of queue items to process
        """
        batch = []
        for _ in range(self.batch_size):
            item = self.queue.get_next_pending()
            if item:
                batch.append(item)
            else:
                break
        return batch

    async def _process_batch(self, batch: List[QueueItem]):
        """Process a batch of queue items with concurrency control.

        Args:
            batch: List of queue items to process
        """
        logger.info(f"Processing batch of {len(batch)} items")

        # Group items by adapter for concurrent processing
        adapter_groups = {}
        for item in batch:
            if item.adapter not in adapter_groups:
                adapter_groups[item.adapter] = []
            adapter_groups[item.adapter].append(item)

        # Process each adapter group concurrently
        tasks = []
        for adapter, items in adapter_groups.items():
            task = self._process_adapter_group(adapter, items)
            tasks.append(task)

        # Wait for all adapter groups to complete
        await asyncio.gather(*tasks, return_exceptions=True)
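        # return_exceptions=True keeps one failing adapter group from aborting
        # the whole batch; per-item failures are handled inside _process_item.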

    async def _process_adapter_group(self, adapter: str, items: List[QueueItem]):
        """Process items for a specific adapter with concurrency control.

        Args:
            adapter: Adapter name
            items: List of items for this adapter
        """
        logger.debug(f"Processing {len(items)} items for adapter {adapter}")

        # Get or create semaphore for this adapter
        if adapter not in self.adapter_semaphores:
            self.adapter_semaphores[adapter] = asyncio.Semaphore(self.max_concurrent)

        semaphore = self.adapter_semaphores[adapter]

        # Process items with concurrency control
        async def process_with_semaphore(item):
            async with semaphore:
                await self._process_item(item)

        # Create tasks for all items
        tasks = [process_with_semaphore(item) for item in items]

        # Process with concurrency control
        await asyncio.gather(*tasks, return_exceptions=True)

    async def _process_item(self, item: QueueItem):
        """Process a single queue item.

        Args:
            item: Queue item to process
        """
        logger.info(f"Processing queue item {item.id}: {item.operation} on {item.adapter}")

        try:
            # Check rate limit
            await self._check_rate_limit(item.adapter)

            # Get adapter
            adapter = self._get_adapter(item)
            if not adapter:
                raise ValueError(f"Unknown adapter: {item.adapter}")

            # Process operation
            result = await self._execute_operation(adapter, item)

            # Mark as completed
            self.queue.update_status(
                item.id,
                QueueStatus.COMPLETED,
                result=result
            )
            self.stats["items_processed"] += 1
            logger.info(f"Successfully processed {item.id}")

        except Exception as e:
            logger.error(f"Error processing {item.id}: {e}")

            # Check retry count
            if item.retry_count < self.MAX_RETRIES:
                # Retry with exponential backoff
                retry_delay = self.BASE_RETRY_DELAY * (2 ** item.retry_count)
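                # With BASE_RETRY_DELAY = 5, delays double per attempt: 5s, 10s, 20s.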
                logger.info(f"Retrying {item.id} after {retry_delay}s (attempt {item.retry_count + 1}/{self.MAX_RETRIES})")

                # Increment retry count and reset to pending
                self.queue.increment_retry(item.id)

                # Wait before retry
                await asyncio.sleep(retry_delay)
            else:
                # Max retries exceeded, mark as failed
                self.queue.update_status(
                    item.id,
                    QueueStatus.FAILED,
                    error_message=str(e)
                )
                self.stats["items_failed"] += 1
                logger.error(f"Max retries exceeded for {item.id}, marking as failed")

    async def _check_rate_limit(self, adapter: str):
        """Check and enforce rate limits.

        Args:
            adapter: Adapter name
        """
        if adapter not in self.RATE_LIMITS:
            return

        limit = self.RATE_LIMITS[adapter]
        min_interval = 60.0 / limit  # seconds between requests

        if adapter in self.last_request_times:
            last_time = self.last_request_times[adapter]
            elapsed = (datetime.now() - last_time).total_seconds()

            if elapsed < min_interval:
                wait_time = min_interval - elapsed
                logger.debug(f"Rate limit: waiting {wait_time:.2f}s for {adapter}")
                await asyncio.sleep(wait_time)

        self.last_request_times[adapter] = datetime.now()

    def _get_adapter(self, item: QueueItem):
        """Get adapter instance for item.

        Args:
            item: Queue item

        Returns:
            Adapter instance
        """
        # Load configuration
        from ..cli.main import load_config

        config = load_config()
        adapters_config = config.get("adapters", {})
        adapter_config = adapters_config.get(item.adapter, {})

        # Add environment variables for authentication
        import os
        if item.adapter == "linear":
            if not adapter_config.get("api_key"):
                adapter_config["api_key"] = os.getenv("LINEAR_API_KEY")
        elif item.adapter == "github":
            if not adapter_config.get("token"):
                adapter_config["token"] = os.getenv("GITHUB_TOKEN")
        elif item.adapter == "jira":
            if not adapter_config.get("api_token"):
                adapter_config["api_token"] = os.getenv("JIRA_ACCESS_TOKEN")
            if not adapter_config.get("email"):
                adapter_config["email"] = os.getenv("JIRA_ACCESS_USER")

        return AdapterRegistry.get_adapter(item.adapter, adapter_config)

    async def _execute_operation(self, adapter, item: QueueItem) -> Dict[str, Any]:
        """Execute the queued operation.

        Args:
            adapter: Adapter instance
            item: Queue item

        Returns:
            Operation result
        """
        operation = item.operation
        data = item.ticket_data

        if operation == "create":
            task = Task(**data)
            result = await adapter.create(task)
            return {"id": result.id, "title": result.title, "state": result.state}

        elif operation == "update":
            ticket_id = data.pop("ticket_id")
            result = await adapter.update(ticket_id, data)
            return {"id": result.id if result else None, "success": bool(result)}

        elif operation == "delete":
            ticket_id = data.get("ticket_id")
            result = await adapter.delete(ticket_id)
            return {"success": result}

        elif operation == "transition":
            ticket_id = data.get("ticket_id")
            state = data.get("state")
            result = await adapter.transition_state(ticket_id, state)
            return {"id": result.id if result else None, "state": state, "success": bool(result)}

        elif operation == "comment":
            ticket_id = data.get("ticket_id")
            content = data.get("content")
            await adapter.add_comment(ticket_id, content)
            return {"success": True}

        else:
            raise ValueError(f"Unknown operation: {operation}")

    def get_status(self) -> Dict[str, Any]:
        """Get worker status.

        Returns:
            Status information
        """
        queue_stats = self.queue.get_stats()

        # Calculate throughput
        throughput = 0
        if self.stats["start_time"]:
            elapsed = (datetime.now() - self.stats["start_time"]).total_seconds()
            if elapsed > 0:
                throughput = self.stats["items_processed"] / elapsed * 60  # items per minute

        return {
            "running": self.running,
            "configuration": {
                "batch_size": self.batch_size,
                "max_concurrent": self.max_concurrent,
            },
            "worker_stats": {
                "items_processed": self.stats["items_processed"],
                "items_failed": self.stats["items_failed"],
                "batches_processed": self.stats["batches_processed"],
                "throughput_per_minute": throughput,
                "uptime_seconds": (
                    (datetime.now() - self.stats["start_time"]).total_seconds()
                    if self.stats["start_time"] else 0
                ),
            },
            "queue_stats": queue_stats,
            "total_pending": queue_stats.get(QueueStatus.PENDING.value, 0),
            "total_processing": queue_stats.get(QueueStatus.PROCESSING.value, 0),
            "total_completed": queue_stats.get(QueueStatus.COMPLETED.value, 0),
            "total_failed": queue_stats.get(QueueStatus.FAILED.value, 0)
        }

    @classmethod
    def get_logs(cls, lines: int = 50) -> str:
        """Get recent log entries.

        Args:
            lines: Number of lines to return

        Returns:
            Log content
        """
        if not LOG_FILE.exists():
            return "No logs available"

        with open(LOG_FILE, "r") as f:
            all_lines = f.readlines()
        return "".join(all_lines[-lines:])