mcp-ticketer 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-ticketer might be problematic; see the advisory on the package registry page for more details.
- mcp_ticketer/__init__.py +27 -0
- mcp_ticketer/__version__.py +40 -0
- mcp_ticketer/adapters/__init__.py +8 -0
- mcp_ticketer/adapters/aitrackdown.py +396 -0
- mcp_ticketer/adapters/github.py +974 -0
- mcp_ticketer/adapters/jira.py +831 -0
- mcp_ticketer/adapters/linear.py +1355 -0
- mcp_ticketer/cache/__init__.py +5 -0
- mcp_ticketer/cache/memory.py +193 -0
- mcp_ticketer/cli/__init__.py +5 -0
- mcp_ticketer/cli/main.py +812 -0
- mcp_ticketer/cli/queue_commands.py +285 -0
- mcp_ticketer/cli/utils.py +523 -0
- mcp_ticketer/core/__init__.py +15 -0
- mcp_ticketer/core/adapter.py +211 -0
- mcp_ticketer/core/config.py +403 -0
- mcp_ticketer/core/http_client.py +430 -0
- mcp_ticketer/core/mappers.py +492 -0
- mcp_ticketer/core/models.py +111 -0
- mcp_ticketer/core/registry.py +128 -0
- mcp_ticketer/mcp/__init__.py +5 -0
- mcp_ticketer/mcp/server.py +459 -0
- mcp_ticketer/py.typed +0 -0
- mcp_ticketer/queue/__init__.py +7 -0
- mcp_ticketer/queue/__main__.py +6 -0
- mcp_ticketer/queue/manager.py +261 -0
- mcp_ticketer/queue/queue.py +357 -0
- mcp_ticketer/queue/run_worker.py +38 -0
- mcp_ticketer/queue/worker.py +425 -0
- mcp_ticketer-0.1.1.dist-info/METADATA +362 -0
- mcp_ticketer-0.1.1.dist-info/RECORD +35 -0
- mcp_ticketer-0.1.1.dist-info/WHEEL +5 -0
- mcp_ticketer-0.1.1.dist-info/entry_points.txt +3 -0
- mcp_ticketer-0.1.1.dist-info/licenses/LICENSE +21 -0
- mcp_ticketer-0.1.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,261 @@
|
|
|
1
|
+
"""Worker manager with file-based locking for single instance."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import psutil
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
import time
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Optional, Dict, Any
|
|
10
|
+
import fcntl
|
|
11
|
+
import logging
|
|
12
|
+
|
|
13
|
+
from .queue import Queue
|
|
14
|
+
from .worker import Worker
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class WorkerManager:
    """Manages the background worker process with file-based locking.

    A non-blocking ``fcntl`` lock on ``~/.mcp-ticketer/worker.lock`` ensures
    only one manager launches a worker at a time; the spawned worker's PID is
    tracked separately in ``~/.mcp-ticketer/worker.pid``.

    NOTE(review): the lock is held by the process that calls :meth:`start`,
    not by the worker subprocess itself, so it is released when the calling
    process exits even while the worker keeps running.  Liveness checks rely
    on the PID file plus psutil — confirm this is the intended design.
    """

    def __init__(self):
        """Initialize worker manager and ensure the state directory exists."""
        self.lock_file = Path.home() / ".mcp-ticketer" / "worker.lock"
        self.pid_file = Path.home() / ".mcp-ticketer" / "worker.pid"
        self.lock_file.parent.mkdir(parents=True, exist_ok=True)
        self.queue = Queue()
        # File object holding the fcntl lock; None while no lock is held.
        self.lock_fd = None

    def _acquire_lock(self) -> bool:
        """Acquire exclusive lock for worker.

        Returns:
            True if lock acquired, False otherwise
        """
        fd = None
        try:
            # Create lock file if it doesn't exist
            if not self.lock_file.exists():
                self.lock_file.touch()

            # Try to acquire exclusive, non-blocking lock
            fd = open(self.lock_file, "w")
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

            # Record our PID in the lock file for debugging
            fd.write(str(os.getpid()))
            fd.flush()

            self.lock_fd = fd
            return True
        except OSError:
            # Lock already held by another process.  Close the file object
            # opened above so its descriptor is not leaked.
            if fd is not None:
                fd.close()
            return False

    def _release_lock(self):
        """Release worker lock and clean up the PID file.

        Safe to call more than once: ``lock_fd`` is reset to None after the
        first release so a repeated call does not touch a closed file.
        """
        fd = getattr(self, "lock_fd", None)
        if fd is not None:
            fcntl.lockf(fd, fcntl.LOCK_UN)
            fd.close()
            self.lock_fd = None

        # Clean up PID file
        if self.pid_file.exists():
            self.pid_file.unlink()

    def start_if_needed(self) -> bool:
        """Start worker if not already running and there are pending items.

        Returns:
            True if worker started or already running, False otherwise
        """
        # Check if worker is already running
        if self.is_running():
            logger.debug("Worker already running")
            return True

        # Check if there are pending items
        if self.queue.get_pending_count() == 0:
            logger.debug("No pending items, worker not needed")
            return False

        # Try to start worker
        return self.start()

    def start(self) -> bool:
        """Start the worker process.

        Returns:
            True if started successfully (or already running), False otherwise
        """
        # Check if already running
        if self.is_running():
            logger.info("Worker is already running")
            return True

        # Try to acquire lock
        if not self._acquire_lock():
            logger.warning("Could not acquire lock - another worker may be running")
            return False

        try:
            # Run the worker module in a detached session so it outlives
            # this (manager) process.
            cmd = [
                sys.executable,
                "-m",
                "mcp_ticketer.queue.run_worker"
            ]

            process = subprocess.Popen(
                cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                start_new_session=True
            )

            # Save PID for later liveness checks and stop()
            self.pid_file.write_text(str(process.pid))

            # Give the process a moment to start (module-level `time` import)
            time.sleep(0.5)

            # Verify process survived startup
            if not psutil.pid_exists(process.pid):
                logger.error("Worker process died immediately after starting")
                self._cleanup()
                return False

            logger.info(f"Started worker process with PID {process.pid}")
            return True

        except Exception as e:
            logger.error(f"Failed to start worker: {e}")
            self._release_lock()
            return False

    def stop(self) -> bool:
        """Stop the worker process.

        Sends SIGTERM first, then SIGKILL if the worker does not exit
        within 10 seconds.

        Returns:
            True if stopped successfully, False otherwise
        """
        pid = self._get_pid()
        if not pid:
            logger.info("No worker process to stop")
            return True

        try:
            # Check if process exists
            if not psutil.pid_exists(pid):
                logger.info("Worker process not found, cleaning up")
                self._cleanup()
                return True

            # Send SIGTERM
            process = psutil.Process(pid)
            process.terminate()

            # Wait for graceful shutdown
            gone, alive = psutil.wait_procs([process], timeout=10)

            if gone:
                logger.info(f"Worker process {pid} terminated gracefully")
            else:
                # Force kill if still alive
                for p in alive:
                    logger.warning(f"Force killing worker process {p.pid}")
                    p.kill()

            self._cleanup()
            return True

        except Exception as e:
            logger.error(f"Error stopping worker: {e}")
            return False

    def restart(self) -> bool:
        """Restart the worker process.

        Returns:
            True if restarted successfully, False otherwise
        """
        logger.info("Restarting worker...")
        self.stop()
        time.sleep(1)  # Brief pause between stop and start
        return self.start()

    def is_running(self) -> bool:
        """Check if worker is currently running.

        Verifies both that the recorded PID exists and that its command
        line looks like our worker (guards against PID reuse).

        Returns:
            True if running, False otherwise
        """
        pid = self._get_pid()
        if not pid:
            return False

        try:
            # Check if process exists and is actually our worker
            if psutil.pid_exists(pid):
                process = psutil.Process(pid)
                cmdline = " ".join(process.cmdline())
                return "run_worker" in cmdline or "mcp_ticketer.queue" in cmdline
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            pass

        return False

    def get_status(self) -> Dict[str, Any]:
        """Get detailed worker status.

        Returns:
            Dict with ``running`` and ``pid`` keys; when the worker is
            alive, also CPU/memory/create-time/status from psutil, plus a
            ``queue`` key with queue statistics.
        """
        is_running = self.is_running()
        pid = self._get_pid() if is_running else None

        status = {
            "running": is_running,
            "pid": pid
        }

        # Add process info if running
        if is_running and pid:
            try:
                process = psutil.Process(pid)
                status.update({
                    "cpu_percent": process.cpu_percent(),
                    "memory_mb": process.memory_info().rss / 1024 / 1024,
                    "create_time": process.create_time(),
                    "status": process.status()
                })
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                # Worker disappeared between checks; basic info is enough
                pass

        # Add queue stats
        queue_stats = self.queue.get_stats()
        status["queue"] = queue_stats

        return status

    def _get_pid(self) -> Optional[int]:
        """Get worker PID from file.

        Returns:
            Process ID or None if the file is missing or unreadable
        """
        if not self.pid_file.exists():
            return None

        try:
            pid_text = self.pid_file.read_text().strip()
            return int(pid_text)
        except (ValueError, OSError):
            return None

    def _cleanup(self):
        """Clean up lock and PID files."""
        self._release_lock()
        if self.pid_file.exists():
            self.pid_file.unlink()
        if self.lock_file.exists():
            self.lock_file.unlink()
|
|
@@ -0,0 +1,357 @@
|
|
|
1
|
+
"""SQLite-based queue system for async ticket operations."""
|
|
2
|
+
|
|
3
|
+
import sqlite3
|
|
4
|
+
import json
|
|
5
|
+
import threading
|
|
6
|
+
from datetime import datetime, timedelta
|
|
7
|
+
from enum import Enum
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Optional, Dict, Any, List
|
|
10
|
+
from dataclasses import dataclass, asdict
|
|
11
|
+
import uuid
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class QueueStatus(str, Enum):
    """Queue item lifecycle states."""

    PENDING = "pending"        # waiting to be picked up by the worker
    PROCESSING = "processing"  # claimed by a worker
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # permanently failed


@dataclass
class QueueItem:
    """Represents a queued operation."""

    id: str                       # queue ID, e.g. "Q-1A2B3C4D"
    ticket_data: Dict[str, Any]   # payload for the adapter call
    adapter: str                  # adapter name to route the operation to
    operation: str                # operation to perform (create, update, ...)
    status: QueueStatus
    created_at: datetime
    processed_at: Optional[datetime] = None
    error_message: Optional[str] = None
    retry_count: int = 0
    result: Optional[Dict[str, Any]] = None

    def to_dict(self) -> dict:
        """Convert to a dictionary with ISO-formatted timestamps."""
        data = asdict(self)
        data['created_at'] = self.created_at.isoformat()
        if self.processed_at:
            data['processed_at'] = self.processed_at.isoformat()
        return data

    @classmethod
    def from_row(cls, row: tuple) -> "QueueItem":
        """Create from a database row (column order matches the schema)."""
        return cls(
            id=row[0],
            ticket_data=json.loads(row[1]),
            adapter=row[2],
            operation=row[3],
            status=QueueStatus(row[4]),
            created_at=datetime.fromisoformat(row[5]),
            processed_at=datetime.fromisoformat(row[6]) if row[6] else None,
            error_message=row[7],
            retry_count=row[8],
            result=json.loads(row[9]) if row[9] else None
        )


class Queue:
    """Thread-safe SQLite queue for ticket operations.

    Each method opens a short-lived connection and closes it when done
    (``sqlite3.connect`` used as a context manager only commits/rolls back,
    it does NOT close — the original code leaked connections until GC).

    NOTE(review): the internal ``threading.Lock`` only serializes access
    within one process; the CLI and worker run as separate processes, so
    write methods must be safe across processes as well — `get_next_pending`
    therefore claims items with an atomic compare-and-swap UPDATE.
    """

    def __init__(self, db_path: Optional[Path] = None):
        """Initialize queue with database connection.

        Args:
            db_path: Path to SQLite database. Defaults to ~/.mcp-ticketer/queue.db
        """
        if db_path is None:
            db_dir = Path.home() / ".mcp-ticketer"
            db_dir.mkdir(parents=True, exist_ok=True)
            db_path = db_dir / "queue.db"

        self.db_path = str(db_path)
        self._lock = threading.Lock()
        self._init_database()

    def _init_database(self):
        """Initialize database schema (idempotent)."""
        conn = sqlite3.connect(self.db_path)
        try:
            conn.execute('''
                CREATE TABLE IF NOT EXISTS queue (
                    id TEXT PRIMARY KEY,
                    ticket_data TEXT NOT NULL,
                    adapter TEXT NOT NULL,
                    operation TEXT NOT NULL,
                    status TEXT NOT NULL,
                    created_at TEXT NOT NULL,
                    processed_at TEXT,
                    error_message TEXT,
                    retry_count INTEGER DEFAULT 0,
                    result TEXT,
                    CHECK (status IN ('pending', 'processing', 'completed', 'failed'))
                )
            ''')

            # Create indices for efficient queries
            conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_queue_status
                ON queue(status)
            ''')
            conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_queue_created
                ON queue(created_at)
            ''')
            conn.execute('''
                CREATE INDEX IF NOT EXISTS idx_queue_adapter
                ON queue(adapter)
            ''')

            conn.commit()
        finally:
            conn.close()

    def add(self,
            ticket_data: Dict[str, Any],
            adapter: str,
            operation: str) -> str:
        """Add item to queue.

        Args:
            ticket_data: The ticket data for the operation
            adapter: Name of the adapter to use
            operation: Operation to perform (create, update, delete, etc.)

        Returns:
            Queue ID for tracking
        """
        queue_id = f"Q-{uuid.uuid4().hex[:8].upper()}"

        with self._lock:
            conn = sqlite3.connect(self.db_path)
            try:
                conn.execute('''
                    INSERT INTO queue (
                        id, ticket_data, adapter, operation,
                        status, created_at, retry_count
                    ) VALUES (?, ?, ?, ?, ?, ?, ?)
                ''', (
                    queue_id,
                    json.dumps(ticket_data),
                    adapter,
                    operation,
                    QueueStatus.PENDING.value,
                    datetime.now().isoformat(),
                    0
                ))
                conn.commit()
            finally:
                conn.close()

        return queue_id

    def get_next_pending(self) -> Optional[QueueItem]:
        """Get next pending item from queue and mark it as processing.

        The claim is an atomic compare-and-swap: a plain SELECT-then-UPDATE
        would let two processes (CLI and worker) pick up the same row, so
        the UPDATE is guarded by ``AND status = 'pending'`` and re-tried on
        the next candidate if another process won the race.

        Returns:
            Next pending QueueItem or None if queue is empty
        """
        with self._lock:
            conn = sqlite3.connect(self.db_path)
            try:
                while True:
                    cursor = conn.execute('''
                        SELECT * FROM queue
                        WHERE status = ?
                        ORDER BY created_at
                        LIMIT 1
                    ''', (QueueStatus.PENDING.value,))
                    row = cursor.fetchone()
                    if row is None:
                        return None

                    claimed = conn.execute('''
                        UPDATE queue
                        SET status = ?
                        WHERE id = ? AND status = ?
                    ''', (QueueStatus.PROCESSING.value, row[0],
                          QueueStatus.PENDING.value))
                    if claimed.rowcount == 1:
                        conn.commit()
                        # Row snapshot was taken before the update, so the
                        # returned item's status is PENDING (as before).
                        return QueueItem.from_row(row)
                    # Another process claimed this row first; try the next.
            finally:
                conn.close()

    def update_status(self,
                      queue_id: str,
                      status: QueueStatus,
                      error_message: Optional[str] = None,
                      result: Optional[Dict[str, Any]] = None):
        """Update queue item status.

        Args:
            queue_id: Queue item ID
            status: New status
            error_message: Error message if failed
            result: Result data if completed
        """
        with self._lock:
            conn = sqlite3.connect(self.db_path)
            try:
                # Terminal states record when processing finished.
                processed_at = datetime.now().isoformat() if status in [
                    QueueStatus.COMPLETED, QueueStatus.FAILED
                ] else None

                conn.execute('''
                    UPDATE queue
                    SET status = ?, processed_at = ?,
                        error_message = ?, result = ?
                    WHERE id = ?
                ''', (
                    status.value,
                    processed_at,
                    error_message,
                    json.dumps(result) if result else None,
                    queue_id
                ))
                conn.commit()
            finally:
                conn.close()

    def increment_retry(self, queue_id: str) -> int:
        """Increment retry count for item and reset it to pending.

        Uses UPDATE-then-SELECT instead of ``RETURNING`` so it works on
        SQLite builds older than 3.35.

        Args:
            queue_id: Queue item ID

        Returns:
            New retry count (0 if the item does not exist)
        """
        with self._lock:
            conn = sqlite3.connect(self.db_path)
            try:
                conn.execute('''
                    UPDATE queue
                    SET retry_count = retry_count + 1,
                        status = ?
                    WHERE id = ?
                ''', (QueueStatus.PENDING.value, queue_id))
                cursor = conn.execute(
                    'SELECT retry_count FROM queue WHERE id = ?',
                    (queue_id,)
                )
                row = cursor.fetchone()
                conn.commit()
                return row[0] if row else 0
            finally:
                conn.close()

    def get_item(self, queue_id: str) -> Optional[QueueItem]:
        """Get specific queue item by ID.

        Args:
            queue_id: Queue item ID

        Returns:
            QueueItem or None if not found
        """
        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.execute('''
                SELECT * FROM queue WHERE id = ?
            ''', (queue_id,))
            row = cursor.fetchone()
            return QueueItem.from_row(row) if row else None
        finally:
            conn.close()

    def list_items(self,
                   status: Optional[QueueStatus] = None,
                   limit: int = 50) -> List[QueueItem]:
        """List queue items, newest first.

        Args:
            status: Filter by status (optional)
            limit: Maximum items to return

        Returns:
            List of QueueItems
        """
        conn = sqlite3.connect(self.db_path)
        try:
            if status:
                cursor = conn.execute('''
                    SELECT * FROM queue
                    WHERE status = ?
                    ORDER BY created_at DESC
                    LIMIT ?
                ''', (status.value, limit))
            else:
                cursor = conn.execute('''
                    SELECT * FROM queue
                    ORDER BY created_at DESC
                    LIMIT ?
                ''', (limit,))

            return [QueueItem.from_row(row) for row in cursor.fetchall()]
        finally:
            conn.close()

    def get_pending_count(self) -> int:
        """Get count of pending items.

        Returns:
            Number of pending items
        """
        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.execute('''
                SELECT COUNT(*) FROM queue
                WHERE status = ?
            ''', (QueueStatus.PENDING.value,))
            return cursor.fetchone()[0]
        finally:
            conn.close()

    def cleanup_old(self, days: int = 7):
        """Clean up old completed/failed items.

        Args:
            days: Delete items older than this many days
        """
        cutoff_date = (datetime.now() - timedelta(days=days)).isoformat()

        with self._lock:
            conn = sqlite3.connect(self.db_path)
            try:
                conn.execute('''
                    DELETE FROM queue
                    WHERE status IN (?, ?)
                    AND processed_at < ?
                ''', (
                    QueueStatus.COMPLETED.value,
                    QueueStatus.FAILED.value,
                    cutoff_date
                ))
                conn.commit()
            finally:
                conn.close()

    def reset_stuck_items(self, timeout_minutes: int = 30):
        """Reset items stuck in processing state back to pending.

        NOTE(review): staleness is judged by ``created_at`` because the
        schema has no processing-start timestamp, so an old item that was
        only just claimed will be reset immediately — confirm acceptable.

        Args:
            timeout_minutes: Consider items stuck after this many minutes
        """
        cutoff_time = (datetime.now() - timedelta(minutes=timeout_minutes)).isoformat()

        with self._lock:
            conn = sqlite3.connect(self.db_path)
            try:
                conn.execute('''
                    UPDATE queue
                    SET status = ?, error_message = ?
                    WHERE status = ?
                    AND created_at < ?
                ''', (
                    QueueStatus.PENDING.value,
                    "Reset from stuck processing state",
                    QueueStatus.PROCESSING.value,
                    cutoff_time
                ))
                conn.commit()
            finally:
                conn.close()

    def get_stats(self) -> Dict[str, int]:
        """Get queue statistics.

        Returns:
            Dictionary with counts by status (all statuses present, 0 if none)
        """
        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.execute('''
                SELECT status, COUNT(*)
                FROM queue
                GROUP BY status
            ''')

            stats = {status.value: 0 for status in QueueStatus}
            for status, count in cursor.fetchall():
                stats[status] = count

            return stats
        finally:
            conn.close()
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""Standalone worker runner module."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import sys
|
|
5
|
+
|
|
6
|
+
from .queue import Queue
|
|
7
|
+
from .worker import Worker
|
|
8
|
+
|
|
9
|
+
# Configure root logging for the standalone worker process.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def main():
    """Entry point: run the queue worker in the foreground until it exits."""
    logger.info("Starting standalone worker process")

    try:
        # Wire a worker to the shared queue and block until it stops.
        work_queue = Queue()
        runner = Worker(work_queue)
        runner.start(daemon=False)

    except KeyboardInterrupt:
        logger.info("Worker interrupted by user")
        sys.exit(0)
    except Exception as e:
        logger.error(f"Worker failed: {e}", exc_info=True)
        sys.exit(1)


if __name__ == "__main__":
    main()
|