claude-memory-agent 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +107 -0
- package/README.md +200 -0
- package/agent_card.py +512 -0
- package/bin/cli.js +181 -0
- package/bin/postinstall.js +216 -0
- package/config.py +104 -0
- package/dashboard.html +2689 -0
- package/hooks/README.md +196 -0
- package/hooks/__pycache__/auto-detect-response.cpython-312.pyc +0 -0
- package/hooks/__pycache__/auto_capture.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_start.cpython-312.pyc +0 -0
- package/hooks/auto-detect-response.py +348 -0
- package/hooks/auto_capture.py +255 -0
- package/hooks/detect-correction.py +173 -0
- package/hooks/grounding-hook.py +348 -0
- package/hooks/log-tool-use.py +234 -0
- package/hooks/log-user-request.py +208 -0
- package/hooks/pre-tool-decision.py +218 -0
- package/hooks/problem-detector.py +343 -0
- package/hooks/session_end.py +192 -0
- package/hooks/session_start.py +227 -0
- package/install.py +887 -0
- package/main.py +2859 -0
- package/manager.py +997 -0
- package/package.json +55 -0
- package/requirements.txt +8 -0
- package/run_server.py +136 -0
- package/services/__init__.py +50 -0
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/agent_registry.cpython-312.pyc +0 -0
- package/services/__pycache__/auth.cpython-312.pyc +0 -0
- package/services/__pycache__/auto_inject.cpython-312.pyc +0 -0
- package/services/__pycache__/claude_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/services/__pycache__/compaction_flush.cpython-312.pyc +0 -0
- package/services/__pycache__/confidence.cpython-312.pyc +0 -0
- package/services/__pycache__/daily_log.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/__pycache__/embeddings.cpython-312.pyc +0 -0
- package/services/__pycache__/insights.cpython-312.pyc +0 -0
- package/services/__pycache__/llm_analyzer.cpython-312.pyc +0 -0
- package/services/__pycache__/memory_md_sync.cpython-312.pyc +0 -0
- package/services/__pycache__/retry_queue.cpython-312.pyc +0 -0
- package/services/__pycache__/timeline.cpython-312.pyc +0 -0
- package/services/__pycache__/vector_index.cpython-312.pyc +0 -0
- package/services/__pycache__/websocket.cpython-312.pyc +0 -0
- package/services/agent_registry.py +753 -0
- package/services/auth.py +331 -0
- package/services/auto_inject.py +250 -0
- package/services/claude_md_sync.py +275 -0
- package/services/cleanup.py +667 -0
- package/services/compaction_flush.py +447 -0
- package/services/confidence.py +301 -0
- package/services/daily_log.py +333 -0
- package/services/database.py +2485 -0
- package/services/embeddings.py +358 -0
- package/services/insights.py +632 -0
- package/services/llm_analyzer.py +595 -0
- package/services/memory_md_sync.py +409 -0
- package/services/retry_queue.py +453 -0
- package/services/timeline.py +579 -0
- package/services/vector_index.py +398 -0
- package/services/websocket.py +257 -0
- package/skills/__init__.py +6 -0
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/admin.cpython-312.pyc +0 -0
- package/skills/__pycache__/checkpoint.cpython-312.pyc +0 -0
- package/skills/__pycache__/claude_md.cpython-312.pyc +0 -0
- package/skills/__pycache__/cleanup.cpython-312.pyc +0 -0
- package/skills/__pycache__/grounding.cpython-312.pyc +0 -0
- package/skills/__pycache__/insights.cpython-312.pyc +0 -0
- package/skills/__pycache__/natural_language.cpython-312.pyc +0 -0
- package/skills/__pycache__/retrieve.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/state.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/__pycache__/summarize.cpython-312.pyc +0 -0
- package/skills/__pycache__/timeline.cpython-312.pyc +0 -0
- package/skills/__pycache__/verification.cpython-312.pyc +0 -0
- package/skills/admin.py +469 -0
- package/skills/checkpoint.py +198 -0
- package/skills/claude_md.py +363 -0
- package/skills/cleanup.py +241 -0
- package/skills/grounding.py +801 -0
- package/skills/insights.py +231 -0
- package/skills/natural_language.py +277 -0
- package/skills/retrieve.py +67 -0
- package/skills/search.py +213 -0
- package/skills/state.py +182 -0
- package/skills/store.py +179 -0
- package/skills/summarize.py +588 -0
- package/skills/timeline.py +387 -0
- package/skills/verification.py +391 -0
- package/start_daemon.py +155 -0
- package/test_automation.py +221 -0
- package/test_complete.py +338 -0
- package/test_full.py +322 -0
- package/update_system.py +817 -0
- package/verify_db.py +134 -0
|
@@ -0,0 +1,453 @@
|
|
|
1
|
+
"""Retry queue for hook failures with persistence and exponential backoff.
|
|
2
|
+
|
|
3
|
+
Ensures hook calls are not lost when the memory agent is unavailable.
|
|
4
|
+
Uses SQLite for persistence and supports file-based fallback.
|
|
5
|
+
"""
|
|
6
|
+
import asyncio
import json
import os
import sqlite3
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path
from threading import Lock
from typing import Any, Awaitable, Callable, Dict, List, Optional

from dotenv import load_dotenv
|
|
16
|
+
|
|
17
|
+
load_dotenv()

# Configuration — every value is overridable via environment variables.
# Primary store: a SQLite file next to the package root (services/../queue.db).
QUEUE_DB_PATH = os.getenv("QUEUE_DB_PATH", str(Path(__file__).parent.parent / "queue.db"))
# JSONL fallback used when the SQLite database cannot be opened.
QUEUE_FILE_FALLBACK = os.getenv("QUEUE_FILE_FALLBACK", str(Path.home() / ".claude" / "memory_queue.jsonl"))
# Retry policy: up to MAX_RETRIES attempts, exponential backoff capped at MAX_BACKOFF_SECONDS.
MAX_RETRIES = int(os.getenv("QUEUE_MAX_RETRIES", "5"))
BASE_BACKOFF_SECONDS = float(os.getenv("QUEUE_BASE_BACKOFF", "1.0"))
MAX_BACKOFF_SECONDS = float(os.getenv("QUEUE_MAX_BACKOFF", "300.0"))  # 5 minutes max
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class RetryQueue:
    """SQLite-backed retry queue with exponential backoff.

    Features:
    - Persistent storage in SQLite
    - File-based (JSONL) fallback when SQLite is unavailable
    - Exponential backoff for retries
    - Dead letter queue for permanently failed requests
    - Background processing with configurable interval

    Thread-safety: public methods serialize on a single ``Lock``; the SQLite
    connection is opened with ``check_same_thread=False`` so it may be used
    from any thread while that lock is held.
    """

    def __init__(self, db_path: str = QUEUE_DB_PATH):
        self.db_path = db_path
        self.fallback_path = Path(QUEUE_FILE_FALLBACK)
        self.conn: Optional[sqlite3.Connection] = None  # None => file-fallback mode
        self._lock = Lock()
        self._processing = False  # polled by process_queue(); cleared by stop_processing()
        self._processor_task: Optional[asyncio.Task] = None

        # Lifetime counters reported by get_stats()
        self._enqueued = 0
        self._processed = 0
        self._failed = 0   # items moved to the dead letter queue
        self._retried = 0  # mark_failed() calls (note: includes final failures)

        self._initialize_db()

    def _initialize_db(self):
        """Initialize the queue database; on any error fall back to the JSONL file."""
        try:
            self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
            self.conn.row_factory = sqlite3.Row  # rows support name-based access
            cursor = self.conn.cursor()

            # Queue table.  Timestamp defaults use SQLite's datetime('now'),
            # which is UTC — all stored timestamps must therefore be UTC too.
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS pending_requests (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    endpoint TEXT NOT NULL,
                    method TEXT DEFAULT 'POST',
                    payload TEXT NOT NULL,
                    headers TEXT,
                    attempts INTEGER DEFAULT 0,
                    max_attempts INTEGER DEFAULT 5,
                    created_at TEXT DEFAULT (datetime('now')),
                    next_retry_at TEXT DEFAULT (datetime('now')),
                    last_error TEXT,
                    status TEXT DEFAULT 'pending'
                )
            """)

            # Dead letter queue for permanently failed requests
            cursor.execute("""
                CREATE TABLE IF NOT EXISTS dead_letters (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    original_id INTEGER,
                    endpoint TEXT NOT NULL,
                    method TEXT,
                    payload TEXT NOT NULL,
                    headers TEXT,
                    attempts INTEGER,
                    last_error TEXT,
                    created_at TEXT,
                    failed_at TEXT DEFAULT (datetime('now'))
                )
            """)

            # Indexes used by get_pending()'s status filter and retry ordering
            cursor.execute("CREATE INDEX IF NOT EXISTS idx_pending_status ON pending_requests(status)")
            cursor.execute("CREATE INDEX IF NOT EXISTS idx_pending_retry ON pending_requests(next_retry_at)")

            self.conn.commit()
        except Exception as e:
            # Fall back to file-based queue (best-effort by design: the queue
            # must keep accepting items even when SQLite is broken).
            self.conn = None
            self._ensure_fallback_dir()

    def _ensure_fallback_dir(self):
        """Ensure the fallback directory exists."""
        self.fallback_path.parent.mkdir(parents=True, exist_ok=True)

    def enqueue(
        self,
        endpoint: str,
        payload: Dict[str, Any],
        method: str = "POST",
        headers: Optional[Dict[str, str]] = None,
        max_attempts: int = MAX_RETRIES
    ) -> int:
        """Add a request to the retry queue.

        Args:
            endpoint: API endpoint URL
            payload: Request payload (will be JSON serialized)
            method: HTTP method
            headers: Optional headers
            max_attempts: Maximum retry attempts

        Returns:
            Queue item ID (or -1 for file fallback)
        """
        with self._lock:
            self._enqueued += 1

            if self.conn:
                try:
                    cursor = self.conn.cursor()
                    cursor.execute(
                        """
                        INSERT INTO pending_requests
                        (endpoint, method, payload, headers, max_attempts)
                        VALUES (?, ?, ?, ?, ?)
                        """,
                        (
                            endpoint,
                            method,
                            json.dumps(payload),
                            json.dumps(headers) if headers else None,
                            max_attempts
                        )
                    )
                    self.conn.commit()
                    return cursor.lastrowid
                except Exception:
                    pass  # Fall through to file fallback

            # File-based fallback: append one JSON object per line (JSONL).
            self._ensure_fallback_dir()
            item = {
                "endpoint": endpoint,
                "method": method,
                "payload": payload,
                "headers": headers,
                "attempts": 0,
                "max_attempts": max_attempts,
                "created_at": datetime.now().isoformat(),
                "status": "pending"
            }
            with open(self.fallback_path, "a") as f:
                f.write(json.dumps(item) + "\n")
            return -1

    def get_pending(self, limit: int = 10) -> List[Dict[str, Any]]:
        """Get pending requests ready for retry.

        Args:
            limit: Maximum number of items to return

        Returns:
            List of pending request dictionaries.  File-fallback items carry
            no "id" key, so callers should use item.get("id", -1).
        """
        with self._lock:
            if self.conn:
                try:
                    cursor = self.conn.cursor()
                    # datetime('now') is UTC; next_retry_at is stored as UTC
                    # by mark_failed(), so this comparison is consistent.
                    cursor.execute(
                        """
                        SELECT * FROM pending_requests
                        WHERE status = 'pending'
                        AND datetime(next_retry_at) <= datetime('now')
                        ORDER BY next_retry_at ASC
                        LIMIT ?
                        """,
                        (limit,)
                    )
                    rows = cursor.fetchall()
                    return [dict(row) for row in rows]
                except Exception:
                    pass

            # File fallback: scan the JSONL file, skipping corrupt lines.
            if self.fallback_path.exists():
                items = []
                with open(self.fallback_path, "r") as f:
                    for line in f:
                        try:
                            item = json.loads(line.strip())
                            if item.get("status") == "pending":
                                items.append(item)
                                if len(items) >= limit:
                                    break
                        except json.JSONDecodeError:
                            continue
                return items

            return []

    def mark_success(self, item_id: int):
        """Mark a request as successfully processed (deletes it from the queue).

        File-fallback items (item_id <= 0) are only counted, not removed.
        """
        with self._lock:
            self._processed += 1
            if self.conn and item_id > 0:
                try:
                    cursor = self.conn.cursor()
                    cursor.execute(
                        "DELETE FROM pending_requests WHERE id = ?",
                        (item_id,)
                    )
                    self.conn.commit()
                except Exception:
                    pass

    def mark_failed(self, item_id: int, error: str):
        """Mark a request as failed and schedule retry or move to dead letter queue."""
        with self._lock:
            self._retried += 1

            if self.conn and item_id > 0:
                try:
                    cursor = self.conn.cursor()

                    # Get current item
                    cursor.execute("SELECT * FROM pending_requests WHERE id = ?", (item_id,))
                    row = cursor.fetchone()
                    if not row:
                        return

                    attempts = row["attempts"] + 1
                    max_attempts = row["max_attempts"]

                    if attempts >= max_attempts:
                        # Retries exhausted: move to dead letter queue
                        self._failed += 1
                        cursor.execute(
                            """
                            INSERT INTO dead_letters
                            (original_id, endpoint, method, payload, headers, attempts, last_error, created_at)
                            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                            """,
                            (
                                item_id,
                                row["endpoint"],
                                row["method"],
                                row["payload"],
                                row["headers"],
                                attempts,
                                error,
                                row["created_at"]
                            )
                        )
                        cursor.execute("DELETE FROM pending_requests WHERE id = ?", (item_id,))
                    else:
                        # Calculate next retry with exponential backoff
                        backoff = min(
                            BASE_BACKOFF_SECONDS * (2 ** attempts),
                            MAX_BACKOFF_SECONDS
                        )
                        # BUGFIX: use UTC here.  get_pending() compares this
                        # column against SQLite's datetime('now'), which is
                        # UTC; the previous datetime.now() stored local time,
                        # skewing retry scheduling by the local UTC offset.
                        next_retry = datetime.now(timezone.utc) + timedelta(seconds=backoff)

                        cursor.execute(
                            """
                            UPDATE pending_requests
                            SET attempts = ?, last_error = ?, next_retry_at = ?, status = 'pending'
                            WHERE id = ?
                            """,
                            (attempts, error, next_retry.isoformat(), item_id)
                        )

                    self.conn.commit()
                except Exception:
                    pass

    def get_queue_depth(self) -> int:
        """Get the number of pending requests."""
        with self._lock:
            if self.conn:
                try:
                    cursor = self.conn.cursor()
                    cursor.execute("SELECT COUNT(*) as count FROM pending_requests WHERE status = 'pending'")
                    row = cursor.fetchone()
                    return row["count"] if row else 0
                except Exception:
                    pass

            # File fallback: count pending lines, skipping corrupt ones.
            if self.fallback_path.exists():
                count = 0
                with open(self.fallback_path, "r") as f:
                    for line in f:
                        try:
                            item = json.loads(line.strip())
                            if item.get("status") == "pending":
                                count += 1
                        except json.JSONDecodeError:
                            continue
                return count

            return 0

    def get_dead_letters(self, limit: int = 50) -> List[Dict[str, Any]]:
        """Get items from the dead letter queue (most recent failures first)."""
        with self._lock:
            if self.conn:
                try:
                    cursor = self.conn.cursor()
                    cursor.execute(
                        "SELECT * FROM dead_letters ORDER BY failed_at DESC LIMIT ?",
                        (limit,)
                    )
                    rows = cursor.fetchall()
                    return [dict(row) for row in rows]
                except Exception:
                    pass
            return []

    def retry_dead_letter(self, dead_letter_id: int) -> bool:
        """Move a dead letter back to the pending queue.

        The item restarts with attempts=0 and the global MAX_RETRIES budget.

        Returns:
            True if the item was re-queued, False otherwise.
        """
        with self._lock:
            if self.conn:
                try:
                    cursor = self.conn.cursor()
                    cursor.execute("SELECT * FROM dead_letters WHERE id = ?", (dead_letter_id,))
                    row = cursor.fetchone()
                    if not row:
                        return False

                    cursor.execute(
                        """
                        INSERT INTO pending_requests
                        (endpoint, method, payload, headers, attempts, max_attempts, created_at)
                        VALUES (?, ?, ?, ?, 0, ?, ?)
                        """,
                        (
                            row["endpoint"],
                            row["method"],
                            row["payload"],
                            row["headers"],
                            MAX_RETRIES,
                            row["created_at"]
                        )
                    )
                    cursor.execute("DELETE FROM dead_letters WHERE id = ?", (dead_letter_id,))
                    self.conn.commit()
                    return True
                except Exception:
                    pass
            return False

    def get_stats(self) -> Dict[str, Any]:
        """Get queue statistics.

        Not atomic: get_queue_depth() takes the lock internally, so the
        individual numbers may come from slightly different moments.
        """
        dead_letter_count = 0
        if self.conn:
            try:
                cursor = self.conn.cursor()
                cursor.execute("SELECT COUNT(*) as count FROM dead_letters")
                row = cursor.fetchone()
                dead_letter_count = row["count"] if row else 0
            except Exception:
                pass

        return {
            "queue_depth": self.get_queue_depth(),
            "dead_letters": dead_letter_count,
            "total_enqueued": self._enqueued,
            "total_processed": self._processed,
            "total_failed": self._failed,
            "total_retried": self._retried,
            "db_path": self.db_path,
            "fallback_path": str(self.fallback_path),
            "using_db": self.conn is not None
        }

    async def process_queue(
        self,
        processor: Callable[[Dict[str, Any]], Awaitable[bool]],
        batch_size: int = 10,
        interval_seconds: float = 5.0
    ):
        """Background task to process the queue.

        Args:
            processor: Async function that processes a single item, returns True on success
                (annotation fixed: the result is awaited, so it must be awaitable)
            batch_size: Number of items to process per batch
            interval_seconds: Time between processing batches
        """
        self._processing = True

        while self._processing:
            try:
                items = self.get_pending(limit=batch_size)

                for item in items:
                    try:
                        success = await processor(item)
                        if success:
                            self.mark_success(item.get("id", -1))
                        else:
                            self.mark_failed(item.get("id", -1), "Processor returned False")
                    except Exception as e:
                        self.mark_failed(item.get("id", -1), str(e))

            except Exception:
                pass  # Don't crash the background task

            await asyncio.sleep(interval_seconds)

    def stop_processing(self):
        """Stop the background processing task."""
        self._processing = False

    def close(self):
        """Close the database connection."""
        self.stop_processing()
        if self.conn:
            self.conn.close()
|
|
432
|
+
|
|
433
|
+
|
|
434
|
+
# Module-level singleton, created lazily by get_queue()
_queue: Optional[RetryQueue] = None


def get_queue() -> RetryQueue:
    """Return the process-wide :class:`RetryQueue`, creating it on first use."""
    global _queue
    queue = _queue
    if queue is None:
        queue = RetryQueue()
        _queue = queue
    return queue
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
async def enqueue_request(
    endpoint: str,
    payload: Dict[str, Any],
    method: str = "POST",
    headers: Optional[Dict[str, str]] = None,
    max_attempts: Optional[int] = None
) -> int:
    """Convenience coroutine to enqueue a request on the global retry queue.

    Args:
        endpoint: API endpoint URL
        payload: Request payload (JSON-serialized by the queue)
        method: HTTP method
        headers: Optional headers
        max_attempts: Maximum retry attempts; None keeps the queue's default.
            (Previously this wrapper silently dropped any retry budget.)

    Returns:
        Queue item ID (or -1 when the file fallback was used)
    """
    queue = get_queue()
    if max_attempts is None:
        return queue.enqueue(endpoint, payload, method, headers)
    return queue.enqueue(endpoint, payload, method, headers, max_attempts)
|