s3db.js 9.3.0 → 10.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +72 -13
- package/dist/s3db.cjs.js +2342 -540
- package/dist/s3db.cjs.js.map +1 -1
- package/dist/s3db.es.js +2341 -541
- package/dist/s3db.es.js.map +1 -1
- package/package.json +1 -1
- package/src/client.class.js +8 -7
- package/src/concerns/high-performance-inserter.js +285 -0
- package/src/concerns/partition-queue.js +171 -0
- package/src/errors.js +10 -2
- package/src/partition-drivers/base-partition-driver.js +96 -0
- package/src/partition-drivers/index.js +60 -0
- package/src/partition-drivers/memory-partition-driver.js +274 -0
- package/src/partition-drivers/sqs-partition-driver.js +332 -0
- package/src/partition-drivers/sync-partition-driver.js +38 -0
- package/src/plugins/audit.plugin.js +4 -4
- package/src/plugins/backup.plugin.js +380 -105
- package/src/plugins/backup.plugin.js.backup +1 -1
- package/src/plugins/cache.plugin.js +203 -150
- package/src/plugins/eventual-consistency.plugin.js +1012 -0
- package/src/plugins/fulltext.plugin.js +6 -6
- package/src/plugins/index.js +2 -0
- package/src/plugins/metrics.plugin.js +13 -13
- package/src/plugins/replicator.plugin.js +108 -70
- package/src/plugins/replicators/s3db-replicator.class.js +7 -3
- package/src/plugins/replicators/sqs-replicator.class.js +11 -3
- package/src/plugins/s3-queue.plugin.js +776 -0
- package/src/plugins/scheduler.plugin.js +226 -164
- package/src/plugins/state-machine.plugin.js +109 -81
- package/src/resource.class.js +205 -0
- package/PLUGINS.md +0 -5036
+++ package/src/plugins/s3-queue.plugin.js
@@ -0,0 +1,776 @@
import Plugin from "./plugin.class.js";
import tryFn from "../concerns/try-fn.js";
import { idGenerator } from "../concerns/id.js";

/**
 * S3QueuePlugin - Distributed Queue System with ETag-based Atomicity
 *
 * Provides a distributed queue processing system using S3 as backend with:
 * - Atomic message claiming using S3 ETags (zero race conditions)
 * - Visibility timeout pattern (like SQS)
 * - Automatic retries with exponential backoff
 * - Dead letter queue support
 * - Concurrent workers with configurable concurrency
 * - At-least-once delivery guarantee
 *
 * === Configuration Example ===
 *
 * new S3QueuePlugin({
 *   resource: 'emails',                  // Target resource name
 *   visibilityTimeout: 30000,            // 30 seconds
 *   pollInterval: 1000,                  // 1 second
 *   maxAttempts: 3,                      // Max retry attempts
 *   concurrency: 5,                      // Number of concurrent workers
 *   deadLetterResource: 'failed_emails', // Dead letter queue (optional)
 *   autoStart: true,                     // Auto-start workers
 *
 *   onMessage: async (record, context) => {
 *     // Process message
 *     await sendEmail(record);
 *     return { sent: true };
 *   },
 *
 *   onError: (error, record) => {
 *     console.error('Failed:', error);
 *   },
 *
 *   onComplete: (record, result) => {
 *     console.log('Completed:', result);
 *   }
 * });
 *
 * === Usage ===
 *
 * // Enqueue a message
 * await db.resource('emails').enqueue({
 *   to: 'user@example.com',
 *   subject: 'Hello',
 *   body: 'World'
 * });
 *
 * // Start processing (if not auto-started)
 * await db.resource('emails').startProcessing(async (email) => {
 *   await sendEmail(email);
 * }, { concurrency: 10 });
 *
 * // Stop processing
 * await db.resource('emails').stopProcessing();
 *
 * // Get queue statistics
 * const stats = await db.resource('emails').queueStats();
 * // { total: 100, pending: 50, processing: 20, completed: 25, failed: 5, dead: 0 }
 */
export class S3QueuePlugin extends Plugin {
  constructor(options = {}) {
    super(options);

    if (!options.resource) {
      throw new Error('S3QueuePlugin requires "resource" option');
    }

    this.config = {
      resource: options.resource,
      visibilityTimeout: options.visibilityTimeout || 30000, // 30 seconds
      pollInterval: options.pollInterval || 1000, // 1 second
      maxAttempts: options.maxAttempts || 3,
      concurrency: options.concurrency || 1,
      deadLetterResource: options.deadLetterResource || null,
      autoStart: options.autoStart !== false,
      onMessage: options.onMessage,
      onError: options.onError,
      onComplete: options.onComplete,
      verbose: options.verbose || false,
      ...options
    };

    this.queueResource = null; // Resource: <resource>_queue
    this.targetResource = null; // The user's original resource
    this.deadLetterResourceObj = null;
    this.workers = [];
    this.isRunning = false;
    this.workerId = `worker-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;

    // Deduplication cache to prevent S3 eventual consistency issues
    // Tracks recently processed messages to avoid reprocessing
    this.processedCache = new Map(); // queueId -> timestamp
    this.cacheCleanupInterval = null;
    this.lockCleanupInterval = null;
  }

  async onSetup() {
    // Get target resource
    this.targetResource = this.database.resources[this.config.resource];
    if (!this.targetResource) {
      throw new Error(`S3QueuePlugin: resource '${this.config.resource}' not found`);
    }

    // Create queue metadata resource
    const queueName = `${this.config.resource}_queue`;
    const [ok, err] = await tryFn(() =>
      this.database.createResource({
        name: queueName,
        attributes: {
          id: 'string|required',
          originalId: 'string|required', // ID of the original record
          status: 'string|required', // pending/processing/completed/failed/dead
          visibleAt: 'number|required', // Visibility timestamp
          claimedBy: 'string|optional', // Worker that claimed the message
          claimedAt: 'number|optional', // Claim timestamp
          attempts: 'number|default:0',
          maxAttempts: 'number|default:3',
          error: 'string|optional',
          result: 'json|optional',
          createdAt: 'string|required',
          completedAt: 'number|optional'
        },
        behavior: 'body-overflow',
        timestamps: true,
        asyncPartitions: true,
        partitions: {
          byStatus: { fields: { status: 'string' } },
          byDate: { fields: { createdAt: 'string|maxlength:10' } }
        }
      })
    );

    if (!ok && !this.database.resources[queueName]) {
      throw new Error(`Failed to create queue resource: ${err?.message}`);
    }

    this.queueResource = this.database.resources[queueName];

    // Create lock resource for distributed locking (enabled by default)
    const lockName = `${this.config.resource}_locks`;
    const [okLock, errLock] = await tryFn(() =>
      this.database.createResource({
        name: lockName,
        attributes: {
          id: 'string|required',
          workerId: 'string|required',
          timestamp: 'number|required',
          ttl: 'number|default:5000'
        },
        behavior: 'body-overflow',
        timestamps: false
      })
    );

    if (okLock || this.database.resources[lockName]) {
      this.lockResource = this.database.resources[lockName];
    } else {
      // Locks disabled if creation fails
      this.lockResource = null;
      if (this.config.verbose) {
        console.log(`[S3QueuePlugin] Lock resource creation failed, locking disabled: ${errLock?.message}`);
      }
    }

    // Add helper methods to target resource
    this.addHelperMethods();

    // Create dead letter resource if configured
    if (this.config.deadLetterResource) {
      await this.createDeadLetterResource();
    }

    if (this.config.verbose) {
      console.log(`[S3QueuePlugin] Setup completed for resource '${this.config.resource}'`);
    }
  }

  async onStart() {
    if (this.config.autoStart && this.config.onMessage) {
      await this.startProcessing();
    }
  }

  async onStop() {
    await this.stopProcessing();
  }

  addHelperMethods() {
    const plugin = this;
    const resource = this.targetResource;

    /**
     * Enqueue a message to the queue
     */
    resource.enqueue = async function(data, options = {}) {
      // Generate ID if not provided
      const recordData = {
        id: data.id || idGenerator(),
        ...data
      };

      // Insert original record first
      const record = await resource.insert(recordData);

      // Create queue entry
      const queueEntry = {
        id: idGenerator(),
        originalId: record.id,
        status: 'pending',
        visibleAt: Date.now(),
        attempts: 0,
        maxAttempts: options.maxAttempts || plugin.config.maxAttempts,
        createdAt: new Date().toISOString().slice(0, 10)
      };

      await plugin.queueResource.insert(queueEntry);

      plugin.emit('message.enqueued', { id: record.id, queueId: queueEntry.id });

      return record;
    };

    /**
     * Get queue statistics
     */
    resource.queueStats = async function() {
      return await plugin.getStats();
    };

    /**
     * Start processing messages with worker(s)
     */
    resource.startProcessing = async function(handler, options = {}) {
      return await plugin.startProcessing(handler, options);
    };

    /**
     * Stop all workers
     */
    resource.stopProcessing = async function() {
      return await plugin.stopProcessing();
    };
  }

  async startProcessing(handler = null, options = {}) {
    if (this.isRunning) {
      if (this.config.verbose) {
        console.log('[S3QueuePlugin] Already running');
      }
      return;
    }

    const messageHandler = handler || this.config.onMessage;
    if (!messageHandler) {
      throw new Error('S3QueuePlugin: onMessage handler required');
    }

    this.isRunning = true;
    const concurrency = options.concurrency || this.config.concurrency;

    // Start cache cleanup (every 5 seconds, remove entries older than 30 seconds)
    this.cacheCleanupInterval = setInterval(() => {
      const now = Date.now();
      const maxAge = 30000; // 30 seconds

      for (const [queueId, timestamp] of this.processedCache.entries()) {
        if (now - timestamp > maxAge) {
          this.processedCache.delete(queueId);
        }
      }
    }, 5000);

    // Start lock cleanup (every 10 seconds, remove expired locks)
    this.lockCleanupInterval = setInterval(() => {
      this.cleanupStaleLocks().catch(err => {
        if (this.config.verbose) {
          console.log(`[lockCleanup] Error: ${err.message}`);
        }
      });
    }, 10000);

    // Start N workers
    for (let i = 0; i < concurrency; i++) {
      const worker = this.createWorker(messageHandler, i);
      this.workers.push(worker);
    }

    if (this.config.verbose) {
      console.log(`[S3QueuePlugin] Started ${concurrency} workers`);
    }

    this.emit('workers.started', { concurrency, workerId: this.workerId });
  }

  async stopProcessing() {
    if (!this.isRunning) return;

    this.isRunning = false;

    // Stop cache cleanup
    if (this.cacheCleanupInterval) {
      clearInterval(this.cacheCleanupInterval);
      this.cacheCleanupInterval = null;
    }

    // Stop lock cleanup
    if (this.lockCleanupInterval) {
      clearInterval(this.lockCleanupInterval);
      this.lockCleanupInterval = null;
    }

    // Wait for workers to finish current tasks
    await Promise.all(this.workers);
    this.workers = [];

    // Clear deduplication cache
    this.processedCache.clear();

    if (this.config.verbose) {
      console.log('[S3QueuePlugin] Stopped all workers');
    }

    this.emit('workers.stopped', { workerId: this.workerId });
  }

  createWorker(handler, workerIndex) {
    return (async () => {
      while (this.isRunning) {
        try {
          // Try to claim a message
          const message = await this.claimMessage();

          if (message) {
            // Process the claimed message
            await this.processMessage(message, handler);
          } else {
            // No messages available, wait before polling again
            await new Promise(resolve => setTimeout(resolve, this.config.pollInterval));
          }
        } catch (error) {
          if (this.config.verbose) {
            console.error(`[Worker ${workerIndex}] Error:`, error.message);
          }
          // Wait a bit before retrying on error
          await new Promise(resolve => setTimeout(resolve, 1000));
        }
      }
    })();
  }

  async claimMessage() {
    const now = Date.now();

    // Query for available messages
    const [ok, err, messages] = await tryFn(() =>
      this.queueResource.query({
        status: 'pending'
      })
    );

    if (!ok || !messages || messages.length === 0) {
      return null;
    }

    // Filter messages that are visible now
    const available = messages.filter(m => m.visibleAt <= now);
    if (available.length === 0) {
      return null;
    }

    // Try to claim first available message using ETag
    for (const msg of available) {
      const claimed = await this.attemptClaim(msg);
      if (claimed) {
        return claimed;
      }
    }

    return null;
  }

  /**
   * Acquire a distributed lock using ETag-based conditional updates
   * This ensures only one worker can claim a message at a time
   *
   * Uses a two-step process:
   * 1. Create lock resource (similar to queue resource) if not exists
   * 2. Try to claim lock using ETag-based conditional update
   */
  async acquireLock(messageId) {
    if (!this.lockResource) {
      return true; // Locks disabled
    }

    const lockId = `lock-${messageId}`;
    const now = Date.now();

    try {
      // Try to get existing lock
      const [okGet, errGet, existingLock] = await tryFn(() =>
        this.lockResource.get(lockId)
      );

      if (existingLock) {
        // Lock exists - check if expired
        const lockAge = now - existingLock.timestamp;
        if (lockAge < existingLock.ttl) {
          // Lock still valid, owned by another worker
          return false;
        }
        // Lock expired - try to claim it with ETag
        const [ok, err, result] = await tryFn(() =>
          this.lockResource.updateConditional(lockId, {
            workerId: this.workerId,
            timestamp: now,
            ttl: 5000
          }, {
            ifMatch: existingLock._etag
          })
        );

        return ok && result.success;
      }

      // Lock doesn't exist - create it
      const [okCreate, errCreate] = await tryFn(() =>
        this.lockResource.insert({
          id: lockId,
          workerId: this.workerId,
          timestamp: now,
          ttl: 5000
        })
      );

      return okCreate;
    } catch (error) {
      // On any error, skip this message
      if (this.config.verbose) {
        console.log(`[acquireLock] Error: ${error.message}`);
      }
      return false;
    }
  }

  /**
   * Release a distributed lock by deleting the lock record
   */
  async releaseLock(messageId) {
    if (!this.lockResource) {
      return; // Locks disabled
    }

    const lockId = `lock-${messageId}`;

    try {
      await this.lockResource.delete(lockId);
    } catch (error) {
      // Ignore errors on release (lock may have expired or been cleaned up)
      if (this.config.verbose) {
        console.log(`[releaseLock] Failed to release lock for ${messageId}: ${error.message}`);
      }
    }
  }

  /**
   * Clean up stale locks (older than TTL)
   * This prevents deadlocks if a worker crashes while holding a lock
   */
  async cleanupStaleLocks() {
    if (!this.lockResource) {
      return; // Locks disabled
    }

    const now = Date.now();

    try {
      // List all locks
      const locks = await this.lockResource.list();

      // Delete expired locks
      for (const lock of locks) {
        const lockAge = now - lock.timestamp;
        if (lockAge > lock.ttl) {
          await this.lockResource.delete(lock.id);
          if (this.config.verbose) {
            console.log(`[cleanupStaleLocks] Removed expired lock: ${lock.id}`);
          }
        }
      }
    } catch (error) {
      // Ignore errors in cleanup (non-critical)
      if (this.config.verbose) {
        console.log(`[cleanupStaleLocks] Error during cleanup: ${error.message}`);
      }
    }
  }

  async attemptClaim(msg) {
    const now = Date.now();

    // Try to acquire distributed lock for cache check
    // This prevents race condition where multiple workers check cache simultaneously
    const lockAcquired = await this.acquireLock(msg.id);

    if (!lockAcquired) {
      // Another worker is checking/claiming this message, skip it
      return null;
    }

    // Check deduplication cache (protected by lock)
    if (this.processedCache.has(msg.id)) {
      await this.releaseLock(msg.id);
      if (this.config.verbose) {
        console.log(`[attemptClaim] Message ${msg.id} already processed (in cache)`);
      }
      return null;
    }

    // Add to cache immediately (while still holding lock)
    // This prevents other workers from claiming this message
    this.processedCache.set(msg.id, Date.now());

    // Release lock now that cache is updated
    await this.releaseLock(msg.id);

    // Fetch the message with ETag (query doesn't return _etag)
    const [okGet, errGet, msgWithETag] = await tryFn(() =>
      this.queueResource.get(msg.id)
    );

    if (!okGet || !msgWithETag) {
      // Message was deleted or not found - remove from cache
      this.processedCache.delete(msg.id);
      if (this.config.verbose) {
        console.log(`[attemptClaim] Message ${msg.id} not found or error: ${errGet?.message}`);
      }
      return null;
    }

    // Check if still pending and visible
    if (msgWithETag.status !== 'pending' || msgWithETag.visibleAt > now) {
      // Not claimable - remove from cache so another worker can try later
      this.processedCache.delete(msg.id);
      if (this.config.verbose) {
        console.log(`[attemptClaim] Message ${msg.id} not claimable: status=${msgWithETag.status}, visibleAt=${msgWithETag.visibleAt}, now=${now}`);
      }
      return null;
    }

    if (this.config.verbose) {
      console.log(`[attemptClaim] Attempting to claim ${msg.id} with ETag: ${msgWithETag._etag}`);
    }

    // Attempt atomic claim using ETag
    const [ok, err, result] = await tryFn(() =>
      this.queueResource.updateConditional(msgWithETag.id, {
        status: 'processing',
        claimedBy: this.workerId,
        claimedAt: now,
        visibleAt: now + this.config.visibilityTimeout,
        attempts: msgWithETag.attempts + 1
      }, {
        ifMatch: msgWithETag._etag // ← ATOMIC CLAIM using ETag!
      })
    );

    if (!ok || !result.success) {
      // Race lost - another worker claimed it - remove from cache
      this.processedCache.delete(msg.id);
      if (this.config.verbose) {
        console.log(`[attemptClaim] Failed to claim ${msg.id}: ${err?.message || result.error}`);
      }
      return null;
    }

    if (this.config.verbose) {
      console.log(`[attemptClaim] Successfully claimed ${msg.id}`);
    }

    // Cache entry already added above, keep it

    // Success! Now load the original record
    const [okRecord, errRecord, record] = await tryFn(() =>
      this.targetResource.get(msgWithETag.originalId)
    );

    if (!okRecord) {
      // Original record was deleted? Mark queue entry as failed
      await this.failMessage(msgWithETag.id, 'Original record not found');
      return null;
    }

    return {
      queueId: msgWithETag.id,
      record,
      attempts: msgWithETag.attempts + 1,
      maxAttempts: msgWithETag.maxAttempts
    };
  }

  async processMessage(message, handler) {
    const startTime = Date.now();

    try {
      // Execute user handler
      const result = await handler(message.record, {
        queueId: message.queueId,
        attempts: message.attempts,
        workerId: this.workerId
      });

      // Mark as completed
      await this.completeMessage(message.queueId, result);

      const duration = Date.now() - startTime;

      this.emit('message.completed', {
        queueId: message.queueId,
        originalId: message.record.id,
        duration,
        attempts: message.attempts
      });

      if (this.config.onComplete) {
        await this.config.onComplete(message.record, result);
      }

    } catch (error) {
      // Handle failure
      const shouldRetry = message.attempts < message.maxAttempts;

      if (shouldRetry) {
        // Retry with backoff
        await this.retryMessage(message.queueId, message.attempts, error.message);

        this.emit('message.retry', {
          queueId: message.queueId,
          originalId: message.record.id,
          attempts: message.attempts,
          error: error.message
        });
      } else {
        // Max attempts reached - move to dead letter queue
        await this.moveToDeadLetter(message.queueId, message.record, error.message);

        this.emit('message.dead', {
          queueId: message.queueId,
          originalId: message.record.id,
          error: error.message
        });
      }

      if (this.config.onError) {
        await this.config.onError(error, message.record);
      }
    }
  }

  async completeMessage(queueId, result) {
    await this.queueResource.update(queueId, {
      status: 'completed',
      completedAt: Date.now(),
      result
    });

    // Note: message already in cache from attemptClaim()
  }

  async failMessage(queueId, error) {
    await this.queueResource.update(queueId, {
      status: 'failed',
      error
    });
  }

  async retryMessage(queueId, attempts, error) {
    // Exponential backoff: 2^attempts * 1000ms, max 30 seconds
    const backoff = Math.min(Math.pow(2, attempts) * 1000, 30000);

    await this.queueResource.update(queueId, {
      status: 'pending',
      visibleAt: Date.now() + backoff,
      error
    });

    // Remove from cache so it can be retried
    this.processedCache.delete(queueId);
  }

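  // Backoff schedule produced by the formula in retryMessage() above (illustrative):
  // attempt 1 → 2s, attempt 2 → 4s, attempt 3 → 8s, attempt 4 → 16s,
  // attempt 5 and beyond → capped at 30s before the message becomes claimable again.
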
  async moveToDeadLetter(queueId, record, error) {
    // Save to dead letter queue if configured
    if (this.config.deadLetterResource && this.deadLetterResourceObj) {
      const msg = await this.queueResource.get(queueId);

      await this.deadLetterResourceObj.insert({
        id: idGenerator(),
        originalId: record.id,
        queueId: queueId,
        data: record,
        error,
        attempts: msg.attempts,
        createdAt: new Date().toISOString()
      });
    }

    // Mark as dead in queue
    await this.queueResource.update(queueId, {
      status: 'dead',
      error
    });

    // Note: message already in cache from attemptClaim()
  }

  async getStats() {
    const [ok, err, allMessages] = await tryFn(() =>
      this.queueResource.list()
    );

    if (!ok) {
      if (this.config.verbose) {
        console.warn('[S3QueuePlugin] Failed to get stats:', err.message);
      }
      return null;
    }

    const stats = {
      total: allMessages.length,
      pending: 0,
      processing: 0,
      completed: 0,
      failed: 0,
      dead: 0
    };

    for (const msg of allMessages) {
      if (stats[msg.status] !== undefined) {
        stats[msg.status]++;
      }
    }

    return stats;
  }

  async createDeadLetterResource() {
    const [ok, err] = await tryFn(() =>
      this.database.createResource({
        name: this.config.deadLetterResource,
        attributes: {
          id: 'string|required',
          originalId: 'string|required',
          queueId: 'string|required',
          data: 'json|required',
          error: 'string|required',
          attempts: 'number|required',
          createdAt: 'string|required'
        },
        behavior: 'body-overflow',
        timestamps: true
      })
    );

    if (ok || this.database.resources[this.config.deadLetterResource]) {
      this.deadLetterResourceObj = this.database.resources[this.config.deadLetterResource];

      if (this.config.verbose) {
        console.log(`[S3QueuePlugin] Dead letter queue created: ${this.config.deadLetterResource}`);
      }
    }
  }
}

export default S3QueuePlugin;
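
Taken together, the new plugin exposes a small producer/consumer API on the target resource. The sketch below is not part of the diff; it strings together the pieces documented in the plugin's own JSDoc (the S3QueuePlugin options, enqueue(), queueStats(), and the dead letter resource). The import path and the way the plugin instance is attached to the s3db database are assumptions and are marked as such in the comments.

// Hypothetical wiring sketch: import path and plugin registration are assumptions;
// the rest follows the usage documented in the plugin's JSDoc.
import { S3QueuePlugin } from "s3db.js"; // assumed re-export from the package root

const emailQueue = new S3QueuePlugin({
  resource: 'emails',
  concurrency: 5,
  maxAttempts: 3,
  deadLetterResource: 'failed_emails',
  onMessage: async (email) => {
    await sendEmail(email);            // user-supplied handler
    return { sent: true };
  },
  onError: (error, email) => console.error('Failed:', email.id, error.message)
});
// Attach emailQueue to the s3db instance via the library's plugin setup (not shown in this diff).

// Producer side: inserts the record plus its queue entry and emits 'message.enqueued'.
await db.resource('emails').enqueue({ to: 'user@example.com', subject: 'Hello', body: 'World' });

// Consumer side: workers poll, claim atomically via ETag, retry with backoff, and
// dead-letter after maxAttempts. Progress can be inspected at any time:
const stats = await db.resource('emails').queueStats();
// e.g. { total: 1, pending: 0, processing: 1, completed: 0, failed: 0, dead: 0 }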