@checkstack/queue-memory-backend 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,55 @@
+ # @checkstack/queue-memory-backend
+
+ ## 0.0.2
+
+ ### Patch Changes
+
+ - d20d274: Initial release of all @checkstack packages. Rebranded from Checkmate to Checkstack with new npm organization @checkstack and domain checkstack.dev.
+ - Updated dependencies [d20d274]
+   - @checkstack/backend-api@0.0.2
+   - @checkstack/common@0.0.2
+   - @checkstack/queue-api@0.0.2
+   - @checkstack/queue-memory-common@0.0.2
+
+ ## 1.0.1
+
+ ### Patch Changes
+
+ - Updated dependencies [b4eb432]
+ - Updated dependencies [a65e002]
+   - @checkstack/backend-api@1.1.0
+   - @checkstack/common@0.2.0
+   - @checkstack/queue-api@1.0.1
+   - @checkstack/queue-memory-common@0.1.2
+
+ ## 1.0.0
+
+ ### Major Changes
+
+ - 8e889b4: Add consumer group support to Queue API for distributed event system. BREAKING: consume() now requires ConsumeOptions with consumerGroup parameter.
+
+ ### Patch Changes
+
+ - e4d83fc: Add BullMQ queue plugin with orphaned job cleanup
+
+   - **queue-api**: Added `listRecurringJobs()` method to Queue interface for detecting orphaned jobs
+   - **queue-bullmq-backend**: New plugin implementing BullMQ (Redis) queue backend with job schedulers, consumer groups, and distributed job persistence
+   - **queue-bullmq-common**: New common package with queue permissions
+   - **queue-memory-backend**: Implemented `listRecurringJobs()` for in-memory queue
+   - **healthcheck-backend**: Enhanced `bootstrapHealthChecks` to clean up orphaned job schedulers using `listRecurringJobs()`
+   - **test-utils-backend**: Added `listRecurringJobs()` to mock queue factory
+
+   This enables production-ready distributed queue processing with Redis persistence and automatic cleanup of orphaned jobs when health checks are deleted.
+
+ - Updated dependencies [ffc28f6]
+ - Updated dependencies [e4d83fc]
+ - Updated dependencies [71275dd]
+ - Updated dependencies [ae19ff6]
+ - Updated dependencies [b55fae6]
+ - Updated dependencies [b354ab3]
+ - Updated dependencies [8e889b4]
+ - Updated dependencies [81f3f85]
+   - @checkstack/common@0.1.0
+   - @checkstack/backend-api@1.0.0
+   - @checkstack/queue-api@1.0.0
+   - @checkstack/queue-memory-common@0.1.1
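The 1.0.0 entry above flags a breaking change: `consume()` now takes a required `ConsumeOptions` with a `consumerGroup`. A minimal migration sketch, assuming the pre-1.0 call took only a handler (the group name is illustrative; only APIs visible elsewhere in this diff are used):

```ts
import type { Queue } from "@checkstack/queue-api";

// Before 1.0.0 (assumed from the changelog entry): only a handler was passed.
// await queue.consume(async (job) => handle(job.data));

// From 1.0.0 on: ConsumeOptions with a consumerGroup is required.
async function subscribe(queue: Queue<string>) {
  await queue.consume(
    async (job) => {
      console.log("received", job.data);
    },
    // Consumers sharing a group split the work (work-queue pattern);
    // consumers in distinct groups each receive every message (broadcast).
    { consumerGroup: "health-processor", maxRetries: 3 }
  );
}
```

The broadcast versus work-queue behaviour this option selects is exercised in the in-memory queue tests further down in this diff.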
package/package.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "name": "@checkstack/queue-memory-backend",
+   "version": "0.0.2",
+   "type": "module",
+   "main": "src/index.ts",
+   "scripts": {
+     "typecheck": "tsc --noEmit",
+     "lint": "bun run lint:code",
+     "lint:code": "eslint . --max-warnings 0"
+   },
+   "dependencies": {
+     "@checkstack/backend-api": "workspace:*",
+     "@checkstack/queue-api": "workspace:*",
+     "@checkstack/queue-memory-common": "workspace:*",
+     "zod": "^4.2.1",
+     "@checkstack/common": "workspace:*"
+   },
+   "devDependencies": {
+     "@types/bun": "latest",
+     "@checkstack/tsconfig": "workspace:*",
+     "@checkstack/scripts": "workspace:*"
+   }
+ }
package/src/index.ts ADDED
@@ -0,0 +1,26 @@
+ import {
+   createBackendPlugin,
+   coreServices,
+ } from "@checkstack/backend-api";
+ import { InMemoryQueuePlugin } from "./plugin";
+ import { permissionList } from "@checkstack/queue-memory-common";
+ import { pluginMetadata } from "./plugin-metadata";
+
+ export default createBackendPlugin({
+   metadata: pluginMetadata,
+   register(env) {
+     env.registerPermissions(permissionList);
+
+     env.registerInit({
+       deps: {
+         queuePluginRegistry: coreServices.queuePluginRegistry,
+         logger: coreServices.logger,
+       },
+       init: async ({ queuePluginRegistry, logger }) => {
+         logger.debug("🔌 Registering In-Memory Queue Plugin...");
+         const plugin = new InMemoryQueuePlugin();
+         queuePluginRegistry.register(plugin);
+       },
+     });
+   },
+ });
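The entry point above registers the package's permissions and then defers queue wiring to an `init` callback whose dependencies are resolved by the host backend. A rough sketch of the contract it relies on, using a hand-rolled stand-in since the real `env` type from `@checkstack/backend-api` is not part of this diff (all names in the fake are assumptions, not the library's API):

```ts
// Hypothetical stand-in for the object passed to register(env); a real
// backend resolves `deps` from its service registry before calling init.
type FakeRegistrationEnv = {
  registerPermissions: (permissions: unknown[]) => void;
  registerInit: (options: {
    deps: Record<string, unknown>;
    init: (resolvedDeps: Record<string, unknown>) => Promise<void>;
  }) => void;
};

const fakeEnv: FakeRegistrationEnv = {
  registerPermissions: (permissions) =>
    console.log(`registered ${permissions.length} permissions`),
  registerInit: ({ init }) =>
    console.log("init callback captured:", typeof init),
};
```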
@@ -0,0 +1,471 @@
+ import { describe, it, expect, beforeEach } from "bun:test";
+ import { InMemoryQueue } from "./memory-queue";
+ import type { QueueJob } from "@checkstack/queue-api";
+
+ describe("InMemoryQueue Consumer Groups", () => {
+   let queue: InMemoryQueue<string>;
+
+   beforeEach(() => {
+     queue = new InMemoryQueue("test-queue", {
+       concurrency: 10,
+       maxQueueSize: 100,
+       delayMultiplier: 0.01, // 100x faster delays for testing
+     });
+   });
+
+   describe("Broadcast Pattern (Unique Consumer Groups)", () => {
+     it("should deliver message to all consumers with different groups", async () => {
+       const received: string[] = [];
+
+       // Register two consumers with different groups
+       await queue.consume(
+         async (job) => {
+           received.push(`consumer-1:${job.data}`);
+         },
+         { consumerGroup: "group-1", maxRetries: 0 }
+       );
+
+       await queue.consume(
+         async (job) => {
+           received.push(`consumer-2:${job.data}`);
+         },
+         { consumerGroup: "group-2", maxRetries: 0 }
+       );
+
+       // Enqueue a message
+       await queue.enqueue("test-message");
+
+       // Wait for processing
+       await new Promise((resolve) => setTimeout(resolve, 100));
+
+       // Both consumers should receive the message
+       expect(received).toContain("consumer-1:test-message");
+       expect(received).toContain("consumer-2:test-message");
+       expect(received.length).toBe(2);
+     });
+
+     it("should deliver multiple messages to all consumer groups", async () => {
+       const received: Record<string, string[]> = {
+         "group-1": [],
+         "group-2": [],
+       };
+
+       await queue.consume(
+         async (job) => {
+           received["group-1"].push(job.data);
+         },
+         { consumerGroup: "group-1", maxRetries: 0 }
+       );
+
+       await queue.consume(
+         async (job) => {
+           received["group-2"].push(job.data);
+         },
+         { consumerGroup: "group-2", maxRetries: 0 }
+       );
+
+       // Enqueue multiple messages
+       await queue.enqueue("msg-1");
+       await queue.enqueue("msg-2");
+       await queue.enqueue("msg-3");
+
+       // Wait for processing
+       await new Promise((resolve) => setTimeout(resolve, 150));
+
+       // Both groups should receive all messages
+       expect(received["group-1"]).toEqual(["msg-1", "msg-2", "msg-3"]);
+       expect(received["group-2"]).toEqual(["msg-1", "msg-2", "msg-3"]);
+     });
+   });
+
+   describe("Work-Queue Pattern (Same Consumer Group)", () => {
+     it("should distribute messages round-robin within same group", async () => {
+       const received: Record<string, string[]> = {
+         "consumer-1": [],
+         "consumer-2": [],
+       };
+
+       // Two consumers in the same group
+       await queue.consume(
+         async (job) => {
+           received["consumer-1"].push(job.data);
+         },
+         { consumerGroup: "shared-group", maxRetries: 0 }
+       );
+
+       await queue.consume(
+         async (job) => {
+           received["consumer-2"].push(job.data);
+         },
+         { consumerGroup: "shared-group", maxRetries: 0 }
+       );
+
+       // Enqueue multiple messages
+       await queue.enqueue("msg-1");
+       await queue.enqueue("msg-2");
+       await queue.enqueue("msg-3");
+       await queue.enqueue("msg-4");
+
+       // Wait for processing
+       await new Promise((resolve) => setTimeout(resolve, 150));
+
+       // Messages should be distributed (round-robin)
+       const total =
+         received["consumer-1"].length + received["consumer-2"].length;
+       expect(total).toBe(4);
+
+       // Each consumer should get some messages (round-robin)
+       expect(received["consumer-1"].length).toBeGreaterThan(0);
+       expect(received["consumer-2"].length).toBeGreaterThan(0);
+
+       // All messages should be received exactly once across consumers
+       const allReceived = [
+         ...received["consumer-1"],
+         ...received["consumer-2"],
+       ].sort();
+       expect(allReceived).toEqual(["msg-1", "msg-2", "msg-3", "msg-4"]);
+     });
+
+     it("should only deliver to one consumer in the group", async () => {
+       let consumer1Count = 0;
+       let consumer2Count = 0;
+
+       await queue.consume(
+         async () => {
+           consumer1Count++;
+         },
+         { consumerGroup: "work-group", maxRetries: 0 }
+       );
+
+       await queue.consume(
+         async () => {
+           consumer2Count++;
+         },
+         { consumerGroup: "work-group", maxRetries: 0 }
+       );
+
+       // Single message
+       await queue.enqueue("test");
+
+       // Wait for processing
+       await new Promise((resolve) => setTimeout(resolve, 100));
+
+       // Only one consumer should process it
+       expect(consumer1Count + consumer2Count).toBe(1);
+     });
+   });
+
+   describe("Retry Logic", () => {
+     it("should retry failed jobs with exponential backoff", async () => {
+       let attempts = 0;
+       const attemptTimestamps: number[] = [];
+
+       await queue.consume(
+         async (job) => {
+           attemptTimestamps.push(Date.now());
+           attempts++;
+           if (attempts < 3) {
+             throw new Error("Simulated failure");
+           }
+         },
+         { consumerGroup: "retry-group", maxRetries: 3 }
+       );
+
+       await queue.enqueue("test");
+
+       // Wait for retries (with delayMultiplier=0.01: 20ms + 40ms = 60ms + buffer)
+       await new Promise((resolve) => setTimeout(resolve, 150));
+
+       // Should have tried 3 times (initial + 2 retries)
+       expect(attempts).toBe(3);
+
+       // Check exponential backoff (delays should increase)
+       if (attemptTimestamps.length >= 3) {
+         const delay1 = attemptTimestamps[1] - attemptTimestamps[0];
+         const delay2 = attemptTimestamps[2] - attemptTimestamps[1];
+
+         // With delayMultiplier=0.01: first retry after 2^1 * 1000 * 0.01 = 20ms
+         expect(delay1).toBeGreaterThanOrEqual(15); // Allow tolerance
+         expect(delay1).toBeLessThanOrEqual(50);
+
+         // Second retry after 2^2 * 1000 * 0.01 = 40ms
+         expect(delay2).toBeGreaterThanOrEqual(35);
+         expect(delay2).toBeLessThanOrEqual(80);
+
+         // Verify exponential growth (delay2 should be roughly 2x delay1)
+         expect(delay2).toBeGreaterThan(delay1);
+       }
+     });
+
+     it("should not retry beyond maxRetries", async () => {
+       let attempts = 0;
+
+       await queue.consume(
+         async () => {
+           attempts++;
+           throw new Error("Always fails");
+         },
+         { consumerGroup: "fail-group", maxRetries: 2 }
+       );
+
+       await queue.enqueue("test");
+
+       // Wait for all retries (with delayMultiplier=0.01: ~60ms total)
+       await new Promise((resolve) => setTimeout(resolve, 150));
+
+       // Should try 3 times total (initial + 2 retries)
+       expect(attempts).toBe(3);
+     });
+   });
+
+   describe("Mixed Patterns", () => {
+     it("should handle both broadcast and work-queue simultaneously", async () => {
+       const broadcastReceived: string[] = [];
+       const workQueueReceived: string[] = [];
+
+       // Broadcast consumers (different groups)
+       await queue.consume(
+         async (job) => {
+           broadcastReceived.push(`broadcast-1:${job.data}`);
+         },
+         { consumerGroup: "broadcast-1", maxRetries: 0 }
+       );
+
+       await queue.consume(
+         async (job) => {
+           broadcastReceived.push(`broadcast-2:${job.data}`);
+         },
+         { consumerGroup: "broadcast-2", maxRetries: 0 }
+       );
+
+       // Work-queue consumers (same group)
+       await queue.consume(
+         async (job) => {
+           workQueueReceived.push(`work-1:${job.data}`);
+         },
+         { consumerGroup: "work-group", maxRetries: 0 }
+       );
+
+       await queue.consume(
+         async (job) => {
+           workQueueReceived.push(`work-2:${job.data}`);
+         },
+         { consumerGroup: "work-group", maxRetries: 0 }
+       );
+
+       await queue.enqueue("test-msg");
+
+       // Wait for processing
+       await new Promise((resolve) => setTimeout(resolve, 150));
+
+       // Both broadcast consumers should receive
+       expect(broadcastReceived.length).toBe(2);
+       expect(broadcastReceived).toContain("broadcast-1:test-msg");
+       expect(broadcastReceived).toContain("broadcast-2:test-msg");
+
+       // Only one work-queue consumer should receive
+       expect(workQueueReceived.length).toBe(1);
+       expect(
+         workQueueReceived[0] === "work-1:test-msg" ||
+           workQueueReceived[0] === "work-2:test-msg"
+       ).toBe(true);
+     });
+   });
+
+   describe("Queue Stats", () => {
+     it("should track consumer group count", async () => {
+       await queue.consume(async () => {}, {
+         consumerGroup: "group-1",
+         maxRetries: 0,
+       });
+       await queue.consume(async () => {}, {
+         consumerGroup: "group-2",
+         maxRetries: 0,
+       });
+       await queue.consume(async () => {}, {
+         consumerGroup: "group-3",
+         maxRetries: 0,
+       });
+
+       const stats = await queue.getStats();
+       expect(stats.consumerGroups).toBe(3);
+     });
+
+     it("should track pending, processing, completed, and failed", async () => {
+       let processedCount = 0;
+
+       await queue.consume(
+         async () => {
+           processedCount++;
+           if (processedCount === 2) {
+             throw new Error("Fail second job");
+           }
+           // Simulate some processing time
+           await new Promise((resolve) => setTimeout(resolve, 50));
+         },
+         { consumerGroup: "stats-group", maxRetries: 0 }
+       );
+
+       await queue.enqueue("msg-1");
+       await queue.enqueue("msg-2");
+       await queue.enqueue("msg-3");
+
+       // Check stats during processing
+       await new Promise((resolve) => setTimeout(resolve, 25));
+       let stats = await queue.getStats();
+       expect(stats.processing).toBeGreaterThanOrEqual(0);
+
+       // Wait for completion
+       await new Promise((resolve) => setTimeout(resolve, 200));
+       stats = await queue.getStats();
+
+       expect(stats.completed).toBe(2); // msg-1 and msg-3
+       expect(stats.failed).toBe(1); // msg-2
+       expect(stats.pending).toBe(0);
+     });
+   });
+
+   describe("Delayed Jobs", () => {
+     it("should not process job until delay expires", async () => {
+       const processedTimes: number[] = [];
+       const enqueueTime = Date.now();
+
+       await queue.consume(
+         async (job) => {
+           processedTimes.push(Date.now());
+         },
+         { consumerGroup: "delay-group", maxRetries: 0 }
+       );
+
+       // Enqueue with 2-second delay (becomes 20ms with delayMultiplier=0.01)
+       await queue.enqueue("delayed-job", { startDelay: 2 });
+
+       // Check immediately - should not be processed yet
+       await new Promise((resolve) => setTimeout(resolve, 10));
+       expect(processedTimes.length).toBe(0);
+
+       // Wait for delay to expire (20ms + generous buffer for CI)
+       await new Promise((resolve) => setTimeout(resolve, 150));
+       expect(processedTimes.length).toBe(1);
+
+       // Verify it was processed after the delay
+       const actualDelay = processedTimes[0] - enqueueTime;
+       expect(actualDelay).toBeGreaterThanOrEqual(15); // Allow tolerance
+       expect(actualDelay).toBeLessThanOrEqual(200); // Allow more tolerance for CI
+     });
+
+     it("should process non-delayed jobs immediately while delayed jobs wait", async () => {
+       const processed: string[] = [];
+
+       await queue.consume(
+         async (job) => {
+           processed.push(job.data);
+         },
+         { consumerGroup: "mixed-delay-group", maxRetries: 0 }
+       );
+
+       // Enqueue delayed job first (10s delay = 100ms with multiplier)
+       await queue.enqueue("delayed", { startDelay: 10 });
+
+       // Enqueue immediate job
+       await queue.enqueue("immediate");
+
+       // Wait for immediate job to be processed (should be done quickly)
+       // The delayed job should NOT be processed yet (100ms delay)
+       await new Promise((resolve) => setTimeout(resolve, 50));
+       expect(processed).toEqual(["immediate"]);
+
+       // Wait for delayed job (100ms + generous buffer for CI)
+       await new Promise((resolve) => setTimeout(resolve, 200));
+       expect(processed).toEqual(["immediate", "delayed"]);
+     });
+
+     it("should respect priority with delayed jobs", async () => {
+       const processed: string[] = [];
+
+       await queue.consume(
+         async (job) => {
+           processed.push(job.data);
+         },
+         { consumerGroup: "priority-delay-group", maxRetries: 0 }
+       );
+
+       // Enqueue multiple delayed jobs with same delay but different priorities
+       // (1s delay = 10ms with multiplier)
+       await queue.enqueue("low-priority", {
+         startDelay: 1,
+         priority: 1,
+       });
+       await queue.enqueue("high-priority", {
+         startDelay: 1,
+         priority: 10,
+       });
+       await queue.enqueue("medium-priority", {
+         startDelay: 1,
+         priority: 5,
+       });
+
+       // Wait for delay to expire (10ms + buffer)
+       await new Promise((resolve) => setTimeout(resolve, 50));
+
+       // Should process in priority order (highest first)
+       expect(processed).toEqual([
+         "high-priority",
+         "medium-priority",
+         "low-priority",
+       ]);
+     });
+   });
+
+   describe("Job Deduplication", () => {
+     it("should skip duplicate jobs with same jobId", async () => {
+       const processed: string[] = [];
+
+       await queue.consume(
+         async (job) => {
+           processed.push(job.data);
+         },
+         { consumerGroup: "dedup-group", maxRetries: 0 }
+       );
+
+       // Enqueue job with custom jobId
+       const jobId1 = await queue.enqueue("first", { jobId: "unique-job-1" });
+       expect(jobId1).toBe("unique-job-1");
+
+       // Try to enqueue duplicate (should return same jobId without adding to queue)
+       const jobId2 = await queue.enqueue("duplicate", {
+         jobId: "unique-job-1",
+       });
+       expect(jobId2).toBe("unique-job-1");
+
+       // Wait for processing
+       await new Promise((resolve) => setTimeout(resolve, 100));
+
+       // Should only have processed the first job
+       expect(processed.length).toBe(1);
+       expect(processed[0]).toBe("first");
+     });
+
+     it("should allow different jobIds", async () => {
+       const processed: string[] = [];
+
+       await queue.consume(
+         async (job) => {
+           processed.push(job.data);
+         },
+         { consumerGroup: "different-group", maxRetries: 0 }
+       );
+
+       await queue.enqueue("job1", { jobId: "job-1" });
+       await queue.enqueue("job2", { jobId: "job-2" });
+       await queue.enqueue("job3", { jobId: "job-3" });
+
+       await new Promise((resolve) => setTimeout(resolve, 100));
+
+       expect(processed.length).toBe(3);
+       expect(processed).toContain("job1");
+       expect(processed).toContain("job2");
+       expect(processed).toContain("job3");
+     });
+   });
+ });
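The retry tests above budget their `setTimeout` waits from the backoff formula implemented in the queue source that follows (2^attempt × 1000 ms × delayMultiplier). A quick worked check of that arithmetic at the tests' `delayMultiplier` of 0.01:

```ts
// Backoff formula mirrored from the InMemoryQueue retry path; the values here
// only verify the timings quoted in the test comments above.
const delayMultiplier = 0.01;
const backoffMs = (attempt: number) =>
  Math.pow(2, attempt) * 1000 * delayMultiplier;

console.log(backoffMs(1)); // 20 ms before the first retry
console.log(backoffMs(2)); // 40 ms before the second retry
console.log(backoffMs(1) + backoffMs(2)); // 60 ms total, hence the 150 ms wait with buffer
```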
@@ -0,0 +1,429 @@
+ import {
+   Queue,
+   QueueJob,
+   QueueConsumer,
+   QueueStats,
+   ConsumeOptions,
+   RecurringJobDetails,
+ } from "@checkstack/queue-api";
+ import { InMemoryQueueConfig } from "./plugin";
+
+ /**
+  * Extended queue job with availability tracking for delayed jobs
+  */
+ interface InternalQueueJob<T> extends QueueJob<T> {
+   availableAt: Date;
+ }
+
+ /**
+  * Simple semaphore for concurrency control
+  */
+ class Semaphore {
+   private permits: number;
+   private waiting: Array<() => void> = [];
+
+   constructor(permits: number) {
+     this.permits = permits;
+   }
+
+   async acquire(): Promise<void> {
+     if (this.permits > 0) {
+       this.permits--;
+       return;
+     }
+
+     return new Promise<void>((resolve) => {
+       this.waiting.push(resolve);
+     });
+   }
+
+   release(): void {
+     this.permits++;
+     const resolve = this.waiting.shift();
+     if (resolve) {
+       this.permits--;
+       resolve();
+     }
+   }
+ }
+
+ /**
+  * Consumer group state tracking
+  */
+ interface ConsumerGroupState<T> {
+   consumers: Array<{
+     id: string;
+     handler: QueueConsumer<T>;
+     maxRetries: number;
+   }>;
+   nextConsumerIndex: number; // For round-robin within group
+   processedJobIds: Set<string>; // Track which jobs this group has processed
+ }
+
+ /**
+  * Recurring job metadata
+  */
+ interface RecurringJobMetadata<T> {
+   jobId: string;
+   intervalSeconds: number;
+   payload: T;
+   priority: number;
+   enabled: boolean; // For cancellation
+ }
+
+ /**
+  * In-memory queue implementation with consumer group support
+  */
+ export class InMemoryQueue<T> implements Queue<T> {
+   private jobs: InternalQueueJob<T>[] = [];
+   private consumerGroups = new Map<string, ConsumerGroupState<T>>();
+   private recurringJobs = new Map<string, RecurringJobMetadata<T>>();
+   private semaphore: Semaphore;
+   private stopped = false;
+   private processing = 0;
+   private stats = {
+     completed: 0,
+     failed: 0,
+   };
+
+   constructor(private name: string, private config: InMemoryQueueConfig) {
+     this.semaphore = new Semaphore(config.concurrency);
+   }
+
+   async enqueue(
+     data: T,
+     options?: { priority?: number; startDelay?: number; jobId?: string }
+   ): Promise<string> {
+     if (this.jobs.length >= this.config.maxQueueSize) {
+       throw new Error(
+         `Queue '${this.name}' is full (max: ${this.config.maxQueueSize})`
+       );
+     }
+
+     const now = new Date();
+     const delayMs = options?.startDelay
+       ? options.startDelay * 1000 * (this.config.delayMultiplier ?? 1)
+       : 0;
+     const availableAt = delayMs > 0 ? new Date(now.getTime() + delayMs) : now;
+
+     // Use custom jobId if provided, otherwise generate one
+     const jobId = options?.jobId ?? crypto.randomUUID();
+
+     // Check for duplicate jobId
+     if (options?.jobId && this.jobs.some((j) => j.id === options.jobId)) {
+       // Job with this ID already exists, skip silently
+       return options.jobId;
+     }
+
+     const job: InternalQueueJob<T> = {
+       id: jobId,
+       data,
+       priority: options?.priority ?? 0,
+       timestamp: now,
+       attempts: 0,
+       availableAt,
+     };
+
+     // Insert job in priority order (higher priority first)
+     const insertIndex = this.jobs.findIndex(
+       (existingJob) => existingJob.priority! < job.priority!
+     );
+
+     if (insertIndex === -1) {
+       this.jobs.push(job);
+     } else {
+       this.jobs.splice(insertIndex, 0, job);
+     }
+
+     // Trigger processing for all consumer groups (or schedule for later)
+     if (!this.stopped) {
+       if (options?.startDelay) {
+         // Schedule processing when the job becomes available
+         const scheduledDelayMs =
+           options.startDelay * 1000 * (this.config.delayMultiplier ?? 1);
+         setTimeout(() => {
+           if (!this.stopped) {
+             void this.processNext();
+           }
+         }, scheduledDelayMs);
+       } else {
+         void this.processNext();
+       }
+     }
+
+     return job.id;
+   }
+
+   async consume(
+     consumer: QueueConsumer<T>,
+     options: ConsumeOptions
+   ): Promise<void> {
+     const { consumerGroup, maxRetries = 3 } = options;
+
+     // Get or create consumer group
+     let groupState = this.consumerGroups.get(consumerGroup);
+     if (!groupState) {
+       groupState = {
+         consumers: [],
+         nextConsumerIndex: 0,
+         processedJobIds: new Set(),
+       };
+       this.consumerGroups.set(consumerGroup, groupState);
+     }
+
+     // Add consumer to group
+     groupState.consumers.push({
+       id: crypto.randomUUID(),
+       handler: consumer,
+       maxRetries,
+     });
+
+     // Start processing existing jobs
+     if (!this.stopped) {
+       void this.processNext();
+     }
+   }
+
+   async scheduleRecurring(
+     data: T,
+     options: {
+       jobId: string;
+       intervalSeconds: number;
+       startDelay?: number;
+       priority?: number;
+     }
+   ): Promise<string> {
+     const { jobId, intervalSeconds, startDelay = 0, priority = 0 } = options;
+
+     // Check if this is an update to an existing recurring job
+     const existingMetadata = this.recurringJobs.get(jobId);
+     if (existingMetadata) {
+       // UPDATE CASE: Cancel pending executions
+       // Find and remove any pending jobs for this recurring job
+       this.jobs = this.jobs.filter((job) => {
+         // Check if this job belongs to the recurring job being updated
+         if (job.id.startsWith(jobId + ":")) {
+           // Remove from processed sets to prevent orphaned references
+           for (const group of this.consumerGroups.values()) {
+             group.processedJobIds.delete(job.id);
+           }
+           return false; // Remove this job
+         }
+         return true; // Keep other jobs
+       });
+     }
+
+     // Store or update recurring job metadata
+     this.recurringJobs.set(jobId, {
+       jobId,
+       intervalSeconds,
+       payload: data,
+       priority,
+       enabled: true,
+     });
+
+     // Schedule first execution with new configuration
+     await this.enqueue(data, {
+       jobId: `${jobId}:${Date.now()}`, // Unique ID for this execution
+       startDelay,
+       priority,
+     });
+
+     return jobId;
+   }
+
+   async cancelRecurring(jobId: string): Promise<void> {
+     const metadata = this.recurringJobs.get(jobId);
+     if (metadata) {
+       metadata.enabled = false; // Mark as disabled to stop future rescheduling
+
+       // Also cancel any pending jobs
+       this.jobs = this.jobs.filter((job) => {
+         if (job.id.startsWith(jobId + ":")) {
+           // Remove from processed sets
+           for (const group of this.consumerGroups.values()) {
+             group.processedJobIds.delete(job.id);
+           }
+           return false;
+         }
+         return true;
+       });
+     }
+   }
+
+   async listRecurringJobs(): Promise<string[]> {
+     return [...this.recurringJobs.keys()];
+   }
+
+   async getRecurringJobDetails(
+     jobId: string
+   ): Promise<RecurringJobDetails<T> | undefined> {
+     const metadata = this.recurringJobs.get(jobId);
+     if (!metadata || !metadata.enabled) {
+       return undefined;
+     }
+     return {
+       jobId: metadata.jobId,
+       data: metadata.payload,
+       intervalSeconds: metadata.intervalSeconds,
+       priority: metadata.priority,
+     };
+   }
+
+   async getInFlightCount(): Promise<number> {
+     return this.processing;
+   }
+
+   private async processNext(): Promise<void> {
+     if (this.jobs.length === 0 || this.consumerGroups.size === 0) {
+       return;
+     }
+
+     const now = new Date();
+
+     // For each consumer group, check if there's a job they haven't processed
+     for (const [groupId, groupState] of this.consumerGroups.entries()) {
+       if (groupState.consumers.length === 0) continue;
+
+       // Find next unprocessed job for this group that is available
+       const job = this.jobs.find(
+         (j) => !groupState.processedJobIds.has(j.id) && j.availableAt <= now
+       );
+
+       if (!job) continue;
+
+       // Mark as processed by this group
+       groupState.processedJobIds.add(job.id);
+
+       // Select consumer via round-robin
+       const consumerIndex =
+         groupState.nextConsumerIndex % groupState.consumers.length;
+       const selectedConsumer = groupState.consumers[consumerIndex];
+       groupState.nextConsumerIndex++;
+
+       // Process job (don't await - process asynchronously)
+       void this.processJob(job, selectedConsumer, groupId, groupState);
+     }
+
+     // Remove fully processed jobs
+     this.jobs = this.jobs.filter((job) => {
+       // Job is done if all groups have processed it
+       return ![...this.consumerGroups.values()].every((group) =>
+         group.processedJobIds.has(job.id)
+       );
+     });
+   }
+
+   private async processJob(
+     job: InternalQueueJob<T>,
+     consumer: ConsumerGroupState<T>["consumers"][0],
+     groupId: string,
+     groupState: ConsumerGroupState<T>
+   ): Promise<void> {
+     await this.semaphore.acquire();
+     this.processing++;
+
+     let isRetrying = false;
+
+     try {
+       await consumer.handler(job);
+       this.stats.completed++;
+
+       // After successful execution, check for recurring job and reschedule
+       const recurringJobId = this.findRecurringJobId(job.id);
+       if (recurringJobId) {
+         const metadata = this.recurringJobs.get(recurringJobId);
+         if (metadata?.enabled) {
+           // Reschedule for next interval
+           void this.enqueue(metadata.payload, {
+             jobId: `${recurringJobId}:${Date.now()}`,
+             startDelay: metadata.intervalSeconds,
+             priority: metadata.priority,
+           });
+         }
+       }
+     } catch (error) {
+       console.error(
+         `Job ${job.id} failed in group ${groupId} (attempt ${job.attempts}):`,
+         error
+       );
+
+       // Retry logic
+       if (job.attempts! < consumer.maxRetries) {
+         job.attempts!++;
+         isRetrying = true;
+
+         // Remove from processed set to allow retry
+         groupState.processedJobIds.delete(job.id);
+
+         // Re-add job to queue for retry (with priority to process soon, preserving availableAt)
+         const insertIndex = this.jobs.findIndex(
+           (existingJob) => existingJob.priority! < (job.priority ?? 0)
+         );
+         if (insertIndex === -1) {
+           this.jobs.push(job);
+         } else {
+           this.jobs.splice(insertIndex, 0, job);
+         }
+
+         // Re-trigger processing with exponential backoff
+         const delay =
+           Math.pow(2, job.attempts!) *
+           1000 *
+           (this.config.delayMultiplier ?? 1);
+         setTimeout(() => {
+           if (!this.stopped) {
+             void this.processNext();
+           }
+         }, delay);
+       } else {
+         this.stats.failed++;
+       }
+     } finally {
+       this.processing--;
+       this.semaphore.release();
+
+       // Process next job if available (but not if we're retrying - setTimeout will handle it)
+       if (!isRetrying && this.jobs.length > 0 && !this.stopped) {
+         void this.processNext();
+       }
+     }
+   }
+
+   /**
+    * Extract the recurring job ID from an execution job ID
+    * Execution jobs have format: "recurringJobId:timestamp"
+    */
+   private findRecurringJobId(executionJobId: string): string | undefined {
+     // Check if this execution job belongs to any recurring job
+     for (const [recurringId] of this.recurringJobs) {
+       if (executionJobId.startsWith(recurringId + ":")) {
+         return recurringId;
+       }
+     }
+     return undefined;
+   }
+
+   async stop(): Promise<void> {
+     this.stopped = true;
+
+     // Wait for all processing jobs to complete
+     while (this.processing > 0) {
+       await new Promise((resolve) => setTimeout(resolve, 100));
+     }
+   }
+
+   async getStats(): Promise<QueueStats> {
+     return {
+       pending: this.jobs.length,
+       processing: this.processing,
+       completed: this.stats.completed,
+       failed: this.stats.failed,
+       consumerGroups: this.consumerGroups.size,
+     };
+   }
+
+   async testConnection(): Promise<void> {
+     // In-memory queue is always available
+   }
+ }
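Taken together, `enqueue`, `consume`, and `scheduleRecurring` above cover the whole job lifecycle. A minimal end-to-end sketch using only the methods defined in this file; the queue name, payloads, and config values are illustrative, not part of the package:

```ts
import { InMemoryQueue } from "./memory-queue";

async function demo() {
  const queue = new InMemoryQueue<string>("demo", {
    concurrency: 2,
    maxQueueSize: 1000,
    delayMultiplier: 1,
  });

  // Two consumers in the same group: each enqueued job goes to exactly one of them.
  await queue.consume(async (job) => console.log("worker A", job.data), {
    consumerGroup: "workers",
    maxRetries: 2,
  });
  await queue.consume(async (job) => console.log("worker B", job.data), {
    consumerGroup: "workers",
    maxRetries: 2,
  });

  await queue.enqueue("check-homepage", { priority: 5 });
  await queue.scheduleRecurring("heartbeat", {
    jobId: "heartbeat",
    intervalSeconds: 60,
  });

  console.log(await queue.getStats()); // { pending, processing, completed, failed, consumerGroups }
  await queue.stop();
}

void demo();
```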
@@ -0,0 +1,9 @@
+ import { definePluginMetadata } from "@checkstack/common";
+
+ /**
+  * Plugin metadata for the Queue Memory backend.
+  * This is the single source of truth for the plugin ID.
+  */
+ export const pluginMetadata = definePluginMetadata({
+   pluginId: "queue-memory",
+ });
package/src/plugin.ts ADDED
@@ -0,0 +1,44 @@
+ import { QueuePlugin, Queue } from "@checkstack/queue-api";
+ import { z } from "zod";
+ import { InMemoryQueue } from "./memory-queue";
+
+ const configSchema = z.object({
+   concurrency: z
+     .number()
+     .min(1)
+     .max(100)
+     .default(10)
+     .describe("Maximum number of concurrent jobs to process"),
+   maxQueueSize: z
+     .number()
+     .min(1)
+     .default(10_000)
+     .describe("Maximum number of jobs that can be queued"),
+   /**
+    * Multiplier for all delays (retry backoff, startDelay).
+    * Default is 1 (normal). Set to 0.01 in tests for 100x faster delays.
+    */
+   delayMultiplier: z
+     .number()
+     .min(0)
+     .max(1)
+     .default(1)
+     .describe(
+       "Delay multiplier (default: 1). Only change for testing purposes - set to 0.01 for 100x faster test execution."
+     ),
+ });
+
+ export type InMemoryQueueConfig = z.infer<typeof configSchema>;
+
+ export class InMemoryQueuePlugin implements QueuePlugin<InMemoryQueueConfig> {
+   id = "memory";
+   displayName = "In-Memory Queue";
+   description =
+     "Simple in-memory queue for development and single-instance deployments";
+   configVersion = 1; // Initial version
+   configSchema = configSchema;
+
+   createQueue<T>(name: string, config: InMemoryQueueConfig): Queue<T> {
+     return new InMemoryQueue<T>(name, config);
+   }
+ }
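Because every field in `configSchema` has a default, a caller can validate a partial raw config and pass the result straight to `createQueue`. A small sketch; the queue name is made up, and in the real system the registration in `src/index.ts` hands the plugin to the queue plugin registry instead of wiring it by hand:

```ts
import { InMemoryQueuePlugin } from "./plugin";

const plugin = new InMemoryQueuePlugin();

// Unspecified fields fall back to the schema defaults
// (maxQueueSize: 10_000, delayMultiplier: 1).
const config = plugin.configSchema.parse({ concurrency: 5 });

const queue = plugin.createQueue<string>("health-checks", config);
void queue.enqueue("example-payload");
```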
package/tsconfig.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "extends": "@checkstack/tsconfig/backend.json",
+   "include": [
+     "src"
+   ]
+ }