@checkstack/queue-memory-backend 0.1.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,52 @@
  # @checkstack/queue-memory-backend

+ ## 0.2.1
+
+ ### Patch Changes
+
+ - Updated dependencies [180be38]
+ - Updated dependencies [7a23261]
+   - @checkstack/queue-api@0.1.0
+   - @checkstack/common@0.3.0
+   - @checkstack/backend-api@0.3.2
+   - @checkstack/queue-memory-common@0.1.1
+
+ ## 0.2.0
+
+ ### Minor Changes
+
+ - 9a27800: Changed recurring job scheduling from completion-based to wall-clock scheduling.
+
+   **Breaking Change:** Recurring jobs now run on a fixed interval (like BullMQ) regardless of whether the previous job has completed. If a job takes longer than `intervalSeconds`, multiple jobs may run concurrently.
+
+   **Improvements:**
+
+   - Fixed job ID collision bug when rescheduling within the same millisecond
+   - Configuration updates via `scheduleRecurring()` now properly cancel old intervals before starting new ones
+   - Added `heartbeatIntervalMs` to config for resilient job recovery after system sleep
+
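To make the wall-clock behavior described above concrete, here is a minimal editorial sketch (not part of the released package) written against the `InMemoryQueue` API that appears later in this diff; the queue name, config values, and payload are illustrative:

```ts
import { InMemoryQueue } from "./memory-queue";
import type { Logger } from "@checkstack/backend-api";

// Minimal logger, mirroring the stub shape used in the package's own tests.
const logger: Logger = {
  debug: () => {},
  info: () => {},
  warn: () => {},
  error: (...args) => console.error(...args),
};

const queue = new InMemoryQueue<string>(
  "wall-clock-demo",
  { concurrency: 10, maxQueueSize: 100, delayMultiplier: 1, heartbeatIntervalMs: 5000 },
  logger
);

await queue.consume(
  async () => {
    // A handler that takes longer than the interval: under wall-clock scheduling
    // the next execution is still enqueued on schedule, so runs can overlap.
    await new Promise((resolve) => setTimeout(resolve, 45_000));
  },
  { consumerGroup: "demo", maxRetries: 0 }
);

// Fires every 30 seconds of wall-clock time, like a BullMQ repeatable job.
await queue.scheduleRecurring("payload", {
  jobId: "demo-job",
  intervalSeconds: 30,
});

// Calling scheduleRecurring again with the same jobId replaces the old interval;
// cancelRecurring clears the timer and removes pending executions.
await queue.cancelRecurring("demo-job");
```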
+ ### Patch Changes
+
+ - 9a27800: Fix recurring job resilience and add logger support
+
+   **Rescheduling Fix:**
+   Previously, recurring job rescheduling logic was inside the `try` block of `processJob()`. When a job handler threw an exception and `maxRetries` was exhausted (or 0), the recurring job would never be rescheduled, permanently breaking the scheduling chain.
+
+   This fix moves the rescheduling logic to the `finally` block, ensuring recurring jobs are always rescheduled after execution, regardless of success or failure.
+
+   **Heartbeat Mechanism:**
+   Added a periodic heartbeat (default: 5 seconds) that checks for ready jobs and triggers processing. This ensures jobs are processed even if `setTimeout` callbacks fail to fire (e.g., after system sleep/wake cycles). Configurable via the `heartbeatIntervalMs` option; set to 0 to disable.
+
+   **Logger Service Integration:**
+
+   - Added optional `logger` parameter to the `QueuePlugin.createQueue()` interface
+   - `InMemoryQueue` now uses the provided logger instead of raw `console.error`
+   - Consistent with the rest of the codebase's logging patterns
+
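A sketch of how the heartbeat and logger described above are wired in (an assumed usage example, not code from the package), based on the `createQueue()` signature and config schema shown in `plugin.ts` further down:

```ts
import { InMemoryQueuePlugin } from "./plugin";
import type { Logger } from "@checkstack/backend-api";

// The host application supplies the logger; this stub forwards errors only.
const logger: Logger = {
  debug: () => {},
  info: () => {},
  warn: () => {},
  error: (...args) => console.error(...args),
};

const plugin = new InMemoryQueuePlugin();
const queue = plugin.createQueue<string>(
  "health-checks",
  {
    concurrency: 10,
    maxQueueSize: 1000,
    delayMultiplier: 1,
    heartbeatIntervalMs: 5000, // poll for ready jobs every 5s; 0 disables the heartbeat
  },
  logger // failed jobs are reported via logger.error instead of console.error
);
```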
+ - Updated dependencies [9a27800]
+   - @checkstack/queue-api@0.0.6
+   - @checkstack/backend-api@0.3.1
+
  ## 0.1.0

  ### Minor Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@checkstack/queue-memory-backend",
-   "version": "0.1.0",
+   "version": "0.2.1",
    "type": "module",
    "main": "src/index.ts",
    "scripts": {
package/src/benchmark.ts ADDED
@@ -0,0 +1,215 @@
+ #!/usr/bin/env bun
+ /**
+  * Queue Load Benchmark Script
+  *
+  * Run manually to measure queue throughput at various concurrency levels:
+  *
+  * cd plugins/queue-memory-backend
+  * bun run src/benchmark.ts
+  *
+  * This is NOT a test file - it runs benchmarks and prints results.
+  */
+
+ import { InMemoryQueue } from "./memory-queue";
+ import type { Logger } from "@checkstack/backend-api";
+
+ const testLogger: Logger = {
+   debug: () => {},
+   info: () => {},
+   warn: () => {},
+   error: () => {},
+ };
+
+ /**
+  * Simulate an I/O-bound job (like a health check)
+  */
+ async function simulateIOWork(durationMs: number): Promise<void> {
+   await new Promise((resolve) => setTimeout(resolve, durationMs));
+ }
+
+ /**
+  * Calculate percentile from sorted array
+  */
+ function percentile(sortedValues: number[], p: number): number {
+   const index = Math.ceil((p / 100) * sortedValues.length) - 1;
+   return sortedValues[Math.max(0, index)];
+ }
+
+ interface LoadTestResult {
+   concurrency: number;
+   totalJobs: number;
+   durationMs: number;
+   jobsPerSecond: number;
+   avgLatencyMs: number;
+   p50LatencyMs: number;
+   p95LatencyMs: number;
+   p99LatencyMs: number;
+ }
+
+ /**
+  * Run a load test at a specific concurrency level
+  */
+ async function runLoadTest(props: {
+   concurrency: number;
+   totalJobs: number;
+   jobDurationMs: number;
+ }): Promise<LoadTestResult> {
+   const { concurrency, totalJobs, jobDurationMs } = props;
+
+   const queue = new InMemoryQueue<number>(
+     `load-test-${concurrency}`,
+     {
+       concurrency,
+       maxQueueSize: totalJobs + 100,
+       delayMultiplier: 1,
+       heartbeatIntervalMs: 0,
+     },
+     testLogger
+   );
+
+   const latencies: number[] = [];
+   let completed = 0;
+   const startTimes = new Map<number, number>();
+
+   await queue.consume(
+     async (job) => {
+       const jobStart = startTimes.get(job.data) ?? Date.now();
+       await simulateIOWork(jobDurationMs);
+       const latency = Date.now() - jobStart;
+       latencies.push(latency);
+       completed++;
+     },
+     { consumerGroup: "load-test-group", maxRetries: 0 }
+   );
+
+   const testStart = Date.now();
+   for (let i = 0; i < totalJobs; i++) {
+     startTimes.set(i, Date.now());
+     await queue.enqueue(i);
+   }
+
+   while (completed < totalJobs) {
+     await new Promise((resolve) => setTimeout(resolve, 50));
+   }
+   const testDuration = Date.now() - testStart;
+
+   await queue.stop();
+
+   const sortedLatencies = latencies.toSorted((a, b) => a - b);
+   const avgLatency =
+     latencies.reduce((sum, l) => sum + l, 0) / latencies.length;
+
+   return {
+     concurrency,
+     totalJobs,
+     durationMs: testDuration,
+     jobsPerSecond: (totalJobs / testDuration) * 1000,
+     avgLatencyMs: Math.round(avgLatency),
+     p50LatencyMs: percentile(sortedLatencies, 50),
+     p95LatencyMs: percentile(sortedLatencies, 95),
+     p99LatencyMs: percentile(sortedLatencies, 99),
+   };
+ }
+
+ function formatResult(result: LoadTestResult): string {
+   return [
+     `Concurrency: ${result.concurrency}`,
+     ` Jobs: ${result.totalJobs} in ${result.durationMs}ms`,
+     ` Throughput: ${result.jobsPerSecond.toFixed(1)} jobs/sec`,
+     ` Latency: avg=${result.avgLatencyMs}ms, p50=${result.p50LatencyMs}ms, p95=${result.p95LatencyMs}ms, p99=${result.p99LatencyMs}ms`,
+   ].join("\n");
+ }
+
+ // Main benchmark runner
+ async function main() {
+   console.log("\n🚀 Queue Load Benchmark\n");
+   console.log("=".repeat(60));
+
+   // Test 1: Fast jobs
+   console.log("\n📊 Fast Jobs (10ms simulated I/O)\n");
+   for (const concurrency of [5, 10, 25, 50]) {
+     const result = await runLoadTest({
+       concurrency,
+       totalJobs: 100,
+       jobDurationMs: 10,
+     });
+     console.log(formatResult(result));
+     console.log("-".repeat(60));
+   }
+
+   // Test 2: Realistic jobs
+   console.log("\n📊 Realistic Jobs (50ms simulated I/O)\n");
+   for (const concurrency of [5, 10, 25, 50]) {
+     const result = await runLoadTest({
+       concurrency,
+       totalJobs: 100,
+       jobDurationMs: 50,
+     });
+     console.log(formatResult(result));
+     console.log("-".repeat(60));
+   }
+
+   // Test 3: Slow jobs showing concurrency benefit
+   console.log("\n📊 Slow Jobs (200ms) - Demonstrating Concurrency Benefits\n");
+   const results: LoadTestResult[] = [];
+   for (const concurrency of [1, 5, 10, 25]) {
+     const result = await runLoadTest({
+       concurrency,
+       totalJobs: 50,
+       jobDurationMs: 200,
+     });
+     results.push(result);
+     console.log(formatResult(result));
+     console.log("-".repeat(60));
+   }
+   const sequential = results.find((r) => r.concurrency === 1)!;
+   const parallel = results.find((r) => r.concurrency === 25)!;
+   console.log(
+     `\n✨ Speedup: ${(sequential.durationMs / parallel.durationMs).toFixed(
+       1
+     )}x faster with concurrency=25`
+   );
+
+   // Test 4: Burst load
+   console.log("\n📊 Burst Load (500 jobs at once)\n");
+   const burstResult = await runLoadTest({
+     concurrency: 50,
+     totalJobs: 500,
+     jobDurationMs: 20,
+   });
+   console.log(formatResult(burstResult));
+
+   // Test 5: Max throughput
+   console.log("\n📊 Max Throughput (minimal job duration)\n");
+   const maxResult = await runLoadTest({
+     concurrency: 100,
+     totalJobs: 1000,
+     jobDurationMs: 1,
+   });
+   console.log(formatResult(maxResult));
+   console.log(
+     `\n✨ Max throughput: ${maxResult.jobsPerSecond.toFixed(0)} jobs/sec`
+   );
+
+   // Capacity planning guidance
+   console.log("\n" + "=".repeat(60));
+   console.log("📋 CAPACITY PLANNING GUIDANCE");
+   console.log("=".repeat(60));
+   console.log(`
+ Throughput Formula: jobs/sec ≈ concurrency / avg_job_duration_seconds
+
+ Concurrency Settings:
+ - Default: 10 (conservative)
+ - Moderate: 25-50 (I/O-bound checks)
+ - Aggressive: 100 (max, watch resources)
+
+ Bottlenecks: DB pool, rate limits, memory, sockets
+
+ Recommendations:
+ - Start with concurrency=10
+ - Increase if jobs queue but CPU/memory low
+ - Use BullMQ for horizontal scaling
+ `);
+ }
+
+ await main();
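As a worked example of the throughput formula printed by the script above: with `concurrency: 25` and an average job duration of 50 ms (0.05 s), the rough ceiling is 25 / 0.05 ≈ 500 jobs/sec, and the burst test (`concurrency: 50`, 20 ms jobs) has a ceiling near 50 / 0.02 = 2,500 jobs/sec. Measured numbers will land below these ceilings because of enqueue overhead, the 50 ms completion-polling loop in `runLoadTest`, and timer resolution.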
@@ -1,16 +1,34 @@
- import { describe, it, expect, beforeEach } from "bun:test";
+ import { describe, it, expect, beforeEach, afterEach } from "bun:test";
  import { InMemoryQueue } from "./memory-queue";
  import type { QueueJob } from "@checkstack/queue-api";
+ import type { Logger } from "@checkstack/backend-api";
+
+ // Suppress console.error output during tests for failed jobs
+ const testLogger: Logger = {
+   debug: () => {},
+   info: () => {},
+   warn: () => {},
+   error: () => {},
+ };

  describe("InMemoryQueue Consumer Groups", () => {
    let queue: InMemoryQueue<string>;

    beforeEach(() => {
-     queue = new InMemoryQueue("test-queue", {
-       concurrency: 10,
-       maxQueueSize: 100,
-       delayMultiplier: 0.01, // 100x faster delays for testing
-     });
+     queue = new InMemoryQueue(
+       "test-queue",
+       {
+         concurrency: 10,
+         maxQueueSize: 100,
+         delayMultiplier: 0.01, // 100x faster delays for testing
+         heartbeatIntervalMs: 0, // Disable heartbeat during tests
+       },
+       testLogger
+     );
+   });
+
+   afterEach(async () => {
+     await queue.stop();
    });

    describe("Broadcast Pattern (Unique Consumer Groups)", () => {
@@ -376,7 +394,7 @@ describe("InMemoryQueue Consumer Groups", () => {
      expect(processed).toEqual(["immediate"]);

      // Wait for delayed job (100ms + generous buffer for CI)
-     await new Promise((resolve) => setTimeout(resolve, 200));
+     await new Promise((resolve) => setTimeout(resolve, 300));
      expect(processed).toEqual(["immediate", "delayed"]);
    });

@@ -468,4 +486,6 @@ describe("InMemoryQueue Consumer Groups", () => {
      expect(processed).toContain("job3");
    });
  });
+
+   // NOTE: Recurring job tests are in recurring-jobs.test.ts
  });
package/src/memory-queue.ts CHANGED
@@ -6,6 +6,7 @@ import {
    ConsumeOptions,
    RecurringJobDetails,
  } from "@checkstack/queue-api";
+ import type { Logger } from "@checkstack/backend-api";
  import { InMemoryQueueConfig } from "./plugin";

  /**
@@ -69,6 +70,7 @@ interface RecurringJobMetadata<T> {
    payload: T;
    priority: number;
    enabled: boolean; // For cancellation
+   intervalId?: ReturnType<typeof setInterval>; // For wall-clock scheduling
  }

  /**
@@ -86,8 +88,40 @@ export class InMemoryQueue<T> implements Queue<T> {
      failed: 0,
    };

-   constructor(private name: string, private config: InMemoryQueueConfig) {
+   private logger: Logger;
+   private heartbeatInterval: ReturnType<typeof setInterval> | undefined;
+
+   constructor(
+     private name: string,
+     private config: InMemoryQueueConfig,
+     logger: Logger
+   ) {
      this.semaphore = new Semaphore(config.concurrency);
+     this.logger = logger;
+
+     // Start heartbeat for resilient job processing (e.g., after system sleep)
+     if (config.heartbeatIntervalMs > 0) {
+       this.heartbeatInterval = setInterval(() => {
+         if (
+           !this.stopped &&
+           this.jobs.length > 0 &&
+           this.consumerGroups.size > 0
+         ) {
+           void this.processNext();
+         }
+       }, config.heartbeatIntervalMs);
+     }
+   }
+
+   /**
+    * Schedule a callback after a delay.
+    */
+   private scheduleDelayed(ms: number, callback: () => void): void {
+     setTimeout(() => {
+       if (!this.stopped) {
+         callback();
+       }
+     }, ms);
    }

    async enqueue(
@@ -141,11 +175,9 @@ export class InMemoryQueue<T> implements Queue<T> {
        // Schedule processing when the job becomes available
        const scheduledDelayMs =
          options.startDelay * 1000 * (this.config.delayMultiplier ?? 1);
-       setTimeout(() => {
-         if (!this.stopped) {
-           void this.processNext();
-         }
-       }, scheduledDelayMs);
+       this.scheduleDelayed(scheduledDelayMs, () => {
+         void this.processNext();
+       });
      } else {
        void this.processNext();
      }
@@ -198,7 +230,11 @@ export class InMemoryQueue<T> implements Queue<T> {
      // Check if this is an update to an existing recurring job
      const existingMetadata = this.recurringJobs.get(jobId);
      if (existingMetadata) {
-       // UPDATE CASE: Cancel pending executions
+       // UPDATE CASE: Clear existing interval and pending executions
+       if (existingMetadata.intervalId) {
+         clearInterval(existingMetadata.intervalId);
+       }
+
        // Find and remove any pending jobs for this recurring job
        this.jobs = this.jobs.filter((job) => {
          // Check if this job belongs to the recurring job being updated
@@ -213,18 +249,40 @@ export class InMemoryQueue<T> implements Queue<T> {
        });
      }

-     // Store or update recurring job metadata
+     // Calculate interval in ms with delay multiplier
+     const intervalMs =
+       intervalSeconds * 1000 * (this.config.delayMultiplier ?? 1);
+
+     // Create interval for wall-clock scheduling
+     const intervalId = setInterval(() => {
+       if (!this.stopped) {
+         // Add random suffix to ensure unique job IDs
+         const uniqueId = `${jobId}:${Date.now()}-${Math.random()
+           .toString(36)
+           .slice(2, 8)}`;
+         void this.enqueue(data, {
+           jobId: uniqueId,
+           priority,
+         });
+       }
+     }, intervalMs);
+
+     // Store recurring job metadata with interval ID
      this.recurringJobs.set(jobId, {
        jobId,
        intervalSeconds,
        payload: data,
        priority,
        enabled: true,
+       intervalId,
      });

-     // Schedule first execution with new configuration
+     // Schedule first execution (with optional startDelay)
+     const firstJobId = `${jobId}:${Date.now()}-${Math.random()
+       .toString(36)
+       .slice(2, 8)}`;
      await this.enqueue(data, {
-       jobId: `${jobId}:${Date.now()}`, // Unique ID for this execution
+       jobId: firstJobId,
        startDelay,
        priority,
      });
@@ -235,7 +293,13 @@ export class InMemoryQueue<T> implements Queue<T> {
    async cancelRecurring(jobId: string): Promise<void> {
      const metadata = this.recurringJobs.get(jobId);
      if (metadata) {
-       metadata.enabled = false; // Mark as disabled to stop future rescheduling
+       metadata.enabled = false; // Mark as disabled
+
+       // Clear the interval timer
+       if (metadata.intervalId) {
+         clearInterval(metadata.intervalId);
+         metadata.intervalId = undefined;
+       }

        // Also cancel any pending jobs
        this.jobs = this.jobs.filter((job) => {
@@ -328,22 +392,8 @@ export class InMemoryQueue<T> implements Queue<T> {
      try {
        await consumer.handler(job);
        this.stats.completed++;
-
-       // After successful execution, check for recurring job and reschedule
-       const recurringJobId = this.findRecurringJobId(job.id);
-       if (recurringJobId) {
-         const metadata = this.recurringJobs.get(recurringJobId);
-         if (metadata?.enabled) {
-           // Reschedule for next interval
-           void this.enqueue(metadata.payload, {
-             jobId: `${recurringJobId}:${Date.now()}`,
-             startDelay: metadata.intervalSeconds,
-             priority: metadata.priority,
-           });
-         }
-       }
      } catch (error) {
-       console.error(
+       this.logger.error(
          `Job ${job.id} failed in group ${groupId} (attempt ${job.attempts}):`,
          error
        );
@@ -371,11 +421,9 @@ export class InMemoryQueue<T> implements Queue<T> {
          Math.pow(2, job.attempts!) *
          1000 *
          (this.config.delayMultiplier ?? 1);
-       setTimeout(() => {
-         if (!this.stopped) {
-           void this.processNext();
-         }
-       }, delay);
+       this.scheduleDelayed(delay, () => {
+         void this.processNext();
+       });
      } else {
        this.stats.failed++;
      }
@@ -390,23 +438,24 @@ export class InMemoryQueue<T> implements Queue<T> {
      }
    }

-   /**
-    * Extract the recurring job ID from an execution job ID
-    * Execution jobs have format: "recurringJobId:timestamp"
-    */
-   private findRecurringJobId(executionJobId: string): string | undefined {
-     // Check if this execution job belongs to any recurring job
-     for (const [recurringId] of this.recurringJobs) {
-       if (executionJobId.startsWith(recurringId + ":")) {
-         return recurringId;
-       }
-     }
-     return undefined;
-   }
-
    async stop(): Promise<void> {
      this.stopped = true;

+     // Clear heartbeat interval
+     if (this.heartbeatInterval) {
+       clearInterval(this.heartbeatInterval);
+       this.heartbeatInterval = undefined;
+     }
+
+     // Clear all recurring job intervals
+     for (const metadata of this.recurringJobs.values()) {
+       if (metadata.intervalId) {
+         clearInterval(metadata.intervalId);
+         metadata.intervalId = undefined;
+       }
+       metadata.enabled = false;
+     }
+
      // Wait for all processing jobs to complete
      while (this.processing > 0) {
        await new Promise((resolve) => setTimeout(resolve, 100));
package/src/plugin.ts CHANGED
@@ -1,4 +1,5 @@
  import { QueuePlugin, Queue } from "@checkstack/queue-api";
+ import type { Logger } from "@checkstack/backend-api";
  import { z } from "zod";
  import { InMemoryQueue } from "./memory-queue";

@@ -26,6 +27,18 @@ const configSchema = z.object({
      .describe(
        "Delay multiplier (default: 1). Only change for testing purposes - set to 0.01 for 100x faster test execution."
      ),
+   /**
+    * Interval in milliseconds for the heartbeat that checks for ready jobs.
+    * This ensures jobs are processed even if setTimeout fails (e.g., after system sleep).
+    * Set to 0 to disable.
+    */
+   heartbeatIntervalMs: z
+     .number()
+     .min(0)
+     .default(5000)
+     .describe(
+       "Heartbeat interval in ms to check for ready jobs (default: 5000). Set to 0 to disable."
+     ),
  });

  export type InMemoryQueueConfig = z.infer<typeof configSchema>;
@@ -38,7 +51,11 @@ export class InMemoryQueuePlugin implements QueuePlugin<InMemoryQueueConfig> {
    configVersion = 1; // Initial version
    configSchema = configSchema;

-   createQueue<T>(name: string, config: InMemoryQueueConfig): Queue<T> {
-     return new InMemoryQueue<T>(name, config);
+   createQueue<T>(
+     name: string,
+     config: InMemoryQueueConfig,
+     logger: Logger
+   ): Queue<T> {
+     return new InMemoryQueue<T>(name, config, logger);
    }
  }
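One consequence of the `.default(5000)` above, sketched as an assumed usage example (not code from the package): a config object persisted before 0.2.x still parses cleanly and picks up the heartbeat automatically.

```ts
import { InMemoryQueuePlugin } from "./plugin";

const plugin = new InMemoryQueuePlugin();

// Fields other than heartbeatIntervalMs are spelled out because their defaults
// are not visible in this diff.
const config = plugin.configSchema.parse({
  concurrency: 10,
  maxQueueSize: 1000,
  delayMultiplier: 1,
});

console.log(config.heartbeatIntervalMs); // 5000
```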
package/src/recurring-jobs.test.ts ADDED
@@ -0,0 +1,204 @@
+ /**
+  * Recurring Job Tests for InMemoryQueue
+  */
+
+ import { describe, it, expect, afterEach } from "bun:test";
+ import { InMemoryQueue } from "./memory-queue";
+ import type { Logger } from "@checkstack/backend-api";
+
+ const testLogger: Logger = {
+   debug: () => {},
+   info: () => {},
+   warn: () => {},
+   error: () => {},
+ };
+
+ function createTestQueue(name: string) {
+   return new InMemoryQueue<string>(
+     name,
+     {
+       concurrency: 10,
+       maxQueueSize: 100,
+       delayMultiplier: 0.01, // Speed up delays for testing
+       heartbeatIntervalMs: 5,
+     },
+     testLogger
+   );
+ }
+
+ describe("InMemoryQueue Recurring Jobs", () => {
+   let queue: InMemoryQueue<string> | undefined;
+
+   afterEach(async () => {
+     if (queue) {
+       await queue.stop();
+       queue = undefined;
+     }
+   });
+
+   it("should reschedule recurring job after successful execution", async () => {
+     queue = createTestQueue("test-reschedule");
+
+     let executionCount = 0;
+     await queue.consume(
+       async () => {
+         executionCount++;
+       },
+       { consumerGroup: "test", maxRetries: 0 }
+     );
+
+     await queue.scheduleRecurring("payload", {
+       jobId: "recurring-success",
+       intervalSeconds: 0.5, // 5ms with multiplier
+     });
+
+     // Wait for multiple executions
+     await Bun.sleep(100);
+
+     expect(executionCount).toBeGreaterThanOrEqual(2);
+   });
+
+   it("should reschedule recurring job even after handler failure", async () => {
+     queue = createTestQueue("test-failure-reschedule");
+
+     let executionCount = 0;
+     await queue.consume(
+       async () => {
+         executionCount++;
+         throw new Error("Handler failed");
+       },
+       { consumerGroup: "test", maxRetries: 0 }
+     );
+
+     await queue.scheduleRecurring("payload", {
+       jobId: "recurring-failure",
+       intervalSeconds: 0.5,
+     });
+
+     await Bun.sleep(100);
+
+     // Should still reschedule despite failures
+     expect(executionCount).toBeGreaterThanOrEqual(2);
+   });
+
+   it("should not reschedule during retries", async () => {
+     queue = createTestQueue("test-no-reschedule-during-retry");
+
+     let attempts = 0;
+     let completed = false;
+
+     await queue.consume(
+       async () => {
+         attempts++;
+         if (attempts < 3) {
+           throw new Error("Retry me");
+         }
+         completed = true;
+       },
+       { consumerGroup: "test", maxRetries: 5 }
+     );
+
+     await queue.scheduleRecurring("payload", {
+       jobId: "recurring-retry",
+       intervalSeconds: 60, // Long interval so only retries should execute
+     });
+
+     await Bun.sleep(200);
+
+     // Should complete after retries, not reschedule
+     expect(completed).toBe(true);
+     expect(attempts).toBe(3);
+   });
+
+   it("should stop scheduling when cancelled", async () => {
+     queue = createTestQueue("test-cancel");
+
+     let executionCount = 0;
+     await queue.consume(
+       async () => {
+         executionCount++;
+       },
+       { consumerGroup: "test", maxRetries: 0 }
+     );
+
+     await queue.scheduleRecurring("payload", {
+       jobId: "recurring-cancel",
+       intervalSeconds: 0.5,
+     });
+
+     await Bun.sleep(50);
+     const countBeforeCancel = executionCount;
+
+     await queue.cancelRecurring("recurring-cancel");
+     await Bun.sleep(100);
+
+     // Should not have executed more after cancellation
+     expect(countBeforeCancel).toBeGreaterThanOrEqual(1);
+     expect(executionCount).toBe(countBeforeCancel);
+   });
+
+   it("should update recurring job when called with same jobId", async () => {
+     queue = createTestQueue("test-update");
+
+     const payloads: string[] = [];
+     await queue.consume(
+       async (job) => {
+         payloads.push(job.data);
+       },
+       { consumerGroup: "test", maxRetries: 0 }
+     );
+
+     // Schedule with original payload
+     await queue.scheduleRecurring("original-payload", {
+       jobId: "recurring-update",
+       intervalSeconds: 5, // 50ms with multiplier - slow interval
+     });
+
+     await Bun.sleep(20);
+
+     // Update to new payload and faster interval
+     await queue.scheduleRecurring("updated-payload", {
+       jobId: "recurring-update",
+       intervalSeconds: 0.5, // 5ms with multiplier
+     });
+
+     await Bun.sleep(80);
+
+     // Should have the updated payload multiple times
+     const updatedPayloads = payloads.filter((p) => p === "updated-payload");
+     expect(updatedPayloads.length).toBeGreaterThanOrEqual(2);
+   });
+
+   it("should cancel old interval and pending jobs when updating", async () => {
+     queue = createTestQueue("test-update-cancels-old");
+
+     let executionCount = 0;
+     await queue.consume(
+       async () => {
+         executionCount++;
+       },
+       { consumerGroup: "test", maxRetries: 0 }
+     );
+
+     // Schedule with a long interval (won't fire again during test)
+     await queue.scheduleRecurring("payload", {
+       jobId: "recurring-test",
+       intervalSeconds: 100, // Very long, should only execute once initially
+     });
+
+     await Bun.sleep(30);
+     const countAfterFirst = executionCount;
+
+     // Update to a short interval
+     await queue.scheduleRecurring("payload", {
+       jobId: "recurring-test",
+       intervalSeconds: 0.5, // 5ms with multiplier
+     });
+
+     await Bun.sleep(80);
+
+     // Should have executed multiple times with new interval
+     expect(executionCount).toBeGreaterThan(countAfterFirst);
+     expect(executionCount - countAfterFirst).toBeGreaterThanOrEqual(2);
+   });
+ });