@checkstack/queue-memory-backend 0.2.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
1
1
  # @checkstack/queue-memory-backend
2
2
 
3
+ ## 0.2.1
4
+
5
+ ### Patch Changes
6
+
7
+ - Updated dependencies [180be38]
8
+ - Updated dependencies [7a23261]
9
+ - @checkstack/queue-api@0.1.0
10
+ - @checkstack/common@0.3.0
11
+ - @checkstack/backend-api@0.3.2
12
+ - @checkstack/queue-memory-common@0.1.1
13
+
3
14
  ## 0.2.0
4
15
 
5
16
  ### Minor Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@checkstack/queue-memory-backend",
3
- "version": "0.2.0",
3
+ "version": "0.2.1",
4
4
  "type": "module",
5
5
  "main": "src/index.ts",
6
6
  "scripts": {
@@ -0,0 +1,215 @@
1
+ #!/usr/bin/env bun
2
+ /**
3
+ * Queue Load Benchmark Script
4
+ *
5
+ * Run manually to measure queue throughput at various concurrency levels:
6
+ *
7
+ * cd plugins/queue-memory-backend
8
+ * bun run src/benchmark.ts
9
+ *
10
+ * This is NOT a test file - it runs benchmarks and prints results.
11
+ */
12
+
13
+ import { InMemoryQueue } from "./memory-queue";
14
+ import type { Logger } from "@checkstack/backend-api";
15
+
16
+ const testLogger: Logger = {
17
+ debug: () => {},
18
+ info: () => {},
19
+ warn: () => {},
20
+ error: () => {},
21
+ };
22
+
23
+ /**
24
+ * Simulate an I/O-bound job (like a health check)
25
+ */
26
+ async function simulateIOWork(durationMs: number): Promise<void> {
27
+ await new Promise((resolve) => setTimeout(resolve, durationMs));
28
+ }
29
+
30
+ /**
31
+ * Calculate percentile from sorted array
32
+ */
33
+ function percentile(sortedValues: number[], p: number): number {
34
+ const index = Math.ceil((p / 100) * sortedValues.length) - 1;
35
+ return sortedValues[Math.max(0, index)];
36
+ }
37
+
38
+ interface LoadTestResult {
39
+ concurrency: number;
40
+ totalJobs: number;
41
+ durationMs: number;
42
+ jobsPerSecond: number;
43
+ avgLatencyMs: number;
44
+ p50LatencyMs: number;
45
+ p95LatencyMs: number;
46
+ p99LatencyMs: number;
47
+ }
48
+
49
+ /**
50
+ * Run a load test at a specific concurrency level
51
+ */
52
+ async function runLoadTest(props: {
53
+ concurrency: number;
54
+ totalJobs: number;
55
+ jobDurationMs: number;
56
+ }): Promise<LoadTestResult> {
57
+ const { concurrency, totalJobs, jobDurationMs } = props;
58
+
59
+ const queue = new InMemoryQueue<number>(
60
+ `load-test-${concurrency}`,
61
+ {
62
+ concurrency,
63
+ maxQueueSize: totalJobs + 100,
64
+ delayMultiplier: 1,
65
+ heartbeatIntervalMs: 0,
66
+ },
67
+ testLogger
68
+ );
69
+
70
+ const latencies: number[] = [];
71
+ let completed = 0;
72
+ const startTimes = new Map<number, number>();
73
+
74
+ await queue.consume(
75
+ async (job) => {
76
+ const jobStart = startTimes.get(job.data) ?? Date.now();
77
+ await simulateIOWork(jobDurationMs);
78
+ const latency = Date.now() - jobStart;
79
+ latencies.push(latency);
80
+ completed++;
81
+ },
82
+ { consumerGroup: "load-test-group", maxRetries: 0 }
83
+ );
84
+
85
+ const testStart = Date.now();
86
+ for (let i = 0; i < totalJobs; i++) {
87
+ startTimes.set(i, Date.now());
88
+ await queue.enqueue(i);
89
+ }
90
+
91
+ while (completed < totalJobs) {
92
+ await new Promise((resolve) => setTimeout(resolve, 50));
93
+ }
94
+ const testDuration = Date.now() - testStart;
95
+
96
+ await queue.stop();
97
+
98
+ const sortedLatencies = latencies.toSorted((a, b) => a - b);
99
+ const avgLatency =
100
+ latencies.reduce((sum, l) => sum + l, 0) / latencies.length;
101
+
102
+ return {
103
+ concurrency,
104
+ totalJobs,
105
+ durationMs: testDuration,
106
+ jobsPerSecond: (totalJobs / testDuration) * 1000,
107
+ avgLatencyMs: Math.round(avgLatency),
108
+ p50LatencyMs: percentile(sortedLatencies, 50),
109
+ p95LatencyMs: percentile(sortedLatencies, 95),
110
+ p99LatencyMs: percentile(sortedLatencies, 99),
111
+ };
112
+ }
113
+
114
+ function formatResult(result: LoadTestResult): string {
115
+ return [
116
+ `Concurrency: ${result.concurrency}`,
117
+ ` Jobs: ${result.totalJobs} in ${result.durationMs}ms`,
118
+ ` Throughput: ${result.jobsPerSecond.toFixed(1)} jobs/sec`,
119
+ ` Latency: avg=${result.avgLatencyMs}ms, p50=${result.p50LatencyMs}ms, p95=${result.p95LatencyMs}ms, p99=${result.p99LatencyMs}ms`,
120
+ ].join("\n");
121
+ }
122
+
123
+ // Main benchmark runner
124
+ async function main() {
125
+ console.log("\n🚀 Queue Load Benchmark\n");
126
+ console.log("=".repeat(60));
127
+
128
+ // Test 1: Fast jobs
129
+ console.log("\n📊 Fast Jobs (10ms simulated I/O)\n");
130
+ for (const concurrency of [5, 10, 25, 50]) {
131
+ const result = await runLoadTest({
132
+ concurrency,
133
+ totalJobs: 100,
134
+ jobDurationMs: 10,
135
+ });
136
+ console.log(formatResult(result));
137
+ console.log("-".repeat(60));
138
+ }
139
+
140
+ // Test 2: Realistic jobs
141
+ console.log("\n📊 Realistic Jobs (50ms simulated I/O)\n");
142
+ for (const concurrency of [5, 10, 25, 50]) {
143
+ const result = await runLoadTest({
144
+ concurrency,
145
+ totalJobs: 100,
146
+ jobDurationMs: 50,
147
+ });
148
+ console.log(formatResult(result));
149
+ console.log("-".repeat(60));
150
+ }
151
+
152
+ // Test 3: Slow jobs showing concurrency benefit
153
+ console.log("\n📊 Slow Jobs (200ms) - Demonstrating Concurrency Benefits\n");
154
+ const results: LoadTestResult[] = [];
155
+ for (const concurrency of [1, 5, 10, 25]) {
156
+ const result = await runLoadTest({
157
+ concurrency,
158
+ totalJobs: 50,
159
+ jobDurationMs: 200,
160
+ });
161
+ results.push(result);
162
+ console.log(formatResult(result));
163
+ console.log("-".repeat(60));
164
+ }
165
+ const sequential = results.find((r) => r.concurrency === 1)!;
166
+ const parallel = results.find((r) => r.concurrency === 25)!;
167
+ console.log(
168
+ `\n✨ Speedup: ${(sequential.durationMs / parallel.durationMs).toFixed(
169
+ 1
170
+ )}x faster with concurrency=25`
171
+ );
172
+
173
+ // Test 4: Burst load
174
+ console.log("\n📊 Burst Load (500 jobs at once)\n");
175
+ const burstResult = await runLoadTest({
176
+ concurrency: 50,
177
+ totalJobs: 500,
178
+ jobDurationMs: 20,
179
+ });
180
+ console.log(formatResult(burstResult));
181
+
182
+ // Test 5: Max throughput
183
+ console.log("\n📊 Max Throughput (minimal job duration)\n");
184
+ const maxResult = await runLoadTest({
185
+ concurrency: 100,
186
+ totalJobs: 1000,
187
+ jobDurationMs: 1,
188
+ });
189
+ console.log(formatResult(maxResult));
190
+ console.log(
191
+ `\n✨ Max throughput: ${maxResult.jobsPerSecond.toFixed(0)} jobs/sec`
192
+ );
193
+
194
+ // Capacity planning guidance
195
+ console.log("\n" + "=".repeat(60));
196
+ console.log("📋 CAPACITY PLANNING GUIDANCE");
197
+ console.log("=".repeat(60));
198
+ console.log(`
199
+ Throughput Formula: jobs/sec ≈ concurrency / avg_job_duration_seconds
200
+
201
+ Concurrency Settings:
202
+ - Default: 10 (conservative)
203
+ - Moderate: 25-50 (I/O-bound checks)
204
+ - Aggressive: 100 (max, watch resources)
205
+
206
+ Bottlenecks: DB pool, rate limits, memory, sockets
207
+
208
+ Recommendations:
209
+ - Start with concurrency=10
210
+ - Increase if jobs queue but CPU/memory low
211
+ - Use BullMQ for horizontal scaling
212
+ `);
213
+ }
214
+
215
+ await main();