opencode-swarm-plugin 0.3.0 → 0.4.0

@@ -0,0 +1,466 @@
+ /**
+  * Rate Limiter Integration Tests
+  *
+  * Tests the rate limiting functionality with both Redis and SQLite backends.
+  * Requires Redis to be running for Redis tests (skipped if unavailable).
+  */
+ import { describe, test, expect, beforeEach, afterEach } from "vitest";
+ import { join } from "node:path";
+ import { mkdirSync, rmSync, existsSync } from "node:fs";
+ import { tmpdir } from "node:os";
+ import {
+   createRateLimiter,
+   InMemoryRateLimiter,
+   SqliteRateLimiter,
+   RedisRateLimiter,
+   resetFallbackWarning,
+   getLimitsForEndpoint,
+   DEFAULT_LIMITS,
+   type RateLimiter,
+ } from "./rate-limiter";
+ import Redis from "ioredis";
+
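For orientation, the limiter surface these tests exercise looks roughly like the sketch below. It is inferred from the calls made in this file, not copied from the package's rate-limiter module, so any shape beyond checkLimit, recordRequest, reset, and close is an assumption.

// Approximate shape inferred from the tests in this diff (an assumption, not
// the package's actual declarations).
interface RateLimitResultSketch {
  allowed: boolean;   // false once a window is exhausted
  remaining: number;  // most restrictive remaining count across windows
  resetAt: number;    // epoch milliseconds when the blocking window resets
}

interface RateLimiterSketch {
  checkLimit(agent: string, endpoint: string): Promise<RateLimitResultSketch>;
  recordRequest(agent: string, endpoint: string): Promise<void>;
  close(): Promise<void>;
}
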
+ // ============================================================================
+ // Test Utilities
+ // ============================================================================
+
+ const TEST_AGENT = "TestAgent";
+ const TEST_ENDPOINT = "send";
+
+ /**
+  * Create a temporary directory for SQLite tests
+  */
+ function createTempDir(): string {
+   const dir = join(tmpdir(), `rate-limiter-test-${Date.now()}`);
+   mkdirSync(dir, { recursive: true });
+   return dir;
+ }
+
+ /**
+  * Clean up temporary directory
+  */
+ function cleanupTempDir(dir: string): void {
+   if (existsSync(dir)) {
+     rmSync(dir, { recursive: true, force: true });
+   }
+ }
+
+ /**
+  * Check if Redis is available
+  */
+ async function isRedisAvailable(): Promise<boolean> {
+   try {
+     const redis = new Redis({
+       connectTimeout: 1000,
+       maxRetriesPerRequest: 1,
+       retryStrategy: () => null,
+       lazyConnect: true,
+     });
+     await redis.connect();
+     await redis.ping();
+     await redis.quit();
+     return true;
+   } catch {
+     return false;
+   }
+ }
+
+ // ============================================================================
+ // InMemoryRateLimiter Tests
+ // ============================================================================
+
+ describe("InMemoryRateLimiter", () => {
+   let limiter: InMemoryRateLimiter;
+
+   beforeEach(() => {
+     limiter = new InMemoryRateLimiter();
+   });
+
+   afterEach(async () => {
+     await limiter.close();
+   });
+
+   test("allows requests under limit", async () => {
+     const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+     expect(result.allowed).toBe(true);
+     expect(result.remaining).toBeGreaterThan(0);
+   });
+
+   test("blocks requests over per-minute limit", async () => {
+     const limits = getLimitsForEndpoint(TEST_ENDPOINT);
+
+     // Record requests up to the limit
+     for (let i = 0; i < limits.perMinute; i++) {
+       await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+     }
+
+     // Next request should be blocked
+     const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+     expect(result.allowed).toBe(false);
+     expect(result.remaining).toBe(0);
+   });
+
+   test("tracks per-agent limits separately", async () => {
+     const limits = getLimitsForEndpoint(TEST_ENDPOINT);
+
+     // Fill up Agent1's limit
+     for (let i = 0; i < limits.perMinute; i++) {
+       await limiter.recordRequest("Agent1", TEST_ENDPOINT);
+     }
+
+     // Agent1 should be blocked
+     const result1 = await limiter.checkLimit("Agent1", TEST_ENDPOINT);
+     expect(result1.allowed).toBe(false);
+
+     // Agent2 should still be allowed
+     const result2 = await limiter.checkLimit("Agent2", TEST_ENDPOINT);
+     expect(result2.allowed).toBe(true);
+   });
+
+   test("tracks per-endpoint limits separately", async () => {
+     const sendLimits = getLimitsForEndpoint("send");
+
+     // Fill up send limit
+     for (let i = 0; i < sendLimits.perMinute; i++) {
+       await limiter.recordRequest(TEST_AGENT, "send");
+     }
+
+     // send should be blocked
+     const sendResult = await limiter.checkLimit(TEST_AGENT, "send");
+     expect(sendResult.allowed).toBe(false);
+
+     // inbox should still be allowed
+     const inboxResult = await limiter.checkLimit(TEST_AGENT, "inbox");
+     expect(inboxResult.allowed).toBe(true);
+   });
+
+   test("reset clears all limits", async () => {
+     // Record some requests
+     await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+     await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+
+     // Reset
+     limiter.reset();
+
+     // Should have full limit available
+     const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+     const limits = getLimitsForEndpoint(TEST_ENDPOINT);
+     expect(result.remaining).toBe(limits.perMinute);
+   });
+
+   test("returns correct resetAt timestamp", async () => {
+     const limits = getLimitsForEndpoint(TEST_ENDPOINT);
+     const now = Date.now();
+
+     // Fill up limit
+     for (let i = 0; i < limits.perMinute; i++) {
+       await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+     }
+
+     const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+     expect(result.allowed).toBe(false);
+
+     // resetAt should be approximately 1 minute from the first request
+     expect(result.resetAt).toBeGreaterThan(now);
+     expect(result.resetAt).toBeLessThanOrEqual(now + 60_000 + 1000); // Allow 1s tolerance
+   });
+ });
+
+ // ============================================================================
+ // SqliteRateLimiter Tests
+ // ============================================================================
+
+ describe("SqliteRateLimiter", () => {
+   let limiter: SqliteRateLimiter;
+   let tempDir: string;
+
+   beforeEach(() => {
+     tempDir = createTempDir();
+     const dbPath = join(tempDir, "rate-limits.db");
+     limiter = new SqliteRateLimiter(dbPath);
+   });
+
+   afterEach(async () => {
+     await limiter.close();
+     cleanupTempDir(tempDir);
+   });
+
+   test("allows requests under limit", async () => {
+     const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+     expect(result.allowed).toBe(true);
+     expect(result.remaining).toBeGreaterThan(0);
+   });
+
+   test("blocks requests over per-minute limit", async () => {
+     const limits = getLimitsForEndpoint(TEST_ENDPOINT);
+
+     // Record requests up to the limit
+     for (let i = 0; i < limits.perMinute; i++) {
+       await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+     }
+
+     // Next request should be blocked
+     const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+     expect(result.allowed).toBe(false);
+     expect(result.remaining).toBe(0);
+   });
+
+ test("creates database directory if not exists", () => {
209
+ const nestedDir = join(tempDir, "nested", "path");
210
+ const dbPath = join(nestedDir, "rate-limits.db");
211
+
212
+ // Should not throw
213
+ const nestedLimiter = new SqliteRateLimiter(dbPath);
214
+ expect(existsSync(nestedDir)).toBe(true);
215
+ nestedLimiter.close();
216
+ });
+
+   test("persists data across instances", async () => {
+     const dbPath = join(tempDir, "persistent.db");
+
+     // First instance - record some requests
+     const limiter1 = new SqliteRateLimiter(dbPath);
+     await limiter1.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+     await limiter1.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+     await limiter1.close();
+
+     // Second instance - should see the recorded requests
+     const limiter2 = new SqliteRateLimiter(dbPath);
+     const result = await limiter2.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+     const limits = getLimitsForEndpoint(TEST_ENDPOINT);
+     expect(result.remaining).toBe(limits.perMinute - 2);
+     await limiter2.close();
+   });
+ });
+
+ // ============================================================================
+ // RedisRateLimiter Tests (skipped if Redis unavailable)
+ // ============================================================================
+
+ describe("RedisRateLimiter", async () => {
+   const redisAvailable = await isRedisAvailable();
+
+   test.skipIf(!redisAvailable)("allows requests under limit", async () => {
+     const redis = new Redis();
+     const limiter = new RedisRateLimiter(redis);
+
+     try {
+       // Clean up any existing keys
+       await redis.del(`ratelimit:${TEST_AGENT}:${TEST_ENDPOINT}:minute`);
+       await redis.del(`ratelimit:${TEST_AGENT}:${TEST_ENDPOINT}:hour`);
+
+       const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+       expect(result.allowed).toBe(true);
+       expect(result.remaining).toBeGreaterThan(0);
+     } finally {
+       await limiter.close();
+     }
+   });
+
+   test.skipIf(!redisAvailable)(
+     "blocks requests over per-minute limit",
+     async () => {
+       const redis = new Redis();
+       const limiter = new RedisRateLimiter(redis);
+
+       try {
+         // Clean up any existing keys
+         await redis.del(`ratelimit:${TEST_AGENT}:${TEST_ENDPOINT}:minute`);
+         await redis.del(`ratelimit:${TEST_AGENT}:${TEST_ENDPOINT}:hour`);
+
+         const limits = getLimitsForEndpoint(TEST_ENDPOINT);
+
+         // Record requests up to the limit
+         for (let i = 0; i < limits.perMinute; i++) {
+           await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+         }
+
+         // Next request should be blocked
+         const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+         expect(result.allowed).toBe(false);
+         expect(result.remaining).toBe(0);
+       } finally {
+         await limiter.close();
+       }
+     },
+   );
+
+   test.skipIf(!redisAvailable)("sets TTL on keys", async () => {
+     const redis = new Redis();
+     const limiter = new RedisRateLimiter(redis);
+
+     try {
+       // Clean up any existing keys
+       const minuteKey = `ratelimit:${TEST_AGENT}:${TEST_ENDPOINT}:minute`;
+       const hourKey = `ratelimit:${TEST_AGENT}:${TEST_ENDPOINT}:hour`;
+       await redis.del(minuteKey);
+       await redis.del(hourKey);
+
+       // Record a request
+       await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+
+       // Check TTL is set
+       const minuteTTL = await redis.ttl(minuteKey);
+       const hourTTL = await redis.ttl(hourKey);
+
+       expect(minuteTTL).toBeGreaterThan(0);
+       expect(minuteTTL).toBeLessThanOrEqual(120); // 2 minutes
+       expect(hourTTL).toBeGreaterThan(0);
+       expect(hourTTL).toBeLessThanOrEqual(7200); // 2 hours
+     } finally {
+       await limiter.close();
+     }
+   });
+ });
+
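The TTL assertions above (at most 120 s on the minute key, at most 7200 s on the hour key) are consistent with a fixed-window counter built from INCR plus EXPIRE. The sketch below illustrates that pattern with ioredis; it mirrors the key names from the tests but is an illustration of the technique the keys suggest, not the package's actual implementation.

import type Redis from "ioredis";

// Fixed-window counter sketch: one counter per (agent, endpoint, window).
// Key names mirror the test file; everything else is an assumption.
async function recordFixedWindow(redis: Redis, agent: string, endpoint: string): Promise<void> {
  const minuteKey = `ratelimit:${agent}:${endpoint}:minute`;
  const hourKey = `ratelimit:${agent}:${endpoint}:hour`;

  // INCR creates the key at 1 if it does not exist yet.
  const minuteCount = await redis.incr(minuteKey);
  const hourCount = await redis.incr(hourKey);

  // Attach a TTL only on the first increment so the window does not slide;
  // any value within the bounds asserted above (<= 120 s / <= 7200 s) would pass.
  if (minuteCount === 1) await redis.expire(minuteKey, 60);
  if (hourCount === 1) await redis.expire(hourKey, 3600);
}
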
+ // ============================================================================
+ // createRateLimiter Factory Tests
+ // ============================================================================
+
+ describe("createRateLimiter", () => {
+   let tempDir: string;
+
+   beforeEach(() => {
+     tempDir = createTempDir();
+     resetFallbackWarning();
+   });
+
+   afterEach(() => {
+     cleanupTempDir(tempDir);
+   });
+
+   test("creates InMemoryRateLimiter when backend is memory", async () => {
+     const limiter = await createRateLimiter({ backend: "memory" });
+     expect(limiter).toBeInstanceOf(InMemoryRateLimiter);
+     await limiter.close();
+   });
+
+   test("creates SqliteRateLimiter when backend is sqlite", async () => {
+     const dbPath = join(tempDir, "test.db");
+     const limiter = await createRateLimiter({
+       backend: "sqlite",
+       sqlitePath: dbPath,
+     });
+     expect(limiter).toBeInstanceOf(SqliteRateLimiter);
+     await limiter.close();
+   });
+
+   test("falls back to SQLite when Redis unavailable", async () => {
+     const dbPath = join(tempDir, "fallback.db");
+     const limiter = await createRateLimiter({
+       redisUrl: "redis://localhost:59999", // Non-existent port
+       sqlitePath: dbPath,
+     });
+
+     // Should fall back to SQLite
+     expect(limiter).toBeInstanceOf(SqliteRateLimiter);
+     await limiter.close();
+   });
+ });
+
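The factory tests above pin down the selection behavior: an explicit backend wins, and a Redis URL that cannot be reached falls back to SQLite (resetFallbackWarning() suggests that fallback is warned about once per process). A rough sketch of that decision flow, assuming the option names used in the tests and a connectivity probe like the isRedisAvailable helper, might look like this; the real createRateLimiter in ./rate-limiter may differ in detail.

import Redis from "ioredis";

// Decision-flow sketch only; option names mirror the tests above.
interface CreateOptionsSketch {
  backend?: "memory" | "sqlite";
  redisUrl?: string;
  sqlitePath?: string;
}

async function chooseBackend(options: CreateOptionsSketch): Promise<"memory" | "sqlite" | "redis"> {
  if (options.backend) return options.backend;

  if (options.redisUrl) {
    const redis = new Redis(options.redisUrl, {
      lazyConnect: true,
      connectTimeout: 1000,
      maxRetriesPerRequest: 1,
      retryStrategy: () => null,
    });
    try {
      await redis.connect();
      await redis.ping();
      await redis.quit();
      return "redis";
    } catch {
      // Unreachable Redis (e.g. the non-existent port used above) falls through.
      redis.disconnect();
    }
  }
  // The tests only cover the case where sqlitePath is provided;
  // treating a missing path as "memory" is an assumption.
  return options.sqlitePath ? "sqlite" : "memory";
}
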
+ // ============================================================================
+ // Configuration Tests
+ // ============================================================================
+
+ describe("Configuration", () => {
+   test("DEFAULT_LIMITS has all expected endpoints", () => {
+     const expectedEndpoints = [
+       "send",
+       "reserve",
+       "release",
+       "ack",
+       "inbox",
+       "read_message",
+       "summarize_thread",
+       "search",
+     ];
+
+     for (const endpoint of expectedEndpoints) {
+       expect(DEFAULT_LIMITS[endpoint]).toBeDefined();
+       expect(DEFAULT_LIMITS[endpoint].perMinute).toBeGreaterThan(0);
+       expect(DEFAULT_LIMITS[endpoint].perHour).toBeGreaterThan(0);
+     }
+   });
+
+   test("getLimitsForEndpoint returns defaults for known endpoints", () => {
+     const limits = getLimitsForEndpoint("send");
+     expect(limits.perMinute).toBe(DEFAULT_LIMITS.send.perMinute);
+     expect(limits.perHour).toBe(DEFAULT_LIMITS.send.perHour);
+   });
+
+   test("getLimitsForEndpoint returns fallback for unknown endpoints", () => {
+     const limits = getLimitsForEndpoint("unknown_endpoint");
+     expect(limits.perMinute).toBe(60);
+     expect(limits.perHour).toBe(600);
+   });
+
+   test("env vars override default limits", () => {
+     const originalMin = process.env.OPENCODE_RATE_LIMIT_SEND_PER_MIN;
+     const originalHour = process.env.OPENCODE_RATE_LIMIT_SEND_PER_HOUR;
+
+     try {
+       process.env.OPENCODE_RATE_LIMIT_SEND_PER_MIN = "100";
+       process.env.OPENCODE_RATE_LIMIT_SEND_PER_HOUR = "1000";
+
+       const limits = getLimitsForEndpoint("send");
+       expect(limits.perMinute).toBe(100);
+       expect(limits.perHour).toBe(1000);
+     } finally {
+       // Restore original values
+       if (originalMin !== undefined) {
+         process.env.OPENCODE_RATE_LIMIT_SEND_PER_MIN = originalMin;
+       } else {
+         delete process.env.OPENCODE_RATE_LIMIT_SEND_PER_MIN;
+       }
+       if (originalHour !== undefined) {
+         process.env.OPENCODE_RATE_LIMIT_SEND_PER_HOUR = originalHour;
+       } else {
+         delete process.env.OPENCODE_RATE_LIMIT_SEND_PER_HOUR;
+       }
+     }
+   });
+ });
+
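The environment-variable test above implies that limits are resolved per call, with OPENCODE_RATE_LIMIT_SEND_PER_MIN and OPENCODE_RATE_LIMIT_SEND_PER_HOUR taking precedence over DEFAULT_LIMITS, and with a 60/600 fallback for unknown endpoints. A small lookup sketch is below; only the `send` variables appear in the tests, so generalizing the naming rule to other endpoints is an assumption.

// Lookup sketch, not the package's getLimitsForEndpoint.
const FALLBACK_LIMITS = { perMinute: 60, perHour: 600 }; // asserted for unknown endpoints

function limitsForEndpointSketch(
  endpoint: string,
  defaults: Record<string, { perMinute: number; perHour: number }>,
): { perMinute: number; perHour: number } {
  const base = defaults[endpoint] ?? FALLBACK_LIMITS;
  const prefix = `OPENCODE_RATE_LIMIT_${endpoint.toUpperCase()}`;

  // Read an override from the environment, falling back to the default.
  const fromEnv = (name: string, fallback: number): number => {
    const raw = process.env[name];
    const parsed = raw === undefined ? NaN : Number.parseInt(raw, 10);
    return Number.isFinite(parsed) && parsed > 0 ? parsed : fallback;
  };

  return {
    perMinute: fromEnv(`${prefix}_PER_MIN`, base.perMinute),
    perHour: fromEnv(`${prefix}_PER_HOUR`, base.perHour),
  };
}
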
+ // ============================================================================
+ // Dual Window Tests
+ // ============================================================================
+
+ describe("Dual Window Enforcement", () => {
+   let limiter: InMemoryRateLimiter;
+
+   beforeEach(() => {
+     limiter = new InMemoryRateLimiter();
+   });
+
+   afterEach(async () => {
+     await limiter.close();
+   });
+
+   test("enforces both minute and hour limits", async () => {
+     // Use an endpoint with low limits for testing
+     // inbox has 60/min, 600/hour
+     const endpoint = "inbox";
+     const limits = getLimitsForEndpoint(endpoint);
+
+     // Record requests up to minute limit
+     for (let i = 0; i < limits.perMinute; i++) {
+       await limiter.recordRequest(TEST_AGENT, endpoint);
+     }
+
+     // Should be blocked by minute limit
+     const result = await limiter.checkLimit(TEST_AGENT, endpoint);
+     expect(result.allowed).toBe(false);
+   });
+
+   test("returns most restrictive remaining count", async () => {
+     // Record a few requests
+     await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+     await limiter.recordRequest(TEST_AGENT, TEST_ENDPOINT);
+
+     const result = await limiter.checkLimit(TEST_AGENT, TEST_ENDPOINT);
+     const limits = getLimitsForEndpoint(TEST_ENDPOINT);
+
+     // Remaining should be based on minute window (more restrictive)
+     expect(result.remaining).toBe(limits.perMinute - 2);
+   });
+ });
+ });