ship-safe 4.0.0 → 4.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -92,6 +92,7 @@ npx ship-safe audit .
92
92
  - `--html [file]` — custom HTML report path (default: `ship-safe-report.html`)
93
93
  - `--no-deps` — skip dependency audit
94
94
  - `--no-ai` — skip AI classification
95
+ - `--no-cache` — force full rescan (ignore cached results)
95
96
 
96
97
  ---
97
98
 
@@ -181,6 +182,54 @@ npx ship-safe mcp
181
182
 
182
183
  ---
183
184
 
185
+ ## Claude Code Plugin
186
+
187
+ Use Ship Safe directly inside Claude Code — no CLI needed:
188
+
189
+ ```bash
190
+ claude plugin add github:asamassekou10/ship-safe
191
+ ```
192
+
193
+ | Command | Description |
194
+ |---------|-------------|
195
+ | `/ship-safe` | Full security audit — 12 agents, remediation plan, auto-fix |
196
+ | `/ship-safe-scan` | Quick scan for leaked secrets |
197
+ | `/ship-safe-score` | Security health score (0-100) |
198
+
199
+ Claude interprets the results, explains findings in plain language, and can fix issues directly in your codebase.
200
+
201
+ ---
202
+
203
+ ## Incremental Scanning
204
+
205
+ Ship Safe caches file hashes and findings in `.ship-safe/context.json`. On subsequent runs, only changed files are re-scanned — unchanged files reuse cached results.
206
+
207
+ ```
208
+ ✔ [Phase 1/4] Secrets: 41 found (0 changed, 313 cached)
209
+ ```
210
+
211
+ - **~40% faster** on repeated scans
212
+ - **Auto-invalidation** — cache expires after 24 hours or when ship-safe updates
213
+ - **`--no-cache`** — force a full rescan anytime
214
+
215
+ The cache is stored in `.ship-safe/` which is automatically excluded from scans.
216
+
217
+ ---
218
+
219
+ ## Smart `.gitignore` Handling
220
+
221
+ Ship Safe respects your `.gitignore` for build output, caches, and vendor directories — but **always scans security-sensitive files** even if gitignored:
222
+
223
+ | Skipped (gitignore respected) | Always scanned (gitignore overridden) |
224
+ |-------------------------------|---------------------------------------|
225
+ | `node_modules/`, `dist/`, `build/` | `.env`, `.env.local`, `.env.production` |
226
+ | `*.log`, `*.pkl`, vendor dirs | `*.pem`, `*.key`, `*.p12` |
227
+ | Cache directories, IDE files | `credentials.json`, `*.secret` |
228
+
229
+ Why? Files like `.env` are gitignored *because* they contain secrets — which is exactly what a security scanner should catch.
230
+
231
+ ---
232
+
184
233
  ## Multi-LLM Support
185
234
 
186
235
  Ship Safe supports multiple AI providers for classification:
@@ -0,0 +1,496 @@
1
+ /**
2
+ * Ship Safe Unit Tests
3
+ * =====================
4
+ *
5
+ * Tests agent pattern matching, scoring engine, cache manager,
6
+ * deduplication, and ReDoS safety.
7
+ *
8
+ * Run: npm test
9
+ */
10
+
11
+ import { describe, it } from 'node:test';
12
+ import assert from 'node:assert/strict';
13
+ import fs from 'fs';
14
+ import path from 'path';
15
+ import os from 'os';
16
+
17
+ // =============================================================================
18
+ // HELPERS
19
+ // =============================================================================
20
+
21
/**
 * Create a throwaway directory containing a single file with the given
 * content, and hand back both paths so the caller can clean up afterwards.
 */
function writeTempFile(content, ext = '.js') {
  const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'shipsafe-test-'));
  const tempFile = path.join(tempDir, `test${ext}`);
  fs.writeFileSync(tempFile, content);
  return { dir: tempDir, file: tempFile };
}
30
+
31
/** Best-effort removal of a temp directory; any failure is deliberately ignored. */
function cleanup(dir) {
  try {
    fs.rmSync(dir, { recursive: true, force: true });
  } catch {
    // Ignored — leftover temp dirs are harmless and reaped by the OS.
  }
}
34
+
35
+ // =============================================================================
36
+ // INJECTION TESTER
37
+ // =============================================================================
38
+
39
describe('InjectionTester', async () => {
  const { InjectionTester } = await import('../agents/injection-tester.js');
  const agent = new InjectionTester();

  /** Scan a one-off source snippet with the agent and return its findings. */
  const scan = async (code, ext = '.js') => {
    const { dir, file } = writeTempFile(code, ext);
    try {
      return await agent.analyze({ rootPath: dir, files: [file], recon: {}, options: {} });
    } finally {
      cleanup(dir);
    }
  };

  it('detects SQL injection via template literal', async () => {
    const findings = await scan('const q = `SELECT * FROM users WHERE id = ${userId}`;');
    assert.ok(findings.length > 0, 'Should detect SQL injection');
    assert.ok(findings.some(f => f.rule === 'SQL_INJECTION_TEMPLATE_LITERAL'));
  });

  it('detects eval() with user input', async () => {
    const findings = await scan('eval(req.body.code);');
    assert.ok(findings.some(f => f.rule === 'CODE_INJECTION_EVAL'));
  });

  it('detects command injection via exec template', async () => {
    const findings = await scan('execSync(`rm -rf ${userPath}`);');
    assert.ok(findings.some(f => f.rule === 'CMD_INJECTION_EXEC_TEMPLATE'));
  });

  it('detects Python f-string SQL injection', async () => {
    const findings = await scan('cursor.execute(f"SELECT * FROM users WHERE name = {name}")', '.py');
    assert.ok(findings.some(f => f.rule === 'PYTHON_SQL_FSTRING'));
  });

  it('detects Python subprocess shell=True', async () => {
    const findings = await scan('subprocess.run(cmd, shell=True)', '.py');
    assert.ok(findings.some(f => f.rule === 'PYTHON_SUBPROCESS_SHELL'));
  });

  it('returns no findings for safe code', async () => {
    const findings = await scan('const x = 1 + 2;\nconsole.log(x);');
    // Low-confidence generic matches are acceptable noise; only higher
    // confidence findings count against safe code.
    const significant = findings.filter(f => f.confidence !== 'low');
    assert.equal(significant.length, 0, 'Safe code should have no significant findings');
  });
});
94
+
95
+ // =============================================================================
96
+ // AUTH BYPASS AGENT
97
+ // =============================================================================
98
+
99
describe('AuthBypassAgent', async () => {
  const { AuthBypassAgent } = await import('../agents/auth-bypass-agent.js');
  const agent = new AuthBypassAgent();

  /** Scan a one-off snippet and assert at least one finding carries `rule`. */
  const expectRule = async (code, ext, rule) => {
    const { dir, file } = writeTempFile(code, ext);
    try {
      const findings = await agent.analyze({ rootPath: dir, files: [file], recon: {}, options: {} });
      assert.ok(findings.some(f => f.rule === rule));
    } finally {
      cleanup(dir);
    }
  };

  it('detects JWT algorithm none', async () => {
    await expectRule('jwt.verify(token, secret, { algorithms: ["none"] });', '.js', 'JWT_ALG_NONE');
  });

  it('detects Django DEBUG = True', async () => {
    await expectRule('DEBUG = True\nALLOWED_HOSTS = ["*"]', '.py', 'DJANGO_DEBUG_TRUE');
  });

  it('detects Flask hardcoded secret key', async () => {
    await expectRule('app.secret_key = "mysecret123"', '.py', 'FLASK_SECRET_KEY_HARDCODED');
  });

  it('detects TLS reject unauthorized disabled', async () => {
    await expectRule('process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";', '.js', 'TLS_REJECT_UNAUTHORIZED');
  });
});
135
+
136
+ // =============================================================================
137
+ // API FUZZER
138
+ // =============================================================================
139
+
140
describe('APIFuzzer', async () => {
  const { APIFuzzer } = await import('../agents/api-fuzzer.js');
  const agent = new APIFuzzer();

  /** Scan a one-off snippet and assert at least one finding carries `rule`. */
  const expectRule = async (code, rule) => {
    const { dir, file } = writeTempFile(code);
    try {
      const findings = await agent.analyze({ rootPath: dir, files: [file], recon: {}, options: {} });
      assert.ok(findings.some(f => f.rule === rule));
    } finally {
      cleanup(dir);
    }
  };

  it('detects spread request body (mass assignment)', async () => {
    await expectRule('const data = { ...req.body };', 'API_SPREAD_BODY');
  });

  it('detects API key in URL', async () => {
    await expectRule('const url = `https://api.example.com?key=${apiKey}`;', 'API_KEY_IN_URL');
  });

  it('detects debug endpoint', async () => {
    await expectRule('app.get("/debug/info", handler);', 'API_DEBUG_ENDPOINT');
  });
});
168
+
169
+ // =============================================================================
170
+ // SSRF PROBER
171
+ // =============================================================================
172
+
173
describe('SSRFProber', async () => {
  const { SSRFProber } = await import('../agents/ssrf-prober.js');
  const agent = new SSRFProber();

  /** Scan a one-off snippet and assert at least one finding carries `rule`. */
  const expectRule = async (code, rule) => {
    const { dir, file } = writeTempFile(code);
    try {
      const findings = await agent.analyze({ rootPath: dir, files: [file], recon: {}, options: {} });
      assert.ok(findings.some(f => f.rule === rule));
    } finally {
      cleanup(dir);
    }
  };

  it('detects user input in fetch()', async () => {
    await expectRule('const res = await fetch(req.query.url);', 'SSRF_USER_URL_FETCH');
  });

  it('detects cloud metadata endpoint', async () => {
    await expectRule('const meta = await fetch("http://169.254.169.254/latest/meta-data/");', 'SSRF_CLOUD_METADATA');
  });
});
193
+
194
+ // =============================================================================
195
+ // LLM RED TEAM
196
+ // =============================================================================
197
+
198
describe('LLMRedTeam', async () => {
  const { LLMRedTeam } = await import('../agents/llm-redteam.js');
  const agent = new LLMRedTeam();

  /** Scan a one-off snippet and assert at least one finding carries `rule`. */
  const expectRule = async (code, rule) => {
    const { dir, file } = writeTempFile(code);
    try {
      const findings = await agent.analyze({ rootPath: dir, files: [file], recon: {}, options: {} });
      assert.ok(findings.some(f => f.rule === rule));
    } finally {
      cleanup(dir);
    }
  };

  it('detects LLM output to eval', async () => {
    await expectRule('eval(completion.content);', 'LLM_OUTPUT_TO_EVAL');
  });

  it('detects system prompt in client code', async () => {
    await expectRule('const systemPrompt = "You are a helpful assistant";', 'LLM_SYSTEM_PROMPT_CLIENT');
  });
});
218
+
219
+ // =============================================================================
220
+ // CONFIG AUDITOR
221
+ // =============================================================================
222
+
223
describe('ConfigAuditor', async () => {
  const { ConfigAuditor } = await import('../agents/config-auditor.js');
  const agent = new ConfigAuditor();

  /** Scan a one-off snippet and assert at least one finding carries `rule`. */
  const expectRule = async (code, ext, rule) => {
    const { dir, file } = writeTempFile(code, ext);
    try {
      const findings = await agent.analyze({ rootPath: dir, files: [file], recon: {}, options: {} });
      assert.ok(findings.some(f => f.rule === rule));
    } finally {
      cleanup(dir);
    }
  };

  it('detects CORS wildcard', async () => {
    await expectRule('app.use(cors({ origin: "*" }));', '.js', 'CORS_WILDCARD');
  });

  it('detects Go SQL sprintf', async () => {
    await expectRule('query := fmt.Sprintf("SELECT * FROM users WHERE id = %s", id)', '.go', 'GO_SQL_SPRINTF');
  });

  it('detects Rust unsafe block', async () => {
    // Relies on the config auditor treating .go/.rs as code file extensions.
    await expectRule('unsafe {\n ptr::read(p)\n}', '.rs', 'RUST_UNSAFE_BLOCK');
  });
});
252
+
253
+ // =============================================================================
254
+ // SCORING ENGINE
255
+ // =============================================================================
256
+
257
describe('ScoringEngine', async () => {
  const { ScoringEngine } = await import('../agents/scoring-engine.js');

  it('computes perfect score with no findings', () => {
    const result = new ScoringEngine().compute([], []);
    assert.equal(result.score, 100);
    assert.equal(result.grade.letter, 'A');
  });

  it('deducts for critical findings (capped at category weight)', () => {
    const critical = [
      { severity: 'critical', category: 'secrets', confidence: 'high' },
    ];
    const result = new ScoringEngine().compute(critical, []);
    assert.ok(result.score < 100, 'Score should decrease with critical finding');
    // The 25-pt critical deduction is clamped to the secrets category weight (15).
    assert.equal(result.categories.secrets.deduction, 15);
  });

  it('applies confidence multiplier', () => {
    const engine = new ScoringEngine();
    const highScore = engine.compute(
      [{ severity: 'high', category: 'injection', confidence: 'high' }], []).score;
    const lowScore = engine.compute(
      [{ severity: 'high', category: 'injection', confidence: 'low' }], []).score;
    assert.ok(lowScore > highScore, 'Low confidence should deduct less');
  });

  it('caps deduction at category weight', () => {
    // Ten criticals in secrets would be 250 pts raw, but the category caps at 15.
    const flood = Array.from({ length: 10 }, () => (
      { severity: 'critical', category: 'secrets', confidence: 'high' }
    ));
    const result = new ScoringEngine().compute(flood, []);
    assert.equal(result.categories.secrets.deduction, 15);
  });

  it('handles dependency vulnerabilities', () => {
    const depVulns = [{ severity: 'critical' }, { severity: 'high' }];
    const result = new ScoringEngine().compute([], depVulns);
    assert.ok(result.score < 100);
    assert.ok(result.categories.deps.deduction > 0);
  });

  it('saves and loads history', () => {
    const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'shipsafe-score-'));
    try {
      const engine = new ScoringEngine();
      const result = engine.compute([], []);
      engine.saveToHistory(dir, result);
      engine.saveToHistory(dir, result);

      assert.equal(engine.loadHistory(dir).length, 2);

      // Same score twice → flat trend.
      const trend = engine.getTrend(dir, 100);
      assert.ok(trend);
      assert.equal(trend.diff, 0);
      assert.equal(trend.direction, 'unchanged');
    } finally { cleanup(dir); }
  });
});
322
+
323
+ // =============================================================================
324
+ // CACHE MANAGER
325
+ // =============================================================================
326
+
327
describe('CacheManager', async () => {
  const { CacheManager } = await import('../utils/cache-manager.js');

  /** Make a temp project dir, optionally seeding it with a test.js file. */
  const makeProject = (prefix, content) => {
    const dir = fs.mkdtempSync(path.join(os.tmpdir(), prefix));
    let file = null;
    if (content !== undefined) {
      file = path.join(dir, 'test.js');
      fs.writeFileSync(file, content);
    }
    return { dir, file };
  };

  it('save/load/diff cycle works', () => {
    const { dir, file: testFile } = makeProject('shipsafe-cache-', 'const x = 1;');
    try {
      const cache = new CacheManager(dir);
      const findings = [{ file: testFile, line: 1, rule: 'TEST', severity: 'low', category: 'test' }];
      cache.save([testFile], findings, null, { score: 90, grade: { letter: 'A' } });

      const loaded = cache.load();
      assert.ok(loaded, 'Cache should load successfully');
      assert.equal(loaded.stats.totalFiles, 1);

      // Same file set, untouched content → everything served from cache.
      const diff = cache.diff([testFile]);
      assert.equal(diff.changedFiles.length, 0);
      assert.equal(diff.unchangedCount, 1);
      assert.equal(diff.cachedFindings.length, 1);
    } finally { cleanup(dir); }
  });

  it('detects changed files', () => {
    const { dir, file: testFile } = makeProject('shipsafe-cache-', 'const x = 1;');
    try {
      const cache = new CacheManager(dir);
      cache.save([testFile], [], null, null);
      cache.load();

      // Rewriting the file alters its hash, so it must show up as modified.
      fs.writeFileSync(testFile, 'const x = 2; // changed');
      const diff = cache.diff([testFile]);
      assert.equal(diff.changedFiles.length, 1);
      assert.equal(diff.modifiedCount, 1);
    } finally { cleanup(dir); }
  });

  it('invalidates cache', () => {
    const { dir, file: testFile } = makeProject('shipsafe-cache-', 'x');
    try {
      const cache = new CacheManager(dir);
      cache.save([testFile], [], null, null);
      assert.ok(cache.load());

      cache.invalidate();
      assert.equal(cache.load(), null);
    } finally { cleanup(dir); }
  });

  it('LLM cache save/load works', () => {
    const { dir } = makeProject('shipsafe-llm-');
    try {
      const cache = new CacheManager(dir);
      const finding = { file: '/test.js', line: 1, rule: 'TEST', matched: 'x' };
      const key = cache.getLLMCacheKey(finding);

      cache.saveLLMClassifications({
        [key]: { classification: 'true_positive', reason: 'test', fix: 'fix it', cachedAt: new Date().toISOString() },
      });

      const loaded = cache.loadLLMClassifications();
      assert.ok(loaded[key]);
      assert.equal(loaded[key].classification, 'true_positive');
    } finally { cleanup(dir); }
  });
});
404
+
405
+ // =============================================================================
406
+ // REDOS SAFETY
407
+ // =============================================================================
408
+
409
describe('ReDoS Safety', async () => {
  // Use named imports for consistency with the other suites in this file —
  // every agent module is consumed via its named export elsewhere above.
  const { InjectionTester } = await import('../agents/injection-tester.js');
  const { AuthBypassAgent } = await import('../agents/auth-bypass-agent.js');
  const { APIFuzzer } = await import('../agents/api-fuzzer.js');
  const { LLMRedTeam } = await import('../agents/llm-redteam.js');
  const { SSRFProber } = await import('../agents/ssrf-prober.js');
  const { ConfigAuditor } = await import('../agents/config-auditor.js');

  // Adversarial inputs that trigger catastrophic backtracking in vulnerable patterns
  const adversarialInputs = [
    'a'.repeat(100),
    '/' + '\\s*'.repeat(50),
    '{' + ' '.repeat(100) + '}',
    'req.body' + '.x'.repeat(50),
    'http://' + 'a'.repeat(100) + '/path',
    '; '.repeat(100),
    'cookie=' + 'a=b; '.repeat(50),
  ];

  // Title matches the actual asserted budget (2000ms), not the previously
  // claimed 50ms, which the assertion never enforced.
  it('all agent patterns complete within 2s on adversarial input', async () => {
    const agents = [
      new InjectionTester(),
      new AuthBypassAgent(),
      new APIFuzzer(),
      new LLMRedTeam(),
      new SSRFProber(),
      new ConfigAuditor(),
    ];

    for (const input of adversarialInputs) {
      const { dir, file } = writeTempFile(input);
      try {
        for (const agent of agents) {
          const start = performance.now();
          await agent.analyze({ rootPath: dir, files: [file], recon: {}, options: {} });
          const elapsed = performance.now() - start;
          assert.ok(
            elapsed < 2000, // 2s generous limit per agent per file
            `${agent.name} took ${elapsed.toFixed(0)}ms on adversarial input (limit: 2000ms)`
          );
        }
      } finally { cleanup(dir); }
    }
  });
});
455
+
456
+ // =============================================================================
457
+ // ORCHESTRATOR
458
+ // =============================================================================
459
+
460
describe('Orchestrator', async () => {
  const { Orchestrator } = await import('../agents/orchestrator.js');

  it('handles agent timeout gracefully', async () => {
    const orchestrator = new Orchestrator();

    // Stub agent whose analyze() takes far longer than the run timeout.
    orchestrator.register({
      name: 'SlowAgent',
      category: 'test',
      analyze: () => new Promise(resolve => setTimeout(resolve, 60000)),
    });

    const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'shipsafe-orch-'));
    fs.writeFileSync(path.join(dir, 'test.js'), 'x');

    try {
      // Deliberately tiny timeout so the slow agent is cut off.
      const result = await orchestrator.runAll(dir, { quiet: true, timeout: 100 });
      assert.equal(result.agentResults.length, 1);
      const [slow] = result.agentResults;
      assert.equal(slow.success, false);
      assert.ok(slow.error.includes('timed out'));
    } finally { cleanup(dir); }
  });

  it('deduplicates findings', () => {
    // Two identical entries plus one distinct-by-line entry → two survivors.
    const duplicated = [
      { file: 'a.js', line: 1, rule: 'R1', severity: 'high' },
      { file: 'a.js', line: 1, rule: 'R1', severity: 'high' },
      { file: 'a.js', line: 2, rule: 'R1', severity: 'high' },
    ];
    assert.equal(new Orchestrator().deduplicate(duplicated).length, 2);
  });
});