ship-safe 9.0.0 → 9.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +18 -15
- package/cli/agents/agentic-supply-chain-agent.js +463 -0
- package/cli/agents/deep-analyzer.js +26 -16
- package/cli/agents/index.js +3 -0
- package/cli/agents/orchestrator.js +6 -4
- package/cli/agents/stateful-watcher.js +241 -0
- package/cli/agents/swarm-orchestrator.js +238 -0
- package/cli/bin/ship-safe.js +8 -3
- package/cli/commands/red-team.js +62 -19
- package/cli/commands/rotate.js +200 -3
- package/cli/commands/watch.js +134 -0
- package/cli/providers/llm-provider.js +56 -1
- package/package.json +2 -2
|
@@ -233,8 +233,9 @@ export class DeepAnalyzer {
|
|
|
233
233
|
this.maxFileChars = this.largeContext ? MAX_FILE_CHARS_LARGE_CTX : MAX_FILE_CHARS_DEFAULT;
|
|
234
234
|
this.batchSize = this.largeContext ? 15 : 5;
|
|
235
235
|
|
|
236
|
-
// Whether we can use multi-tier
|
|
236
|
+
// Whether we can use multi-tier structured output routing
|
|
237
237
|
this._isAnthropic = this.provider?.name === 'Anthropic';
|
|
238
|
+
this._supportsTools = this._isAnthropic || this.provider?.supportsStructuredOutput === true;
|
|
238
239
|
}
|
|
239
240
|
|
|
240
241
|
/**
|
|
@@ -294,7 +295,7 @@ export class DeepAnalyzer {
|
|
|
294
295
|
toAnalyze.length = Math.max(1, affordable);
|
|
295
296
|
}
|
|
296
297
|
|
|
297
|
-
const results = this.
|
|
298
|
+
const results = this._supportsTools
|
|
298
299
|
? await this._analyzeTiered(toAnalyze, context)
|
|
299
300
|
: await this._analyzeSingleTier(toAnalyze, context);
|
|
300
301
|
|
|
@@ -333,10 +334,16 @@ export class DeepAnalyzer {
|
|
|
333
334
|
async _analyzeTiered(findings, context) {
|
|
334
335
|
const results = new Map();
|
|
335
336
|
|
|
337
|
+
// Model selection: Anthropic uses tier-specific models; others use provider's default
|
|
338
|
+
const tier1Model = this._isAnthropic ? TIER1_MODEL : null;
|
|
339
|
+
const tier2Model = this._isAnthropic ? TIER2_MODEL : null;
|
|
340
|
+
const tier3Model = this._isAnthropic ? TIER3_MODEL : null;
|
|
341
|
+
const providerLabel = this._isAnthropic ? 'Haiku' : this.provider.name;
|
|
342
|
+
|
|
336
343
|
// ── Tier 1: Haiku triage ────────────────────────────────────────────────
|
|
337
|
-
if (this.verbose) console.log(` [Tier 1] Triaging ${findings.length} findings with
|
|
344
|
+
if (this.verbose) console.log(` [Tier 1] Triaging ${findings.length} findings with ${providerLabel}...`);
|
|
338
345
|
|
|
339
|
-
const triageMap = await this._runTriage(findings, context);
|
|
346
|
+
const triageMap = await this._runTriage(findings, context, tier1Model);
|
|
340
347
|
|
|
341
348
|
const toReview = findings.filter(f => triageMap.get(this._findingId(f)) === 'review');
|
|
342
349
|
const toEscalate = findings.filter(f => triageMap.get(this._findingId(f)) === 'escalate');
|
|
@@ -350,16 +357,18 @@ export class DeepAnalyzer {
|
|
|
350
357
|
|
|
351
358
|
// ── Tier 2: Sonnet deep analysis ────────────────────────────────────────
|
|
352
359
|
if (toReview.length > 0 && this.spentCents < this.budgetCents) {
|
|
353
|
-
|
|
354
|
-
|
|
360
|
+
const tier2Label = this._isAnthropic ? 'Sonnet' : this.provider.name;
|
|
361
|
+
if (this.verbose) console.log(` [Tier 2] Deep-analyzing ${toReview.length} findings with ${tier2Label}...`);
|
|
362
|
+
const tier2Results = await this._runDeepAnalysis(toReview, context, tier2Model);
|
|
355
363
|
for (const [id, analysis] of tier2Results) results.set(id, analysis);
|
|
356
364
|
this._tier2Count += toReview.length;
|
|
357
365
|
}
|
|
358
366
|
|
|
359
367
|
// ── Tier 3: Opus exploit chain ──────────────────────────────────────────
|
|
360
368
|
if (toEscalate.length > 0 && this.spentCents < this.budgetCents) {
|
|
361
|
-
|
|
362
|
-
|
|
369
|
+
const tier3Label = this._isAnthropic ? 'Opus' : this.provider.name;
|
|
370
|
+
if (this.verbose) console.log(` [Tier 3] Running exploit-chain analysis on ${toEscalate.length} findings with ${tier3Label}...`);
|
|
371
|
+
const tier3Results = await this._runExploitChain(toEscalate, context, tier3Model);
|
|
363
372
|
for (const [id, analysis] of tier3Results) results.set(id, analysis);
|
|
364
373
|
this._tier3Count += toEscalate.length;
|
|
365
374
|
}
|
|
@@ -369,7 +378,7 @@ export class DeepAnalyzer {
|
|
|
369
378
|
}
|
|
370
379
|
|
|
371
380
|
/** Tier 1: quick triage — returns Map<findingId, 'skip'|'review'|'escalate'> */
|
|
372
|
-
async _runTriage(findings, context) {
|
|
381
|
+
async _runTriage(findings, context, model = null) {
|
|
373
382
|
const triageMap = new Map();
|
|
374
383
|
// Default everything to 'review' so nothing is silently dropped on error
|
|
375
384
|
for (const f of findings) triageMap.set(this._findingId(f), 'review');
|
|
@@ -399,7 +408,7 @@ export class DeepAnalyzer {
|
|
|
399
408
|
prompt,
|
|
400
409
|
'triage_findings',
|
|
401
410
|
TRIAGE_SCHEMA,
|
|
402
|
-
{ maxTokens: 1024, model:
|
|
411
|
+
{ maxTokens: 1024, ...(model ? { model } : {}) }
|
|
403
412
|
);
|
|
404
413
|
|
|
405
414
|
this._trackCost(prompt.length, JSON.stringify(result || '').length);
|
|
@@ -418,7 +427,7 @@ export class DeepAnalyzer {
|
|
|
418
427
|
}
|
|
419
428
|
|
|
420
429
|
/** Tier 2: deep taint analysis — returns Map<findingId, analysis> */
|
|
421
|
-
async _runDeepAnalysis(findings, context, model =
|
|
430
|
+
async _runDeepAnalysis(findings, context, model = null) {
|
|
422
431
|
const results = new Map();
|
|
423
432
|
|
|
424
433
|
for (let i = 0; i < findings.length; i += this.batchSize) {
|
|
@@ -445,7 +454,7 @@ export class DeepAnalyzer {
|
|
|
445
454
|
prompt,
|
|
446
455
|
'report_analysis',
|
|
447
456
|
DEEP_ANALYSIS_SCHEMA,
|
|
448
|
-
{ maxTokens: 1500, model }
|
|
457
|
+
{ maxTokens: 1500, ...(model ? { model } : {}) }
|
|
449
458
|
);
|
|
450
459
|
|
|
451
460
|
this._trackCost(prompt.length, JSON.stringify(result || '').length);
|
|
@@ -467,7 +476,7 @@ export class DeepAnalyzer {
|
|
|
467
476
|
}
|
|
468
477
|
|
|
469
478
|
/** Tier 3: exploit-chain analysis — returns Map<findingId, analysis> */
|
|
470
|
-
async _runExploitChain(findings, context) {
|
|
479
|
+
async _runExploitChain(findings, context, model = null) {
|
|
471
480
|
const results = new Map();
|
|
472
481
|
|
|
473
482
|
// Single findings per call for maximum depth
|
|
@@ -494,7 +503,7 @@ export class DeepAnalyzer {
|
|
|
494
503
|
prompt,
|
|
495
504
|
'report_exploit_chain',
|
|
496
505
|
EXPLOIT_SCHEMA,
|
|
497
|
-
{ maxTokens: 2048, model:
|
|
506
|
+
{ maxTokens: 2048, ...(model ? { model } : {}) }
|
|
498
507
|
);
|
|
499
508
|
|
|
500
509
|
this._trackCost(prompt.length, JSON.stringify(result || '').length);
|
|
@@ -506,7 +515,7 @@ export class DeepAnalyzer {
|
|
|
506
515
|
if (this.verbose) console.log(` [Tier 3] Failed for ${item.findingId}: ${err.message}`);
|
|
507
516
|
// Fallback to Tier 2 analysis on error
|
|
508
517
|
try {
|
|
509
|
-
const fallback = await this._runDeepAnalysis([finding], context, TIER2_MODEL);
|
|
518
|
+
const fallback = await this._runDeepAnalysis([finding], context, this._isAnthropic ? TIER2_MODEL : null);
|
|
510
519
|
for (const [id, analysis] of fallback) results.set(id, analysis);
|
|
511
520
|
} catch { /* ignore */ }
|
|
512
521
|
}
|
|
@@ -689,7 +698,8 @@ export class DeepAnalyzer {
|
|
|
689
698
|
spentCents: Math.round(this.spentCents * 100) / 100,
|
|
690
699
|
budgetCents: this.budgetCents,
|
|
691
700
|
provider: this.provider?.name || 'none',
|
|
692
|
-
multiTier: this.
|
|
701
|
+
multiTier: this._supportsTools,
|
|
702
|
+
isAnthropic: this._isAnthropic,
|
|
693
703
|
};
|
|
694
704
|
}
|
|
695
705
|
}
|
package/cli/agents/index.js
CHANGED
|
@@ -31,6 +31,7 @@ export { LegalRiskAgent, LEGALLY_RISKY_PACKAGES } from './legal-risk-agent.js';
|
|
|
31
31
|
export { ManagedAgentScanner } from './managed-agent-scanner.js';
|
|
32
32
|
export { HermesSecurityAgent } from './hermes-security-agent.js';
|
|
33
33
|
export { AgentAttestationAgent } from './agent-attestation-agent.js';
|
|
34
|
+
export { AgenticSupplyChainAgent } from './agentic-supply-chain-agent.js';
|
|
34
35
|
export { ABOMGenerator } from './abom-generator.js';
|
|
35
36
|
export { VerifierAgent } from './verifier-agent.js';
|
|
36
37
|
export { DeepAnalyzer } from './deep-analyzer.js';
|
|
@@ -70,6 +71,7 @@ import { MemoryPoisoningAgent as MemoryPoisoningAgentClass } from './memory-pois
|
|
|
70
71
|
import { ManagedAgentScanner as ManagedAgentScannerClass } from './managed-agent-scanner.js';
|
|
71
72
|
import { HermesSecurityAgent as HermesSecurityAgentClass } from './hermes-security-agent.js';
|
|
72
73
|
import { AgentAttestationAgent as AgentAttestationAgentClass } from './agent-attestation-agent.js';
|
|
74
|
+
import { AgenticSupplyChainAgent as AgenticSupplyChainAgentClass } from './agentic-supply-chain-agent.js';
|
|
73
75
|
import { loadPlugins } from '../utils/plugin-loader.js';
|
|
74
76
|
|
|
75
77
|
const BUILT_IN_AGENTS = () => [
|
|
@@ -95,6 +97,7 @@ const BUILT_IN_AGENTS = () => [
|
|
|
95
97
|
new ManagedAgentScannerClass(),
|
|
96
98
|
new HermesSecurityAgentClass(),
|
|
97
99
|
new AgentAttestationAgentClass(),
|
|
100
|
+
new AgenticSupplyChainAgentClass(),
|
|
98
101
|
];
|
|
99
102
|
|
|
100
103
|
/** Synchronous build — no plugin support. Used by legacy callers. */
|
|
@@ -235,12 +235,14 @@ export class Orchestrator {
|
|
|
235
235
|
const stats = analyzer.getStats();
|
|
236
236
|
if (deepSpinner) {
|
|
237
237
|
if (stats.multiTier) {
|
|
238
|
+
const providerName = analyzer.provider?.name || 'unknown';
|
|
239
|
+
const cascade = stats.isAnthropic !== false ? 'Haiku→Sonnet→Opus' : `${providerName} (3-tier)`;
|
|
238
240
|
const tierNote = stats.tier3Count > 0
|
|
239
|
-
? `, ${stats.tier3Count} escalated to
|
|
240
|
-
: stats.tier2Count > 0 ? `, ${stats.tier2Count} via
|
|
241
|
+
? `, ${stats.tier3Count} escalated to tier-3`
|
|
242
|
+
: stats.tier2Count > 0 ? `, ${stats.tier2Count} via tier-2` : '';
|
|
241
243
|
const skipNote = stats.skippedCount > 0 ? `, ${stats.skippedCount} triaged away` : '';
|
|
242
244
|
deepSpinner.succeed(chalk.green(
|
|
243
|
-
`Deep analysis (
|
|
245
|
+
`Deep analysis (${cascade}): ${stats.analyzedCount} analyzed${tierNote}${skipNote} (${stats.spentCents}¢)`
|
|
244
246
|
));
|
|
245
247
|
} else {
|
|
246
248
|
deepSpinner.succeed(chalk.green(
|
|
@@ -252,7 +254,7 @@ export class Orchestrator {
|
|
|
252
254
|
if (deepSpinner) deepSpinner.fail(chalk.yellow(`Deep analysis failed: ${err.message}`));
|
|
253
255
|
}
|
|
254
256
|
} else if (!quiet) {
|
|
255
|
-
console.log(chalk.gray(' Deep analysis: no LLM provider found (set ANTHROPIC_API_KEY or use --local)'));
|
|
257
|
+
console.log(chalk.gray(' Deep analysis: no LLM provider found (set ANTHROPIC_API_KEY, MOONSHOT_API_KEY, or use --local)'));
|
|
256
258
|
}
|
|
257
259
|
}
|
|
258
260
|
|
|
@@ -0,0 +1,241 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* StatefulWatcher — Persistent K2.6 Security Session
|
|
3
|
+
* ====================================================
|
|
4
|
+
*
|
|
5
|
+
* Keeps a Kimi K2.6 conversation thread open across file-change events.
|
|
6
|
+
* Each scan sends only the diff — not the full codebase — so the model
|
|
7
|
+
* builds understanding incrementally rather than restarting from scratch.
|
|
8
|
+
*
|
|
9
|
+
* Advantages over stateless watch:
|
|
10
|
+
* - No duplicate findings on repeated scans of unchanged files
|
|
11
|
+
* - Model understands which files are already clean vs. risky
|
|
12
|
+
* - Diffs are small → faster, cheaper per event
|
|
13
|
+
* - K2.6's 12h+ session length handles full work sessions without reset
|
|
14
|
+
*
|
|
15
|
+
* USAGE (via watch command):
|
|
16
|
+
* npx ship-safe watch . --deep --stateful
|
|
17
|
+
* npx ship-safe watch . --deep --stateful --provider kimi
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
import fs from 'fs';
|
|
21
|
+
import path from 'path';
|
|
22
|
+
import { createProvider, autoDetectProvider } from '../providers/llm-provider.js';
|
|
23
|
+
import { createFinding } from './base-agent.js';
|
|
24
|
+
|
|
25
|
+
// Max chars of diff content per event (keeps each incremental update cheap)
const MAX_DIFF_CHARS = 20_000;

// =============================================================================
// STATEFUL WATCHER
// =============================================================================

export class StatefulWatcher {
  /**
   * Persistent security-monitoring session. One conversation thread is kept
   * open across file-change events; each scan sends only the changed files,
   * so the model accumulates context instead of restarting from scratch.
   *
   * @param {object} options
   * @param {object} options.provider — LLM provider (Kimi preferred)
   * @param {string} options.rootPath — project root, used to relativize paths
   * @param {boolean} [options.verbose=false]
   */
  constructor(options = {}) {
    this.provider = options.provider;
    this.rootPath = options.rootPath;
    this.verbose = options.verbose || false;

    // Persistent conversation thread. Invariant: strictly alternating
    // user/assistant turns — the catch blocks below preserve this on error.
    this._messages = [];
    this._scanCount = 0;
    this._baselineSet = false;
  }

  /**
   * Factory: auto-detect a provider (defaults to Kimi / kimi-k2.6) and build
   * a watcher, or return null when no provider is available.
   */
  static create(rootPath, options = {}) {
    const provider = autoDetectProvider(rootPath, {
      provider: options.provider || 'kimi',
      model: options.model || 'kimi-k2.6',
    });

    if (!provider) return null;
    return new StatefulWatcher({ provider, rootPath, ...options });
  }

  /**
   * Set the initial baseline — called once on watcher start.
   * The model receives a codebase summary and primes its security context.
   *
   * @param {object} recon — Output from ReconAgent
   * @param {string[]} files — All scannable files
   */
  async setBaseline(recon, files) {
    const summary = this._buildReconSummary(recon);
    const fileList = files
      .slice(0, 200)
      .map(f => path.relative(this.rootPath, f))
      .join('\n');

    const baselineMsg = `You are a persistent security monitor for this codebase. I will send you file changes as they happen. For each change, identify new security issues introduced by that specific change.

Project context:
${summary}

File inventory (${files.length} total):
${fileList}

Respond to each update with a JSON array of findings. Use this format:
[{"file":"<relative path>","line":<number>,"severity":"critical|high|medium|low","rule":"<rule-id>","title":"<title>","description":"<description>","remediation":"<fix>"}]

If no new issues are introduced by the change, respond with an empty array: []
Never include issues you already reported in previous messages.`;

    this._messages.push({ role: 'user', content: baselineMsg });

    try {
      const ack = await this._callProvider('You are a security expert. Acknowledge you understand the codebase context.', this._messages);
      this._messages.push({ role: 'assistant', content: ack });
      this._baselineSet = true;
      if (this.verbose) console.log(` [Stateful] Baseline set. Provider: ${this.provider.name}`);
    } catch (err) {
      // FIX: drop the un-acknowledged baseline turn so a retry starts clean
      // and the thread never carries a user message without a reply.
      this._messages.pop();
      if (this.verbose) console.log(` [Stateful] Baseline failed: ${err.message}`);
    }
  }

  /**
   * Analyze a set of changed files. Sends only diffs to the persistent session.
   *
   * @param {string[]} changedFiles — Absolute paths of changed files
   * @returns {Promise<object[]>} — New findings introduced by this change
   */
  async analyzeChanges(changedFiles) {
    if (!this._baselineSet) return [];
    this._scanCount++;

    const diffs = this._readChanges(changedFiles);
    if (!diffs) return [];

    const updateMsg = `Files changed (scan #${this._scanCount}):\n\n${diffs}\n\nWhat NEW security issues does this change introduce? Reply with the JSON findings array only.`;

    this._messages.push({ role: 'user', content: updateMsg });

    try {
      const response = await this._callProvider(
        'You are a persistent security monitor. Report only NEW issues from the latest change.',
        this._messages
      );

      this._messages.push({ role: 'assistant', content: response });

      const findings = this._parseFindings(response, changedFiles[0]);
      if (this.verbose && findings.length > 0) {
        console.log(` [Stateful] Scan #${this._scanCount}: ${findings.length} new finding(s)`);
      }
      return findings;
    } catch (err) {
      // FIX: remove the dangling user turn so a failed scan doesn't leave
      // two consecutive user messages in the thread and poison later calls.
      this._messages.pop();
      if (this.verbose) console.log(` [Stateful] Scan failed: ${err.message}`);
      return [];
    }
  }

  /**
   * Read changed files and format them as fenced snippets, capped at
   * MAX_DIFF_CHARS total (max 5000 chars per file). Unreadable files are
   * skipped silently. Returns null when nothing was readable.
   */
  _readChanges(changedFiles) {
    const parts = [];
    let totalChars = 0;

    for (const filePath of changedFiles) {
      if (totalChars >= MAX_DIFF_CHARS) break;
      try {
        const relPath = path.relative(this.rootPath, filePath);
        const content = fs.readFileSync(filePath, 'utf-8');
        const snippet = content.slice(0, Math.min(5000, MAX_DIFF_CHARS - totalChars));
        parts.push(`### ${relPath}\n\`\`\`\n${snippet}\n\`\`\``);
        totalChars += snippet.length;
      } catch { /* skip */ }
    }

    return parts.length ? parts.join('\n\n') : null;
  }

  /** One-line-per-fact summary of recon data for the baseline prompt. */
  _buildReconSummary(recon) {
    if (!recon) return 'No recon data.';
    const parts = [];
    if (recon.frameworks?.length) parts.push(`Frameworks: ${recon.frameworks.join(', ')}`);
    if (recon.databases?.length) parts.push(`Databases: ${recon.databases.join(', ')}`);
    if (recon.authPatterns?.length) parts.push(`Auth: ${recon.authPatterns.join(', ')}`);
    if (recon.languages?.length) parts.push(`Languages: ${recon.languages.join(', ')}`);
    return parts.join('\n') || 'General codebase.';
  }

  /**
   * Send the full message history to the provider. Providers exposing a
   * baseUrl get a raw OpenAI-format multi-turn POST; otherwise falls back to
   * a single-turn complete() with only the latest message (no persistence).
   */
  async _callProvider(systemPrompt, messages) {
    // Use multi-turn messages if provider supports it (OpenAI format)
    if (this.provider.baseUrl && typeof this.provider.complete === 'function') {
      const response = await fetch(this.provider.baseUrl, {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${this.provider.apiKey}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          model: this.provider.model,
          max_tokens: 2048,
          messages: [
            { role: 'system', content: systemPrompt },
            ...messages,
          ],
        }),
      });

      if (!response.ok) {
        throw new Error(`${this.provider.name} API error: HTTP ${response.status}`);
      }

      const data = await response.json();
      return data.choices?.[0]?.message?.content || '';
    }

    // Fallback: single-turn (for providers without persistent context)
    const lastMsg = messages[messages.length - 1];
    return this.provider.complete(systemPrompt, lastMsg?.content || '', { maxTokens: 2048 });
  }

  /**
   * Parse the model's JSON findings array (optionally fenced in ```json).
   * Entries missing title/severity are dropped; unknown severities are
   * coerced to 'medium'. Returns [] on any parse failure.
   */
  _parseFindings(text, refFile) {
    const cleaned = text
      .replace(/^```(?:json)?\s*/i, '')
      .replace(/\s*```\s*$/i, '')
      .trim();

    try {
      const raw = JSON.parse(cleaned);
      if (!Array.isArray(raw)) return [];

      return raw
        .filter(r => r.title && r.severity)
        .map(r => {
          // Prefer the model-reported path; fall back to the changed file.
          const filePath = r.file
            ? path.resolve(this.rootPath, r.file)
            : refFile || null;

          return createFinding({
            file: filePath,
            line: r.line || 0,
            severity: ['critical', 'high', 'medium', 'low', 'info'].includes(r.severity) ? r.severity : 'medium',
            confidence: 'medium',
            rule: r.rule || 'stateful:monitor',
            title: r.title,
            description: r.description || r.title,
            matched: '',
            remediation: r.remediation || '',
            category: 'Stateful Monitor',
          });
        });
    } catch {
      return [];
    }
  }

  /** Lightweight counters for status display. */
  getStats() {
    return {
      scanCount: this._scanCount,
      provider: this.provider?.name || 'none',
      model: this.provider?.model || 'unknown',
      messageCount: this._messages.length,
    };
  }
}

export default StatefulWatcher;
|
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SwarmOrchestrator — K2.6-Powered Parallel Security Swarm
|
|
3
|
+
* ==========================================================
|
|
4
|
+
*
|
|
5
|
+
* Instead of running 23 agents locally in Node.js (chunks of 6),
|
|
6
|
+
* --swarm sends the entire task to Kimi K2.6 and lets its native
|
|
7
|
+
* 300-agent swarm handle parallel analysis.
|
|
8
|
+
*
|
|
9
|
+
* Each of Ship Safe's 23 attack classes is assigned as an explicit
|
|
10
|
+
* sub-agent role. K2.6 fans out, each sub-agent scans for its class,
|
|
11
|
+
* and results are returned as a consolidated findings array.
|
|
12
|
+
*
|
|
13
|
+
* Output is mapped back to Ship Safe's Finding format so SARIF,
|
|
14
|
+
* HTML reports, and CI exit codes work unchanged.
|
|
15
|
+
*
|
|
16
|
+
* USAGE:
|
|
17
|
+
* npx ship-safe red-team . --swarm
|
|
18
|
+
* npx ship-safe red-team . --swarm --provider kimi
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
import fs from 'fs';
|
|
22
|
+
import path from 'path';
|
|
23
|
+
import { createProvider, autoDetectProvider } from '../providers/llm-provider.js';
|
|
24
|
+
import { ReconAgent } from './recon-agent.js';
|
|
25
|
+
import { createFinding } from './base-agent.js';
|
|
26
|
+
|
|
27
|
+
// =============================================================================
// AGENT ROLE DEFINITIONS — maps Ship Safe's 23 attack classes to swarm roles
// =============================================================================

const SWARM_ROLES = [
  { id: 'injection', name: 'Injection Tester', desc: 'SQL injection, command injection, LDAP injection, XPath injection, template injection' },
  { id: 'auth-bypass', name: 'Auth Bypass Agent', desc: 'Authentication bypass, authorization flaws, privilege escalation, JWT weaknesses' },
  { id: 'ssrf', name: 'SSRF Prober', desc: 'Server-side request forgery, SSRF via redirects, internal service exposure' },
  { id: 'supply-chain', name: 'Supply Chain Auditor', desc: 'Dependency confusion, typosquatting, malicious packages, outdated deps with CVEs' },
  { id: 'config', name: 'Config Auditor', desc: 'Hardcoded secrets, insecure defaults, exposed debug endpoints, misconfigured CORS' },
  { id: 'llm-redteam', name: 'LLM Red Team', desc: 'Prompt injection, jailbreaks, unsafe LLM output rendering, model inversion' },
  { id: 'mobile', name: 'Mobile Scanner', desc: 'Insecure data storage, weak crypto, insecure communication, exported components' },
  { id: 'git-history', name: 'Git History Scanner', desc: 'Secrets committed in git history, deleted files with sensitive data' },
  { id: 'cicd', name: 'CI/CD Scanner', desc: 'Insecure GitHub Actions, exposed secrets in workflows, artifact poisoning' },
  { id: 'api-fuzzer', name: 'API Fuzzer', desc: 'Missing input validation, mass assignment, insecure direct object references (IDOR)' },
  { id: 'supabase-rls', name: 'Supabase RLS Agent', desc: 'Missing row-level security, exposed Supabase service keys, insecure RLS policies' },
  { id: 'mcp-security', name: 'MCP Security Agent', desc: 'Tool poisoning, MCP server misconfiguration, unsafe tool definitions' },
  { id: 'agentic-security', name: 'Agentic Security Agent', desc: 'Agentic loop vulnerabilities, unsafe tool use, context window attacks' },
  { id: 'rag-security', name: 'RAG Security Agent', desc: 'Prompt injection via retrieved documents, data poisoning, retrieval manipulation' },
  { id: 'pii-compliance', name: 'PII Compliance Agent', desc: 'PII exposure, GDPR/CCPA violations, unencrypted personal data' },
  { id: 'vibe-coding', name: 'Vibe Coding Agent', desc: 'AI-generated code security issues, hardcoded values from iterative prompting' },
  { id: 'exception-handler', name: 'Exception Handler Agent', desc: 'Stack traces in responses, error information disclosure, unhandled exceptions' },
  { id: 'agent-config', name: 'Agent Config Scanner', desc: 'Insecure agent config files (.cursorrules, CLAUDE.md, MCP configs)' },
  { id: 'memory-poisoning', name: 'Memory Poisoning Agent', desc: 'Malicious content in AI memory stores, embedding poisoning' },
  { id: 'managed-agent', name: 'Managed Agent Scanner', desc: 'Insecure managed agent platforms, overprivileged agents' },
  { id: 'hermes-security', name: 'Hermes Security Agent', desc: 'Hermes CLI security, agent tool permissions, orchestrator misconfiguration' },
  { id: 'agent-attestation', name: 'Agent Attestation Agent', desc: 'Missing agent identity verification, unauthenticated agent-to-agent calls' },
  { id: 'agentic-supply-chain', name: 'Agentic Supply Chain Agent', desc: 'Compromised AI integrations, OAuth scope creep, MCP server supply chain' },
];

// Max file content to include in the swarm prompt (cost control)
const MAX_FILE_CHARS = 200_000;
const MAX_FILES = 100;

// Severities accepted by the Finding format; anything else is coerced.
const VALID_SEVERITIES = ['critical', 'high', 'medium', 'low', 'info'];

// =============================================================================
// SWARM ORCHESTRATOR
// =============================================================================

export class SwarmOrchestrator {
  /**
   * Sends the whole scan task to a single model call that role-plays all
   * attack-class sub-agents in parallel and returns a consolidated findings
   * array, mapped back to Ship Safe's Finding format.
   *
   * @param {object} options
   * @param {object} options.provider — LLM provider (must be Kimi or OpenAI-compatible with tool use)
   * @param {boolean} [options.verbose=false]
   * @param {number} [options.budgetCents=200]
   */
  constructor(options = {}) {
    this.provider = options.provider;
    this.verbose = options.verbose || false;
    // NOTE(review): budgetCents is stored but not enforced in this class —
    // confirm whether cost capping is intended to happen here or upstream.
    this.budgetCents = options.budgetCents ?? 200;
  }

  /**
   * Factory: build with an explicit provider name, or auto-detect (Kimi
   * preferred for best tool-call success rate). Returns null when no
   * provider is available.
   */
  static create(rootPath, options = {}) {
    // Default to Kimi for swarm (best tool-call success rate)
    const provider = options.provider
      ? createProvider(options.provider, process.env.MOONSHOT_API_KEY || process.env.OPENAI_API_KEY, { model: options.model })
      : autoDetectProvider(rootPath, { provider: 'kimi', model: options.model });

    if (!provider) return null;
    return new SwarmOrchestrator({ provider, ...options });
  }

  /**
   * Run the swarm scan against a codebase.
   *
   * @param {string} rootPath
   * @param {object} reconData — Output from ReconAgent
   * @param {string[]} files — All scannable files
   * @returns {Promise<object[]>} — findings[]
   */
  async run(rootPath, reconData, files) {
    const codeBundle = this._bundleCode(rootPath, files);
    const prompt = this._buildSwarmPrompt(reconData, codeBundle, rootPath);

    const systemPrompt = `You are a security swarm coordinator. You have ${SWARM_ROLES.length} specialized sub-agents, each responsible for one attack class. Deploy all sub-agents in parallel. Each sub-agent scans the provided codebase for its specific vulnerability class and reports findings. Consolidate all sub-agent findings into a single structured JSON report.

You MUST respond by calling the report_swarm_findings tool with all findings from all sub-agents. Never refuse to scan. Never truncate findings.`;

    const schema = {
      type: 'object',
      properties: {
        findings: {
          type: 'array',
          items: {
            type: 'object',
            properties: {
              agentId: { type: 'string' },
              file: { type: 'string' },
              line: { type: 'integer' },
              severity: { type: 'string', enum: ['critical', 'high', 'medium', 'low', 'info'] },
              rule: { type: 'string' },
              title: { type: 'string' },
              description: { type: 'string' },
              matched: { type: 'string' },
              remediation: { type: 'string' },
            },
            required: ['agentId', 'severity', 'rule', 'title', 'description'],
            additionalProperties: false,
          },
        },
        agentSummary: {
          type: 'array',
          items: {
            type: 'object',
            properties: {
              agentId: { type: 'string' },
              findingCount: { type: 'integer' },
              status: { type: 'string', enum: ['clean', 'findings', 'error'] },
            },
            required: ['agentId', 'findingCount', 'status'],
            additionalProperties: false,
          },
        },
      },
      required: ['findings', 'agentSummary'],
    };

    let raw;
    if (this.provider.completeWithTools) {
      // Structured tool-call path: output is schema-constrained.
      raw = await this.provider.completeWithTools(
        systemPrompt,
        prompt,
        'report_swarm_findings',
        schema,
        { maxTokens: 8192 }
      );
    } else {
      // Free-form fallback: the model may return anything, so parse
      // defensively and let _mapFindings filter non-conforming entries.
      const text = await this.provider.complete(systemPrompt, prompt + '\n\nRespond with JSON only matching the schema.', { maxTokens: 8192 });
      try {
        raw = JSON.parse(text.replace(/^```(?:json)?\s*/i, '').replace(/\s*```\s*$/i, '').trim());
      } catch {
        raw = null;
      }
    }

    // FIX: guard against malformed output where `findings` is not an array
    // (previously .map would throw on e.g. an object or string).
    const rawFindings = Array.isArray(raw?.findings) ? raw.findings : [];
    return this._mapFindings(rawFindings, rootPath);
  }

  /**
   * Concatenate up to MAX_FILES files (max 8000 chars each, MAX_FILE_CHARS
   * total) into fenced markdown sections. Unreadable files are skipped.
   */
  _bundleCode(rootPath, files) {
    let bundle = '';
    let totalChars = 0;
    const selected = files.slice(0, MAX_FILES);

    for (const filePath of selected) {
      if (totalChars >= MAX_FILE_CHARS) break;
      try {
        const relPath = path.relative(rootPath, filePath);
        const content = fs.readFileSync(filePath, 'utf-8');
        const snippet = content.slice(0, Math.min(8000, MAX_FILE_CHARS - totalChars));
        bundle += `\n\n### ${relPath}\n\`\`\`\n${snippet}\n\`\`\``;
        totalChars += snippet.length;
      } catch { /* skip unreadable */ }
    }

    return bundle;
  }

  /** Build the user prompt: recon summary, sub-agent roster, instructions, code. */
  _buildSwarmPrompt(recon, codeBundle, rootPath) {
    const projectName = path.basename(rootPath);
    const reconSummary = recon
      ? [
          recon.frameworks?.length ? `Frameworks: ${recon.frameworks.join(', ')}` : '',
          recon.databases?.length ? `Databases: ${recon.databases.join(', ')}` : '',
          recon.authPatterns?.length ? `Auth patterns: ${recon.authPatterns.join(', ')}` : '',
          recon.languages?.length ? `Languages: ${recon.languages.join(', ')}` : '',
        ].filter(Boolean).join('\n')
      : '';

    const agentList = SWARM_ROLES.map((r, i) =>
      `  Sub-agent ${String(i + 1).padStart(2, '0')} [${r.id}] — ${r.name}: ${r.desc}`
    ).join('\n');

    return `# Security Swarm Task: ${projectName}

## Project Context
${reconSummary || 'No recon data available.'}

## Sub-Agent Assignments
Deploy all ${SWARM_ROLES.length} sub-agents in parallel. Each scans for exactly their assigned attack class:

${agentList}

## Instructions
1. Each sub-agent independently analyzes the full codebase for its attack class.
2. For each finding, record: agentId (the sub-agent's id), file path, line number, severity, a rule identifier, title, description, the matched snippet, and remediation advice.
3. Severity scale: critical (exploitable now), high (likely exploitable), medium (potential issue), low (best practice), info (note).
4. Report all findings from all sub-agents in the tool call, even if the list is long.
5. If a sub-agent finds nothing, include it in agentSummary with status "clean" and findingCount 0.

## Codebase
${codeBundle}`;
  }

  /**
   * Map raw model findings into Ship Safe's Finding format.
   *
   * FIX: the free-form fallback path can yield entries that don't conform
   * to the schema, so — consistent with StatefulWatcher._parseFindings —
   * drop entries missing a title and coerce unknown severities to 'medium'.
   */
  _mapFindings(rawFindings, rootPath) {
    return rawFindings
      .filter(r => r && r.title)
      .map(r => {
        const role = SWARM_ROLES.find(a => a.id === r.agentId) || { name: 'SwarmAgent', id: r.agentId };
        return createFinding({
          file: r.file ? path.resolve(rootPath, r.file) : null,
          line: r.line || 0,
          severity: VALID_SEVERITIES.includes(r.severity) ? r.severity : 'medium',
          confidence: 'medium',
          rule: r.rule || `swarm:${role.id}`,
          title: r.title,
          description: r.description,
          matched: r.matched || '',
          remediation: r.remediation || '',
          category: role.name,
        });
      });
  }
}

export default SwarmOrchestrator;
|