agent-security-scanner-mcp 3.1.0 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,570 @@
1
+ // src/tools/scan-prompt.js
2
+ import { z } from "zod";
3
+ import { readFileSync, existsSync } from "fs";
4
+ import { dirname, join } from "path";
5
+ import { fileURLToPath } from "url";
6
+ import { createHash } from "crypto";
7
+
8
// Handle both ESM and CJS bundling: under ESM, derive this module's directory
// from import.meta.url; if that throws (e.g. after CJS transpilation where
// import.meta is unavailable — TODO confirm the exact bundler scenario), fall
// back to the process working directory. Rule files are resolved relative to
// this value in the load* functions below.
let __dirname;
try {
  __dirname = dirname(fileURLToPath(import.meta.url));
} catch {
  __dirname = process.cwd();
}
15
+
16
// Risk thresholds for action determination: score cutoffs (0-100) mapping a
// computed risk score to a named tier. Used by getRiskLevel() and as the
// default block/warn/log thresholds in determineAction().
const RISK_THRESHOLDS = {
  CRITICAL: 85,
  HIGH: 65,
  MEDIUM: 40,
  LOW: 20
};
23
+
24
// Category weights for risk calculation: each finding's risk score is scaled
// by its category's weight (0-1] in calculateRiskScore(). Categories absent
// from this table fall back to 0.5 there.
const CATEGORY_WEIGHTS = {
  "exfiltration": 1.0,
  "malicious-injection": 1.0,
  "system-manipulation": 1.0,
  "social-engineering": 0.8,
  "obfuscation": 0.7,
  "agent-manipulation": 0.9,
  "prompt-injection": 0.9,
  "prompt-injection-content": 1.0,
  "prompt-injection-jailbreak": 1.0,
  "prompt-injection-extraction": 0.9,
  "prompt-injection-delimiter": 0.8,
  "prompt-injection-encoded": 0.9,
  "prompt-injection-context": 0.8,
  "prompt-injection-privilege": 0.85,
  "prompt-injection-multi-turn": 0.7,
  "prompt-injection-output": 0.9,
  "unknown": 0.5
};
44
+
45
// Confidence multipliers: scales each finding's contribution by the rule's
// detection confidence in calculateRiskScore(). Unrecognized confidence
// values fall back to 0.7 there.
const CONFIDENCE_MULTIPLIERS = {
  "HIGH": 1.0,
  "MEDIUM": 0.7,
  "LOW": 0.4
};
51
+
52
// Shared line-oriented parser for the package's Semgrep-style rule YAML files.
// Deliberately avoids a YAML dependency: splits the document into "- id:"
// blocks and extracts id / severity / message / patterns / metadata with
// regexes. Returns rules shaped as
//   { id, severity, message, patterns: string[], metadata: object }.
//
// `resetOnLanguages` preserves a behavioral difference between the two
// callers: the agent-attack loader treats a "languages:" key as ending the
// current patterns/metadata section; the prompt-injection loader does not.
function parseSecurityRulesYaml(yamlText, resetOnLanguages) {
  const rules = [];
  const ruleBlocks = yamlText.split(/^ - id:/m).slice(1);

  for (const block of ruleBlocks) {
    // Re-prepend the delimiter so the first line parses like any other.
    const lines = (' - id:' + block).split('\n');
    const rule = {
      id: '',
      severity: 'WARNING',
      message: '',
      patterns: [],
      metadata: {}
    };

    let inPatterns = false;
    let inMetadata = false;

    for (const line of lines) {
      if (line.match(/^\s+- id:\s*/)) {
        rule.id = line.replace(/^\s+- id:\s*/, '').trim();
      } else if (line.match(/^\s+severity:\s*/)) {
        rule.severity = line.replace(/^\s+severity:\s*/, '').trim();
      } else if (line.match(/^\s+message:\s*/)) {
        rule.message = line.replace(/^\s+message:\s*["']?/, '').replace(/["']$/, '').trim();
      } else if (line.match(/^\s+patterns:\s*$/)) {
        inPatterns = true;
        inMetadata = false;
      } else if (line.match(/^\s+metadata:\s*$/)) {
        inPatterns = false;
        inMetadata = true;
      } else if (inPatterns && line.match(/^\s+- /)) {
        let pattern = line.replace(/^\s+- /, '').trim();
        pattern = pattern.replace(/^["']|["']$/g, '');
        // Strip Python-style inline flags - JS doesn't support them
        pattern = pattern.replace(/^\(\?i\)/, '');
        // Unescape double backslashes from YAML (\\s -> \s)
        pattern = pattern.replace(/\\\\/g, '\\');
        if (pattern) rule.patterns.push(pattern);
      } else if (inMetadata && line.match(/^\s+\w+:/)) {
        const match = line.match(/^\s+(\w+):\s*["']?([^"'\n]+)["']?/);
        if (match) {
          rule.metadata[match[1]] = match[2].trim();
        }
      } else if (resetOnLanguages && line.match(/^\s+languages:/)) {
        inPatterns = false;
        inMetadata = false;
      }
    }

    // Keep only rules that are actually matchable.
    if (rule.id && rule.patterns.length > 0) {
      rules.push(rule);
    }
  }

  return rules;
}

// Load agent attack rules from rules/agent-attacks.security.yaml.
// Returns [] (logging to stderr) when the file is missing or unreadable.
function loadAgentAttackRules() {
  try {
    const rulesPath = join(__dirname, '..', '..', 'rules', 'agent-attacks.security.yaml');
    if (!existsSync(rulesPath)) {
      console.error("Agent attack rules file not found");
      return [];
    }
    return parseSecurityRulesYaml(readFileSync(rulesPath, 'utf-8'), true);
  } catch (error) {
    console.error("Error loading agent attack rules:", error.message);
    return [];
  }
}

// Also load prompt injection rules from rules/prompt-injection.security.yaml.
// Only generic content rules (ids starting with "generic.prompt") are kept;
// code-oriented rules in the same file are skipped. Returns [] on any failure.
function loadPromptInjectionRules() {
  try {
    const rulesPath = join(__dirname, '..', '..', 'rules', 'prompt-injection.security.yaml');
    if (!existsSync(rulesPath)) {
      return [];
    }
    return parseSecurityRulesYaml(readFileSync(rulesPath, 'utf-8'), false)
      .filter(rule => rule.id.startsWith('generic.prompt'));
  } catch (error) {
    console.error("Error loading prompt injection rules:", error.message);
    return [];
  }
}
191
+
192
// Calculate a single 0-100 risk score from the list of findings.
//
// Each finding contributes its rule risk_score (default 50) scaled by its
// category weight and confidence multiplier. Contributions are averaged,
// boosted for compound signals (multiple categories, mixed severities,
// finding count), capped at 100, then adjusted by context.sensitivity_level.
function calculateRiskScore(findings, context) {
  if (findings.length === 0) return 0;

  let totalScore = 0;

  for (const finding of findings) {
    // risk_score arrives as a string from rule metadata; parse with an
    // explicit radix and default to 50 when missing or non-numeric.
    // NOTE(review): a literal "0" also falls back to 50 via || — presumably
    // no rule carries a zero score; confirm before tightening.
    const riskScore = Number.parseInt(finding.risk_score, 10) || 50;
    const category = finding.category || 'unknown';
    const confidence = finding.confidence || 'MEDIUM';

    const categoryWeight = CATEGORY_WEIGHTS[category] || 0.5;
    const confidenceMultiplier = CONFIDENCE_MULTIPLIERS[confidence] || 0.7;

    totalScore += (riskScore / 100) * categoryWeight * confidenceMultiplier * 100;
  }

  // Average the scores but boost for multiple findings
  let avgScore = totalScore / findings.length;

  // Enhanced compound boosting
  if (findings.length > 1) {
    // Cross-category boost: findings spanning multiple categories, boost by 0.15
    const uniqueCategories = new Set(findings.map(f => f.category || 'unknown'));
    if (uniqueCategories.size > 1) {
      avgScore = avgScore * (1 + 0.15);
    }

    // Mixed-severity boost: if both ERROR and WARNING present, 1.1x
    const hasError = findings.some(f => f.severity === 'ERROR');
    const hasWarning = findings.some(f => f.severity === 'WARNING');
    if (hasError && hasWarning) {
      avgScore = avgScore * 1.1;
    }

    // Small per-finding boost
    avgScore = avgScore * (1 + (findings.length - 1) * 0.05);
  }

  avgScore = Math.min(100, avgScore);

  // Sensitivity adjustment: 'high' amplifies, 'low' relaxes (wide spread
  // so the setting has meaningful impact).
  if (context?.sensitivity_level === 'high') {
    avgScore = Math.min(100, avgScore * 1.5);
  } else if (context?.sensitivity_level === 'low') {
    avgScore = avgScore * 0.5;
  }

  return Math.round(avgScore);
}
242
+
243
// Decide BLOCK / WARN / LOG / ALLOW from the risk score, per-finding actions,
// and the caller's sensitivity level. A finding whose rule demands an action
// forces at least that action; CRITICAL scores always block.
function determineAction(riskScore, findings, context) {
  // Sensitivity shifts all thresholds: 'high' is stricter, 'low' more lenient.
  const sensitivity = context?.sensitivity_level;
  let thresholds;
  if (sensitivity === 'high') {
    thresholds = { block: 50, warn: 30, log: 15 };
  } else if (sensitivity === 'low') {
    thresholds = { block: 75, warn: 50, log: 30 };
  } else {
    thresholds = {
      block: RISK_THRESHOLDS.HIGH,
      warn: RISK_THRESHOLDS.MEDIUM,
      log: RISK_THRESHOLDS.LOW
    };
  }

  const anyFindingWants = (wanted) => findings.some(f => f.action === wanted);

  // Rule-mandated blocks and CRITICAL scores bypass sensitivity entirely.
  if (anyFindingWants('BLOCK') || riskScore >= RISK_THRESHOLDS.CRITICAL) {
    return 'BLOCK';
  }
  if (riskScore >= thresholds.block) {
    return 'BLOCK';
  }
  if (anyFindingWants('WARN') || riskScore >= thresholds.warn) {
    return 'WARN';
  }
  if (anyFindingWants('LOG') || riskScore >= thresholds.log) {
    return 'LOG';
  }
  return 'ALLOW';
}
282
+
283
// Map a 0-100 risk score onto its named tier; scores below LOW are 'NONE'.
function getRiskLevel(score) {
  const tiers = [
    ['CRITICAL', RISK_THRESHOLDS.CRITICAL],
    ['HIGH', RISK_THRESHOLDS.HIGH],
    ['MEDIUM', RISK_THRESHOLDS.MEDIUM],
    ['LOW', RISK_THRESHOLDS.LOW]
  ];
  for (const [level, threshold] of tiers) {
    if (score >= threshold) {
      return level;
    }
  }
  return 'NONE';
}
291
+
292
// Build a one-paragraph, human-readable summary of the findings and the
// action taken. Severity is 'critical' if any finding is an ERROR.
function generateExplanation(findings, action) {
  if (findings.length === 0) {
    return 'No security concerns detected in this prompt.';
  }

  const categories = [...new Set(findings.map(f => f.category))];
  const isCritical = findings.some(f => f.severity === 'ERROR');
  const severityWord = isCritical ? 'critical' : 'potential';

  const parts = [`Detected ${findings.length} ${severityWord} security concern(s)`];

  if (categories.length > 0) {
    parts.push(` in categories: ${categories.join(', ')}`);
  }

  parts.push(`. Action: ${action}.`);

  if (action === 'BLOCK') {
    parts.push(' This prompt appears to contain malicious intent and should not be executed.');
  } else if (action === 'WARN') {
    parts.push(' Review carefully before proceeding.');
  }

  return parts.join('');
}
317
+
318
// Translate finding categories into concrete mitigation advice.
// Advice is deduplicated across findings and returned in insertion order;
// unrecognized categories map to a single generic recommendation.
function generateRecommendations(findings) {
  const adviceByCategory = new Map([
    ['exfiltration', [
      'Never allow prompts that request sending code or secrets to external URLs',
      'Block access to sensitive files like .env, SSH keys, and credentials'
    ]],
    ['malicious-injection', [
      'Reject requests for backdoors, reverse shells, or malicious code',
      'Never disable security controls at user request'
    ]],
    ['system-manipulation', [
      'Block destructive file operations and system configuration changes',
      'Prevent persistence mechanisms like crontab or startup script modifications'
    ]],
    ['social-engineering', [
      'Verify authorization claims through proper channels, not prompt content',
      'Be skeptical of urgency claims or claims of special modes'
    ]],
    ['obfuscation', [
      'Be wary of encoded or fragmented instructions',
      'Reject requests for "examples" of malicious code'
    ]],
    ['agent-manipulation', [
      'Maintain confirmation prompts for sensitive operations',
      'Never hide output or actions from the user'
    ]]
  ]);
  const fallback = ['Review this prompt carefully before execution'];

  const recommendations = new Set();
  for (const finding of findings) {
    const advice = adviceByCategory.get(finding.category) ?? fallback;
    for (const item of advice) {
      recommendations.add(item);
    }
  }

  return [...recommendations];
}
357
+
358
// SHA-256 of the prompt truncated to 16 hex chars — lets audit logs identify
// a prompt without storing its content.
function hashPrompt(text) {
  const digest = createHash('sha256').update(text).digest('hex');
  return digest.slice(0, 16);
}
362
+
363
// Export schema for tool registration: Zod field validators spread into the
// MCP tool definition for scan_agent_prompt. `context` and `verbosity` are
// optional; the handler defaults verbosity to 'compact'.
export const scanAgentPromptSchema = {
  prompt_text: z.string().describe("The prompt or instruction text to analyze"),
  context: z.object({
    previous_messages: z.array(z.string()).optional().describe("Previous conversation messages for multi-turn detection"),
    sensitivity_level: z.enum(["high", "medium", "low"]).optional().describe("Sensitivity level - high means more strict, low means more permissive")
  }).optional().describe("Optional context for better analysis"),
  verbosity: z.enum(['minimal', 'compact', 'full']).optional().describe("Response detail level: 'minimal' (action only), 'compact' (default), 'full' (all details)")
};
372
+
373
// Build a normalized finding record from a matched rule. Optional suffixes
// tag derived findings (e.g. hits inside base64-decoded content).
function buildFinding(rule, matchedText, idSuffix = '', messageSuffix = '') {
  return {
    rule_id: rule.id + idSuffix,
    category: rule.metadata.category || 'unknown',
    severity: rule.severity,
    message: rule.message + messageSuffix,
    matched_text: matchedText.substring(0, 100),
    confidence: rule.metadata.confidence || 'MEDIUM',
    risk_score: rule.metadata.risk_score || '50',
    action: rule.metadata.action || 'WARN'
  };
}

// Match `text` against every rule, emitting at most one finding per rule.
// Patterns the JS regex engine cannot compile are skipped silently.
function scanTextAgainstRules(text, rules, idSuffix = '', messageSuffix = '') {
  const findings = [];
  for (const rule of rules) {
    for (const pattern of rule.patterns) {
      try {
        const match = text.match(new RegExp(pattern, 'i'));
        if (match) {
          findings.push(buildFinding(rule, match[0], idSuffix, messageSuffix));
          break; // Only one match per rule
        }
      } catch (e) {
        // Skip invalid regex
      }
    }
  }
  return findings;
}

// True when any rule pattern matches `text` (used on previous messages).
function anyRuleMatches(text, rules) {
  for (const rule of rules) {
    for (const pattern of rule.patterns) {
      try {
        if (new RegExp(pattern, 'i').test(text)) {
          return true;
        }
      } catch (e) {
        // Skip invalid regex
      }
    }
  }
  return false;
}

// Export handler function: scan a prompt for agent-attack and prompt-injection
// patterns. Pipeline: load rules -> append fenced code-block contents to the
// scan text -> match all rules -> decode & re-scan base64 payloads -> flag
// multi-turn escalation -> score, decide action, and format per `verbosity`.
// Returns an MCP-style { content: [{ type: "text", text }] } payload whose
// text is a JSON report.
export async function scanAgentPrompt({ prompt_text, context, verbosity }) {
  const agentRules = loadAgentAttackRules();
  const promptRules = loadPromptInjectionRules();
  const allRules = [...agentRules, ...promptRules];

  // 2.7: Extract content from ``` code blocks and append it so instructions
  // hidden inside fences are scanned too.
  let expandedText = prompt_text;
  const codeBlocks = prompt_text.match(/```[\s\S]*?```/g);
  if (codeBlocks) {
    for (const block of codeBlocks) {
      const inner = block.replace(/^```\w*\n?/, '').replace(/\n?```$/, '');
      expandedText += '\n' + inner;
    }
  }

  // Primary scan of the expanded prompt against every rule.
  const findings = scanTextAgainstRules(expandedText, allRules);

  // 2.8: Runtime base64 decode-and-rescan — decoded payloads are checked
  // against the generic prompt rules only, with tagged rule ids.
  const b64Matches = expandedText.match(/[A-Za-z0-9+/]{40,}={0,2}/g);
  if (b64Matches) {
    const genericRules = allRules.filter(r => r.id.startsWith('generic.prompt'));
    for (const b64str of b64Matches) {
      try {
        const decoded = Buffer.from(b64str, 'base64').toString('utf-8');
        // Treat as text only if >70% of characters are printable ASCII.
        const printable = decoded.split('').filter(c => c.charCodeAt(0) >= 32 && c.charCodeAt(0) <= 126).length;
        if (printable / decoded.length > 0.7) {
          findings.push(...scanTextAgainstRules(
            decoded, genericRules, '.base64-decoded', ' (detected in base64-decoded content)'
          ));
        }
      } catch (e) {
        // Skip invalid base64
      }
    }
  }

  // Multi-turn escalation detection: suspicious patterns in BOTH a previous
  // message and the current prompt raise an extra finding.
  if (context?.previous_messages && Array.isArray(context.previous_messages) && context.previous_messages.length > 0) {
    const previousSuspicious = context.previous_messages.some(msg => anyRuleMatches(msg, allRules));
    if (previousSuspicious && findings.length > 0) {
      findings.push({
        rule_id: 'multi-turn.escalation',
        category: 'social-engineering',
        severity: 'WARNING',
        message: 'Multi-turn escalation detected: suspicious patterns found in both previous and current messages.',
        matched_text: 'escalation across conversation turns',
        confidence: 'MEDIUM',
        risk_score: '70',
        action: 'WARN'
      });
    }
  }

  // Score, decide, and explain.
  const riskScore = calculateRiskScore(findings, context);
  const action = determineAction(riskScore, findings, context);
  const riskLevel = getRiskLevel(riskScore);
  const explanation = generateExplanation(findings, action);
  const recommendations = generateRecommendations(findings);

  // Audit trail: store a hash rather than the raw prompt.
  const audit = {
    timestamp: new Date().toISOString(),
    prompt_hash: hashPrompt(prompt_text),
    prompt_length: prompt_text.length,
    rules_checked: allRules.length,
    context_provided: !!context
  };

  // Determine verbosity (default: compact)
  const level = verbosity || 'compact';

  let result;
  switch (level) {
    case 'minimal':
      result = {
        action,
        risk_level: riskLevel,
        findings_count: findings.length,
        message: findings.length > 0
          ? `${action}: ${findings.length} concern(s) detected. Use verbosity='compact' for details.`
          : "ALLOW: No security concerns detected."
      };
      break;
    case 'full':
      result = {
        action,
        risk_score: riskScore,
        risk_level: riskLevel,
        findings_count: findings.length,
        findings: findings.map(f => ({
          rule_id: f.rule_id,
          category: f.category,
          severity: f.severity,
          message: f.message,
          matched_text: f.matched_text,
          confidence: f.confidence
        })),
        explanation,
        recommendations,
        audit
      };
      break;
    case 'compact':
    default:
      result = {
        action,
        risk_score: riskScore,
        risk_level: riskLevel,
        findings_count: findings.length,
        findings: findings.map(f => ({
          rule_id: f.rule_id,
          severity: f.severity,
          message: f.message
        })),
        recommendations
      };
  }

  return {
    content: [{
      type: "text",
      text: JSON.stringify(result, null, 2)
    }]
  };
}
@@ -0,0 +1,117 @@
1
+ // src/tools/scan-security.js
2
+ import { z } from "zod";
3
+ import { existsSync, readFileSync } from "fs";
4
+ import { detectLanguage, runAnalyzer, generateFix, toSarif } from '../utils.js';
5
+
6
// Zod field validators for the scan_security tool: target file path plus
// optional output format ('json' default, 'sarif' for CI integration) and
// verbosity (the handler defaults to 'compact').
export const scanSecuritySchema = {
  file_path: z.string().describe("Path to the file to scan"),
  output_format: z.enum(['json', 'sarif']).optional().describe("Output format: 'json' (default) or 'sarif' for GitHub/GitLab integration"),
  verbosity: z.enum(['minimal', 'compact', 'full']).optional().describe("Response detail level: 'minimal' (counts only), 'compact' (default, actionable info), 'full' (complete metadata)")
};
11
+
12
// Verbosity formatters.
// Minimal shape: per-severity counts plus a one-line summary message.
function formatMinimal(file_path, language, issues) {
  const counts = { error: 0, warning: 0, info: 0 };
  for (const issue of issues) {
    counts[issue.severity] = (counts[issue.severity] || 0) + 1;
  }

  const summary = issues.length > 0
    ? `Found ${issues.length} issue(s). Use verbosity='compact' for details.`
    : "No security issues found.";

  return {
    file: file_path,
    language,
    total: issues.length,
    critical: counts.error, // 'error' severity is surfaced as 'critical'
    warning: counts.warning,
    info: counts.info,
    message: summary
  };
}
28
+
29
// Compact shape (the default): one trimmed, actionable entry per issue.
function formatCompact(file_path, language, issues) {
  const summarize = (issue) => ({
    line: issue.line + 1, // analyzer lines are 0-based; report 1-based
    ruleId: issue.ruleId,
    severity: issue.severity,
    message: issue.message,
    fix: issue.suggested_fix?.fixed ? issue.suggested_fix.fixed.trim() : null
  });

  return {
    file: file_path,
    language,
    issues_count: issues.length,
    issues: issues.map(summarize)
  };
}
43
+
44
// Full shape: passes the enhanced issue objects through untouched.
function formatFull(file_path, language, issues) {
  return {
    file: file_path,
    language,
    issues_count: issues.length,
    issues
  };
}
52
+
53
// Scan a file with the analyzer, enrich each issue with its source line and a
// suggested fix, and format the report per `output_format` / `verbosity`.
// Returns an MCP-style { content: [{ type: "text", text }] } payload.
export async function scanSecurity({ file_path, output_format, verbosity }) {
  // Bail out early when the target file does not exist.
  if (!existsSync(file_path)) {
    return {
      content: [{ type: "text", text: JSON.stringify({ error: "File not found" }) }]
    };
  }

  const issues = runAnalyzer(file_path);

  // The analyzer signals failure via an object carrying an `error` key.
  if (issues.error) {
    return {
      content: [{ type: "text", text: JSON.stringify(issues) }]
    };
  }

  // Read the file so each issue can carry its offending line and a fix.
  const fileText = readFileSync(file_path, 'utf-8');
  const fileLines = fileText.split('\n');
  const language = detectLanguage(file_path);

  const enhancedIssues = issues.map((issue) => {
    const lineText = fileLines[issue.line] || '';
    return {
      ...issue,
      line_content: lineText.trim(),
      suggested_fix: generateFix(issue, lineText, language)
    };
  });

  // SARIF output ignores verbosity and always carries full detail.
  if (output_format === 'sarif') {
    return {
      content: [{
        type: "text",
        text: JSON.stringify(toSarif(file_path, language, enhancedIssues), null, 2)
      }]
    };
  }

  // Pick a formatter by verbosity; 'compact' is the default.
  const formatters = {
    minimal: formatMinimal,
    full: formatFull,
    compact: formatCompact
  };
  const format = formatters[verbosity] ?? formatCompact;
  const result = format(file_path, language, enhancedIssues);

  return {
    content: [{
      type: "text",
      text: JSON.stringify(result, null, 2)
    }]
  };
}