@massu/core 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +71 -0
- package/README.md +2 -2
- package/dist/hooks/cost-tracker.js +149 -11527
- package/dist/hooks/post-edit-context.js +127 -11493
- package/dist/hooks/post-tool-use.js +169 -11550
- package/dist/hooks/pre-compact.js +149 -11530
- package/dist/hooks/pre-delete-check.js +144 -11523
- package/dist/hooks/quality-event.js +149 -11527
- package/dist/hooks/session-end.js +188 -11570
- package/dist/hooks/session-start.js +159 -11534
- package/dist/hooks/user-prompt.js +149 -11530
- package/package.json +14 -19
- package/src/adr-generator.ts +292 -0
- package/src/analytics.ts +373 -0
- package/src/audit-trail.ts +450 -0
- package/src/backfill-sessions.ts +180 -0
- package/src/cli.ts +105 -0
- package/src/cloud-sync.ts +190 -0
- package/src/commands/doctor.ts +300 -0
- package/src/commands/init.ts +395 -0
- package/src/commands/install-hooks.ts +26 -0
- package/src/config.ts +357 -0
- package/src/cost-tracker.ts +355 -0
- package/src/db.ts +233 -0
- package/src/dependency-scorer.ts +337 -0
- package/src/docs-map.json +100 -0
- package/src/docs-tools.ts +517 -0
- package/src/domains.ts +181 -0
- package/src/hooks/cost-tracker.ts +66 -0
- package/src/hooks/intent-suggester.ts +131 -0
- package/src/hooks/post-edit-context.ts +91 -0
- package/src/hooks/post-tool-use.ts +175 -0
- package/src/hooks/pre-compact.ts +146 -0
- package/src/hooks/pre-delete-check.ts +153 -0
- package/src/hooks/quality-event.ts +127 -0
- package/src/hooks/security-gate.ts +121 -0
- package/src/hooks/session-end.ts +467 -0
- package/src/hooks/session-start.ts +210 -0
- package/src/hooks/user-prompt.ts +91 -0
- package/src/import-resolver.ts +224 -0
- package/src/memory-db.ts +1376 -0
- package/src/memory-tools.ts +391 -0
- package/src/middleware-tree.ts +70 -0
- package/src/observability-tools.ts +343 -0
- package/src/observation-extractor.ts +411 -0
- package/src/page-deps.ts +283 -0
- package/src/prompt-analyzer.ts +332 -0
- package/src/regression-detector.ts +319 -0
- package/src/rules.ts +57 -0
- package/src/schema-mapper.ts +232 -0
- package/src/security-scorer.ts +405 -0
- package/src/security-utils.ts +133 -0
- package/src/sentinel-db.ts +578 -0
- package/src/sentinel-scanner.ts +405 -0
- package/src/sentinel-tools.ts +512 -0
- package/src/sentinel-types.ts +140 -0
- package/src/server.ts +189 -0
- package/src/session-archiver.ts +112 -0
- package/src/session-state-generator.ts +174 -0
- package/src/team-knowledge.ts +407 -0
- package/src/tools.ts +847 -0
- package/src/transcript-parser.ts +458 -0
- package/src/trpc-index.ts +214 -0
- package/src/validate-features-runner.ts +106 -0
- package/src/validation-engine.ts +358 -0
- package/dist/cli.js +0 -7890
- package/dist/server.js +0 -7008
package/src/rules.ts
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
// Copyright (c) 2026 Massu. All rights reserved.
|
|
2
|
+
// Licensed under BSL 1.1 - see LICENSE file for details.
|
|
3
|
+
|
|
4
|
+
import { getConfig } from './config.ts';
|
|
5
|
+
|
|
6
|
+
/** A glob-driven rule set: `rules` apply to every file whose path matches `match`. */
export interface PatternRule {
  /** Glob pattern to match file paths against */
  match: string;
  /** List of rules that apply to matched files */
  rules: string[];
  /** Severity: CRITICAL rules are schema mismatches or Edge Runtime violations */
  severity?: 'CRITICAL' | 'HIGH' | 'MEDIUM' | 'LOW';
  /** Pattern file to reference for details */
  patternFile?: string;
}
|
|
16
|
+
|
|
17
|
+
/**
|
|
18
|
+
* Get pattern rules from config.
|
|
19
|
+
* Converts the config format (pattern/rules) to the internal PatternRule format.
|
|
20
|
+
*/
|
|
21
|
+
function getPatternRules(): PatternRule[] {
|
|
22
|
+
return getConfig().rules.map((r) => ({
|
|
23
|
+
match: r.pattern,
|
|
24
|
+
rules: r.rules,
|
|
25
|
+
}));
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
/**
|
|
29
|
+
* Match a file path against all pattern rules and return applicable rules.
|
|
30
|
+
*/
|
|
31
|
+
export function matchRules(filePath: string): PatternRule[] {
|
|
32
|
+
const normalized = filePath.replace(/\\/g, '/');
|
|
33
|
+
const rules = getPatternRules();
|
|
34
|
+
return rules.filter((rule) => globMatch(normalized, rule.match));
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
/**
|
|
38
|
+
* Simple glob matching for pattern rules.
|
|
39
|
+
* Supports **, *, and ? wildcards.
|
|
40
|
+
*/
|
|
41
|
+
export function globMatch(filePath: string, pattern: string): boolean {
|
|
42
|
+
// Convert glob to regex using placeholders to avoid replacement conflicts
|
|
43
|
+
let regexStr = pattern
|
|
44
|
+
.replace(/\*\*\//g, '\0GLOBSTARSLASH\0') // **/ placeholder
|
|
45
|
+
.replace(/\*\*/g, '\0GLOBSTAR\0') // ** placeholder
|
|
46
|
+
.replace(/\*/g, '\0STAR\0') // * placeholder
|
|
47
|
+
.replace(/\?/g, '\0QUESTION\0') // ? placeholder
|
|
48
|
+
.replace(/\./g, '\\.') // escape dots
|
|
49
|
+
.replace(/\0GLOBSTARSLASH\0/g, '(?:.*/)?') // **/ = zero or more directories
|
|
50
|
+
.replace(/\0GLOBSTAR\0/g, '.*') // ** = anything
|
|
51
|
+
.replace(/\0STAR\0/g, '[^/]*') // * = non-slash chars
|
|
52
|
+
.replace(/\0QUESTION\0/g, '.'); // ? = single char
|
|
53
|
+
|
|
54
|
+
// Anchor pattern
|
|
55
|
+
const regex = new RegExp(`(^|/)${regexStr}($|/)`);
|
|
56
|
+
return regex.test(filePath);
|
|
57
|
+
}
|
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
// Copyright (c) 2026 Massu. All rights reserved.
|
|
2
|
+
// Licensed under BSL 1.1 - see LICENSE file for details.
|
|
3
|
+
|
|
4
|
+
import { readFileSync, existsSync, readdirSync } from 'fs';
|
|
5
|
+
import { resolve, join } from 'path';
|
|
6
|
+
import { getConfig, getResolvedPaths, getProjectRoot } from './config.ts';
|
|
7
|
+
|
|
8
|
+
/** A Prisma model parsed out of the schema file. */
export interface SchemaModel {
  // Model name exactly as written in the schema
  name: string;
  // Physical table name: @@map("...") when present, otherwise snake_cased model name
  tableName: string;
  fields: SchemaField[];
}

/** One field of a parsed model. */
export interface SchemaField {
  name: string;
  // Display type including "[]"/"?" suffixes, e.g. "String?" or "Post[]"
  type: string;
  // true when the field carries a "?" suffix
  nullable: boolean;
  // true for @relation fields and non-scalar (model/enum-like) types
  isRelation: boolean;
}

/** One occurrence of a column-like identifier found while scanning router files. */
export interface ColumnUsage {
  // Path relative to the project root
  file: string;
  // 1-based line number
  line: number;
  // Trimmed source line where the usage appeared
  usage: string;
}

/** A known-wrong column name still present in code, with the real columns for reference. */
export interface SchemaMismatch {
  table: string;
  // The incorrect column name found in code
  codeColumn: string;
  // The model's actual field names, for the fix suggestion
  actualColumns: string[];
  // Files (relative paths) containing the wrong column
  files: string[];
  severity: 'CRITICAL' | 'HIGH';
}
|
|
34
|
+
|
|
35
|
+
/**
 * Parse the Prisma schema file and extract all models with their fields.
 *
 * Walks the schema line by line; when a `model X {` header is found, the
 * model body is collected by tracking brace depth (so braces inside
 * annotations do not end the model early). Field lines are parsed with a
 * regex; comments and `@@` block attributes are skipped during field
 * parsing, but `@@map` is still read from the collected body afterwards to
 * derive the physical table name.
 *
 * @returns one SchemaModel per `model` block, in schema order
 * @throws Error when the configured schema path does not exist
 */
export function parsePrismaSchema(): SchemaModel[] {
  const schemaPath = getResolvedPaths().prismaSchemaPath;
  if (!existsSync(schemaPath)) {
    throw new Error(`Prisma schema not found at ${schemaPath}`);
  }

  const source = readFileSync(schemaPath, 'utf-8');
  const models: SchemaModel[] = [];
  const sourceLines = source.split('\n');

  // Parse models by tracking brace depth instead of regex
  let i = 0;
  while (i < sourceLines.length) {
    const line = sourceLines[i].trim();
    const modelMatch = line.match(/^model\s+(\w+)\s*\{/);

    if (modelMatch) {
      const modelName = modelMatch[1];
      const fields: SchemaField[] = [];
      let braceDepth = 1; // already inside the model's opening brace
      i++;

      // Collect body lines; the line that closes the final brace is excluded
      const bodyLines: string[] = [];
      while (i < sourceLines.length && braceDepth > 0) {
        const bodyLine = sourceLines[i];
        for (const ch of bodyLine) {
          if (ch === '{') braceDepth++;
          if (ch === '}') braceDepth--;
        }
        if (braceDepth > 0) {
          bodyLines.push(bodyLine);
        }
        i++;
      }

      for (const bodyLine of bodyLines) {
        const trimmed = bodyLine.trim();
        // Skip blanks, comments, and block attributes (@@map, @@index, ...)
        if (!trimmed || trimmed.startsWith('//') || trimmed.startsWith('@@')) continue;

        // Parse field: fieldName Type? @annotations
        // Must handle large whitespace padding in the schema
        const fieldMatch = trimmed.match(/^(\w+)\s+(\w+)(\?)?(\[\])?\s*(.*)?$/);
        if (fieldMatch) {
          const fieldName = fieldMatch[1];
          const fieldType = fieldMatch[2];
          const nullable = !!fieldMatch[3]; // "?" suffix present
          const annotations = fieldMatch[5] || '';

          // Skip @relation fields (they're virtual)
          // NOTE: && binds tighter than ||, so this reads as:
          // has @relation, OR (type starts uppercase AND is not a known scalar).
          // Non-scalar uppercase types (model/enum references) are flagged too.
          const isRelation = annotations.includes('@relation') || fieldType[0] === fieldType[0].toUpperCase() && !['String', 'Int', 'Float', 'Boolean', 'DateTime', 'Json', 'Decimal', 'BigInt', 'Bytes'].includes(fieldType);

          fields.push({
            name: fieldName,
            // Reconstruct the display type, e.g. "Post[]" or "String?"
            type: fieldType + (fieldMatch[4] || '') + (nullable ? '?' : ''),
            nullable,
            isRelation,
          });
        }
      }

      // Derive table name from @@map or use model name directly (Prisma convention)
      const body = bodyLines.join('\n');
      const mapMatch = body.match(/@@map\("([^"]+)"\)/);
      const tableName = mapMatch ? mapMatch[1] : toSnakeCase(modelName);

      models.push({ name: modelName, tableName, fields });
    } else {
      i++;
    }
  }

  return models;
}
|
|
111
|
+
|
|
112
|
+
function toSnakeCase(str: string): string {
|
|
113
|
+
return str
|
|
114
|
+
.replace(/([A-Z])/g, '_$1')
|
|
115
|
+
.toLowerCase()
|
|
116
|
+
.replace(/^_/, '');
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
/**
|
|
120
|
+
* Find all references to a table's columns in router files.
|
|
121
|
+
*/
|
|
122
|
+
export function findColumnUsageInRouters(tableName: string): Map<string, ColumnUsage[]> {
|
|
123
|
+
const usage = new Map<string, ColumnUsage[]>();
|
|
124
|
+
const routersDir = getResolvedPaths().routersDir;
|
|
125
|
+
|
|
126
|
+
if (!existsSync(routersDir)) return usage;
|
|
127
|
+
|
|
128
|
+
scanDirectory(routersDir, tableName, usage);
|
|
129
|
+
return usage;
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
function scanDirectory(dir: string, tableName: string, usage: Map<string, ColumnUsage[]>): void {
|
|
133
|
+
const entries = readdirSync(dir, { withFileTypes: true });
|
|
134
|
+
for (const entry of entries) {
|
|
135
|
+
const fullPath = join(dir, entry.name);
|
|
136
|
+
if (entry.isDirectory()) {
|
|
137
|
+
scanDirectory(fullPath, tableName, usage);
|
|
138
|
+
} else if (entry.name.endsWith('.ts')) {
|
|
139
|
+
scanFile(fullPath, tableName, usage);
|
|
140
|
+
}
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
function scanFile(absPath: string, tableName: string, usage: Map<string, ColumnUsage[]>): void {
|
|
145
|
+
try {
|
|
146
|
+
const source = readFileSync(absPath, 'utf-8');
|
|
147
|
+
|
|
148
|
+
// Check if this file references the table
|
|
149
|
+
if (!source.includes(tableName)) return;
|
|
150
|
+
|
|
151
|
+
const relPath = absPath.slice(getProjectRoot().length + 1);
|
|
152
|
+
const lines = source.split('\n');
|
|
153
|
+
|
|
154
|
+
for (let i = 0; i < lines.length; i++) {
|
|
155
|
+
const line = lines[i];
|
|
156
|
+
|
|
157
|
+
// Look for property access patterns: tableName.columnName or { columnName: ... } near table references
|
|
158
|
+
// Pattern 1: where: { columnName: value }
|
|
159
|
+
const whereMatch = line.match(/(\w+)\s*:\s*(?:\{|[^,}]+)/g);
|
|
160
|
+
if (whereMatch) {
|
|
161
|
+
for (const m of whereMatch) {
|
|
162
|
+
const colName = m.split(':')[0].trim();
|
|
163
|
+
if (colName && !['where', 'data', 'select', 'orderBy', 'include', 'const', 'let', 'return', 'if', 'else', 'async', 'await'].includes(colName)) {
|
|
164
|
+
if (!usage.has(colName)) usage.set(colName, []);
|
|
165
|
+
usage.get(colName)!.push({ file: relPath, line: i + 1, usage: line.trim() });
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
} catch {
|
|
171
|
+
// Skip unreadable
|
|
172
|
+
}
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
/**
|
|
176
|
+
* Detect column name mismatches between code and schema.
|
|
177
|
+
*/
|
|
178
|
+
export function detectMismatches(models: SchemaModel[]): SchemaMismatch[] {
|
|
179
|
+
const mismatches: SchemaMismatch[] = [];
|
|
180
|
+
|
|
181
|
+
const knownMismatches = getConfig().knownMismatches ?? {};
|
|
182
|
+
|
|
183
|
+
for (const [tableName, wrongColumns] of Object.entries(knownMismatches)) {
|
|
184
|
+
const model = models.find(m => m.tableName === tableName);
|
|
185
|
+
if (!model) continue;
|
|
186
|
+
|
|
187
|
+
const actualColumnNames = model.fields.map(f => f.name);
|
|
188
|
+
|
|
189
|
+
for (const [wrongCol, correctCol] of Object.entries(wrongColumns)) {
|
|
190
|
+
// Search for the wrong column name in code
|
|
191
|
+
const routersDir = getResolvedPaths().routersDir;
|
|
192
|
+
const files = findFilesUsingColumn(routersDir, wrongCol, tableName);
|
|
193
|
+
|
|
194
|
+
if (files.length > 0) {
|
|
195
|
+
mismatches.push({
|
|
196
|
+
table: tableName,
|
|
197
|
+
codeColumn: wrongCol,
|
|
198
|
+
actualColumns: actualColumnNames,
|
|
199
|
+
files,
|
|
200
|
+
severity: 'CRITICAL',
|
|
201
|
+
});
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
}
|
|
205
|
+
|
|
206
|
+
return mismatches;
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
function findFilesUsingColumn(dir: string, column: string, tableName: string): string[] {
|
|
210
|
+
const result: string[] = [];
|
|
211
|
+
if (!existsSync(dir)) return result;
|
|
212
|
+
|
|
213
|
+
const entries = readdirSync(dir, { withFileTypes: true });
|
|
214
|
+
for (const entry of entries) {
|
|
215
|
+
const fullPath = join(dir, entry.name);
|
|
216
|
+
if (entry.isDirectory()) {
|
|
217
|
+
result.push(...findFilesUsingColumn(fullPath, column, tableName));
|
|
218
|
+
} else if (entry.name.endsWith('.ts')) {
|
|
219
|
+
try {
|
|
220
|
+
const source = readFileSync(fullPath, 'utf-8');
|
|
221
|
+
// Only flag if both the table name and wrong column are in the file
|
|
222
|
+
if (source.includes(tableName) && source.includes(column)) {
|
|
223
|
+
result.push(fullPath.slice(getProjectRoot().length + 1));
|
|
224
|
+
}
|
|
225
|
+
} catch {
|
|
226
|
+
// Skip
|
|
227
|
+
}
|
|
228
|
+
}
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
return result;
|
|
232
|
+
}
|
|
@@ -0,0 +1,405 @@
|
|
|
1
|
+
// Copyright (c) 2026 Massu. All rights reserved.
|
|
2
|
+
// Licensed under BSL 1.1 - see LICENSE file for details.
|
|
3
|
+
|
|
4
|
+
import type Database from 'better-sqlite3';
|
|
5
|
+
import type { ToolDefinition, ToolResult } from './tools.ts';
|
|
6
|
+
import { getConfig } from './config.ts';
|
|
7
|
+
import { existsSync, readFileSync } from 'fs';
|
|
8
|
+
import { ensureWithinRoot, enforceSeverityFloors } from './security-utils.ts';
|
|
9
|
+
|
|
10
|
+
// ============================================================
|
|
11
|
+
// Security Risk Scoring
|
|
12
|
+
// ============================================================
|
|
13
|
+
|
|
14
|
+
/** Prefix a base tool name with the configured tool prefix. */
|
|
15
|
+
function p(baseName: string): string {
|
|
16
|
+
return `${getConfig().toolPrefix}_${baseName}`;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
/** A single security issue detected in a scanned file. */
export interface SecurityFinding {
  // Truncated source of the regex that fired (first 50 chars), or a symbolic
  // name such as "path_traversal" for file-level findings
  pattern: string;
  severity: 'critical' | 'high' | 'medium' | 'low';
  // 1-based line number; 0 for file-level findings
  line: number;
  description: string;
}

/** A detection rule: a regex applied line-by-line, optionally gated by file name. */
interface SecurityPattern {
  regex: RegExp;
  severity: SecurityFinding['severity'];
  description: string;
  // When set, the rule only applies to files whose path matches this regex
  fileFilter?: RegExp;
}
|
|
32
|
+
|
|
33
|
+
/**
 * Default security patterns, applied line-by-line in scoreFileSecurity.
 * NOTE(review): the original note said these are configurable via
 * security.patterns, but only severity weights are read from config in this
 * file — confirm whether pattern overrides are applied elsewhere.
 */
const DEFAULT_SECURITY_PATTERNS: SecurityPattern[] = [
  {
    regex: /\bexec\s*\(\s*[`"'].*\$\{/,
    severity: 'critical',
    description: 'Potential command injection via template literal in exec()',
  },
  {
    regex: /publicProcedure\s*\.\s*mutation/,
    severity: 'critical',
    description: 'Mutation without authentication (publicProcedure)',
    fileFilter: /\.(ts|tsx)$/,
  },
  {
    regex: /(password|secret|token|api_key)\s*[:=]\s*['"][^'"]{8,}['"]/i,
    severity: 'critical',
    description: 'Hardcoded credential or secret',
  },
  {
    regex: /\bdangerouslySetInnerHTML\b/,
    severity: 'high',
    description: 'XSS risk via dangerouslySetInnerHTML',
    fileFilter: /\.tsx$/,
  },
  {
    regex: /\.raw\s*\(`/,
    severity: 'high',
    description: 'Raw SQL query with template literal (SQL injection risk)',
  },
  {
    regex: /eval\s*\(/,
    severity: 'high',
    description: 'Use of eval() - code injection risk',
  },
  {
    regex: /process\.env\.\w+.*\bconsole\.(log|info|debug)/,
    severity: 'medium',
    description: 'Environment variable logged to console',
  },
  {
    regex: /catch\s*\([^)]*\)\s*\{[^}]*res\.(json|send)\([^)]*err/,
    severity: 'medium',
    description: 'Error details exposed in response',
  },
  {
    regex: /Access-Control-Allow-Origin.*\*/,
    severity: 'medium',
    description: 'Overly permissive CORS (allows all origins)',
  },
  {
    regex: /new\s+URL\s*\(\s*(?:req|input|params|query)/,
    severity: 'medium',
    description: 'URL constructed from user input (SSRF risk)',
  },
  {
    regex: /JSON\.parse\s*\(\s*(?:req|input|body|params)/,
    severity: 'low',
    description: 'JSON.parse on user input without try/catch',
  },
  {
    regex: /prototype\s*:/,
    severity: 'high',
    description: 'Prototype key in object literal (prototype pollution risk)',
  },
];
|
|
98
|
+
|
|
99
|
+
/** Default severity weights. Configurable via security.severity_weights */
// Each finding contributes its severity's weight to the file risk score,
// which scoreFileSecurity caps at 100.
const DEFAULT_SEVERITY_WEIGHTS: Record<string, number> = {
  critical: 25,
  high: 15,
  medium: 8,
  low: 3,
};
|
|
106
|
+
|
|
107
|
+
/**
|
|
108
|
+
* Get severity weights from config or defaults.
|
|
109
|
+
*/
|
|
110
|
+
function getSeverityWeights(): Record<string, number> {
|
|
111
|
+
const configWeights = getConfig().security?.severity_weights;
|
|
112
|
+
if (!configWeights) return DEFAULT_SEVERITY_WEIGHTS;
|
|
113
|
+
return enforceSeverityFloors(configWeights, DEFAULT_SEVERITY_WEIGHTS);
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
/**
|
|
117
|
+
* Score security risk for a file.
|
|
118
|
+
* Returns 0 (safe) to 100 (critical risk).
|
|
119
|
+
*/
|
|
120
|
+
export function scoreFileSecurity(filePath: string, projectRoot: string): {
|
|
121
|
+
riskScore: number;
|
|
122
|
+
findings: SecurityFinding[];
|
|
123
|
+
} {
|
|
124
|
+
let absPath: string;
|
|
125
|
+
try {
|
|
126
|
+
absPath = ensureWithinRoot(filePath, projectRoot);
|
|
127
|
+
} catch {
|
|
128
|
+
return {
|
|
129
|
+
riskScore: 100,
|
|
130
|
+
findings: [{
|
|
131
|
+
pattern: 'path_traversal',
|
|
132
|
+
severity: 'critical',
|
|
133
|
+
line: 0,
|
|
134
|
+
description: `Path traversal blocked: "${filePath}" resolves outside project root`,
|
|
135
|
+
}],
|
|
136
|
+
};
|
|
137
|
+
}
|
|
138
|
+
if (!existsSync(absPath)) {
|
|
139
|
+
return { riskScore: 0, findings: [] };
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
let source: string;
|
|
143
|
+
try {
|
|
144
|
+
source = readFileSync(absPath, 'utf-8');
|
|
145
|
+
} catch {
|
|
146
|
+
return { riskScore: 0, findings: [] };
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
const findings: SecurityFinding[] = [];
|
|
150
|
+
const lines = source.split('\n');
|
|
151
|
+
|
|
152
|
+
for (let i = 0; i < lines.length; i++) {
|
|
153
|
+
const line = lines[i];
|
|
154
|
+
for (const pattern of DEFAULT_SECURITY_PATTERNS) {
|
|
155
|
+
if (pattern.fileFilter && !pattern.fileFilter.test(filePath)) continue;
|
|
156
|
+
if (pattern.regex.test(line)) {
|
|
157
|
+
findings.push({
|
|
158
|
+
pattern: pattern.regex.source.slice(0, 50),
|
|
159
|
+
severity: pattern.severity,
|
|
160
|
+
line: i + 1,
|
|
161
|
+
description: pattern.description,
|
|
162
|
+
});
|
|
163
|
+
}
|
|
164
|
+
}
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
// Calculate risk score
|
|
168
|
+
const severityWeights = getSeverityWeights();
|
|
169
|
+
let riskScore = 0;
|
|
170
|
+
|
|
171
|
+
for (const finding of findings) {
|
|
172
|
+
riskScore += severityWeights[finding.severity] ?? 0;
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
return {
|
|
176
|
+
riskScore: Math.min(100, riskScore),
|
|
177
|
+
findings,
|
|
178
|
+
};
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
/**
|
|
182
|
+
* Store security score for a file.
|
|
183
|
+
*/
|
|
184
|
+
export function storeSecurityScore(
|
|
185
|
+
db: Database.Database,
|
|
186
|
+
sessionId: string,
|
|
187
|
+
filePath: string,
|
|
188
|
+
riskScore: number,
|
|
189
|
+
findings: SecurityFinding[]
|
|
190
|
+
): void {
|
|
191
|
+
db.prepare(`
|
|
192
|
+
INSERT INTO security_scores
|
|
193
|
+
(session_id, file_path, risk_score, findings)
|
|
194
|
+
VALUES (?, ?, ?, ?)
|
|
195
|
+
`).run(sessionId, filePath, riskScore, JSON.stringify(findings));
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
// ============================================================
|
|
199
|
+
// MCP Tool Definitions & Handlers
|
|
200
|
+
// ============================================================
|
|
201
|
+
|
|
202
|
+
/**
 * MCP tool definitions exposed by this module: security_score,
 * security_heatmap, and security_trend (names prefixed via p()).
 * Every input property is optional; the handlers fall back to usage
 * messages when arguments are missing.
 */
export function getSecurityToolDefinitions(): ToolDefinition[] {
  return [
    {
      name: p('security_score'),
      description: 'Security risk score for a file. Detects SQL injection, XSS, hardcoded secrets, auth gaps, and more.',
      inputSchema: {
        type: 'object',
        properties: {
          file_path: { type: 'string', description: 'File path relative to project root' },
          session_id: { type: 'string', description: 'Get scores for an entire session' },
        },
        required: [],
      },
    },
    {
      name: p('security_heatmap'),
      description: 'Security risk heat map. Files ranked by risk score with summary findings.',
      inputSchema: {
        type: 'object',
        properties: {
          threshold: { type: 'number', description: 'Show files above this risk score (default: 30)' },
        },
        required: [],
      },
    },
    {
      name: p('security_trend'),
      description: 'Security posture over time. Average risk scores and most improved/degraded areas.',
      inputSchema: {
        type: 'object',
        properties: {
          days: { type: 'number', description: 'Days to look back (default: 30)' },
        },
        required: [],
      },
    },
  ];
}
|
|
240
|
+
|
|
241
|
+
const SECURITY_BASE_NAMES = new Set(['security_score', 'security_heatmap', 'security_trend']);
|
|
242
|
+
|
|
243
|
+
export function isSecurityTool(name: string): boolean {
|
|
244
|
+
const pfx = getConfig().toolPrefix + '_';
|
|
245
|
+
const baseName = name.startsWith(pfx) ? name.slice(pfx.length) : name;
|
|
246
|
+
return SECURITY_BASE_NAMES.has(baseName);
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
export function handleSecurityToolCall(
|
|
250
|
+
name: string,
|
|
251
|
+
args: Record<string, unknown>,
|
|
252
|
+
memoryDb: Database.Database
|
|
253
|
+
): ToolResult {
|
|
254
|
+
try {
|
|
255
|
+
const pfx = getConfig().toolPrefix + '_';
|
|
256
|
+
const baseName = name.startsWith(pfx) ? name.slice(pfx.length) : name;
|
|
257
|
+
|
|
258
|
+
switch (baseName) {
|
|
259
|
+
case 'security_score':
|
|
260
|
+
return handleSecurityScore(args, memoryDb);
|
|
261
|
+
case 'security_heatmap':
|
|
262
|
+
return handleSecurityHeatmap(args, memoryDb);
|
|
263
|
+
case 'security_trend':
|
|
264
|
+
return handleSecurityTrend(args, memoryDb);
|
|
265
|
+
default:
|
|
266
|
+
return text(`Unknown security tool: ${name}`);
|
|
267
|
+
}
|
|
268
|
+
} catch (error) {
|
|
269
|
+
return text(`Error in ${name}: ${error instanceof Error ? error.message : String(error)}\n\nUsage: ${p('security_score')} { file_path: "src/..." }, ${p('security_heatmap')} { threshold: 30 }`);
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
|
|
273
|
+
/**
 * Handle the security_score tool.
 *
 * Modes:
 * - { file_path }: scan the file now, store the result under the most recent
 *   active session (if any), and render score + findings.
 * - { session_id }: list previously stored scores for that session.
 * - neither: return a usage message.
 */
function handleSecurityScore(args: Record<string, unknown>, db: Database.Database): ToolResult {
  const filePath = args.file_path as string | undefined;
  const sessionId = args.session_id as string | undefined;

  if (filePath) {
    const config = getConfig();
    const { riskScore, findings } = scoreFileSecurity(filePath, config.project.root);

    // Store the result against the newest active session, when one exists
    const session = db.prepare(
      "SELECT session_id FROM sessions WHERE status = 'active' ORDER BY started_at_epoch DESC LIMIT 1"
    ).get() as { session_id: string } | undefined;
    if (session) {
      storeSecurityScore(db, session.session_id, filePath, riskScore, findings);
    }

    // Risk bands: 0 clean, <30 low, <60 medium, otherwise high
    const lines = [
      `## Security Score: ${filePath}`,
      `Risk: **${riskScore}/100** ${riskScore === 0 ? '(clean)' : riskScore < 30 ? '(low risk)' : riskScore < 60 ? '(medium risk)' : '(HIGH RISK)'}`,
      '',
    ];

    if (findings.length > 0) {
      lines.push('### Findings');
      for (const f of findings) {
        lines.push(`- **[${f.severity.toUpperCase()}]** L${f.line}: ${f.description}`);
      }
    } else {
      lines.push(`No security findings detected (checked ${DEFAULT_SECURITY_PATTERNS.length} patterns including command injection, XSS, hardcoded secrets, and auth gaps).`);
    }

    return text(lines.join('\n'));
  }

  if (sessionId) {
    const scores = db.prepare(`
      SELECT file_path, risk_score, findings FROM security_scores
      WHERE session_id = ?
      ORDER BY risk_score DESC
    `).all(sessionId) as Array<Record<string, unknown>>;

    if (scores.length === 0) {
      return text(`No security scores for session ${sessionId.slice(0, 8)}... Security scores are generated when files are scanned. Try: ${p('security_score')} { file_path: "src/server/api/routers/example.ts" } to scan a file, or ${p('security_heatmap')} {} to see all scanned files.`);
    }

    const lines = [
      `## Security Scores for Session ${sessionId.slice(0, 12)}...`,
      '',
      '| File | Risk Score | Findings |',
      '|------|-----------|----------|',
    ];

    for (const s of scores) {
      // The findings column holds the JSON array written by storeSecurityScore
      const findingCount = JSON.parse(s.findings as string).length;
      lines.push(`| ${s.file_path} | ${s.risk_score} | ${findingCount} |`);
    }

    return text(lines.join('\n'));
  }

  return text(`Usage: ${p('security_score')} { file_path: "src/server/api/routers/example.ts" } to scan a file, or ${p('security_score')} { session_id: "..." } to see all scores for a session.`);
}
|
|
335
|
+
|
|
336
|
+
function handleSecurityHeatmap(args: Record<string, unknown>, db: Database.Database): ToolResult {
|
|
337
|
+
const threshold = (args.threshold as number) ?? 30;
|
|
338
|
+
|
|
339
|
+
const files = db.prepare(`
|
|
340
|
+
SELECT file_path, MAX(risk_score) as max_risk, COUNT(*) as scan_count
|
|
341
|
+
FROM security_scores
|
|
342
|
+
GROUP BY file_path
|
|
343
|
+
HAVING max_risk >= ?
|
|
344
|
+
ORDER BY max_risk DESC
|
|
345
|
+
LIMIT 50
|
|
346
|
+
`).all(threshold) as Array<Record<string, unknown>>;
|
|
347
|
+
|
|
348
|
+
if (files.length === 0) {
|
|
349
|
+
return text(`No files with risk score >= ${threshold}. ${threshold > 0 ? `Try lowering the threshold or scan files with ${p('security_score')} { file_path: "..." }.` : 'No security scans recorded yet. Scan files to build the heat map.'}`);
|
|
350
|
+
}
|
|
351
|
+
|
|
352
|
+
const lines = [
|
|
353
|
+
`## Security Heat Map (threshold: ${threshold})`,
|
|
354
|
+
`Files at risk: ${files.length}`,
|
|
355
|
+
'',
|
|
356
|
+
'| Risk | File | Scans |',
|
|
357
|
+
'|------|------|-------|',
|
|
358
|
+
];
|
|
359
|
+
|
|
360
|
+
for (const f of files) {
|
|
361
|
+
const risk = f.max_risk as number;
|
|
362
|
+
const indicator = risk >= 60 ? 'HIGH' : risk >= 30 ? 'MEDIUM' : 'LOW';
|
|
363
|
+
lines.push(`| ${risk} [${indicator}] | ${f.file_path} | ${f.scan_count} |`);
|
|
364
|
+
}
|
|
365
|
+
|
|
366
|
+
return text(lines.join('\n'));
|
|
367
|
+
}
|
|
368
|
+
|
|
369
|
+
function handleSecurityTrend(args: Record<string, unknown>, db: Database.Database): ToolResult {
|
|
370
|
+
const days = (args.days as number) ?? 30;
|
|
371
|
+
|
|
372
|
+
const rows = db.prepare(`
|
|
373
|
+
SELECT date(created_at) as day,
|
|
374
|
+
AVG(risk_score) as avg_risk,
|
|
375
|
+
MAX(risk_score) as max_risk,
|
|
376
|
+
COUNT(*) as files_scanned
|
|
377
|
+
FROM security_scores
|
|
378
|
+
WHERE created_at >= datetime('now', ?)
|
|
379
|
+
GROUP BY date(created_at)
|
|
380
|
+
ORDER BY day ASC
|
|
381
|
+
`).all(`-${days} days`) as Array<Record<string, unknown>>;
|
|
382
|
+
|
|
383
|
+
if (rows.length === 0) {
|
|
384
|
+
return text(`No security scan data in the last ${days} days. Security trends build as files are scanned across sessions. Try: ${p('security_score')} { file_path: "src/server/api/routers/example.ts" } to scan a file, or try a longer time range with { days: 90 }.`);
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
const lines = [
|
|
388
|
+
`## Security Trend (${days} days)`,
|
|
389
|
+
'',
|
|
390
|
+
'| Date | Avg Risk | Max Risk | Files Scanned |',
|
|
391
|
+
'|------|----------|----------|---------------|',
|
|
392
|
+
];
|
|
393
|
+
|
|
394
|
+
for (const row of rows) {
|
|
395
|
+
lines.push(
|
|
396
|
+
`| ${row.day} | ${(row.avg_risk as number).toFixed(1)} | ${row.max_risk} | ${row.files_scanned} |`
|
|
397
|
+
);
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
return text(lines.join('\n'));
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
function text(content: string): ToolResult {
|
|
404
|
+
return { content: [{ type: 'text', text: content }] };
|
|
405
|
+
}
|