driftdetect-mcp 0.7.1 โ 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bin/server.js +8 -1
- package/dist/bin/server.js.map +1 -1
- package/dist/enterprise-server.d.ts +4 -0
- package/dist/enterprise-server.d.ts.map +1 -1
- package/dist/enterprise-server.js +32 -2
- package/dist/enterprise-server.js.map +1 -1
- package/dist/infrastructure/index.d.ts +1 -0
- package/dist/infrastructure/index.d.ts.map +1 -1
- package/dist/infrastructure/index.js +2 -0
- package/dist/infrastructure/index.js.map +1 -1
- package/dist/infrastructure/startup-warmer.d.ts +65 -0
- package/dist/infrastructure/startup-warmer.d.ts.map +1 -0
- package/dist/infrastructure/startup-warmer.js +257 -0
- package/dist/infrastructure/startup-warmer.js.map +1 -0
- package/dist/tools/analysis/coupling.js +181 -63
- package/dist/tools/analysis/coupling.js.map +1 -1
- package/dist/tools/analysis/cpp.d.ts +51 -0
- package/dist/tools/analysis/cpp.d.ts.map +1 -0
- package/dist/tools/analysis/cpp.js +224 -0
- package/dist/tools/analysis/cpp.js.map +1 -0
- package/dist/tools/analysis/error-handling.js +130 -37
- package/dist/tools/analysis/error-handling.js.map +1 -1
- package/dist/tools/analysis/index.d.ts +3 -0
- package/dist/tools/analysis/index.d.ts.map +1 -1
- package/dist/tools/analysis/index.js +100 -0
- package/dist/tools/analysis/index.js.map +1 -1
- package/dist/tools/analysis/quality-gate.d.ts +78 -0
- package/dist/tools/analysis/quality-gate.d.ts.map +1 -0
- package/dist/tools/analysis/quality-gate.js +164 -0
- package/dist/tools/analysis/quality-gate.js.map +1 -0
- package/dist/tools/analysis/rust.d.ts +51 -0
- package/dist/tools/analysis/rust.d.ts.map +1 -0
- package/dist/tools/analysis/rust.js +253 -0
- package/dist/tools/analysis/rust.js.map +1 -0
- package/dist/tools/analysis/simulate.d.ts +1 -0
- package/dist/tools/analysis/simulate.d.ts.map +1 -1
- package/dist/tools/analysis/simulate.js +78 -74
- package/dist/tools/analysis/simulate.js.map +1 -1
- package/dist/tools/analysis/test-topology.js +117 -44
- package/dist/tools/analysis/test-topology.js.map +1 -1
- package/dist/tools/detail/reachability.d.ts +2 -0
- package/dist/tools/detail/reachability.d.ts.map +1 -1
- package/dist/tools/detail/reachability.js +42 -32
- package/dist/tools/detail/reachability.js.map +1 -1
- package/dist/tools/orchestration/index.d.ts +2 -0
- package/dist/tools/orchestration/index.d.ts.map +1 -1
- package/dist/tools/orchestration/index.js +3 -0
- package/dist/tools/orchestration/index.js.map +1 -1
- package/dist/tools/orchestration/package-context.d.ts +134 -0
- package/dist/tools/orchestration/package-context.d.ts.map +1 -0
- package/dist/tools/orchestration/package-context.js +261 -0
- package/dist/tools/orchestration/package-context.js.map +1 -0
- package/package.json +12 -12
- package/LICENSE +0 -21
- package/dist/server.d.ts +0 -11
- package/dist/server.d.ts.map +0 -1
- package/dist/server.js +0 -3177
- package/dist/server.js.map +0 -1
- package/dist/tools/detail/dna-check.d.ts +0 -32
- package/dist/tools/detail/dna-check.d.ts.map +0 -1
- package/dist/tools/detail/dna-check.js +0 -231
- package/dist/tools/detail/dna-check.js.map +0 -1
- package/dist/tools/exploration/files-list.d.ts +0 -29
- package/dist/tools/exploration/files-list.d.ts.map +0 -1
- package/dist/tools/exploration/files-list.js +0 -94
- package/dist/tools/exploration/files-list.js.map +0 -1
package/dist/server.js
DELETED
|
@@ -1,3177 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Drift MCP Server Implementation
|
|
3
|
-
*
|
|
4
|
-
* Provides structured access to drift functionality for AI agents.
|
|
5
|
-
*/
|
|
6
|
-
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
|
|
7
|
-
import { CallToolRequestSchema, ListToolsRequestSchema, } from '@modelcontextprotocol/sdk/types.js';
|
|
8
|
-
import { PatternStore, ManifestStore, HistoryStore, DNAStore, PlaybookGenerator, AIContextBuilder, GENE_IDS, BoundaryStore, createCallGraphAnalyzer, createBoundaryScanner, createSemanticDataAccessScanner, createImpactAnalyzer, createDeadCodeDetector, createCoverageAnalyzer, } from 'driftdetect-core';
|
|
9
|
-
import { PackManager } from './packs.js';
|
|
10
|
-
import { FeedbackManager } from './feedback.js';
|
|
11
|
-
import { handleProjects } from './tools/discovery/projects.js';
|
|
12
|
-
const PATTERN_CATEGORIES = [
|
|
13
|
-
'structural', 'components', 'styling', 'api', 'auth', 'errors',
|
|
14
|
-
'data-access', 'testing', 'logging', 'security', 'config',
|
|
15
|
-
'types', 'performance', 'accessibility', 'documentation',
|
|
16
|
-
];
|
|
17
|
-
const TOOLS = [
|
|
18
|
-
{
|
|
19
|
-
name: 'drift_status',
|
|
20
|
-
description: 'Get overall codebase pattern health and statistics. Use this first to understand what patterns drift has learned.',
|
|
21
|
-
inputSchema: {
|
|
22
|
-
type: 'object',
|
|
23
|
-
properties: {},
|
|
24
|
-
required: [],
|
|
25
|
-
},
|
|
26
|
-
},
|
|
27
|
-
{
|
|
28
|
-
name: 'drift_patterns',
|
|
29
|
-
description: 'Get patterns for specific categories. Returns learned patterns with confidence scores and locations.',
|
|
30
|
-
inputSchema: {
|
|
31
|
-
type: 'object',
|
|
32
|
-
properties: {
|
|
33
|
-
categories: {
|
|
34
|
-
type: 'array',
|
|
35
|
-
items: { type: 'string' },
|
|
36
|
-
description: `Categories to query. Valid: ${PATTERN_CATEGORIES.join(', ')}`,
|
|
37
|
-
},
|
|
38
|
-
minConfidence: {
|
|
39
|
-
type: 'number',
|
|
40
|
-
description: 'Minimum confidence score (0.0-1.0)',
|
|
41
|
-
},
|
|
42
|
-
limit: {
|
|
43
|
-
type: 'number',
|
|
44
|
-
description: 'Maximum patterns to return (default: 30)',
|
|
45
|
-
},
|
|
46
|
-
includeExamples: {
|
|
47
|
-
type: 'boolean',
|
|
48
|
-
description: 'Include example locations (default: true, set false for compact output)',
|
|
49
|
-
},
|
|
50
|
-
},
|
|
51
|
-
required: [],
|
|
52
|
-
},
|
|
53
|
-
},
|
|
54
|
-
{
|
|
55
|
-
name: 'drift_files',
|
|
56
|
-
description: 'Get patterns found in a specific file or file pattern (glob supported).',
|
|
57
|
-
inputSchema: {
|
|
58
|
-
type: 'object',
|
|
59
|
-
properties: {
|
|
60
|
-
path: {
|
|
61
|
-
type: 'string',
|
|
62
|
-
description: 'File path or glob pattern (e.g., "src/api/*.ts")',
|
|
63
|
-
},
|
|
64
|
-
category: {
|
|
65
|
-
type: 'string',
|
|
66
|
-
description: 'Filter by category',
|
|
67
|
-
},
|
|
68
|
-
limit: {
|
|
69
|
-
type: 'number',
|
|
70
|
-
description: 'Maximum patterns to return (default: 20)',
|
|
71
|
-
},
|
|
72
|
-
offset: {
|
|
73
|
-
type: 'number',
|
|
74
|
-
description: 'Skip first N patterns for pagination (default: 0)',
|
|
75
|
-
},
|
|
76
|
-
compact: {
|
|
77
|
-
type: 'boolean',
|
|
78
|
-
description: 'Return summary only without full pattern details (default: false)',
|
|
79
|
-
},
|
|
80
|
-
},
|
|
81
|
-
required: ['path'],
|
|
82
|
-
},
|
|
83
|
-
},
|
|
84
|
-
{
|
|
85
|
-
name: 'drift_where',
|
|
86
|
-
description: 'Find where a pattern is used across the codebase.',
|
|
87
|
-
inputSchema: {
|
|
88
|
-
type: 'object',
|
|
89
|
-
properties: {
|
|
90
|
-
pattern: {
|
|
91
|
-
type: 'string',
|
|
92
|
-
description: 'Pattern name or ID to search for',
|
|
93
|
-
},
|
|
94
|
-
category: {
|
|
95
|
-
type: 'string',
|
|
96
|
-
description: 'Filter by category',
|
|
97
|
-
},
|
|
98
|
-
limit: {
|
|
99
|
-
type: 'number',
|
|
100
|
-
description: 'Maximum patterns to return (default: 10)',
|
|
101
|
-
},
|
|
102
|
-
maxLocations: {
|
|
103
|
-
type: 'number',
|
|
104
|
-
description: 'Maximum locations per pattern (default: 5)',
|
|
105
|
-
},
|
|
106
|
-
},
|
|
107
|
-
required: ['pattern'],
|
|
108
|
-
},
|
|
109
|
-
},
|
|
110
|
-
{
|
|
111
|
-
name: 'drift_export',
|
|
112
|
-
description: 'Export patterns in AI-optimized format. Use this to get full context for code generation.',
|
|
113
|
-
inputSchema: {
|
|
114
|
-
type: 'object',
|
|
115
|
-
properties: {
|
|
116
|
-
categories: {
|
|
117
|
-
type: 'array',
|
|
118
|
-
items: { type: 'string' },
|
|
119
|
-
description: 'Categories to export (defaults to all)',
|
|
120
|
-
},
|
|
121
|
-
format: {
|
|
122
|
-
type: 'string',
|
|
123
|
-
enum: ['ai-context', 'json', 'summary'],
|
|
124
|
-
description: 'Export format (default: ai-context)',
|
|
125
|
-
},
|
|
126
|
-
compact: {
|
|
127
|
-
type: 'boolean',
|
|
128
|
-
description: 'Compact output with fewer details',
|
|
129
|
-
},
|
|
130
|
-
},
|
|
131
|
-
required: [],
|
|
132
|
-
},
|
|
133
|
-
},
|
|
134
|
-
{
|
|
135
|
-
name: 'drift_contracts',
|
|
136
|
-
description: 'Get frontend/backend API contract status. Shows mismatches between API calls and endpoints.',
|
|
137
|
-
inputSchema: {
|
|
138
|
-
type: 'object',
|
|
139
|
-
properties: {
|
|
140
|
-
status: {
|
|
141
|
-
type: 'string',
|
|
142
|
-
enum: ['all', 'verified', 'mismatch', 'discovered'],
|
|
143
|
-
description: 'Filter by contract status',
|
|
144
|
-
},
|
|
145
|
-
},
|
|
146
|
-
required: [],
|
|
147
|
-
},
|
|
148
|
-
},
|
|
149
|
-
{
|
|
150
|
-
name: 'drift_examples',
|
|
151
|
-
description: 'Get actual code examples for patterns. Returns real code snippets from the codebase that demonstrate how patterns are implemented. Use this to understand HOW to implement patterns. Automatically filters out documentation, config files, and deprecated code.',
|
|
152
|
-
inputSchema: {
|
|
153
|
-
type: 'object',
|
|
154
|
-
properties: {
|
|
155
|
-
categories: {
|
|
156
|
-
type: 'array',
|
|
157
|
-
items: { type: 'string' },
|
|
158
|
-
description: `Categories to get examples for. Valid: ${PATTERN_CATEGORIES.join(', ')}`,
|
|
159
|
-
},
|
|
160
|
-
pattern: {
|
|
161
|
-
type: 'string',
|
|
162
|
-
description: 'Specific pattern name or ID to get examples for',
|
|
163
|
-
},
|
|
164
|
-
maxExamples: {
|
|
165
|
-
type: 'number',
|
|
166
|
-
description: 'Maximum examples per pattern (default: 3)',
|
|
167
|
-
},
|
|
168
|
-
contextLines: {
|
|
169
|
-
type: 'number',
|
|
170
|
-
description: 'Lines of context around each match (default: 10)',
|
|
171
|
-
},
|
|
172
|
-
includeDeprecated: {
|
|
173
|
-
type: 'boolean',
|
|
174
|
-
description: 'Include deprecated/legacy code examples (default: false)',
|
|
175
|
-
},
|
|
176
|
-
},
|
|
177
|
-
required: [],
|
|
178
|
-
},
|
|
179
|
-
},
|
|
180
|
-
{
|
|
181
|
-
name: 'drift_pack',
|
|
182
|
-
description: 'Get a pre-defined pattern pack for common development tasks. Packs are cached and auto-invalidate when patterns change. Also supports learning new packs from usage patterns.',
|
|
183
|
-
inputSchema: {
|
|
184
|
-
type: 'object',
|
|
185
|
-
properties: {
|
|
186
|
-
action: {
|
|
187
|
-
type: 'string',
|
|
188
|
-
enum: ['get', 'list', 'suggest', 'create', 'delete', 'infer'],
|
|
189
|
-
description: 'Action to perform (default: "get"). "suggest" shows packs based on usage, "infer" suggests from file structure, "create" saves a custom pack, "delete" removes a custom pack',
|
|
190
|
-
},
|
|
191
|
-
name: {
|
|
192
|
-
type: 'string',
|
|
193
|
-
description: 'Pack name for get/create/delete actions',
|
|
194
|
-
},
|
|
195
|
-
refresh: {
|
|
196
|
-
type: 'boolean',
|
|
197
|
-
description: 'Force regenerate the pack even if cached (default: false)',
|
|
198
|
-
},
|
|
199
|
-
list: {
|
|
200
|
-
type: 'boolean',
|
|
201
|
-
description: '[DEPRECATED: use action="list"] List all available packs',
|
|
202
|
-
},
|
|
203
|
-
// For create action
|
|
204
|
-
description: {
|
|
205
|
-
type: 'string',
|
|
206
|
-
description: 'Pack description (for create action)',
|
|
207
|
-
},
|
|
208
|
-
categories: {
|
|
209
|
-
type: 'array',
|
|
210
|
-
items: { type: 'string' },
|
|
211
|
-
description: 'Categories to include in pack (for create action)',
|
|
212
|
-
},
|
|
213
|
-
patterns: {
|
|
214
|
-
type: 'array',
|
|
215
|
-
items: { type: 'string' },
|
|
216
|
-
description: 'Pattern name filters (for create action)',
|
|
217
|
-
},
|
|
218
|
-
},
|
|
219
|
-
required: [],
|
|
220
|
-
},
|
|
221
|
-
},
|
|
222
|
-
{
|
|
223
|
-
name: 'drift_feedback',
|
|
224
|
-
description: 'Provide feedback on pattern examples to improve future suggestions. Good feedback helps drift learn which files produce useful examples.',
|
|
225
|
-
inputSchema: {
|
|
226
|
-
type: 'object',
|
|
227
|
-
properties: {
|
|
228
|
-
action: {
|
|
229
|
-
type: 'string',
|
|
230
|
-
enum: ['rate', 'stats', 'clear'],
|
|
231
|
-
description: 'Action: "rate" to rate an example, "stats" to see feedback statistics, "clear" to reset all feedback',
|
|
232
|
-
},
|
|
233
|
-
patternId: {
|
|
234
|
-
type: 'string',
|
|
235
|
-
description: 'Pattern ID (for rate action)',
|
|
236
|
-
},
|
|
237
|
-
patternName: {
|
|
238
|
-
type: 'string',
|
|
239
|
-
description: 'Pattern name (for rate action)',
|
|
240
|
-
},
|
|
241
|
-
category: {
|
|
242
|
-
type: 'string',
|
|
243
|
-
description: 'Pattern category (for rate action)',
|
|
244
|
-
},
|
|
245
|
-
file: {
|
|
246
|
-
type: 'string',
|
|
247
|
-
description: 'File path of the example (for rate action)',
|
|
248
|
-
},
|
|
249
|
-
line: {
|
|
250
|
-
type: 'number',
|
|
251
|
-
description: 'Line number of the example (for rate action)',
|
|
252
|
-
},
|
|
253
|
-
rating: {
|
|
254
|
-
type: 'string',
|
|
255
|
-
enum: ['good', 'bad', 'irrelevant'],
|
|
256
|
-
description: 'Rating: "good" = useful example, "bad" = wrong/misleading, "irrelevant" = not related to pattern',
|
|
257
|
-
},
|
|
258
|
-
reason: {
|
|
259
|
-
type: 'string',
|
|
260
|
-
description: 'Optional reason for the rating',
|
|
261
|
-
},
|
|
262
|
-
},
|
|
263
|
-
required: [],
|
|
264
|
-
},
|
|
265
|
-
},
|
|
266
|
-
{
|
|
267
|
-
name: 'drift_trends',
|
|
268
|
-
description: 'Get pattern trend analysis and regression alerts. Shows how patterns have changed over time. Use this to check codebase health before generating code or to identify patterns that need attention.',
|
|
269
|
-
inputSchema: {
|
|
270
|
-
type: 'object',
|
|
271
|
-
properties: {
|
|
272
|
-
period: {
|
|
273
|
-
type: 'string',
|
|
274
|
-
enum: ['7d', '30d', '90d'],
|
|
275
|
-
description: 'Time period to analyze (default: 7d)',
|
|
276
|
-
},
|
|
277
|
-
category: {
|
|
278
|
-
type: 'string',
|
|
279
|
-
description: `Filter trends by category. Valid: ${PATTERN_CATEGORIES.join(', ')}`,
|
|
280
|
-
},
|
|
281
|
-
severity: {
|
|
282
|
-
type: 'string',
|
|
283
|
-
enum: ['all', 'critical', 'warning'],
|
|
284
|
-
description: 'Filter by severity level (default: all)',
|
|
285
|
-
},
|
|
286
|
-
type: {
|
|
287
|
-
type: 'string',
|
|
288
|
-
enum: ['all', 'regressions', 'improvements'],
|
|
289
|
-
description: 'Filter by trend type (default: all)',
|
|
290
|
-
},
|
|
291
|
-
},
|
|
292
|
-
required: [],
|
|
293
|
-
},
|
|
294
|
-
},
|
|
295
|
-
{
|
|
296
|
-
name: 'drift_parser_info',
|
|
297
|
-
description: 'Get information about parser capabilities and status. Shows which parsers are available (tree-sitter vs regex), their features, and supported frameworks. Use this to understand parsing capabilities before analyzing Python, C#, TypeScript, or Java code.',
|
|
298
|
-
inputSchema: {
|
|
299
|
-
type: 'object',
|
|
300
|
-
properties: {
|
|
301
|
-
language: {
|
|
302
|
-
type: 'string',
|
|
303
|
-
enum: ['python', 'csharp', 'typescript', 'java', 'all'],
|
|
304
|
-
description: 'Language to get parser info for (default: all)',
|
|
305
|
-
},
|
|
306
|
-
},
|
|
307
|
-
required: [],
|
|
308
|
-
},
|
|
309
|
-
},
|
|
310
|
-
// DNA Tools
|
|
311
|
-
{
|
|
312
|
-
name: 'drift_dna',
|
|
313
|
-
description: 'Get the styling DNA profile for the codebase. Shows how components are styled (variants, responsive, states, theming, spacing, animation) with confidence scores and exemplar files.',
|
|
314
|
-
inputSchema: {
|
|
315
|
-
type: 'object',
|
|
316
|
-
properties: {
|
|
317
|
-
gene: {
|
|
318
|
-
type: 'string',
|
|
319
|
-
enum: ['variant-handling', 'responsive-approach', 'state-styling', 'theming', 'spacing-philosophy', 'animation-approach'],
|
|
320
|
-
description: 'Specific gene to query (optional)',
|
|
321
|
-
},
|
|
322
|
-
format: {
|
|
323
|
-
type: 'string',
|
|
324
|
-
enum: ['full', 'summary', 'ai-context'],
|
|
325
|
-
description: 'Output format (default: ai-context)',
|
|
326
|
-
},
|
|
327
|
-
level: {
|
|
328
|
-
type: 'number',
|
|
329
|
-
enum: [1, 2, 3, 4],
|
|
330
|
-
description: 'AI context detail level 1-4 (default: 3)',
|
|
331
|
-
},
|
|
332
|
-
},
|
|
333
|
-
required: [],
|
|
334
|
-
},
|
|
335
|
-
},
|
|
336
|
-
{
|
|
337
|
-
name: 'drift_playbook',
|
|
338
|
-
description: 'Generate or retrieve the styling playbook documentation. Contains conventions, code examples, and patterns to avoid for each styling concern.',
|
|
339
|
-
inputSchema: {
|
|
340
|
-
type: 'object',
|
|
341
|
-
properties: {
|
|
342
|
-
regenerate: {
|
|
343
|
-
type: 'boolean',
|
|
344
|
-
description: 'Force regeneration of playbook (default: false)',
|
|
345
|
-
},
|
|
346
|
-
section: {
|
|
347
|
-
type: 'string',
|
|
348
|
-
enum: ['variant-handling', 'responsive-approach', 'state-styling', 'theming', 'spacing-philosophy', 'animation-approach'],
|
|
349
|
-
description: 'Specific section to retrieve (optional)',
|
|
350
|
-
},
|
|
351
|
-
},
|
|
352
|
-
required: [],
|
|
353
|
-
},
|
|
354
|
-
},
|
|
355
|
-
{
|
|
356
|
-
name: 'drift_mutations',
|
|
357
|
-
description: 'Get files that deviate from established styling patterns. Mutations are styling inconsistencies that fragment AI context.',
|
|
358
|
-
inputSchema: {
|
|
359
|
-
type: 'object',
|
|
360
|
-
properties: {
|
|
361
|
-
gene: {
|
|
362
|
-
type: 'string',
|
|
363
|
-
enum: ['variant-handling', 'responsive-approach', 'state-styling', 'theming', 'spacing-philosophy', 'animation-approach'],
|
|
364
|
-
description: 'Filter by gene (optional)',
|
|
365
|
-
},
|
|
366
|
-
impact: {
|
|
367
|
-
type: 'string',
|
|
368
|
-
enum: ['low', 'medium', 'high'],
|
|
369
|
-
description: 'Filter by impact level (optional)',
|
|
370
|
-
},
|
|
371
|
-
suggest: {
|
|
372
|
-
type: 'boolean',
|
|
373
|
-
description: 'Include resolution suggestions (default: false)',
|
|
374
|
-
},
|
|
375
|
-
},
|
|
376
|
-
required: [],
|
|
377
|
-
},
|
|
378
|
-
},
|
|
379
|
-
{
|
|
380
|
-
name: 'drift_dna_check',
|
|
381
|
-
description: 'Check if code follows the established styling DNA. Use before generating component code to ensure consistency.',
|
|
382
|
-
inputSchema: {
|
|
383
|
-
type: 'object',
|
|
384
|
-
properties: {
|
|
385
|
-
code: {
|
|
386
|
-
type: 'string',
|
|
387
|
-
description: 'Code snippet to check against DNA',
|
|
388
|
-
},
|
|
389
|
-
file: {
|
|
390
|
-
type: 'string',
|
|
391
|
-
description: 'File path to check (alternative to code)',
|
|
392
|
-
},
|
|
393
|
-
genes: {
|
|
394
|
-
type: 'array',
|
|
395
|
-
items: { type: 'string' },
|
|
396
|
-
description: 'Specific genes to check (optional, defaults to all)',
|
|
397
|
-
},
|
|
398
|
-
},
|
|
399
|
-
required: [],
|
|
400
|
-
},
|
|
401
|
-
},
|
|
402
|
-
// Data Boundaries Tool
|
|
403
|
-
{
|
|
404
|
-
name: 'drift_boundaries',
|
|
405
|
-
description: 'Get data access boundaries and check for violations. Shows which code accesses which database tables/fields. Use this before generating data access code to understand access rules and restrictions.',
|
|
406
|
-
inputSchema: {
|
|
407
|
-
type: 'object',
|
|
408
|
-
properties: {
|
|
409
|
-
action: {
|
|
410
|
-
type: 'string',
|
|
411
|
-
enum: ['overview', 'table', 'file', 'sensitive', 'check', 'rules'],
|
|
412
|
-
description: 'Action to perform (default: overview). "overview" shows summary, "table" shows specific table access, "file" shows what data a file accesses, "sensitive" shows sensitive field access, "check" validates against rules, "rules" shows configured boundaries',
|
|
413
|
-
},
|
|
414
|
-
table: {
|
|
415
|
-
type: 'string',
|
|
416
|
-
description: 'Table name (required for "table" action)',
|
|
417
|
-
},
|
|
418
|
-
file: {
|
|
419
|
-
type: 'string',
|
|
420
|
-
description: 'File path or glob pattern (required for "file" action)',
|
|
421
|
-
},
|
|
422
|
-
includeViolations: {
|
|
423
|
-
type: 'boolean',
|
|
424
|
-
description: 'Include boundary violations in response (default: true)',
|
|
425
|
-
},
|
|
426
|
-
limit: {
|
|
427
|
-
type: 'number',
|
|
428
|
-
description: 'Maximum items per section (default: 10)',
|
|
429
|
-
},
|
|
430
|
-
},
|
|
431
|
-
required: [],
|
|
432
|
-
},
|
|
433
|
-
},
|
|
434
|
-
// Call Graph Tool
|
|
435
|
-
{
|
|
436
|
-
name: 'drift_callgraph',
|
|
437
|
-
description: 'Build and query call graphs for code reachability analysis. Answers "What data can this code access?", "Who can reach this data?", "What breaks if I change this?", "What code is never called?", and "Which sensitive data paths are tested?" Use this to understand the impact of security findings or code changes.',
|
|
438
|
-
inputSchema: {
|
|
439
|
-
type: 'object',
|
|
440
|
-
properties: {
|
|
441
|
-
action: {
|
|
442
|
-
type: 'string',
|
|
443
|
-
enum: ['status', 'build', 'reach', 'inverse', 'function', 'security', 'impact', 'dead', 'coverage'],
|
|
444
|
-
description: 'Action to perform (default: status). "status" shows overview, "build" builds the call graph, "reach" finds what data a location can access, "inverse" finds who can access specific data, "function" shows function details, "security" shows security-prioritized view (P0-P4 tiers), "impact" analyzes what breaks if you change a file or function, "dead" finds functions that are never called, "coverage" analyzes test coverage for sensitive data access paths',
|
|
445
|
-
},
|
|
446
|
-
location: {
|
|
447
|
-
type: 'string',
|
|
448
|
-
description: 'Code location for "reach" action (file:line or function_name)',
|
|
449
|
-
},
|
|
450
|
-
target: {
|
|
451
|
-
type: 'string',
|
|
452
|
-
description: 'Data target for "inverse" action (table or table.field), or file/function for "impact" action',
|
|
453
|
-
},
|
|
454
|
-
functionName: {
|
|
455
|
-
type: 'string',
|
|
456
|
-
description: 'Function name for "function" action',
|
|
457
|
-
},
|
|
458
|
-
maxDepth: {
|
|
459
|
-
type: 'number',
|
|
460
|
-
description: 'Maximum traversal depth (default: 10)',
|
|
461
|
-
},
|
|
462
|
-
confidence: {
|
|
463
|
-
type: 'string',
|
|
464
|
-
enum: ['high', 'medium', 'low'],
|
|
465
|
-
description: 'Minimum confidence level for "dead" action (default: low)',
|
|
466
|
-
},
|
|
467
|
-
limit: {
|
|
468
|
-
type: 'number',
|
|
469
|
-
description: 'Maximum items per section in output (default: 10)',
|
|
470
|
-
},
|
|
471
|
-
},
|
|
472
|
-
required: [],
|
|
473
|
-
},
|
|
474
|
-
},
|
|
475
|
-
// Projects Tool
|
|
476
|
-
{
|
|
477
|
-
name: 'drift_projects',
|
|
478
|
-
description: 'List and manage registered drift projects. Enables working across multiple codebases. Use this to see all projects, switch between them, or get details about a specific project.',
|
|
479
|
-
inputSchema: {
|
|
480
|
-
type: 'object',
|
|
481
|
-
properties: {
|
|
482
|
-
action: {
|
|
483
|
-
type: 'string',
|
|
484
|
-
enum: ['list', 'info', 'switch', 'recent'],
|
|
485
|
-
description: 'Action to perform (default: list). "list" shows all projects, "info" shows details for a project, "switch" changes active project, "recent" shows recently used projects',
|
|
486
|
-
},
|
|
487
|
-
project: {
|
|
488
|
-
type: 'string',
|
|
489
|
-
description: 'Project name, ID, or path (for info/switch actions)',
|
|
490
|
-
},
|
|
491
|
-
language: {
|
|
492
|
-
type: 'string',
|
|
493
|
-
description: 'Filter by language (for list action)',
|
|
494
|
-
},
|
|
495
|
-
framework: {
|
|
496
|
-
type: 'string',
|
|
497
|
-
description: 'Filter by framework (for list action)',
|
|
498
|
-
},
|
|
499
|
-
limit: {
|
|
500
|
-
type: 'number',
|
|
501
|
-
description: 'Maximum projects to return (default: all)',
|
|
502
|
-
},
|
|
503
|
-
},
|
|
504
|
-
required: [],
|
|
505
|
-
},
|
|
506
|
-
},
|
|
507
|
-
];
|
|
508
|
-
export function createDriftMCPServer(config) {
|
|
509
|
-
const server = new Server({ name: 'drift', version: '0.1.0' }, { capabilities: { tools: {} } });
|
|
510
|
-
const patternStore = new PatternStore({ rootDir: config.projectRoot });
|
|
511
|
-
const manifestStore = new ManifestStore(config.projectRoot);
|
|
512
|
-
const historyStore = new HistoryStore({ rootDir: config.projectRoot });
|
|
513
|
-
const dnaStore = new DNAStore({ rootDir: config.projectRoot });
|
|
514
|
-
const boundaryStore = new BoundaryStore({ rootDir: config.projectRoot });
|
|
515
|
-
const packManager = new PackManager(config.projectRoot, patternStore);
|
|
516
|
-
const feedbackManager = new FeedbackManager(config.projectRoot);
|
|
517
|
-
// List available tools
|
|
518
|
-
server.setRequestHandler(ListToolsRequestSchema, async () => ({
|
|
519
|
-
tools: TOOLS,
|
|
520
|
-
}));
|
|
521
|
-
// Handle tool calls
|
|
522
|
-
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
523
|
-
const { name, arguments: args } = request.params;
|
|
524
|
-
try {
|
|
525
|
-
switch (name) {
|
|
526
|
-
case 'drift_status':
|
|
527
|
-
return await handleStatus(patternStore);
|
|
528
|
-
case 'drift_patterns':
|
|
529
|
-
return await handlePatterns(patternStore, args);
|
|
530
|
-
case 'drift_files':
|
|
531
|
-
return await handleFiles(manifestStore, args);
|
|
532
|
-
case 'drift_where':
|
|
533
|
-
return await handleWhere(patternStore, args);
|
|
534
|
-
case 'drift_export':
|
|
535
|
-
return await handleExport(patternStore, args);
|
|
536
|
-
case 'drift_contracts':
|
|
537
|
-
return await handleContracts(config.projectRoot, args);
|
|
538
|
-
case 'drift_examples':
|
|
539
|
-
return await handleExamples(config.projectRoot, patternStore, packManager, feedbackManager, args);
|
|
540
|
-
case 'drift_pack':
|
|
541
|
-
return await handlePack(packManager, args);
|
|
542
|
-
case 'drift_feedback':
|
|
543
|
-
return await handleFeedback(feedbackManager, args);
|
|
544
|
-
case 'drift_trends':
|
|
545
|
-
return await handleTrends(historyStore, args);
|
|
546
|
-
case 'drift_parser_info':
|
|
547
|
-
return await handleParserInfo(args);
|
|
548
|
-
case 'drift_dna':
|
|
549
|
-
return await handleDNA(config.projectRoot, dnaStore, args);
|
|
550
|
-
case 'drift_playbook':
|
|
551
|
-
return await handlePlaybook(config.projectRoot, dnaStore, args);
|
|
552
|
-
case 'drift_mutations':
|
|
553
|
-
return await handleMutations(dnaStore, args);
|
|
554
|
-
case 'drift_dna_check':
|
|
555
|
-
return await handleDNACheck(config.projectRoot, dnaStore, args);
|
|
556
|
-
case 'drift_boundaries':
|
|
557
|
-
return await handleBoundaries(boundaryStore, args);
|
|
558
|
-
case 'drift_callgraph':
|
|
559
|
-
return await handleCallGraph(config.projectRoot, args);
|
|
560
|
-
case 'drift_projects':
|
|
561
|
-
return await handleProjects(args);
|
|
562
|
-
default:
|
|
563
|
-
return {
|
|
564
|
-
content: [{ type: 'text', text: `Unknown tool: ${name}` }],
|
|
565
|
-
isError: true,
|
|
566
|
-
};
|
|
567
|
-
}
|
|
568
|
-
}
|
|
569
|
-
catch (error) {
|
|
570
|
-
return {
|
|
571
|
-
content: [{
|
|
572
|
-
type: 'text',
|
|
573
|
-
text: `Error: ${error instanceof Error ? error.message : String(error)}`,
|
|
574
|
-
}],
|
|
575
|
-
isError: true,
|
|
576
|
-
};
|
|
577
|
-
}
|
|
578
|
-
});
|
|
579
|
-
return server;
|
|
580
|
-
}
|
|
581
|
-
async function handleStatus(store) {
|
|
582
|
-
await store.initialize();
|
|
583
|
-
const stats = store.getStats();
|
|
584
|
-
const result = {
|
|
585
|
-
totalPatterns: stats.totalPatterns,
|
|
586
|
-
byCategory: stats.byCategory,
|
|
587
|
-
byConfidence: stats.byConfidenceLevel,
|
|
588
|
-
byStatus: stats.byStatus,
|
|
589
|
-
totalLocations: stats.totalLocations,
|
|
590
|
-
totalOutliers: stats.totalOutliers,
|
|
591
|
-
categories: PATTERN_CATEGORIES,
|
|
592
|
-
};
|
|
593
|
-
return {
|
|
594
|
-
content: [{ type: 'text', text: JSON.stringify(result, null, 2) }],
|
|
595
|
-
};
|
|
596
|
-
}
|
|
597
|
-
async function handlePatterns(store, args) {
|
|
598
|
-
await store.initialize();
|
|
599
|
-
let patterns = store.getAll();
|
|
600
|
-
// Filter by categories
|
|
601
|
-
if (args.categories && args.categories.length > 0) {
|
|
602
|
-
const cats = new Set(args.categories);
|
|
603
|
-
patterns = patterns.filter(p => cats.has(p.category));
|
|
604
|
-
}
|
|
605
|
-
// Filter by confidence
|
|
606
|
-
if (args.minConfidence !== undefined) {
|
|
607
|
-
patterns = patterns.filter(p => p.confidence.score >= args.minConfidence);
|
|
608
|
-
}
|
|
609
|
-
const totalCount = patterns.length;
|
|
610
|
-
const limit = args.limit ?? 30;
|
|
611
|
-
const includeExamples = args.includeExamples ?? true;
|
|
612
|
-
// Apply limit
|
|
613
|
-
patterns = patterns.slice(0, limit);
|
|
614
|
-
// Format for AI consumption
|
|
615
|
-
const result = patterns.map(p => ({
|
|
616
|
-
id: p.id,
|
|
617
|
-
name: p.name,
|
|
618
|
-
category: p.category,
|
|
619
|
-
subcategory: p.subcategory,
|
|
620
|
-
description: p.description,
|
|
621
|
-
confidence: p.confidence.score,
|
|
622
|
-
confidenceLevel: p.confidence.level,
|
|
623
|
-
locationCount: p.locations.length,
|
|
624
|
-
outlierCount: p.outliers.length,
|
|
625
|
-
...(includeExamples ? {
|
|
626
|
-
exampleLocations: p.locations.slice(0, 2).map(l => ({
|
|
627
|
-
file: l.file,
|
|
628
|
-
line: l.line,
|
|
629
|
-
})),
|
|
630
|
-
} : {}),
|
|
631
|
-
}));
|
|
632
|
-
// Add pagination info
|
|
633
|
-
const output = {
|
|
634
|
-
patterns: result,
|
|
635
|
-
total: totalCount,
|
|
636
|
-
showing: result.length,
|
|
637
|
-
...(totalCount > limit ? { truncated: true, hint: `Use limit parameter to see more (showing ${limit} of ${totalCount})` } : {}),
|
|
638
|
-
};
|
|
639
|
-
return {
|
|
640
|
-
content: [{ type: 'text', text: JSON.stringify(output, null, 2) }],
|
|
641
|
-
};
|
|
642
|
-
}
|
|
643
|
-
/**
 * Handle the drift_files tool: query stored patterns for a file path (or
 * glob) and return paginated results as an MCP text response.
 *
 * @param store - pattern store exposing load() and queryFile(query)
 * @param args - { path, category?, limit?, offset?, compact? }
 */
async function handleFiles(store, args) {
    await store.load();
    const fileQuery = { path: args.path };
    if (args.category) {
        fileQuery.category = args.category;
    }
    const result = store.queryFile(fileQuery);
    if (!result) {
        return {
            content: [{ type: 'text', text: `No patterns found in "${args.path}"` }],
        };
    }
    const limit = args.limit ?? 20;
    const offset = args.offset ?? 0;
    const compact = args.compact ?? false;
    // Wrap any serializable value as an MCP text response.
    const asJsonResponse = (value) => ({
        content: [{ type: 'text', text: JSON.stringify(value, null, 2) }],
    });
    // Spreadable truncation marker, present only when more pages remain.
    const truncation = (total) => (total > offset + limit
        ? { truncated: true, hint: `Use offset=${offset + limit} to see next page` }
        : {});
    // Glob queries yield an array of per-file entries.
    if (Array.isArray(result)) {
        const totalFiles = result.length;
        const page = result.slice(offset, offset + limit);
        if (compact) {
            // Summary only: file names and pattern counts.
            return asJsonResponse({
                totalFiles,
                showing: page.length,
                offset,
                files: page.map((entry) => ({
                    file: entry.file,
                    patternCount: entry.patterns?.length ?? 0,
                })),
                ...truncation(totalFiles),
            });
        }
        return asJsonResponse({
            totalFiles,
            showing: page.length,
            offset,
            results: page,
            ...truncation(totalFiles),
        });
    }
    // Single file result: paginate the patterns found inside the file.
    if (result.patterns && Array.isArray(result.patterns)) {
        const totalPatterns = result.patterns.length;
        const page = result.patterns.slice(offset, offset + limit);
        if (compact) {
            return asJsonResponse({
                file: result.file,
                totalPatterns,
                showing: page.length,
                categories: [...new Set(result.patterns.map((p) => p.category))],
                ...truncation(totalPatterns),
            });
        }
        return asJsonResponse({
            ...result,
            patterns: page,
            totalPatterns,
            showing: page.length,
            offset,
            ...truncation(totalPatterns),
        });
    }
    // Unknown result shape: return it verbatim.
    return asJsonResponse(result);
}
/**
 * Handle the drift_where tool: locate patterns whose id or name contains the
 * search term and report the files/lines where they occur.
 *
 * @param store - pattern store exposing initialize() and getAll()
 * @param args - { pattern, category?, limit?, maxLocations? }
 */
async function handleWhere(store, args) {
    await store.initialize();
    const needle = args.pattern.toLowerCase();
    const matchesNeedle = (p) => p.id.toLowerCase().includes(needle) ||
        p.name.toLowerCase().includes(needle);
    let matched = store.getAll().filter(matchesNeedle);
    if (args.category) {
        matched = matched.filter((p) => p.category === args.category);
    }
    const limit = args.limit ?? 10;
    const maxLocations = args.maxLocations ?? 5;
    const total = matched.length;
    // Cap both the number of patterns and the locations reported per pattern.
    const entries = matched.slice(0, limit).map((p) => ({
        id: p.id,
        name: p.name,
        category: p.category,
        totalLocations: p.locations.length,
        locations: p.locations.slice(0, maxLocations).map((l) => ({
            file: l.file,
            line: l.line,
            column: l.column,
        })),
        ...(p.locations.length > maxLocations
            ? { locationsHint: `Showing ${maxLocations} of ${p.locations.length}. Use maxLocations parameter to see more.` }
            : {}),
    }));
    const payload = {
        patterns: entries,
        total,
        showing: entries.length,
        ...(total > limit
            ? { truncated: true, hint: `Showing ${limit} of ${total} patterns. Use limit parameter to see more.` }
            : {}),
    };
    return {
        content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }],
    };
}
/**
 * Handle the drift_export tool: export stored patterns in one of three
 * formats — 'ai-context' (markdown optimized for LLMs, the default),
 * 'summary' (JSON statistics), or raw JSON.
 *
 * @param store - pattern store exposing initialize(), getAll(), getStats()
 * @param args - { categories?, format?, compact? }
 */
async function handleExport(store, args) {
    await store.initialize();
    let patterns = store.getAll();
    // Filter by categories
    if (args.categories && args.categories.length > 0) {
        const wanted = new Set(args.categories);
        patterns = patterns.filter((p) => wanted.has(p.category));
    }
    const format = args.format ?? 'ai-context';
    if (format === 'ai-context') {
        // Optimized format for LLM consumption: markdown grouped by category.
        const byCategory = new Map();
        for (const p of patterns) {
            const bucket = byCategory.get(p.category);
            if (bucket) {
                bucket.push(p);
            }
            else {
                byCategory.set(p.category, [p]);
            }
        }
        const parts = ['# Codebase Patterns\n\n'];
        parts.push(`Total: ${patterns.length} patterns across ${byCategory.size} categories\n\n`);
        for (const [category, group] of byCategory) {
            parts.push(`## ${category.toUpperCase()}\n\n`);
            for (const p of group) {
                parts.push(`### ${p.name}\n`);
                parts.push(`- Confidence: ${(p.confidence.score * 100).toFixed(0)}%\n`);
                parts.push(`- Found in ${p.locations.length} locations\n`);
                if (p.description) {
                    parts.push(`- ${p.description}\n`);
                }
                // Example file references are omitted in compact mode.
                if (!args.compact && p.locations.length > 0) {
                    parts.push(`- Examples: ${p.locations.slice(0, 2).map((l) => l.file).join(', ')}\n`);
                }
                parts.push('\n');
            }
        }
        return {
            content: [{ type: 'text', text: parts.join('') }],
        };
    }
    if (format === 'summary') {
        const stats = store.getStats();
        return {
            content: [{
                    type: 'text',
                    text: JSON.stringify({
                        totalPatterns: stats.totalPatterns,
                        byCategory: stats.byCategory,
                        byConfidence: stats.byConfidenceLevel,
                    }, null, 2),
                }],
        };
    }
    // JSON format
    return {
        content: [{ type: 'text', text: JSON.stringify(patterns, null, 2) }],
    };
}
/**
 * Handle the drift_contracts tool: collect contract records from the
 * .drift/contracts/{discovered,verified,mismatch} directories, optionally
 * filtered by status.
 *
 * @param projectRoot - project root containing the .drift directory
 * @param args - { status? } where status is a directory name or 'all'
 */
async function handleContracts(projectRoot, args) {
    const fs = await import('node:fs/promises');
    const path = await import('node:path');
    const contractsDir = path.join(projectRoot, '.drift', 'contracts');
    try {
        const statuses = ['discovered', 'verified', 'mismatch'];
        const contracts = [];
        for (const status of statuses) {
            // Skip statuses the caller did not ask for.
            if (args.status && args.status !== 'all' && args.status !== status) {
                continue;
            }
            const statusDir = path.join(contractsDir, status);
            try {
                for (const file of await fs.readdir(statusDir)) {
                    // Only plain .json files; skip hidden files.
                    if (!file.endsWith('.json') || file.startsWith('.')) {
                        continue;
                    }
                    const raw = await fs.readFile(path.join(statusDir, file), 'utf-8');
                    const data = JSON.parse(raw);
                    contracts.push({ status, contracts: data.contracts ?? [] });
                }
            }
            catch {
                // Directory doesn't exist
            }
        }
        return {
            content: [{ type: 'text', text: JSON.stringify(contracts, null, 2) }],
        };
    }
    catch {
        return {
            content: [{
                    type: 'text',
                    text: `No contracts found. Run \`drift scan\` first.`,
                }],
        };
    }
}
/**
 * Handle the drift_examples tool: return real code snippets from the project
 * that illustrate detected patterns, filtered by category and/or pattern name.
 *
 * Reads files from disk under `projectRoot`, scores candidate locations using
 * heuristics plus recorded feedback, skips excluded/deprecated files, and
 * renders a markdown report grouped by category.
 *
 * @param projectRoot - base directory that location file paths are relative to
 * @param store - pattern store exposing initialize() and getAll()
 * @param packManager - exposes initialize() and trackUsage() for pack learning
 * @param feedbackManager - exposes initialize(), shouldExcludeFile(), getFileScore()
 * @param args - { categories?, pattern?, maxExamples?, contextLines?, includeDeprecated? }
 * @returns MCP text response (isError: true when no filter was supplied)
 */
async function handleExamples(projectRoot, store, packManager, feedbackManager, args) {
    const fs = await import('node:fs/promises');
    const path = await import('node:path');
    // Require at least one filter to avoid processing all 800+ patterns
    if ((!args.categories || args.categories.length === 0) && !args.pattern) {
        return {
            content: [{
                    type: 'text',
                    text: 'Error: Please specify at least one filter:\n' +
                        '- categories: ["auth", "security", "api"] etc.\n' +
                        '- pattern: "middleware", "token", "rate-limit" etc.\n\n' +
                        'Valid categories: structural, components, styling, api, auth, errors, ' +
                        'data-access, testing, logging, security, config, types, performance, ' +
                        'accessibility, documentation',
                }],
            isError: true,
        };
    }
    await store.initialize();
    await packManager.initialize();
    await feedbackManager.initialize();
    // Track usage for pack learning
    if (args.categories && args.categories.length > 0) {
        await packManager.trackUsage({
            categories: args.categories,
            patterns: args.pattern ? [args.pattern] : undefined,
            timestamp: new Date().toISOString(),
            context: 'code_generation',
        });
    }
    const maxExamples = args.maxExamples ?? 3;
    const contextLines = args.contextLines ?? 10;
    const includeDeprecated = args.includeDeprecated ?? false;
    let patterns = store.getAll();
    // Filter by categories
    if (args.categories && args.categories.length > 0) {
        const cats = new Set(args.categories);
        patterns = patterns.filter(p => cats.has(p.category));
    }
    // Filter by pattern name/id
    if (args.pattern) {
        const searchTerm = args.pattern.toLowerCase();
        patterns = patterns.filter(p => p.id.toLowerCase().includes(searchTerm) ||
            p.name.toLowerCase().includes(searchTerm) ||
            p.subcategory.toLowerCase().includes(searchTerm));
    }
    // Deduplicate patterns by subcategory to get unique pattern types.
    // For each category/subcategory pair, keep the pattern with the most
    // locations as the representative.
    const uniquePatterns = new Map();
    for (const p of patterns) {
        const key = `${p.category}/${p.subcategory}`;
        if (!uniquePatterns.has(key) || p.locations.length > uniquePatterns.get(key).locations.length) {
            uniquePatterns.set(key, p);
        }
    }
    // Limit to 20 unique patterns max to avoid timeout
    const limitedPatterns = Array.from(uniquePatterns.entries()).slice(0, 20);
    // File filtering patterns (same as packs.ts): docs, CI config, manifests,
    // lockfiles, env files, and build output are never good code examples.
    const EXAMPLE_EXCLUDE_PATTERNS = [
        /README/i, /CHANGELOG/i, /CONTRIBUTING/i, /LICENSE/i, /\.md$/i,
        /\.github\//, /\.gitlab\//, /\.ya?ml$/i, /\.toml$/i,
        /Dockerfile/i, /docker-compose/i,
        /package\.json$/i, /package-lock\.json$/i, /pnpm-lock\.yaml$/i,
        /requirements\.txt$/i, /pyproject\.toml$/i,
        /\.env/i, /dist\//, /build\//, /node_modules\//,
    ];
    // Markers that flag a file as deprecated (checked against the file header).
    const DEPRECATION_MARKERS = [
        /DEPRECATED/i, /LEGACY/i, /@deprecated/i,
        /TODO:\s*remove/i, /REMOVAL:\s*planned/i,
        /backward.?compat/i, /will be removed/i,
    ];
    const shouldExcludeFile = (filePath) => {
        // Check feedback-based exclusion first
        if (feedbackManager.shouldExcludeFile(filePath)) {
            return true;
        }
        return EXAMPLE_EXCLUDE_PATTERNS.some(pattern => pattern.test(filePath));
    };
    // Heuristic quality score for a candidate example file; higher is better.
    // Source files under /src/ score best; docs/config score poorly.
    const scoreLocation = (filePath) => {
        let score = 1.0;
        // Apply feedback-based scoring
        score *= feedbackManager.getFileScore(filePath);
        // Apply heuristic scoring
        if (/\.md$/i.test(filePath))
            score *= 0.1;
        if (/README/i.test(filePath))
            score *= 0.1;
        if (/\.ya?ml$/i.test(filePath))
            score *= 0.2;
        if (/\.json$/i.test(filePath))
            score *= 0.3;
        if (/\.(ts|tsx|js|jsx|py|rb|go|rs|java)$/i.test(filePath))
            score *= 1.5;
        if (/\/src\//i.test(filePath))
            score *= 1.3;
        if (/\.(test|spec)\./i.test(filePath))
            score *= 0.7;
        return score;
    };
    // Read actual code snippets.
    // fileCache: file path -> array of lines; fileContentCache: path -> raw text.
    const fileCache = new Map();
    const fileContentCache = new Map();
    let excludedCount = 0;
    let deprecatedCount = 0;
    // Read and cache a file's lines relative to projectRoot.
    // Returns [] when the file cannot be read. NOTE: also populates
    // fileContentCache as a side effect — isDeprecatedContent() below relies
    // on this ordering.
    async function getFileLines(filePath) {
        if (fileCache.has(filePath)) {
            return fileCache.get(filePath);
        }
        try {
            const fullPath = path.join(projectRoot, filePath);
            const content = await fs.readFile(fullPath, 'utf-8');
            const lines = content.split('\n');
            fileCache.set(filePath, lines);
            fileContentCache.set(filePath, content);
            return lines;
        }
        catch {
            return [];
        }
    }
    // Slice the snippet around [startLine, endLine] with `contextLines` of
    // padding on both sides (line numbers are 1-based).
    function extractSnippet(lines, startLine, endLine) {
        const start = Math.max(0, startLine - contextLines - 1);
        const end = Math.min(lines.length, (endLine ?? startLine) + contextLines);
        return lines.slice(start, end).join('\n');
    }
    // Only the first 500 characters (the file header) are checked for markers.
    function isDeprecatedContent(content) {
        const header = content.slice(0, 500);
        return DEPRECATION_MARKERS.some(pattern => pattern.test(header));
    }
    const results = [];
    for (const [, pattern] of limitedPatterns) {
        const examples = [];
        // Sort locations by quality score and filter excluded files
        const scoredLocations = pattern.locations
            .map(loc => ({ loc, score: scoreLocation(loc.file) }))
            .filter(({ loc }) => !shouldExcludeFile(loc.file))
            .sort((a, b) => b.score - a.score);
        excludedCount += pattern.locations.length - scoredLocations.length;
        // Get unique files to avoid duplicate examples from same file
        const seenFiles = new Set();
        for (const { loc } of scoredLocations) {
            if (seenFiles.has(loc.file))
                continue;
            if (examples.length >= maxExamples)
                break;
            const lines = await getFileLines(loc.file);
            if (lines.length === 0)
                continue;
            // Check for deprecation markers
            // (fileContentCache was populated by the getFileLines call above)
            const content = fileContentCache.get(loc.file) || '';
            if (!includeDeprecated && isDeprecatedContent(content)) {
                deprecatedCount++;
                continue;
            }
            const snippet = extractSnippet(lines, loc.line, loc.endLine);
            if (snippet.trim()) {
                examples.push({
                    file: loc.file,
                    line: loc.line,
                    code: snippet,
                });
                seenFiles.add(loc.file);
            }
        }
        if (examples.length > 0) {
            results.push({
                category: pattern.category,
                subcategory: pattern.subcategory,
                patternName: pattern.name,
                description: pattern.description,
                confidence: pattern.confidence.score,
                examples,
            });
        }
    }
    // Format output for AI consumption
    let output = '# Code Pattern Examples\n\n';
    output += `Found ${results.length} unique patterns with code examples.\n`;
    if (excludedCount > 0 || deprecatedCount > 0) {
        output += `*(${excludedCount} non-source files excluded`;
        if (deprecatedCount > 0) {
            output += `, ${deprecatedCount} deprecated files skipped`;
        }
        output += `)*\n`;
    }
    output += '\n';
    // Group by category
    const grouped = new Map();
    for (const r of results) {
        if (!grouped.has(r.category)) {
            grouped.set(r.category, []);
        }
        grouped.get(r.category).push(r);
    }
    for (const [category, categoryResults] of grouped) {
        output += `## ${category.toUpperCase()}\n\n`;
        for (const r of categoryResults) {
            output += `### ${r.subcategory}\n`;
            output += `**${r.patternName}** (${(r.confidence * 100).toFixed(0)}% confidence)\n`;
            if (r.description) {
                output += `${r.description}\n`;
            }
            output += '\n';
            for (const ex of r.examples) {
                output += `**${ex.file}:${ex.line}**\n`;
                output += '```\n';
                output += ex.code;
                output += '\n```\n\n';
            }
        }
    }
    return {
        content: [{ type: 'text', text: output }],
    };
}
/**
 * Handle the drift_pack tool: list, fetch, suggest, infer, create, or delete
 * pattern packs.
 *
 * @param packManager - pack manager (initialize, getAllPacks, suggestPacks,
 *   inferPacksFromStructure, createCustomPack, deleteCustomPack,
 *   getPackContent, getPack, trackUsage)
 * @param args - { action?, name?, list?, categories?, patterns?, description?, refresh? }
 */
async function handlePack(packManager, args) {
    await packManager.initialize();
    // Determine action (support legacy 'list' boolean)
    const action = args.action ?? (args.list ? 'list' : (args.name ? 'get' : 'list'));
    const textResponse = (text) => ({ content: [{ type: 'text', text }] });
    const errorResponse = (text) => ({ content: [{ type: 'text', text }], isError: true });
    switch (action) {
        case 'list': {
            const lines = [
                '# Available Pattern Packs\n\n',
                'Use `drift_pack` with a pack name to get pre-computed pattern context.\n\n',
                '## Actions\n',
                '- `action="get"` - Get a pack by name (default)\n',
                '- `action="list"` - List all packs\n',
                '- `action="suggest"` - Suggest packs based on your usage patterns\n',
                '- `action="infer"` - Suggest packs from file structure analysis\n',
                '- `action="create"` - Create a custom pack\n',
                '- `action="delete"` - Delete a custom pack\n\n',
            ];
            for (const pack of packManager.getAllPacks()) {
                lines.push(`## ${pack.name}\n`);
                lines.push(`${pack.description}\n`);
                lines.push(`- Categories: ${pack.categories.join(', ')}\n`);
                if (pack.patterns) {
                    lines.push(`- Pattern filters: ${pack.patterns.join(', ')}\n`);
                }
                lines.push('\n');
            }
            return textResponse(lines.join(''));
        }
        case 'suggest': {
            // Suggestions are learned from recorded tool usage.
            const suggestions = await packManager.suggestPacks();
            if (suggestions.length === 0) {
                return textResponse('# No Pack Suggestions Yet\n\n' +
                    'Pack suggestions are based on your usage patterns. Keep using `drift_examples` and `drift_pack` ' +
                    'with different category combinations, and suggestions will appear here.\n\n' +
                    'Alternatively, use `action="infer"` to get suggestions based on file structure analysis.');
            }
            const lines = [
                '# Suggested Packs\n\n',
                'Based on your usage patterns, these category combinations might be useful as packs:\n\n',
            ];
            for (const s of suggestions) {
                lines.push(`## ${s.name}\n`);
                lines.push(`${s.description}\n`);
                lines.push(`- Categories: ${s.categories.join(', ')}\n`);
                if (s.patterns && s.patterns.length > 0) {
                    lines.push(`- Patterns: ${s.patterns.join(', ')}\n`);
                }
                lines.push(`- Used ${s.usageCount} times (last: ${s.lastUsed})\n\n`);
                lines.push(`To create this pack:\n`);
                lines.push('```\n');
                lines.push(`drift_pack action="create" name="${s.name}" description="${s.description}" categories=${JSON.stringify(s.categories)}\n`);
                lines.push('```\n\n');
            }
            return textResponse(lines.join(''));
        }
        case 'infer': {
            // Suggestions derived from pattern co-occurrence in the same files.
            const suggestions = await packManager.inferPacksFromStructure();
            if (suggestions.length === 0) {
                return textResponse('# No Inferred Packs\n\n' +
                    'Could not find significant pattern co-occurrences in the codebase. ' +
                    'This usually means patterns are well-separated by file, or the codebase needs more scanning.');
            }
            const lines = [
                '# Inferred Packs from File Structure\n\n',
                'These category combinations frequently appear together in the same files:\n\n',
            ];
            for (const s of suggestions) {
                lines.push(`## ${s.name}\n`);
                lines.push(`${s.description}\n`);
                lines.push(`- Categories: ${s.categories.join(', ')}\n`);
                lines.push(`- Found in ${s.usageCount} files\n\n`);
                lines.push(`To create this pack:\n`);
                lines.push('```\n');
                lines.push(`drift_pack action="create" name="${s.name}" description="${s.description}" categories=${JSON.stringify(s.categories)}\n`);
                lines.push('```\n\n');
            }
            return textResponse(lines.join(''));
        }
        case 'create': {
            if (!args.name) {
                return errorResponse('Error: Pack name is required for create action.\n\n' +
                    'Example: drift_pack action="create" name="my_pack" description="My custom pack" categories=["api", "auth"]');
            }
            if (!args.categories || args.categories.length === 0) {
                return errorResponse('Error: At least one category is required for create action.\n\n' +
                    `Valid categories: ${PATTERN_CATEGORIES.join(', ')}`);
            }
            await packManager.createCustomPack({
                name: args.name,
                description: args.description ?? `Custom pack: ${args.name}`,
                categories: args.categories,
                patterns: args.patterns,
            });
            return textResponse(`# Pack Created: ${args.name}\n\n` +
                `Successfully created custom pack "${args.name}".\n\n` +
                `- Categories: ${args.categories.join(', ')}\n` +
                (args.patterns ? `- Patterns: ${args.patterns.join(', ')}\n` : '') +
                `\nUse \`drift_pack name="${args.name}"\` to get the pack content.`);
        }
        case 'delete': {
            if (!args.name) {
                return errorResponse('Error: Pack name is required for delete action.');
            }
            const deleted = await packManager.deleteCustomPack(args.name);
            if (!deleted) {
                return errorResponse(`Error: Pack "${args.name}" not found or is a built-in pack (cannot delete built-in packs).`);
            }
            return textResponse(`Successfully deleted custom pack "${args.name}".`);
        }
        case 'get':
        default: {
            if (!args.name) {
                // Fall back to list if no name provided
                return handlePack(packManager, { action: 'list' });
            }
            try {
                const result = await packManager.getPackContent(args.name, {
                    refresh: args.refresh ?? false,
                });
                // Track usage for learning
                const pack = packManager.getPack(args.name);
                if (pack) {
                    await packManager.trackUsage({
                        categories: pack.categories,
                        patterns: pack.patterns,
                        timestamp: new Date().toISOString(),
                        context: 'code_generation',
                    });
                }
                // Prepend a provenance header so callers can see cache behavior.
                let header;
                if (result.fromCache) {
                    header = `<!-- Served from cache (generated: ${result.generatedAt}) -->\n\n`;
                }
                else if (result.staleReason) {
                    header = `<!-- Regenerated: ${result.staleReason} -->\n\n`;
                }
                else {
                    header = `<!-- Freshly generated -->\n\n`;
                }
                return textResponse(header + result.content);
            }
            catch (error) {
                return errorResponse(`Error: ${error instanceof Error ? error.message : String(error)}`);
            }
        }
    }
}
/**
 * Handle the drift_feedback tool: record or report example-quality feedback.
 *
 * Actions: 'rate' records a good/bad/irrelevant rating for an example,
 * 'stats' (default) prints aggregate statistics, 'clear' resets feedback.
 *
 * NOTE(review): the emoji literals below were mojibake-corrupted in the
 * shipped dist file; they are restored here to the plausible originals
 * (👍/👎/🤷/✅) — confirm against the TypeScript source.
 *
 * @param feedbackManager - exposes initialize(), recordFeedback(), getStats(), clearFeedback()
 * @param args - { action?, file?, line?, rating?, reason?, patternId?, patternName?, category? }
 */
async function handleFeedback(feedbackManager, args) {
    await feedbackManager.initialize();
    const action = args.action ?? 'stats';
    switch (action) {
        case 'rate': {
            if (!args.file || !args.rating) {
                return {
                    content: [{
                            type: 'text',
                            text: 'Error: "file" and "rating" are required for rate action.\n\n' +
                                'Example: drift_feedback action="rate" file="api/routes/auth.py" line=42 ' +
                                'rating="good" patternName="token-handling" category="auth"',
                        }],
                    isError: true,
                };
            }
            await feedbackManager.recordFeedback({
                patternId: args.patternId ?? 'unknown',
                patternName: args.patternName ?? 'unknown',
                category: args.category ?? 'unknown',
                file: args.file,
                line: args.line ?? 0,
                rating: args.rating,
                reason: args.reason,
            });
            const emoji = args.rating === 'good' ? '👍' : args.rating === 'bad' ? '👎' : '🤷';
            return {
                content: [{
                        type: 'text',
                        text: `${emoji} Feedback recorded for ${args.file}:${args.line ?? 0}\n\n` +
                            `Rating: ${args.rating}\n` +
                            (args.reason ? `Reason: ${args.reason}\n` : '') +
                            `\nThis feedback will improve future example suggestions.`,
                    }],
            };
        }
        case 'stats': {
            const stats = await feedbackManager.getStats();
            let output = '# Example Feedback Statistics\n\n';
            output += `Total feedback: ${stats.totalFeedback}\n`;
            output += `- Good examples: ${stats.goodExamples}\n`;
            output += `- Bad examples: ${stats.badExamples}\n`;
            output += `- Irrelevant: ${stats.irrelevantExamples}\n\n`;
            if (stats.topGoodPatterns.length > 0) {
                output += '## Top Patterns with Good Examples\n';
                for (const p of stats.topGoodPatterns) {
                    output += `- ${p.pattern}: ${p.count} good\n`;
                }
                output += '\n';
            }
            if (stats.topBadPatterns.length > 0) {
                output += '## Patterns Needing Improvement\n';
                for (const p of stats.topBadPatterns) {
                    output += `- ${p.pattern}: ${p.count} bad\n`;
                }
                output += '\n';
            }
            if (stats.topBadFiles.length > 0) {
                output += '## Files Producing Poor Examples\n';
                output += 'These files are being deprioritized in example selection:\n';
                for (const f of stats.topBadFiles) {
                    output += `- ${f.file}: ${f.count} negative ratings\n`;
                }
            }
            return {
                content: [{ type: 'text', text: output }],
            };
        }
        case 'clear': {
            await feedbackManager.clearFeedback();
            return {
                content: [{
                        type: 'text',
                        text: '✅ All feedback has been cleared. Example scoring reset to defaults.',
                    }],
            };
        }
        default:
            return {
                content: [{
                        type: 'text',
                        text: `Unknown action: ${action}. Valid actions: rate, stats, clear`,
                    }],
                isError: true,
            };
    }
}
/**
 * Handle the drift_trends tool: summarize pattern regressions and
 * improvements between scan snapshots over a time period.
 *
 * BUG FIX: improvements previously rendered `Change: +${changePercent}%`
 * unconditionally, producing "+-5.0%" when an improvement's changePercent was
 * negative (e.g. a drop in violation count); the shared signed formatter now
 * handles the sign correctly.
 *
 * NOTE(review): emoji/arrow literals were mojibake-corrupted in the shipped
 * dist file; restored to plausible originals (→ ⚠️ ⚡ ✅ 📈 📉 ➡️) — confirm
 * against the TypeScript source.
 *
 * @param historyStore - exposes initialize() and getTrendSummary(period)
 * @param args - { period?, category?, severity?, type? }
 * @returns MCP text response with a markdown trend report
 */
async function handleTrends(historyStore, args) {
    await historyStore.initialize();
    const period = args.period ?? '7d';
    const summary = await historyStore.getTrendSummary(period);
    if (!summary) {
        return {
            content: [{
                    type: 'text',
                    text: '# No Trend Data Available\n\n' +
                        'Pattern trends require at least 2 snapshots. Snapshots are created automatically when you run `drift scan`.\n\n' +
                        'Run a few scans over time to start tracking trends.',
                }],
        };
    }
    // Filter by category if specified
    let regressions = summary.regressions;
    let improvements = summary.improvements;
    if (args.category) {
        regressions = regressions.filter(t => t.category === args.category);
        improvements = improvements.filter(t => t.category === args.category);
    }
    // Filter by severity (applies to regressions only)
    if (args.severity && args.severity !== 'all') {
        regressions = regressions.filter(t => t.severity === args.severity);
    }
    // Filter by type
    if (args.type === 'regressions') {
        improvements = [];
    }
    else if (args.type === 'improvements') {
        regressions = [];
    }
    // Signed percentage string, e.g. "+3.2" or "-5.0".
    const signedPercent = (value) => `${value >= 0 ? '+' : ''}${value.toFixed(1)}`;
    // Render one trend entry; shared by all three sections below.
    const renderTrend = (t) => `### ${t.patternName} (${t.category})\n` +
        `- ${t.details}\n` +
        `- Metric: ${t.metric}\n` +
        `- Change: ${signedPercent(t.changePercent)}%\n\n`;
    // Build output
    let output = `# Pattern Trends (${period})\n\n`;
    output += `Period: ${summary.startDate} → ${summary.endDate}\n`;
    output += `Overall trend: **${summary.overallTrend.toUpperCase()}** (${signedPercent(summary.healthDelta * 100)}% health)\n\n`;
    // Summary stats
    output += `## Summary\n`;
    output += `- Regressions: ${regressions.length}\n`;
    output += `- Improvements: ${improvements.length}\n`;
    output += `- Stable patterns: ${summary.stable}\n\n`;
    // Critical regressions first
    const criticalRegressions = regressions.filter(t => t.severity === 'critical');
    if (criticalRegressions.length > 0) {
        output += `## ⚠️ Critical Regressions\n\n`;
        for (const t of criticalRegressions) {
            output += renderTrend(t);
        }
    }
    // Warning regressions
    const warningRegressions = regressions.filter(t => t.severity === 'warning');
    if (warningRegressions.length > 0) {
        output += `## ⚡ Warning Regressions\n\n`;
        for (const t of warningRegressions) {
            output += renderTrend(t);
        }
    }
    // Improvements
    if (improvements.length > 0) {
        output += `## ✅ Improvements\n\n`;
        for (const t of improvements) {
            output += renderTrend(t);
        }
    }
    // Category breakdown
    const categoryEntries = Object.entries(summary.categoryTrends);
    if (categoryEntries.length > 0) {
        output += `## Category Trends\n\n`;
        for (const [category, trend] of categoryEntries) {
            const emoji = trend.trend === 'improving' ? '📈' : trend.trend === 'declining' ? '📉' : '➡️';
            output += `- ${emoji} **${category}**: ${trend.trend} (confidence: ${signedPercent(trend.avgConfidenceChange * 100)}%, compliance: ${signedPercent(trend.complianceChange * 100)}%)\n`;
        }
    }
    return {
        content: [{ type: 'text', text: output }],
    };
}
/**
 * Handle drift_parser_info tool
 *
 * Returns information about parser capabilities and status for each
 * supported language (or a single language), rendered as markdown.
 *
 * Refactor note: availability probing and per-language section rendering
 * were previously duplicated once per language; they are now shared,
 * parameterized helpers producing the same output byte-for-byte.
 *
 * @param {{ language?: string }} args - 'python' | 'csharp' | 'typescript'
 *   | 'java' | 'php' | 'all' (default 'all')
 * @returns {Promise<{ content: Array<{ type: string, text: string }> }>}
 */
async function handleParserInfo(args) {
    const language = args.language ?? 'all';
    const info = {};
    /**
     * Probe driftdetect-core for one language's tree-sitter support.
     * @param {string} availabilityFn - name of the `is*TreeSitterAvailable` export
     * @param {string} errorFn - name of the `get*LoadingError` export
     * @param {string|undefined} missingFnError - message when the module loads
     *   but lacks the expected exports (undefined = report nothing, as the
     *   original Python probe did)
     * @param {string} importError - message when the import itself fails
     */
    const probeCore = async (availabilityFn, errorFn, missingFnError, importError) => {
        let treeSitterAvailable = false;
        let loadingError;
        try {
            const core = await import('driftdetect-core');
            if (availabilityFn in core && errorFn in core) {
                treeSitterAvailable = core[availabilityFn]();
                loadingError = core[errorFn]() ?? undefined;
            }
            else if (missingFnError !== undefined) {
                loadingError = missingFnError;
            }
        }
        catch {
            loadingError = importError;
        }
        return { treeSitterAvailable, loadingError };
    };
    // Python parser info
    if (language === 'python' || language === 'all') {
        const { treeSitterAvailable, loadingError } = await probeCore('isTreeSitterAvailable', 'getLoadingError', undefined, 'driftdetect-core not available');
        info.python = {
            treeSitterAvailable,
            activeParser: treeSitterAvailable ? 'tree-sitter' : 'regex',
            capabilities: {
                basicRouteDetection: true,
                simplePydanticModels: true,
                pydanticModels: treeSitterAvailable,
                nestedTypes: treeSitterAvailable,
                fieldConstraints: treeSitterAvailable,
                inheritance: treeSitterAvailable,
                generics: treeSitterAvailable,
                django: treeSitterAvailable,
                typeHints: treeSitterAvailable,
            },
            supportedFrameworks: treeSitterAvailable
                ? ['fastapi', 'flask', 'django', 'starlette']
                : ['fastapi', 'flask'],
            expectedConfidence: treeSitterAvailable ? 'high (0.7-0.9)' : 'low (0.3-0.5)',
            loadingError,
        };
    }
    // C# parser info. The original inline probe called the exports
    // unconditionally, so a missing export threw and produced the same
    // message as a failed import - hence the identical third and fourth args.
    if (language === 'csharp' || language === 'all') {
        const { treeSitterAvailable, loadingError } = await probeCore('isCSharpTreeSitterAvailable', 'getCSharpLoadingError', 'C# parser not available', 'C# parser not available');
        info.csharp = {
            treeSitterAvailable,
            activeParser: treeSitterAvailable ? 'tree-sitter' : 'regex',
            capabilities: {
                basicParsing: true,
                classExtraction: treeSitterAvailable,
                methodExtraction: treeSitterAvailable,
                attributeExtraction: treeSitterAvailable,
                aspNetControllers: treeSitterAvailable,
                minimalApis: treeSitterAvailable,
                recordTypes: treeSitterAvailable,
            },
            supportedFrameworks: treeSitterAvailable
                ? ['asp.net-core', 'minimal-apis', 'web-api']
                : [],
            loadingError,
        };
    }
    // TypeScript parser info
    if (language === 'typescript' || language === 'all') {
        info.typescript = {
            treeSitterAvailable: true, // TypeScript uses compiler API, always available
            activeParser: 'typescript-compiler-api',
            capabilities: {
                fullAST: true,
                typeInference: true,
                interfaces: true,
                generics: true,
                decorators: true,
            },
            supportedFrameworks: ['express', 'nestjs', 'fastify'],
        };
    }
    // Java parser info
    if (language === 'java' || language === 'all') {
        const { treeSitterAvailable, loadingError } = await probeCore('isJavaTreeSitterAvailable', 'getJavaLoadingError', 'Java parser functions not yet available in driftdetect-core', 'Java parser not available');
        info.java = {
            treeSitterAvailable,
            activeParser: treeSitterAvailable ? 'tree-sitter' : 'regex',
            capabilities: {
                basicParsing: true,
                classExtraction: treeSitterAvailable,
                methodExtraction: treeSitterAvailable,
                annotationExtraction: treeSitterAvailable,
                springControllers: treeSitterAvailable,
                springData: treeSitterAvailable,
                springSecurity: treeSitterAvailable,
                recordTypes: treeSitterAvailable,
            },
            supportedFrameworks: treeSitterAvailable
                ? ['spring-boot', 'spring-mvc', 'spring-data', 'spring-security']
                : [],
            loadingError,
        };
    }
    // PHP parser info
    if (language === 'php' || language === 'all') {
        const { treeSitterAvailable, loadingError } = await probeCore('isPhpTreeSitterAvailable', 'getPhpLoadingError', 'PHP parser functions not yet available in driftdetect-core', 'PHP parser not available');
        info.php = {
            treeSitterAvailable,
            activeParser: treeSitterAvailable ? 'tree-sitter' : 'regex',
            capabilities: {
                basicParsing: true,
                classExtraction: treeSitterAvailable,
                methodExtraction: treeSitterAvailable,
                attributeExtraction: treeSitterAvailable,
                laravelControllers: treeSitterAvailable,
                laravelModels: treeSitterAvailable,
                traitExtraction: treeSitterAvailable,
                enumExtraction: treeSitterAvailable,
            },
            supportedFrameworks: treeSitterAvailable
                ? ['laravel', 'symfony', 'php8']
                : ['laravel'],
            loadingError,
        };
    }
    // Render one "## <title>" markdown section for a language entry.
    // Flags cover the two layout variants in the original output:
    // TypeScript omits the tree-sitter and frameworks lines.
    const renderSection = (title, entry, { showTreeSitter = true, showFrameworks = true } = {}) => {
        let section = `## ${title}\n\n`;
        section += `- **Active Parser:** ${entry.activeParser}\n`;
        if (showTreeSitter) {
            section += `- **Tree-sitter:** ${entry.treeSitterAvailable ? 'โ available' : 'โ not installed'}\n`;
        }
        if (entry.expectedConfidence) {
            section += `- **Expected Confidence:** ${entry.expectedConfidence}\n`;
        }
        if (showFrameworks && entry.supportedFrameworks.length > 0) {
            section += `- **Supported Frameworks:** ${entry.supportedFrameworks.join(', ')}\n`;
        }
        section += '\n';
        section += '### Capabilities\n\n';
        for (const [cap, enabled] of Object.entries(entry.capabilities)) {
            const emoji = enabled ? 'โ' : 'โ';
            // camelCase -> space-separated lower-case, e.g. "basicParsing" -> "basic parsing"
            const capName = cap.replace(/([A-Z])/g, ' $1').toLowerCase().trim();
            section += `- ${emoji} ${capName}\n`;
        }
        section += '\n';
        if (entry.loadingError) {
            section += `> โ ๏ธ Loading error: ${entry.loadingError}\n\n`;
        }
        return section;
    };
    // Build human-readable output (same section order as before).
    let output = '# Parser Information\n\n';
    if (info.python) {
        output += renderSection('Python', info.python);
    }
    if (info.csharp) {
        output += renderSection('C#', info.csharp);
    }
    if (info.typescript) {
        output += renderSection('TypeScript/JavaScript', info.typescript, { showTreeSitter: false, showFrameworks: false });
    }
    if (info.java) {
        output += renderSection('Java', info.java);
    }
    if (info.php) {
        output += renderSection('PHP', info.php);
    }
    // Installation tips for every probed language missing tree-sitter.
    const installTips = {
        python: ['To enable full Python support (Pydantic, Django, nested types):', 'tree-sitter-python'],
        csharp: ['To enable full C# support (ASP.NET, attributes, records):', 'tree-sitter-c-sharp'],
        java: ['To enable full Java support (Spring Boot, annotations, records):', 'tree-sitter-java'],
        php: ['To enable full PHP support (Laravel, Symfony, PHP 8 attributes):', 'tree-sitter-php'],
    };
    const needsTreeSitter = Object.keys(installTips).filter((lang) => info[lang] && !info[lang].treeSitterAvailable);
    if (needsTreeSitter.length > 0) {
        output += '## Installation Tips\n\n';
        for (const lang of needsTreeSitter) {
            const [description, pkg] = installTips[lang];
            output += `${description}\n`;
            output += '```bash\npnpm add tree-sitter ' + pkg + '\n```\n\n';
        }
    }
    return {
        content: [{ type: 'text', text: output }],
    };
}
|
|
1738
|
-
// ============================================================================
|
|
1739
|
-
// DNA Handler Functions
|
|
1740
|
-
// ============================================================================
|
|
1741
|
-
/**
 * Handle drift_dna tool - Get styling DNA profile
 *
 * Loads the stored styling-DNA profile via `store` and renders it in one of
 * several formats:
 * - no profile stored: instructions to run `drift dna scan`
 * - args.gene (a known gene id): a detailed report for that single gene
 * - format 'full' or 'json': the raw profile as pretty-printed JSON
 * - format 'summary': health-score header plus a per-gene markdown table
 * - default ('ai-context'): AI-oriented context built by AIContextBuilder
 *
 * @param _projectRoot - unused here; kept so all handlers share a signature
 * @param store - profile store exposing initialize() and load()
 * @param args - { gene?, format?, level? }; level (default 3) is the
 *   detail level passed to AIContextBuilder.build
 * @returns MCP-style result: { content: [{ type: 'text', text }] }
 */
async function handleDNA(_projectRoot, store, args) {
    await store.initialize();
    const profile = await store.load();
    // No scan has been run yet: explain how to create a profile.
    if (!profile) {
        return {
            content: [{
                    type: 'text',
                    text: '# No DNA Profile Found\n\n' +
                        'Run `drift dna scan` to analyze your codebase styling patterns.\n\n' +
                        'The DNA profile captures how your codebase handles:\n' +
                        '- Variant handling (primary/secondary buttons, sizes)\n' +
                        '- Responsive design (mobile-first, breakpoints)\n' +
                        '- State styling (hover, focus, disabled)\n' +
                        '- Theming (dark mode, CSS variables)\n' +
                        '- Spacing philosophy (design tokens, scales)\n' +
                        '- Animation approach (transitions, motion)',
                }],
        };
    }
    const format = args.format ?? 'ai-context';
    // Filter by specific gene if requested (unknown ids fall through to the
    // whole-profile formats below).
    if (args.gene && GENE_IDS.includes(args.gene)) {
        const gene = profile.genes[args.gene];
        const mutations = profile.mutations.filter(m => m.gene === args.gene);
        let output = `# Gene: ${gene.name}\n\n`;
        output += `${gene.description}\n\n`;
        output += `## Summary\n`;
        output += `- **Dominant Allele:** ${gene.dominant?.name ?? 'None established'}\n`;
        output += `- **Confidence:** ${Math.round(gene.confidence * 100)}%\n`;
        output += `- **Consistency:** ${Math.round(gene.consistency * 100)}%\n\n`;
        if (gene.alleles.length > 0) {
            output += `## Alleles Detected\n\n`;
            for (const allele of gene.alleles) {
                const marker = allele.isDominant ? ' โ DOMINANT' : '';
                output += `### ${allele.name}${marker}\n`;
                output += `- Frequency: ${Math.round(allele.frequency * 100)}% (${allele.fileCount} files)\n`;
                // Show at most the first example snippet for the allele.
                if (allele.examples.length > 0) {
                    const ex = allele.examples[0];
                    if (ex) {
                        output += `- Example: \`${ex.file}:${ex.line}\`\n`;
                        output += `\`\`\`\n${ex.code}\n\`\`\`\n`;
                    }
                }
                output += '\n';
            }
        }
        if (gene.exemplars.length > 0) {
            output += `## Exemplar Files\n`;
            for (const f of gene.exemplars) {
                output += `- ${f}\n`;
            }
            output += '\n';
        }
        // Mutations are capped at 5 with a "... and N more" trailer.
        if (mutations.length > 0) {
            output += `## Mutations (${mutations.length})\n\n`;
            for (const m of mutations.slice(0, 5)) {
                output += `- **${m.file}:${m.line}** - ${m.actual} (expected: ${m.expected})\n`;
            }
            if (mutations.length > 5) {
                output += `- ... and ${mutations.length - 5} more\n`;
            }
        }
        return { content: [{ type: 'text', text: output }] };
    }
    // Full profile output ('full' and 'json' are treated identically).
    if (format === 'full' || format === 'json') {
        return {
            content: [{ type: 'text', text: JSON.stringify(profile, null, 2) }],
        };
    }
    if (format === 'summary') {
        let output = `# Styling DNA Summary\n\n`;
        output += `- **Health Score:** ${profile.summary.healthScore}/100\n`;
        output += `- **Framework:** ${profile.summary.dominantFramework}\n`;
        output += `- **Genetic Diversity:** ${profile.summary.geneticDiversity.toFixed(2)}\n`;
        output += `- **Components Analyzed:** ${profile.summary.totalComponentsAnalyzed}\n`;
        output += `- **Mutations:** ${profile.mutations.length}\n\n`;
        output += `## Genes\n\n`;
        output += `| Gene | Dominant | Confidence |\n`;
        output += `|------|----------|------------|\n`;
        for (const geneId of GENE_IDS) {
            const gene = profile.genes[geneId];
            output += `| ${gene.name} | ${gene.dominant?.name ?? 'None'} | ${Math.round(gene.confidence * 100)}% |\n`;
        }
        return { content: [{ type: 'text', text: output }] };
    }
    // AI context format (default); level controls how much detail is emitted.
    const level = (args.level ?? 3);
    const builder = new AIContextBuilder();
    const context = builder.build(profile, level);
    return { content: [{ type: 'text', text: context }] };
}
|
|
1836
|
-
/**
 * Handle drift_playbook tool - Generate styling playbook
 *
 * Loads the stored DNA profile, renders the full playbook, and — when a
 * known gene id is supplied via args.section — narrows the result to that
 * gene's markdown section. Falls back to the full playbook when the
 * section cannot be matched.
 */
async function handlePlaybook(_projectRoot, store, args) {
    await store.initialize();
    const profile = await store.load();
    // Without a scanned profile there is nothing to generate from.
    if (!profile) {
        const text = '# No DNA Profile Found\n\n' +
            'Run `drift dna scan` first to analyze your codebase styling patterns.';
        return { content: [{ type: 'text', text }] };
    }
    const playbook = new PlaybookGenerator().generate(profile);
    // Carve the requested "## <gene name>" section out of the rendered
    // markdown (case-insensitive, up to the next H2 heading or end of text).
    if (args.section && GENE_IDS.includes(args.section)) {
        const requestedGene = profile.genes[args.section];
        const sectionPattern = new RegExp(`## ${requestedGene.name}[\\s\\S]*?(?=\\n## |$)`, 'i');
        const sectionMatch = playbook.match(sectionPattern);
        if (sectionMatch) {
            return { content: [{ type: 'text', text: sectionMatch[0] }] };
        }
    }
    return { content: [{ type: 'text', text: playbook }] };
}
|
|
1864
|
-
/**
 * Handle drift_mutations tool - Get styling mutations
 *
 * Lists files that deviate from the established styling DNA, optionally
 * filtered by gene and/or impact, grouped into high/medium/low impact
 * sections (medium capped at 10 entries, low at 5).
 */
async function handleMutations(store, args) {
    await store.initialize();
    const profile = await store.load();
    if (!profile) {
        return {
            content: [{
                    type: 'text',
                    text: '# No DNA Profile Found\n\n' +
                        'Run `drift dna scan` first to analyze your codebase styling patterns.',
                }],
        };
    }
    // Apply the optional gene and impact filters.
    let selected = profile.mutations;
    if (args.gene && GENE_IDS.includes(args.gene)) {
        selected = selected.filter((mutation) => mutation.gene === args.gene);
    }
    if (args.impact && ['low', 'medium', 'high'].includes(args.impact)) {
        selected = selected.filter((mutation) => mutation.impact === args.impact);
    }
    if (selected.length === 0) {
        return {
            content: [{
                    type: 'text',
                    text: '# No Mutations Found\n\n' +
                        'โ Your codebase styling is consistent with the established DNA patterns.',
                }],
        };
    }
    // Bucket the survivors by impact level.
    const grouped = { high: [], medium: [], low: [] };
    for (const mutation of selected) {
        grouped[mutation.impact].push(mutation);
    }
    // Accumulate output fragments and join once at the end.
    const parts = [];
    parts.push(`# Styling Mutations (${selected.length})\n\n`);
    parts.push('Mutations are files that deviate from the established styling DNA.\n\n');
    if (grouped.high.length > 0) {
        parts.push(`## โ ๏ธ High Impact (${grouped.high.length})\n\n`);
        for (const m of grouped.high) {
            parts.push(`### ${m.file}:${m.line}\n`);
            parts.push(`- **Gene:** ${m.gene}\n`);
            parts.push(`- **Found:** ${m.actual}\n`);
            parts.push(`- **Expected:** ${m.expected}\n`);
            if (args.suggest && m.suggestion) {
                parts.push(`- **Suggestion:** ${m.suggestion}\n`);
            }
            parts.push('\n');
        }
    }
    if (grouped.medium.length > 0) {
        parts.push(`## โก Medium Impact (${grouped.medium.length})\n\n`);
        for (const m of grouped.medium.slice(0, 10)) {
            parts.push(`- **${m.file}:${m.line}** - ${m.actual} โ ${m.expected}\n`);
            if (args.suggest && m.suggestion) {
                parts.push(` - ๐ก ${m.suggestion}\n`);
            }
        }
        if (grouped.medium.length > 10) {
            parts.push(`- ... and ${grouped.medium.length - 10} more\n`);
        }
        parts.push('\n');
    }
    if (grouped.low.length > 0) {
        parts.push(`## Low Impact (${grouped.low.length})\n\n`);
        for (const m of grouped.low.slice(0, 5)) {
            parts.push(`- ${m.file}:${m.line} - ${m.actual}\n`);
        }
        if (grouped.low.length > 5) {
            parts.push(`- ... and ${grouped.low.length - 5} more\n`);
        }
    }
    return { content: [{ type: 'text', text: parts.join('') }] };
}
|
|
1943
|
-
/**
 * Handle drift_dna_check tool - Check code against DNA
 *
 * Checks an inline snippet (args.code) or a file (args.file, resolved
 * against projectRoot; file content wins over the snippet) for styling
 * patterns that conflict with each gene's dominant allele. With neither
 * code nor file, returns the DNA conventions for use in code generation.
 *
 * Fix: the allele pattern table (six regex literals) was previously rebuilt
 * on every iteration of the inner per-allele loop even though it is
 * loop-invariant; it is now built once before the loops. None of the
 * regexes use the /g flag, so hoisting introduces no lastIndex state.
 *
 * @param {string} projectRoot - base directory for resolving args.file
 * @param {object} store - DNA profile store exposing initialize() and load()
 * @param {{ code?: string, file?: string, genes?: string[] }} args
 */
async function handleDNACheck(projectRoot, store, args) {
    await store.initialize();
    const profile = await store.load();
    if (!profile) {
        return {
            content: [{
                    type: 'text',
                    text: '# No DNA Profile Found\n\n' +
                        'Run `drift dna scan` first to establish styling patterns.',
                }],
        };
    }
    if (!args.code && !args.file) {
        // Nothing to check: return DNA conventions for code generation.
        const builder = new AIContextBuilder();
        const context = builder.build(profile, 3);
        return {
            content: [{
                    type: 'text',
                    text: '# Styling DNA Conventions\n\n' +
                        'Use these conventions when generating component code:\n\n' +
                        context,
                }],
        };
    }
    // If file is provided, read it (replacing any inline snippet).
    let code = args.code ?? '';
    if (args.file) {
        try {
            const fs = await import('node:fs/promises');
            const path = await import('node:path');
            code = await fs.readFile(path.join(projectRoot, args.file), 'utf-8');
        }
        catch {
            return {
                content: [{
                        type: 'text',
                        text: `Error: Could not read file "${args.file}"`,
                    }],
                isError: true,
            };
        }
    }
    // Simple heuristic patterns for known non-dominant alleles, keyed by
    // allele id. Loop-invariant: built once, used by the loops below.
    const allelePatternTable = {
        'styled-variants': [/styled\.\w+/, /\$\{props\s*=>/],
        'inline-conditionals': [/className=\{[^}]*\?[^}]*\}/],
        'tailwind-desktop-first': [/className="[^"]*\b(lg|xl|2xl):[^"]*\bsm:/],
        'js-responsive': [/useMediaQuery|useBreakpoint/],
        'hardcoded': [/style=\{\{[^}]*padding:\s*['"]?\d+px/],
        'framer-motion': [/<motion\./],
    };
    // Simple pattern matching check against DNA.
    const issues = [];
    const genesToCheck = args.genes?.filter(g => GENE_IDS.includes(g)) ?? [...GENE_IDS];
    for (const geneId of genesToCheck) {
        const gene = profile.genes[geneId];
        // A gene without an established dominant allele has nothing to enforce.
        if (!gene.dominant)
            continue;
        // Look for non-dominant alleles showing up in the code.
        for (const allele of gene.alleles) {
            if (allele.isDominant)
                continue;
            const allelePatterns = allelePatternTable[allele.id];
            if (allelePatterns) {
                for (const pattern of allelePatterns) {
                    if (pattern.test(code)) {
                        issues.push({
                            gene: gene.name,
                            issue: `Found ${allele.name} pattern`,
                            suggestion: `Consider using ${gene.dominant.name} instead`,
                        });
                        break; // one issue per allele is enough
                    }
                }
            }
        }
    }
    if (issues.length === 0) {
        return {
            content: [{
                    type: 'text',
                    text: '# DNA Check: โ Passed\n\n' +
                        'The code appears to follow the established styling DNA patterns.',
                }],
        };
    }
    let output = `# DNA Check: โ ๏ธ ${issues.length} Issue(s) Found\n\n`;
    for (const issue of issues) {
        output += `## ${issue.gene}\n`;
        output += `- **Issue:** ${issue.issue}\n`;
        output += `- **Suggestion:** ${issue.suggestion}\n\n`;
    }
    return { content: [{ type: 'text', text: output }] };
}
|
|
2041
|
-
/**
|
|
2042
|
-
* Handle drift_boundaries tool - Data access boundaries
|
|
2043
|
-
*/
|
|
2044
|
-
async function handleBoundaries(store, args) {
|
|
2045
|
-
await store.initialize();
|
|
2046
|
-
const action = args.action ?? 'overview';
|
|
2047
|
-
const includeViolations = args.includeViolations ?? true;
|
|
2048
|
-
const limit = args.limit ?? 10;
|
|
2049
|
-
switch (action) {
|
|
2050
|
-
case 'overview': {
|
|
2051
|
-
const accessMap = store.getAccessMap();
|
|
2052
|
-
const sensitiveFields = store.getSensitiveAccess();
|
|
2053
|
-
let output = '# Data Boundaries Overview\n\n';
|
|
2054
|
-
output += `- **Tables:** ${accessMap.stats.totalTables}\n`;
|
|
2055
|
-
output += `- **Access Points:** ${accessMap.stats.totalAccessPoints}\n`;
|
|
2056
|
-
output += `- **Sensitive Fields:** ${accessMap.stats.totalSensitiveFields}\n`;
|
|
2057
|
-
output += `- **ORM Models:** ${accessMap.stats.totalModels}\n\n`;
|
|
2058
|
-
if (accessMap.stats.totalTables > 0) {
|
|
2059
|
-
output += '## Tables\n\n';
|
|
2060
|
-
const tableEntries = Object.entries(accessMap.tables)
|
|
2061
|
-
.map(([name, info]) => ({
|
|
2062
|
-
name,
|
|
2063
|
-
accessCount: info.accessedBy.length,
|
|
2064
|
-
hasSensitive: info.sensitiveFields.length > 0,
|
|
2065
|
-
}))
|
|
2066
|
-
.sort((a, b) => b.accessCount - a.accessCount);
|
|
2067
|
-
const totalTables = tableEntries.length;
|
|
2068
|
-
const limitedTables = tableEntries.slice(0, limit);
|
|
2069
|
-
for (const table of limitedTables) {
|
|
2070
|
-
const sensitive = table.hasSensitive ? ' โ ๏ธ' : '';
|
|
2071
|
-
output += `- **${table.name}**${sensitive}: ${table.accessCount} access points\n`;
|
|
2072
|
-
}
|
|
2073
|
-
if (totalTables > limit) {
|
|
2074
|
-
output += `\n*... and ${totalTables - limit} more tables. Use \`drift_boundaries action="table" table="<name>"\` for details.*\n`;
|
|
2075
|
-
}
|
|
2076
|
-
output += '\n';
|
|
2077
|
-
}
|
|
2078
|
-
if (sensitiveFields.length > 0) {
|
|
2079
|
-
output += '## Sensitive Fields\n\n';
|
|
2080
|
-
const fieldCounts = new Map();
|
|
2081
|
-
for (const field of sensitiveFields) {
|
|
2082
|
-
const key = field.table ? `${field.table}.${field.field}` : field.field;
|
|
2083
|
-
fieldCounts.set(key, (fieldCounts.get(key) ?? 0) + 1);
|
|
2084
|
-
}
|
|
2085
|
-
const sortedFields = Array.from(fieldCounts.entries())
|
|
2086
|
-
.sort((a, b) => b[1] - a[1]);
|
|
2087
|
-
const totalFields = sortedFields.length;
|
|
2088
|
-
const limitedFields = sortedFields.slice(0, limit);
|
|
2089
|
-
for (const [fieldName, count] of limitedFields) {
|
|
2090
|
-
output += `- **${fieldName}**: ${count} locations\n`;
|
|
2091
|
-
}
|
|
2092
|
-
if (totalFields > limit) {
|
|
2093
|
-
output += `\n*... and ${totalFields - limit} more fields. Use \`drift_boundaries action="sensitive"\` for full list.*\n`;
|
|
2094
|
-
}
|
|
2095
|
-
output += '\n';
|
|
2096
|
-
}
|
|
2097
|
-
if (includeViolations) {
|
|
2098
|
-
const rules = store.getRules();
|
|
2099
|
-
if (rules) {
|
|
2100
|
-
const violations = store.checkAllViolations();
|
|
2101
|
-
if (violations.length > 0) {
|
|
2102
|
-
const limitedViolations = violations.slice(0, limit);
|
|
2103
|
-
output += `## โ ๏ธ Violations (${violations.length})\n\n`;
|
|
2104
|
-
for (const v of limitedViolations) {
|
|
2105
|
-
output += `- **${v.file}:${v.line}** - ${v.message}\n`;
|
|
2106
|
-
}
|
|
2107
|
-
if (violations.length > limit) {
|
|
2108
|
-
output += `\n*... and ${violations.length - limit} more. Use \`drift_boundaries action="check"\` for full list.*\n`;
|
|
2109
|
-
}
|
|
2110
|
-
}
|
|
2111
|
-
else {
|
|
2112
|
-
output += '## โ No Violations\n\nAll data access follows configured boundaries.\n';
|
|
2113
|
-
}
|
|
2114
|
-
}
|
|
2115
|
-
else {
|
|
2116
|
-
output += '## No Rules Configured\n\n';
|
|
2117
|
-
output += 'Create `.drift/boundaries/rules.json` to enable boundary enforcement.\n';
|
|
2118
|
-
output += 'Run `drift boundaries init-rules` to generate a starter config.\n';
|
|
2119
|
-
}
|
|
2120
|
-
}
|
|
2121
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2122
|
-
}
|
|
2123
|
-
case 'table': {
|
|
2124
|
-
if (!args.table) {
|
|
2125
|
-
return {
|
|
2126
|
-
content: [{ type: 'text', text: 'Error: table parameter required for "table" action' }],
|
|
2127
|
-
isError: true,
|
|
2128
|
-
};
|
|
2129
|
-
}
|
|
2130
|
-
const tableInfo = store.getTableAccess(args.table);
|
|
2131
|
-
if (!tableInfo) {
|
|
2132
|
-
return {
|
|
2133
|
-
content: [{ type: 'text', text: `Table '${args.table}' not found in access map.` }],
|
|
2134
|
-
};
|
|
2135
|
-
}
|
|
2136
|
-
let output = `# Table: ${args.table}\n\n`;
|
|
2137
|
-
if (tableInfo.model) {
|
|
2138
|
-
output += `**Model:** ${tableInfo.model}\n`;
|
|
2139
|
-
}
|
|
2140
|
-
output += `**Fields:** ${tableInfo.fields.join(', ') || 'none detected'}\n`;
|
|
2141
|
-
output += `**Access Points:** ${tableInfo.accessedBy.length}\n\n`;
|
|
2142
|
-
if (tableInfo.sensitiveFields.length > 0) {
|
|
2143
|
-
output += '## โ ๏ธ Sensitive Fields\n\n';
|
|
2144
|
-
for (const sf of tableInfo.sensitiveFields) {
|
|
2145
|
-
output += `- **${sf.field}** (${sf.sensitivityType})\n`;
|
|
2146
|
-
}
|
|
2147
|
-
output += '\n';
|
|
2148
|
-
}
|
|
2149
|
-
output += '## Access Points\n\n';
|
|
2150
|
-
const byFile = new Map();
|
|
2151
|
-
for (const ap of tableInfo.accessedBy) {
|
|
2152
|
-
if (!byFile.has(ap.file))
|
|
2153
|
-
byFile.set(ap.file, []);
|
|
2154
|
-
byFile.get(ap.file).push(ap);
|
|
2155
|
-
}
|
|
2156
|
-
for (const [file, accessPoints] of byFile) {
|
|
2157
|
-
output += `### ${file}\n`;
|
|
2158
|
-
for (const ap of accessPoints) {
|
|
2159
|
-
output += `- Line ${ap.line}: **${ap.operation}** ${ap.fields.join(', ')}\n`;
|
|
2160
|
-
}
|
|
2161
|
-
output += '\n';
|
|
2162
|
-
}
|
|
2163
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2164
|
-
}
|
|
2165
|
-
case 'file': {
|
|
2166
|
-
if (!args.file) {
|
|
2167
|
-
return {
|
|
2168
|
-
content: [{ type: 'text', text: 'Error: file parameter required for "file" action' }],
|
|
2169
|
-
isError: true,
|
|
2170
|
-
};
|
|
2171
|
-
}
|
|
2172
|
-
const fileAccess = store.getFileAccess(args.file);
|
|
2173
|
-
let output = `# Data Access: ${args.file}\n\n`;
|
|
2174
|
-
if (fileAccess.length === 0) {
|
|
2175
|
-
output += 'No data access detected for this file/pattern.\n';
|
|
2176
|
-
}
|
|
2177
|
-
else {
|
|
2178
|
-
for (const fileInfo of fileAccess) {
|
|
2179
|
-
output += `## ${fileInfo.file}\n\n`;
|
|
2180
|
-
output += `**Tables:** ${fileInfo.tables.join(', ')}\n`;
|
|
2181
|
-
output += `**Access Points:** ${fileInfo.accessPoints.length}\n\n`;
|
|
2182
|
-
for (const ap of fileInfo.accessPoints) {
|
|
2183
|
-
output += `- Line ${ap.line}: **${ap.operation}** ${ap.table} ${ap.fields.join(', ')}\n`;
|
|
2184
|
-
}
|
|
2185
|
-
output += '\n';
|
|
2186
|
-
}
|
|
2187
|
-
}
|
|
2188
|
-
// Include boundary rules context if available
|
|
2189
|
-
const rules = store.getRules();
|
|
2190
|
-
if (rules && fileAccess.length > 0) {
|
|
2191
|
-
const allTables = new Set(fileAccess.flatMap(f => f.tables));
|
|
2192
|
-
const applicableRules = rules.boundaries.filter(b => b.tables?.some(t => allTables.has(t)) ||
|
|
2193
|
-
b.fields?.some(f => allTables.has(f.split('.')[0] || '')));
|
|
2194
|
-
if (applicableRules.length > 0) {
|
|
2195
|
-
output += '## Applicable Boundaries\n\n';
|
|
2196
|
-
for (const rule of applicableRules) {
|
|
2197
|
-
output += `- **${rule.id}**: ${rule.description}\n`;
|
|
2198
|
-
output += ` - Allowed: ${rule.allowedPaths.join(', ')}\n`;
|
|
2199
|
-
}
|
|
2200
|
-
}
|
|
2201
|
-
}
|
|
2202
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2203
|
-
}
|
|
2204
|
-
case 'sensitive': {
|
|
2205
|
-
const sensitiveFields = store.getSensitiveAccess();
|
|
2206
|
-
let output = '# Sensitive Data Access\n\n';
|
|
2207
|
-
if (sensitiveFields.length === 0) {
|
|
2208
|
-
output += 'No sensitive fields detected.\n';
|
|
2209
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2210
|
-
}
|
|
2211
|
-
// Group by sensitivity type
|
|
2212
|
-
const byType = new Map();
|
|
2213
|
-
for (const field of sensitiveFields) {
|
|
2214
|
-
const type = field.sensitivityType;
|
|
2215
|
-
if (!byType.has(type))
|
|
2216
|
-
byType.set(type, []);
|
|
2217
|
-
byType.get(type).push(field);
|
|
2218
|
-
}
|
|
2219
|
-
for (const [type, fields] of byType) {
|
|
2220
|
-
output += `## ${type.toUpperCase()} (${fields.length})\n\n`;
|
|
2221
|
-
for (const f of fields) {
|
|
2222
|
-
const fieldName = f.table ? `${f.table}.${f.field}` : f.field;
|
|
2223
|
-
output += `- **${fieldName}**\n`;
|
|
2224
|
-
output += ` - ${f.file}:${f.line}\n`;
|
|
2225
|
-
}
|
|
2226
|
-
output += '\n';
|
|
2227
|
-
}
|
|
2228
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2229
|
-
}
|
|
2230
|
-
case 'check': {
|
|
2231
|
-
const rules = store.getRules();
|
|
2232
|
-
if (!rules) {
|
|
2233
|
-
return {
|
|
2234
|
-
content: [{
|
|
2235
|
-
type: 'text',
|
|
2236
|
-
text: '# No Boundary Rules Configured\n\n' +
|
|
2237
|
-
'Create `.drift/boundaries/rules.json` to enable boundary enforcement.\n' +
|
|
2238
|
-
'Run `drift boundaries init-rules` to generate a starter config.',
|
|
2239
|
-
}],
|
|
2240
|
-
};
|
|
2241
|
-
}
|
|
2242
|
-
const violations = store.checkAllViolations();
|
|
2243
|
-
if (violations.length === 0) {
|
|
2244
|
-
return {
|
|
2245
|
-
content: [{
|
|
2246
|
-
type: 'text',
|
|
2247
|
-
text: '# โ No Boundary Violations\n\n' +
|
|
2248
|
-
`Checked ${rules.boundaries.length} rules. All data access follows configured boundaries.`,
|
|
2249
|
-
}],
|
|
2250
|
-
};
|
|
2251
|
-
}
|
|
2252
|
-
let output = `# โ ๏ธ Boundary Violations (${violations.length})\n\n`;
|
|
2253
|
-
// Group by severity
|
|
2254
|
-
const errors = violations.filter(v => v.severity === 'error');
|
|
2255
|
-
const warnings = violations.filter(v => v.severity === 'warning');
|
|
2256
|
-
const infos = violations.filter(v => v.severity === 'info');
|
|
2257
|
-
if (errors.length > 0) {
|
|
2258
|
-
output += `## ๐ด Errors (${errors.length})\n\n`;
|
|
2259
|
-
for (const v of errors) {
|
|
2260
|
-
output += `### ${v.file}:${v.line}\n`;
|
|
2261
|
-
output += `- **Rule:** ${v.ruleId}\n`;
|
|
2262
|
-
output += `- **Message:** ${v.message}\n`;
|
|
2263
|
-
if (v.suggestion) {
|
|
2264
|
-
output += `- **Suggestion:** ${v.suggestion}\n`;
|
|
2265
|
-
}
|
|
2266
|
-
output += '\n';
|
|
2267
|
-
}
|
|
2268
|
-
}
|
|
2269
|
-
if (warnings.length > 0) {
|
|
2270
|
-
output += `## ๐ก Warnings (${warnings.length})\n\n`;
|
|
2271
|
-
for (const v of warnings) {
|
|
2272
|
-
output += `- **${v.file}:${v.line}** - ${v.message}\n`;
|
|
2273
|
-
}
|
|
2274
|
-
output += '\n';
|
|
2275
|
-
}
|
|
2276
|
-
if (infos.length > 0) {
|
|
2277
|
-
output += `## โน๏ธ Info (${infos.length})\n\n`;
|
|
2278
|
-
for (const v of infos.slice(0, 5)) {
|
|
2279
|
-
output += `- ${v.file}:${v.line} - ${v.message}\n`;
|
|
2280
|
-
}
|
|
2281
|
-
if (infos.length > 5) {
|
|
2282
|
-
output += `- ... and ${infos.length - 5} more\n`;
|
|
2283
|
-
}
|
|
2284
|
-
}
|
|
2285
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2286
|
-
}
|
|
2287
|
-
case 'rules': {
|
|
2288
|
-
const rules = store.getRules();
|
|
2289
|
-
if (!rules) {
|
|
2290
|
-
return {
|
|
2291
|
-
content: [{
|
|
2292
|
-
type: 'text',
|
|
2293
|
-
text: '# No Boundary Rules Configured\n\n' +
|
|
2294
|
-
'Create `.drift/boundaries/rules.json` to define data access boundaries.\n\n' +
|
|
2295
|
-
'## Example Rules\n\n' +
|
|
2296
|
-
'```json\n' +
|
|
2297
|
-
'{\n' +
|
|
2298
|
-
' "version": "1.0",\n' +
|
|
2299
|
-
' "sensitivity": {\n' +
|
|
2300
|
-
' "critical": ["users.password_hash", "users.ssn"],\n' +
|
|
2301
|
-
' "sensitive": ["users.email", "users.phone"],\n' +
|
|
2302
|
-
' "general": []\n' +
|
|
2303
|
-
' },\n' +
|
|
2304
|
-
' "boundaries": [\n' +
|
|
2305
|
-
' {\n' +
|
|
2306
|
-
' "id": "auth-owns-credentials",\n' +
|
|
2307
|
-
' "description": "Only auth module can access credentials",\n' +
|
|
2308
|
-
' "fields": ["users.password_hash"],\n' +
|
|
2309
|
-
' "allowedPaths": ["**/auth/**"],\n' +
|
|
2310
|
-
' "severity": "error"\n' +
|
|
2311
|
-
' }\n' +
|
|
2312
|
-
' ]\n' +
|
|
2313
|
-
'}\n' +
|
|
2314
|
-
'```',
|
|
2315
|
-
}],
|
|
2316
|
-
};
|
|
2317
|
-
}
|
|
2318
|
-
let output = '# Data Boundary Rules\n\n';
|
|
2319
|
-
output += '## Sensitivity Tiers\n\n';
|
|
2320
|
-
output += `- **Critical:** ${rules.sensitivity.critical.join(', ') || 'none'}\n`;
|
|
2321
|
-
output += `- **Sensitive:** ${rules.sensitivity.sensitive.join(', ') || 'none'}\n`;
|
|
2322
|
-
output += '\n';
|
|
2323
|
-
output += `## Boundaries (${rules.boundaries.length})\n\n`;
|
|
2324
|
-
for (const b of rules.boundaries) {
|
|
2325
|
-
const enabled = b.enabled !== false ? 'โ' : 'โ';
|
|
2326
|
-
output += `### ${enabled} ${b.id}\n`;
|
|
2327
|
-
output += `${b.description}\n\n`;
|
|
2328
|
-
if (b.tables)
|
|
2329
|
-
output += `- **Tables:** ${b.tables.join(', ')}\n`;
|
|
2330
|
-
if (b.fields)
|
|
2331
|
-
output += `- **Fields:** ${b.fields.join(', ')}\n`;
|
|
2332
|
-
if (b.operations)
|
|
2333
|
-
output += `- **Operations:** ${b.operations.join(', ')}\n`;
|
|
2334
|
-
output += `- **Allowed:** ${b.allowedPaths.join(', ')}\n`;
|
|
2335
|
-
output += `- **Severity:** ${b.severity}\n\n`;
|
|
2336
|
-
}
|
|
2337
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2338
|
-
}
|
|
2339
|
-
default:
|
|
2340
|
-
return {
|
|
2341
|
-
content: [{ type: 'text', text: `Unknown action: ${action}` }],
|
|
2342
|
-
isError: true,
|
|
2343
|
-
};
|
|
2344
|
-
}
|
|
2345
|
-
}
|
|
2346
|
-
// ============================================================================
|
|
2347
|
-
// Call Graph Handler Function
|
|
2348
|
-
// ============================================================================
|
|
2349
|
-
/**
|
|
2350
|
-
* Handle drift_callgraph tool - Call graph analysis
|
|
2351
|
-
*/
|
|
2352
|
-
async function handleCallGraph(projectRoot, args) {
|
|
2353
|
-
const action = args.action ?? 'status';
|
|
2354
|
-
const maxDepth = args.maxDepth ?? 10;
|
|
2355
|
-
const limit = args.limit ?? 10;
|
|
2356
|
-
const analyzer = createCallGraphAnalyzer({ rootDir: projectRoot });
|
|
2357
|
-
switch (action) {
|
|
2358
|
-
case 'build': {
|
|
2359
|
-
try {
|
|
2360
|
-
// File patterns to scan
|
|
2361
|
-
const filePatterns = [
|
|
2362
|
-
'**/*.ts',
|
|
2363
|
-
'**/*.tsx',
|
|
2364
|
-
'**/*.js',
|
|
2365
|
-
'**/*.jsx',
|
|
2366
|
-
'**/*.py',
|
|
2367
|
-
];
|
|
2368
|
-
// Step 1: Run semantic data access scanner (tree-sitter/TypeScript compiler based)
|
|
2369
|
-
const semanticScanner = createSemanticDataAccessScanner({ rootDir: projectRoot });
|
|
2370
|
-
const semanticResult = await semanticScanner.scanDirectory({ patterns: filePatterns });
|
|
2371
|
-
// Use semantic results as primary source
|
|
2372
|
-
let dataAccessPoints = semanticResult.accessPoints;
|
|
2373
|
-
// Step 2: Fall back to boundary scanner for additional coverage (regex-based)
|
|
2374
|
-
const boundaryScanner = createBoundaryScanner({ rootDir: projectRoot });
|
|
2375
|
-
await boundaryScanner.initialize();
|
|
2376
|
-
const boundaryResult = await boundaryScanner.scanDirectory({ patterns: filePatterns });
|
|
2377
|
-
// Merge boundary results with semantic results (semantic takes precedence)
|
|
2378
|
-
for (const [, accessPoint] of Object.entries(boundaryResult.accessMap.accessPoints)) {
|
|
2379
|
-
const existing = dataAccessPoints.get(accessPoint.file) ?? [];
|
|
2380
|
-
// Only add if not already detected by semantic scanner
|
|
2381
|
-
const isDuplicate = existing.some(ap => ap.line === accessPoint.line && ap.table === accessPoint.table);
|
|
2382
|
-
if (!isDuplicate) {
|
|
2383
|
-
existing.push(accessPoint);
|
|
2384
|
-
dataAccessPoints.set(accessPoint.file, existing);
|
|
2385
|
-
}
|
|
2386
|
-
}
|
|
2387
|
-
// Step 3: Build call graph with data access points
|
|
2388
|
-
await analyzer.initialize();
|
|
2389
|
-
const graph = await analyzer.scan(filePatterns, dataAccessPoints);
|
|
2390
|
-
let output = '# Call Graph Built\n\n';
|
|
2391
|
-
output += `- **Functions:** ${graph.stats.totalFunctions}\n`;
|
|
2392
|
-
output += `- **Call Sites:** ${graph.stats.totalCallSites}\n`;
|
|
2393
|
-
output += `- **Resolved:** ${graph.stats.resolvedCallSites} (${Math.round(graph.stats.resolvedCallSites / Math.max(1, graph.stats.totalCallSites) * 100)}%)\n`;
|
|
2394
|
-
output += `- **Entry Points:** ${graph.entryPoints.length}\n`;
|
|
2395
|
-
output += `- **Data Accessors:** ${graph.dataAccessors.length}\n\n`;
|
|
2396
|
-
// Semantic scanner stats (primary)
|
|
2397
|
-
if (semanticResult.stats.accessPointsFound > 0) {
|
|
2398
|
-
output += '## Data Access Detection (Semantic)\n\n';
|
|
2399
|
-
output += `- **Files Scanned:** ${semanticResult.stats.filesScanned}\n`;
|
|
2400
|
-
output += `- **Access Points:** ${semanticResult.stats.accessPointsFound}\n`;
|
|
2401
|
-
if (Object.keys(semanticResult.stats.byOrm).length > 0) {
|
|
2402
|
-
const orms = Object.entries(semanticResult.stats.byOrm)
|
|
2403
|
-
.sort((a, b) => b[1] - a[1])
|
|
2404
|
-
.map(([orm, count]) => `${orm}: ${count}`)
|
|
2405
|
-
.join(', ');
|
|
2406
|
-
output += `- **By ORM:** ${orms}\n`;
|
|
2407
|
-
}
|
|
2408
|
-
output += '\n';
|
|
2409
|
-
}
|
|
2410
|
-
// Boundary stats (fallback)
|
|
2411
|
-
if (boundaryResult.stats.accessPointsFound > 0) {
|
|
2412
|
-
output += '## Data Access Detection (Regex Fallback)\n\n';
|
|
2413
|
-
output += `- **Tables Found:** ${boundaryResult.stats.tablesFound}\n`;
|
|
2414
|
-
output += `- **Access Points:** ${boundaryResult.stats.accessPointsFound}\n`;
|
|
2415
|
-
output += `- **Sensitive Fields:** ${boundaryResult.stats.sensitiveFieldsFound}\n\n`;
|
|
2416
|
-
}
|
|
2417
|
-
const languages = Object.entries(graph.stats.byLanguage)
|
|
2418
|
-
.filter(([, count]) => count > 0)
|
|
2419
|
-
.sort((a, b) => b[1] - a[1]);
|
|
2420
|
-
if (languages.length > 0) {
|
|
2421
|
-
output += '## By Language\n\n';
|
|
2422
|
-
for (const [lang, count] of languages) {
|
|
2423
|
-
output += `- **${lang}:** ${count} functions\n`;
|
|
2424
|
-
}
|
|
2425
|
-
}
|
|
2426
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2427
|
-
}
|
|
2428
|
-
catch (error) {
|
|
2429
|
-
return {
|
|
2430
|
-
content: [{ type: 'text', text: `Error building call graph: ${error}` }],
|
|
2431
|
-
isError: true,
|
|
2432
|
-
};
|
|
2433
|
-
}
|
|
2434
|
-
}
|
|
2435
|
-
case 'status': {
|
|
2436
|
-
try {
|
|
2437
|
-
await analyzer.initialize();
|
|
2438
|
-
const graph = analyzer.getGraph();
|
|
2439
|
-
if (!graph) {
|
|
2440
|
-
return {
|
|
2441
|
-
content: [{
|
|
2442
|
-
type: 'text',
|
|
2443
|
-
text: '# No Call Graph Found\n\n' +
|
|
2444
|
-
'Run `drift_callgraph action="build"` to build the call graph first.\n\n' +
|
|
2445
|
-
'The call graph enables:\n' +
|
|
2446
|
-
'- Forward reachability: "What data can this code access?"\n' +
|
|
2447
|
-
'- Inverse reachability: "Who can access this data?"\n' +
|
|
2448
|
-
'- Security impact analysis\n',
|
|
2449
|
-
}],
|
|
2450
|
-
};
|
|
2451
|
-
}
|
|
2452
|
-
let output = '# Call Graph Status\n\n';
|
|
2453
|
-
output += `- **Functions:** ${graph.stats.totalFunctions}\n`;
|
|
2454
|
-
output += `- **Call Sites:** ${graph.stats.totalCallSites} (${graph.stats.resolvedCallSites} resolved)\n`;
|
|
2455
|
-
output += `- **Entry Points:** ${graph.entryPoints.length}\n`;
|
|
2456
|
-
output += `- **Data Accessors:** ${graph.dataAccessors.length}\n\n`;
|
|
2457
|
-
if (graph.entryPoints.length > 0) {
|
|
2458
|
-
output += '## Entry Points\n\n';
|
|
2459
|
-
const limitedEntryPoints = graph.entryPoints.slice(0, limit);
|
|
2460
|
-
for (const id of limitedEntryPoints) {
|
|
2461
|
-
const func = graph.functions.get(id);
|
|
2462
|
-
if (func) {
|
|
2463
|
-
output += `- **${func.qualifiedName}** @ ${func.file}:${func.startLine}\n`;
|
|
2464
|
-
}
|
|
2465
|
-
}
|
|
2466
|
-
if (graph.entryPoints.length > limit) {
|
|
2467
|
-
output += `\n*... and ${graph.entryPoints.length - limit} more entry points*\n`;
|
|
2468
|
-
}
|
|
2469
|
-
output += '\n';
|
|
2470
|
-
}
|
|
2471
|
-
if (graph.dataAccessors.length > 0) {
|
|
2472
|
-
output += '## Data Accessors\n\n';
|
|
2473
|
-
const limitedAccessors = graph.dataAccessors.slice(0, limit);
|
|
2474
|
-
for (const id of limitedAccessors) {
|
|
2475
|
-
const func = graph.functions.get(id);
|
|
2476
|
-
if (func) {
|
|
2477
|
-
const tables = [...new Set(func.dataAccess.map(d => d.table))];
|
|
2478
|
-
output += `- **${func.qualifiedName}** โ [${tables.join(', ')}]\n`;
|
|
2479
|
-
}
|
|
2480
|
-
}
|
|
2481
|
-
if (graph.dataAccessors.length > limit) {
|
|
2482
|
-
output += `\n*... and ${graph.dataAccessors.length - limit} more data accessors*\n`;
|
|
2483
|
-
}
|
|
2484
|
-
}
|
|
2485
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2486
|
-
}
|
|
2487
|
-
catch (error) {
|
|
2488
|
-
return {
|
|
2489
|
-
content: [{ type: 'text', text: `Error: ${error}` }],
|
|
2490
|
-
isError: true,
|
|
2491
|
-
};
|
|
2492
|
-
}
|
|
2493
|
-
}
|
|
2494
|
-
case 'reach': {
|
|
2495
|
-
if (!args.location) {
|
|
2496
|
-
return {
|
|
2497
|
-
content: [{
|
|
2498
|
-
type: 'text',
|
|
2499
|
-
text: 'Error: location parameter required for "reach" action.\n\n' +
|
|
2500
|
-
'Examples:\n' +
|
|
2501
|
-
'- `drift_callgraph action="reach" location="src/auth.py:45"`\n' +
|
|
2502
|
-
'- `drift_callgraph action="reach" location="login_user"`',
|
|
2503
|
-
}],
|
|
2504
|
-
isError: true,
|
|
2505
|
-
};
|
|
2506
|
-
}
|
|
2507
|
-
try {
|
|
2508
|
-
await analyzer.initialize();
|
|
2509
|
-
const graph = analyzer.getGraph();
|
|
2510
|
-
if (!graph) {
|
|
2511
|
-
return {
|
|
2512
|
-
content: [{ type: 'text', text: 'No call graph found. Run build first.' }],
|
|
2513
|
-
isError: true,
|
|
2514
|
-
};
|
|
2515
|
-
}
|
|
2516
|
-
// Parse location
|
|
2517
|
-
let result;
|
|
2518
|
-
if (args.location.includes(':')) {
|
|
2519
|
-
const parts = args.location.split(':');
|
|
2520
|
-
const file = parts[0];
|
|
2521
|
-
const lineStr = parts[1];
|
|
2522
|
-
if (file && lineStr) {
|
|
2523
|
-
const line = parseInt(lineStr, 10);
|
|
2524
|
-
if (!isNaN(line)) {
|
|
2525
|
-
result = analyzer.getReachableData(file, line, { maxDepth });
|
|
2526
|
-
}
|
|
2527
|
-
}
|
|
2528
|
-
}
|
|
2529
|
-
if (!result) {
|
|
2530
|
-
// Try as function name
|
|
2531
|
-
let funcId;
|
|
2532
|
-
for (const [id, func] of graph.functions) {
|
|
2533
|
-
if (func.name === args.location || func.qualifiedName === args.location) {
|
|
2534
|
-
funcId = id;
|
|
2535
|
-
break;
|
|
2536
|
-
}
|
|
2537
|
-
}
|
|
2538
|
-
if (funcId) {
|
|
2539
|
-
result = analyzer.getReachableDataFromFunction(funcId, { maxDepth });
|
|
2540
|
-
}
|
|
2541
|
-
else {
|
|
2542
|
-
return {
|
|
2543
|
-
content: [{ type: 'text', text: `Function or location '${args.location}' not found.` }],
|
|
2544
|
-
isError: true,
|
|
2545
|
-
};
|
|
2546
|
-
}
|
|
2547
|
-
}
|
|
2548
|
-
let output = '# Reachability Analysis\n\n';
|
|
2549
|
-
output += `**Origin:** ${args.location}\n`;
|
|
2550
|
-
output += `**Tables Reachable:** ${result.tables.join(', ') || 'none'}\n`;
|
|
2551
|
-
output += `**Functions Traversed:** ${result.functionsTraversed}\n`;
|
|
2552
|
-
output += `**Max Depth:** ${result.maxDepth}\n\n`;
|
|
2553
|
-
if (result.sensitiveFields.length > 0) {
|
|
2554
|
-
output += '## โ ๏ธ Sensitive Fields Accessible\n\n';
|
|
2555
|
-
for (const sf of result.sensitiveFields) {
|
|
2556
|
-
output += `- **${sf.field.table}.${sf.field.field}** (${sf.field.sensitivityType})\n`;
|
|
2557
|
-
output += ` - ${sf.accessCount} access point(s), ${sf.paths.length} path(s)\n`;
|
|
2558
|
-
}
|
|
2559
|
-
output += '\n';
|
|
2560
|
-
}
|
|
2561
|
-
if (result.reachableAccess.length > 0) {
|
|
2562
|
-
output += '## Data Access Points\n\n';
|
|
2563
|
-
for (const ra of result.reachableAccess.slice(0, 15)) {
|
|
2564
|
-
output += `- **${ra.access.operation}** ${ra.access.table}.${ra.access.fields.join(', ')}\n`;
|
|
2565
|
-
output += ` - Path: ${ra.path.map(p => p.functionName).join(' โ ')}\n`;
|
|
2566
|
-
}
|
|
2567
|
-
if (result.reachableAccess.length > 15) {
|
|
2568
|
-
output += `- ... and ${result.reachableAccess.length - 15} more\n`;
|
|
2569
|
-
}
|
|
2570
|
-
}
|
|
2571
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2572
|
-
}
|
|
2573
|
-
catch (error) {
|
|
2574
|
-
return {
|
|
2575
|
-
content: [{ type: 'text', text: `Error: ${error}` }],
|
|
2576
|
-
isError: true,
|
|
2577
|
-
};
|
|
2578
|
-
}
|
|
2579
|
-
}
|
|
2580
|
-
case 'inverse': {
|
|
2581
|
-
if (!args.target) {
|
|
2582
|
-
return {
|
|
2583
|
-
content: [{
|
|
2584
|
-
type: 'text',
|
|
2585
|
-
text: 'Error: target parameter required for "inverse" action.\n\n' +
|
|
2586
|
-
'Examples:\n' +
|
|
2587
|
-
'- `drift_callgraph action="inverse" target="users"`\n' +
|
|
2588
|
-
'- `drift_callgraph action="inverse" target="users.password_hash"`',
|
|
2589
|
-
}],
|
|
2590
|
-
isError: true,
|
|
2591
|
-
};
|
|
2592
|
-
}
|
|
2593
|
-
try {
|
|
2594
|
-
await analyzer.initialize();
|
|
2595
|
-
const graph = analyzer.getGraph();
|
|
2596
|
-
if (!graph) {
|
|
2597
|
-
return {
|
|
2598
|
-
content: [{ type: 'text', text: 'No call graph found. Run build first.' }],
|
|
2599
|
-
isError: true,
|
|
2600
|
-
};
|
|
2601
|
-
}
|
|
2602
|
-
const parts = args.target.split('.');
|
|
2603
|
-
const table = parts[0] ?? '';
|
|
2604
|
-
const field = parts.length > 1 ? parts.slice(1).join('.') : undefined;
|
|
2605
|
-
const result = analyzer.getCodePathsToData(field ? { table, field, maxDepth } : { table, maxDepth });
|
|
2606
|
-
let output = '# Inverse Reachability\n\n';
|
|
2607
|
-
output += `**Target:** ${args.target}\n`;
|
|
2608
|
-
output += `**Direct Accessors:** ${result.totalAccessors}\n`;
|
|
2609
|
-
output += `**Entry Points That Can Reach:** ${result.entryPoints.length}\n\n`;
|
|
2610
|
-
if (result.accessPaths.length > 0) {
|
|
2611
|
-
output += '## Access Paths\n\n';
|
|
2612
|
-
for (const ap of result.accessPaths.slice(0, 10)) {
|
|
2613
|
-
const entryFunc = graph.functions.get(ap.entryPoint);
|
|
2614
|
-
if (entryFunc) {
|
|
2615
|
-
output += `### ${entryFunc.qualifiedName}\n`;
|
|
2616
|
-
output += `- Path: ${ap.path.map(p => p.functionName).join(' โ ')}\n\n`;
|
|
2617
|
-
}
|
|
2618
|
-
}
|
|
2619
|
-
if (result.accessPaths.length > 10) {
|
|
2620
|
-
output += `... and ${result.accessPaths.length - 10} more paths\n`;
|
|
2621
|
-
}
|
|
2622
|
-
}
|
|
2623
|
-
else {
|
|
2624
|
-
output += 'No entry points can reach this data.\n';
|
|
2625
|
-
}
|
|
2626
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2627
|
-
}
|
|
2628
|
-
catch (error) {
|
|
2629
|
-
return {
|
|
2630
|
-
content: [{ type: 'text', text: `Error: ${error}` }],
|
|
2631
|
-
isError: true,
|
|
2632
|
-
};
|
|
2633
|
-
}
|
|
2634
|
-
}
|
|
2635
|
-
case 'function': {
|
|
2636
|
-
if (!args.functionName) {
|
|
2637
|
-
return {
|
|
2638
|
-
content: [{
|
|
2639
|
-
type: 'text',
|
|
2640
|
-
text: 'Error: functionName parameter required for "function" action.',
|
|
2641
|
-
}],
|
|
2642
|
-
isError: true,
|
|
2643
|
-
};
|
|
2644
|
-
}
|
|
2645
|
-
try {
|
|
2646
|
-
await analyzer.initialize();
|
|
2647
|
-
const graph = analyzer.getGraph();
|
|
2648
|
-
if (!graph) {
|
|
2649
|
-
return {
|
|
2650
|
-
content: [{ type: 'text', text: 'No call graph found. Run build first.' }],
|
|
2651
|
-
isError: true,
|
|
2652
|
-
};
|
|
2653
|
-
}
|
|
2654
|
-
let func;
|
|
2655
|
-
for (const [, f] of graph.functions) {
|
|
2656
|
-
if (f.name === args.functionName || f.qualifiedName === args.functionName) {
|
|
2657
|
-
func = f;
|
|
2658
|
-
break;
|
|
2659
|
-
}
|
|
2660
|
-
}
|
|
2661
|
-
if (!func) {
|
|
2662
|
-
return {
|
|
2663
|
-
content: [{ type: 'text', text: `Function '${args.functionName}' not found.` }],
|
|
2664
|
-
isError: true,
|
|
2665
|
-
};
|
|
2666
|
-
}
|
|
2667
|
-
let output = `# Function: ${func.qualifiedName}\n\n`;
|
|
2668
|
-
output += `- **File:** ${func.file}:${func.startLine}\n`;
|
|
2669
|
-
output += `- **Language:** ${func.language}\n`;
|
|
2670
|
-
if (func.className)
|
|
2671
|
-
output += `- **Class:** ${func.className}\n`;
|
|
2672
|
-
output += `- **Exported:** ${func.isExported ? 'yes' : 'no'}\n`;
|
|
2673
|
-
output += `- **Async:** ${func.isAsync ? 'yes' : 'no'}\n\n`;
|
|
2674
|
-
if (func.parameters.length > 0) {
|
|
2675
|
-
output += '## Parameters\n\n';
|
|
2676
|
-
for (const p of func.parameters) {
|
|
2677
|
-
output += `- **${p.name}**${p.type ? `: ${p.type}` : ''}\n`;
|
|
2678
|
-
}
|
|
2679
|
-
output += '\n';
|
|
2680
|
-
}
|
|
2681
|
-
if (func.calls.length > 0) {
|
|
2682
|
-
output += `## Calls (${func.calls.length})\n\n`;
|
|
2683
|
-
for (const c of func.calls.slice(0, 10)) {
|
|
2684
|
-
const status = c.resolved ? 'โ' : '?';
|
|
2685
|
-
output += `- ${status} **${c.calleeName}** (line ${c.line})\n`;
|
|
2686
|
-
}
|
|
2687
|
-
if (func.calls.length > 10) {
|
|
2688
|
-
output += `- ... and ${func.calls.length - 10} more\n`;
|
|
2689
|
-
}
|
|
2690
|
-
output += '\n';
|
|
2691
|
-
}
|
|
2692
|
-
if (func.calledBy.length > 0) {
|
|
2693
|
-
output += `## Called By (${func.calledBy.length})\n\n`;
|
|
2694
|
-
for (const c of func.calledBy.slice(0, 10)) {
|
|
2695
|
-
const caller = graph.functions.get(c.callerId);
|
|
2696
|
-
output += `- **${caller?.qualifiedName ?? c.callerId}**\n`;
|
|
2697
|
-
}
|
|
2698
|
-
if (func.calledBy.length > 10) {
|
|
2699
|
-
output += `- ... and ${func.calledBy.length - 10} more\n`;
|
|
2700
|
-
}
|
|
2701
|
-
output += '\n';
|
|
2702
|
-
}
|
|
2703
|
-
if (func.dataAccess.length > 0) {
|
|
2704
|
-
output += '## Data Access\n\n';
|
|
2705
|
-
for (const d of func.dataAccess) {
|
|
2706
|
-
output += `- **${d.operation}** ${d.table}.${d.fields.join(', ')}\n`;
|
|
2707
|
-
}
|
|
2708
|
-
}
|
|
2709
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2710
|
-
}
|
|
2711
|
-
catch (error) {
|
|
2712
|
-
return {
|
|
2713
|
-
content: [{ type: 'text', text: `Error: ${error}` }],
|
|
2714
|
-
isError: true,
|
|
2715
|
-
};
|
|
2716
|
-
}
|
|
2717
|
-
}
|
|
2718
|
-
case 'security': {
|
|
2719
|
-
try {
|
|
2720
|
-
// Import security prioritizer dynamically to avoid circular deps
|
|
2721
|
-
const { createSecurityPrioritizer } = await import('driftdetect-core');
|
|
2722
|
-
// Run boundary scan
|
|
2723
|
-
const boundaryScanner = createBoundaryScanner({ rootDir: projectRoot });
|
|
2724
|
-
await boundaryScanner.initialize();
|
|
2725
|
-
const filePatterns = ['**/*.ts', '**/*.tsx', '**/*.js', '**/*.jsx', '**/*.py'];
|
|
2726
|
-
const boundaryResult = await boundaryScanner.scanDirectory({ patterns: filePatterns });
|
|
2727
|
-
// Prioritize by security
|
|
2728
|
-
const prioritizer = createSecurityPrioritizer();
|
|
2729
|
-
const prioritized = prioritizer.prioritize(boundaryResult.accessMap);
|
|
2730
|
-
const { summary } = prioritized;
|
|
2731
|
-
let output = '# ๐ Security-Prioritized Data Access\n\n';
|
|
2732
|
-
output += '## Summary\n\n';
|
|
2733
|
-
output += `- **Total Access Points:** ${summary.totalAccessPoints}\n`;
|
|
2734
|
-
output += `- **๐ด Critical (P0/P1):** ${summary.criticalCount}\n`;
|
|
2735
|
-
output += `- **๐ก High (P2):** ${summary.highCount}\n`;
|
|
2736
|
-
output += `- **โช Low (P3/P4):** ${summary.lowCount}\n`;
|
|
2737
|
-
output += `- **๐ฆ Noise (filtered):** ${summary.noiseCount}\n\n`;
|
|
2738
|
-
// Regulations
|
|
2739
|
-
if (summary.regulations.length > 0) {
|
|
2740
|
-
output += '## Regulatory Implications\n\n';
|
|
2741
|
-
output += summary.regulations.map(r => `**${r.toUpperCase()}**`).join(', ') + '\n\n';
|
|
2742
|
-
}
|
|
2743
|
-
// Critical items - use limit parameter
|
|
2744
|
-
if (prioritized.critical.length > 0) {
|
|
2745
|
-
const limitedCritical = prioritized.critical.slice(0, limit);
|
|
2746
|
-
output += '## ๐จ Critical Security Items (P0/P1)\n\n';
|
|
2747
|
-
output += 'These require immediate attention:\n\n';
|
|
2748
|
-
for (const p of limitedCritical) {
|
|
2749
|
-
const sensitivityIcon = p.security.maxSensitivity === 'credentials' ? '๐' :
|
|
2750
|
-
p.security.maxSensitivity === 'financial' ? '๐ฐ' :
|
|
2751
|
-
p.security.maxSensitivity === 'health' ? '๐ฅ' :
|
|
2752
|
-
p.security.maxSensitivity === 'pii' ? '๐ค' : 'โ';
|
|
2753
|
-
output += `### ${p.security.tier} ${sensitivityIcon} ${p.accessPoint.table}\n`;
|
|
2754
|
-
output += `- **Operation:** ${p.accessPoint.operation}\n`;
|
|
2755
|
-
output += `- **Fields:** ${p.accessPoint.fields.join(', ') || '*'}\n`;
|
|
2756
|
-
output += `- **Location:** ${p.accessPoint.file}:${p.accessPoint.line}\n`;
|
|
2757
|
-
output += `- **Risk Score:** ${p.security.riskScore}/100\n`;
|
|
2758
|
-
output += `- **Rationale:** ${p.security.rationale}\n`;
|
|
2759
|
-
if (p.security.regulations.length > 0) {
|
|
2760
|
-
output += `- **Regulations:** ${p.security.regulations.join(', ')}\n`;
|
|
2761
|
-
}
|
|
2762
|
-
output += '\n';
|
|
2763
|
-
}
|
|
2764
|
-
if (prioritized.critical.length > limit) {
|
|
2765
|
-
output += `*... and ${prioritized.critical.length - limit} more critical items. Use \`limit=${limit + 10}\` to see more.*\n\n`;
|
|
2766
|
-
}
|
|
2767
|
-
}
|
|
2768
|
-
// High priority items - use limit parameter
|
|
2769
|
-
if (prioritized.high.length > 0) {
|
|
2770
|
-
const highLimit = Math.max(5, Math.floor(limit / 2));
|
|
2771
|
-
const limitedHigh = prioritized.high.slice(0, highLimit);
|
|
2772
|
-
output += '## โ ๏ธ High Priority Items (P2)\n\n';
|
|
2773
|
-
for (const p of limitedHigh) {
|
|
2774
|
-
output += `- **${p.accessPoint.table}**.${p.accessPoint.fields.join(', ') || '*'} (${p.accessPoint.operation})\n`;
|
|
2775
|
-
output += ` - ${p.accessPoint.file}:${p.accessPoint.line}\n`;
|
|
2776
|
-
}
|
|
2777
|
-
if (prioritized.high.length > highLimit) {
|
|
2778
|
-
output += `\n*... and ${prioritized.high.length - highLimit} more high priority items*\n`;
|
|
2779
|
-
}
|
|
2780
|
-
output += '\n';
|
|
2781
|
-
}
|
|
2782
|
-
// Sensitivity breakdown
|
|
2783
|
-
output += '## By Sensitivity Type\n\n';
|
|
2784
|
-
if (summary.bySensitivity.credentials > 0) {
|
|
2785
|
-
output += `- **๐ Credentials:** ${summary.bySensitivity.credentials}\n`;
|
|
2786
|
-
}
|
|
2787
|
-
if (summary.bySensitivity.financial > 0) {
|
|
2788
|
-
output += `- **๐ฐ Financial:** ${summary.bySensitivity.financial}\n`;
|
|
2789
|
-
}
|
|
2790
|
-
if (summary.bySensitivity.health > 0) {
|
|
2791
|
-
output += `- **๐ฅ Health:** ${summary.bySensitivity.health}\n`;
|
|
2792
|
-
}
|
|
2793
|
-
if (summary.bySensitivity.pii > 0) {
|
|
2794
|
-
output += `- **๐ค PII:** ${summary.bySensitivity.pii}\n`;
|
|
2795
|
-
}
|
|
2796
|
-
output += `- **โ Unknown:** ${summary.bySensitivity.unknown}\n`;
|
|
2797
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2798
|
-
}
|
|
2799
|
-
catch (error) {
|
|
2800
|
-
return {
|
|
2801
|
-
content: [{ type: 'text', text: `Error: ${error}` }],
|
|
2802
|
-
isError: true,
|
|
2803
|
-
};
|
|
2804
|
-
}
|
|
2805
|
-
}
|
|
2806
|
-
case 'impact': {
|
|
2807
|
-
if (!args.target) {
|
|
2808
|
-
return {
|
|
2809
|
-
content: [{
|
|
2810
|
-
type: 'text',
|
|
2811
|
-
text: 'Error: target parameter required for "impact" action.\n\n' +
|
|
2812
|
-
'Examples:\n' +
|
|
2813
|
-
'- `drift_callgraph action="impact" target="src/auth.py"` (analyze file)\n' +
|
|
2814
|
-
'- `drift_callgraph action="impact" target="login_user"` (analyze function)',
|
|
2815
|
-
}],
|
|
2816
|
-
isError: true,
|
|
2817
|
-
};
|
|
2818
|
-
}
|
|
2819
|
-
try {
|
|
2820
|
-
await analyzer.initialize();
|
|
2821
|
-
const graph = analyzer.getGraph();
|
|
2822
|
-
if (!graph) {
|
|
2823
|
-
return {
|
|
2824
|
-
content: [{ type: 'text', text: 'No call graph found. Run build first.' }],
|
|
2825
|
-
isError: true,
|
|
2826
|
-
};
|
|
2827
|
-
}
|
|
2828
|
-
const impactAnalyzer = createImpactAnalyzer(graph);
|
|
2829
|
-
let result;
|
|
2830
|
-
// Determine if target is a file or function
|
|
2831
|
-
if (args.target.includes('/') || args.target.includes('.py') || args.target.includes('.ts') || args.target.includes('.js')) {
|
|
2832
|
-
result = impactAnalyzer.analyzeFile(args.target);
|
|
2833
|
-
}
|
|
2834
|
-
else {
|
|
2835
|
-
result = impactAnalyzer.analyzeFunctionByName(args.target);
|
|
2836
|
-
}
|
|
2837
|
-
let output = '# ๐ฅ Impact Analysis\n\n';
|
|
2838
|
-
// Target info
|
|
2839
|
-
if (result.target.type === 'file') {
|
|
2840
|
-
output += `**Target:** ${result.target.file} (${result.changedFunctions.length} functions)\n\n`;
|
|
2841
|
-
}
|
|
2842
|
-
else {
|
|
2843
|
-
output += `**Target:** ${result.target.functionName ?? result.target.functionId ?? 'unknown'}\n\n`;
|
|
2844
|
-
}
|
|
2845
|
-
// Risk assessment
|
|
2846
|
-
const riskEmoji = result.risk === 'critical' ? '๐ด' :
|
|
2847
|
-
result.risk === 'high' ? '๐ ' :
|
|
2848
|
-
result.risk === 'medium' ? '๐ก' : '๐ข';
|
|
2849
|
-
output += `**Risk Level:** ${riskEmoji} **${result.risk.toUpperCase()}** (score: ${result.riskScore}/100)\n\n`;
|
|
2850
|
-
// Summary
|
|
2851
|
-
output += '## Summary\n\n';
|
|
2852
|
-
output += `- **Direct Callers:** ${result.summary.directCallers}\n`;
|
|
2853
|
-
output += `- **Transitive Callers:** ${result.summary.transitiveCallers}\n`;
|
|
2854
|
-
output += `- **Affected Entry Points:** ${result.summary.affectedEntryPoints}\n`;
|
|
2855
|
-
output += `- **Sensitive Data Paths:** ${result.summary.affectedDataPaths}\n`;
|
|
2856
|
-
output += `- **Max Call Depth:** ${result.summary.maxDepth}\n\n`;
|
|
2857
|
-
// Entry points affected - use limit
|
|
2858
|
-
if (result.entryPoints.length > 0) {
|
|
2859
|
-
const limitedEntryPoints = result.entryPoints.slice(0, limit);
|
|
2860
|
-
output += '## ๐ช Affected Entry Points (User-Facing Impact)\n\n';
|
|
2861
|
-
for (const ep of limitedEntryPoints) {
|
|
2862
|
-
output += `### ${ep.qualifiedName}\n`;
|
|
2863
|
-
output += `- **Location:** ${ep.file}:${ep.line}\n`;
|
|
2864
|
-
output += `- **Path:** ${ep.pathToChange.map(p => p.functionName).join(' โ ')}\n\n`;
|
|
2865
|
-
}
|
|
2866
|
-
if (result.entryPoints.length > limit) {
|
|
2867
|
-
output += `*... and ${result.entryPoints.length - limit} more entry points. Use \`limit=${limit + 10}\` to see more.*\n\n`;
|
|
2868
|
-
}
|
|
2869
|
-
}
|
|
2870
|
-
// Sensitive data paths - use limit
|
|
2871
|
-
if (result.sensitiveDataPaths.length > 0) {
|
|
2872
|
-
const limitedPaths = result.sensitiveDataPaths.slice(0, limit);
|
|
2873
|
-
output += '## ๐ Sensitive Data Paths Affected\n\n';
|
|
2874
|
-
for (const dp of limitedPaths) {
|
|
2875
|
-
const sensitivityIcon = dp.sensitivity === 'credentials' ? '๐' :
|
|
2876
|
-
dp.sensitivity === 'financial' ? '๐ฐ' :
|
|
2877
|
-
dp.sensitivity === 'health' ? '๐ฅ' : '๐ค';
|
|
2878
|
-
output += `### ${sensitivityIcon} ${dp.sensitivity}: ${dp.table}.${dp.fields.join(', ')}\n`;
|
|
2879
|
-
output += `- **Operation:** ${dp.operation}\n`;
|
|
2880
|
-
output += `- **Entry Point:** ${dp.entryPoint}\n`;
|
|
2881
|
-
output += `- **Path:** ${dp.fullPath.map(n => n.functionName).join(' โ ')}\n\n`;
|
|
2882
|
-
}
|
|
2883
|
-
if (result.sensitiveDataPaths.length > limit) {
|
|
2884
|
-
output += `*... and ${result.sensitiveDataPaths.length - limit} more sensitive paths*\n\n`;
|
|
2885
|
-
}
|
|
2886
|
-
}
|
|
2887
|
-
// Direct callers - use limit
|
|
2888
|
-
const directCallers = result.affected.filter(a => a.depth === 1);
|
|
2889
|
-
if (directCallers.length > 0) {
|
|
2890
|
-
const limitedDirect = directCallers.slice(0, limit);
|
|
2891
|
-
output += '## ๐ Direct Callers (Immediate Impact)\n\n';
|
|
2892
|
-
for (const caller of limitedDirect) {
|
|
2893
|
-
const icon = caller.accessesSensitiveData ? '๐ด' : 'โช';
|
|
2894
|
-
output += `- ${icon} **${caller.qualifiedName}** @ ${caller.file}:${caller.line}\n`;
|
|
2895
|
-
}
|
|
2896
|
-
if (directCallers.length > limit) {
|
|
2897
|
-
output += `\n*... and ${directCallers.length - limit} more direct callers*\n`;
|
|
2898
|
-
}
|
|
2899
|
-
output += '\n';
|
|
2900
|
-
}
|
|
2901
|
-
// Transitive callers - use smaller limit
|
|
2902
|
-
const transitiveCallers = result.affected.filter(a => a.depth > 1);
|
|
2903
|
-
if (transitiveCallers.length > 0) {
|
|
2904
|
-
const transitiveLimit = Math.max(5, Math.floor(limit / 2));
|
|
2905
|
-
const limitedTransitive = transitiveCallers.slice(0, transitiveLimit);
|
|
2906
|
-
output += '## ๐ Transitive Callers (Ripple Effect)\n\n';
|
|
2907
|
-
for (const caller of limitedTransitive) {
|
|
2908
|
-
output += `- [depth ${caller.depth}] **${caller.qualifiedName}**\n`;
|
|
2909
|
-
}
|
|
2910
|
-
if (transitiveCallers.length > transitiveLimit) {
|
|
2911
|
-
output += `\n*... and ${transitiveCallers.length - transitiveLimit} more transitive callers*\n`;
|
|
2912
|
-
}
|
|
2913
|
-
output += '\n';
|
|
2914
|
-
}
|
|
2915
|
-
// Recommendations
|
|
2916
|
-
if (result.risk === 'critical' || result.risk === 'high') {
|
|
2917
|
-
output += '## โ ๏ธ Recommendations\n\n';
|
|
2918
|
-
if (result.sensitiveDataPaths.length > 0) {
|
|
2919
|
-
output += '- Review all sensitive data paths before merging\n';
|
|
2920
|
-
}
|
|
2921
|
-
if (result.entryPoints.length > 5) {
|
|
2922
|
-
output += '- Consider incremental rollout - many entry points affected\n';
|
|
2923
|
-
}
|
|
2924
|
-
if (result.summary.maxDepth > 5) {
|
|
2925
|
-
output += '- Deep call chain - test thoroughly for regressions\n';
|
|
2926
|
-
}
|
|
2927
|
-
}
|
|
2928
|
-
return { content: [{ type: 'text', text: output }] };
|
|
2929
|
-
}
|
|
2930
|
-
catch (error) {
|
|
2931
|
-
return {
|
|
2932
|
-
content: [{ type: 'text', text: `Error: ${error}` }],
|
|
2933
|
-
isError: true,
|
|
2934
|
-
};
|
|
2935
|
-
}
|
|
2936
|
-
}
|
|
2937
|
-
case 'dead': {
|
|
2938
|
-
try {
|
|
2939
|
-
await analyzer.initialize();
|
|
2940
|
-
const graph = analyzer.getGraph();
|
|
2941
|
-
if (!graph) {
|
|
2942
|
-
return {
|
|
2943
|
-
content: [{ type: 'text', text: 'No call graph found. Run build first.' }],
|
|
2944
|
-
isError: true,
|
|
2945
|
-
};
|
|
2946
|
-
}
|
|
2947
|
-
const detector = createDeadCodeDetector(graph);
|
|
2948
|
-
const minConfidence = args.confidence ?? 'low';
|
|
2949
|
-
const result = detector.detect({ minConfidence });
|
|
2950
|
-
let output = '# ๐ Dead Code Analysis\n\n';
|
|
2951
|
-
// Summary
|
|
2952
|
-
output += '## Summary\n\n';
|
|
2953
|
-
output += `- **Total Functions:** ${result.summary.totalFunctions}\n`;
|
|
2954
|
-
output += `- **Dead Candidates:** ${result.summary.deadCandidates}\n`;
|
|
2955
|
-
output += `- **High Confidence:** ${result.summary.highConfidence}\n`;
|
|
2956
|
-
output += `- **Medium Confidence:** ${result.summary.mediumConfidence}\n`;
|
|
2957
|
-
output += `- **Low Confidence:** ${result.summary.lowConfidence}\n`;
|
|
2958
|
-
output += `- **Estimated Dead Lines:** ~${result.summary.estimatedDeadLines.toLocaleString()}\n\n`;
|
|
2959
|
-
// Excluded
|
|
2960
|
-
output += '## Excluded from Analysis\n\n';
|
|
2961
|
-
output += `- Entry Points: ${result.excluded.entryPoints}\n`;
|
|
2962
|
-
output += `- Functions with Callers: ${result.excluded.withCallers}\n`;
|
|
2963
|
-
output += `- Framework Hooks: ${result.excluded.frameworkHooks}\n\n`;
|
|
2964
|
-
// By language
|
|
2965
|
-
if (Object.keys(result.summary.byLanguage).length > 0) {
|
|
2966
|
-
output += '## By Language\n\n';
|
|
2967
|
-
for (const [lang, count] of Object.entries(result.summary.byLanguage)) {
|
|
2968
|
-
output += `- **${lang}:** ${count}\n`;
|
|
2969
|
-
}
|
|
2970
|
-
output += '\n';
|
|
2971
|
-
}
|
|
2972
|
-
// High confidence candidates - use limit
|
|
2973
|
-
const highConf = result.candidates.filter(c => c.confidence === 'high');
|
|
2974
|
-
if (highConf.length > 0) {
|
|
2975
|
-
const limitedHigh = highConf.slice(0, limit);
|
|
2976
|
-
output += '## ๐ด High Confidence Dead Code\n\n';
|
|
2977
|
-
output += 'These functions are very likely unused:\n\n';
|
|
2978
|
-
for (const c of limitedHigh) {
|
|
2979
|
-
output += `### ${c.qualifiedName}\n`;
|
|
2980
|
-
output += `- **File:** ${c.file}:${c.line}\n`;
|
|
2981
|
-
output += `- **Lines:** ${c.linesOfCode}\n`;
|
|
2982
|
-
if (c.hasDataAccess) {
|
|
2983
|
-
output += `- โ ๏ธ Has data access\n`;
|
|
2984
|
-
}
|
|
2985
|
-
output += '\n';
|
|
2986
|
-
}
|
|
2987
|
-
if (highConf.length > limit) {
|
|
2988
|
-
output += `*... and ${highConf.length - limit} more high-confidence candidates. Use \`limit=${limit + 10}\` to see more.*\n\n`;
|
|
2989
|
-
}
|
|
2990
|
-
}
|
|
2991
|
-
// Medium confidence candidates - use smaller limit
|
|
2992
|
-
const medConf = result.candidates.filter(c => c.confidence === 'medium');
|
|
2993
|
-
if (medConf.length > 0) {
|
|
2994
|
-
const medLimit = Math.max(5, Math.floor(limit / 2));
|
|
2995
|
-
const limitedMed = medConf.slice(0, medLimit);
|
|
2996
|
-
output += '## ๐ก Medium Confidence Dead Code\n\n';
|
|
2997
|
-
output += 'These might be unused but have some indicators they could be called:\n\n';
|
|
2998
|
-
for (const c of limitedMed) {
|
|
2999
|
-
output += `- **${c.qualifiedName}** @ ${c.file}:${c.line}`;
|
|
3000
|
-
if (c.possibleFalsePositives.length > 0) {
|
|
3001
|
-
output += ` (${c.possibleFalsePositives.join(', ')})`;
|
|
3002
|
-
}
|
|
3003
|
-
output += '\n';
|
|
3004
|
-
}
|
|
3005
|
-
if (medConf.length > medLimit) {
|
|
3006
|
-
output += `\n*... and ${medConf.length - medLimit} more medium-confidence candidates*\n`;
|
|
3007
|
-
}
|
|
3008
|
-
output += '\n';
|
|
3009
|
-
}
|
|
3010
|
-
// Files with most dead code - use limit
|
|
3011
|
-
if (result.summary.byFile.length > 0) {
|
|
3012
|
-
const limitedFiles = result.summary.byFile.slice(0, limit);
|
|
3013
|
-
output += '## Files with Most Dead Code\n\n';
|
|
3014
|
-
for (const f of limitedFiles) {
|
|
3015
|
-
output += `- **${f.file}**: ${f.count} functions (~${f.lines} lines)\n`;
|
|
3016
|
-
}
|
|
3017
|
-
if (result.summary.byFile.length > limit) {
|
|
3018
|
-
output += `\n*... and ${result.summary.byFile.length - limit} more files*\n`;
|
|
3019
|
-
}
|
|
3020
|
-
output += '\n';
|
|
3021
|
-
}
|
|
3022
|
-
// Recommendations
|
|
3023
|
-
if (result.summary.highConfidence > 0) {
|
|
3024
|
-
output += '## ๐ก Recommendations\n\n';
|
|
3025
|
-
output += '1. Start with high-confidence candidates - they\'re safest to remove\n';
|
|
3026
|
-
output += '2. Search for dynamic calls (getattr, reflection) before removing\n';
|
|
3027
|
-
output += '3. Check if functions are called from tests\n';
|
|
3028
|
-
output += '4. Consider adding `# pragma: no cover` for intentionally unused code\n';
|
|
3029
|
-
}
|
|
3030
|
-
return { content: [{ type: 'text', text: output }] };
|
|
3031
|
-
}
|
|
3032
|
-
catch (error) {
|
|
3033
|
-
return {
|
|
3034
|
-
content: [{ type: 'text', text: `Error: ${error}` }],
|
|
3035
|
-
isError: true,
|
|
3036
|
-
};
|
|
3037
|
-
}
|
|
3038
|
-
}
|
|
3039
|
-
case 'coverage': {
|
|
3040
|
-
try {
|
|
3041
|
-
await analyzer.initialize();
|
|
3042
|
-
const graph = analyzer.getGraph();
|
|
3043
|
-
if (!graph) {
|
|
3044
|
-
return {
|
|
3045
|
-
content: [{ type: 'text', text: 'No call graph found. Run build first.' }],
|
|
3046
|
-
isError: true,
|
|
3047
|
-
};
|
|
3048
|
-
}
|
|
3049
|
-
const coverageAnalyzer = createCoverageAnalyzer(graph);
|
|
3050
|
-
const result = coverageAnalyzer.analyze();
|
|
3051
|
-
let output = '# ๐งช Sensitive Data Test Coverage\n\n';
|
|
3052
|
-
// Summary
|
|
3053
|
-
output += '## Summary\n\n';
|
|
3054
|
-
output += `- **Sensitive Fields:** ${result.summary.totalSensitiveFields}\n`;
|
|
3055
|
-
output += `- **Access Paths:** ${result.summary.totalAccessPaths}\n`;
|
|
3056
|
-
output += `- **Tested Paths:** ${result.summary.testedAccessPaths}\n`;
|
|
3057
|
-
output += `- **Coverage:** ${result.summary.coveragePercent}%\n`;
|
|
3058
|
-
output += `- **Test Files:** ${result.testFiles.length}\n`;
|
|
3059
|
-
output += `- **Test Functions:** ${result.testFunctions}\n\n`;
|
|
3060
|
-
// By sensitivity
|
|
3061
|
-
output += '## Coverage by Sensitivity\n\n';
|
|
3062
|
-
const sensOrder = ['credentials', 'financial', 'health', 'pii'];
|
|
3063
|
-
for (const sens of sensOrder) {
|
|
3064
|
-
const s = result.summary.bySensitivity[sens];
|
|
3065
|
-
if (s.fields > 0) {
|
|
3066
|
-
const icon = sens === 'credentials' ? '๐' :
|
|
3067
|
-
sens === 'financial' ? '๐ฐ' :
|
|
3068
|
-
sens === 'health' ? '๐ฅ' : '๐ค';
|
|
3069
|
-
const status = s.coveragePercent >= 80 ? 'โ' : s.coveragePercent >= 50 ? 'โ' : 'โ';
|
|
3070
|
-
output += `- ${icon} **${sens}**: ${status} ${s.coveragePercent}% (${s.testedPaths}/${s.paths} paths)\n`;
|
|
3071
|
-
}
|
|
3072
|
-
}
|
|
3073
|
-
output += '\n';
|
|
3074
|
-
// Field coverage - use limit
|
|
3075
|
-
if (result.fields.length > 0) {
|
|
3076
|
-
const limitedFields = result.fields.slice(0, limit);
|
|
3077
|
-
output += '## Field Coverage\n\n';
|
|
3078
|
-
for (const f of limitedFields) {
|
|
3079
|
-
const statusIcon = f.status === 'covered' ? 'โ' :
|
|
3080
|
-
f.status === 'partial' ? 'โ' : 'โ';
|
|
3081
|
-
const sensIcon = f.sensitivity === 'credentials' ? '๐' :
|
|
3082
|
-
f.sensitivity === 'financial' ? '๐ฐ' :
|
|
3083
|
-
f.sensitivity === 'health' ? '๐ฅ' : '๐ค';
|
|
3084
|
-
output += `- ${statusIcon} ${sensIcon} **${f.fullName}**: ${f.testedPaths}/${f.totalPaths} paths tested (${f.coveragePercent}%)\n`;
|
|
3085
|
-
}
|
|
3086
|
-
if (result.fields.length > limit) {
|
|
3087
|
-
output += `\n*... and ${result.fields.length - limit} more fields. Use \`limit=${limit + 10}\` to see more.*\n`;
|
|
3088
|
-
}
|
|
3089
|
-
output += '\n';
|
|
3090
|
-
}
|
|
3091
|
-
// Uncovered paths by priority - use smaller limits for each sensitivity type
|
|
3092
|
-
const pathLimit = Math.max(3, Math.floor(limit / 3));
|
|
3093
|
-
const uncoveredByCredentials = result.uncoveredPaths.filter((p) => p.sensitivity === 'credentials');
|
|
3094
|
-
const uncoveredByFinancial = result.uncoveredPaths.filter((p) => p.sensitivity === 'financial');
|
|
3095
|
-
const uncoveredByHealth = result.uncoveredPaths.filter((p) => p.sensitivity === 'health');
|
|
3096
|
-
const uncoveredByPii = result.uncoveredPaths.filter((p) => p.sensitivity === 'pii');
|
|
3097
|
-
if (uncoveredByCredentials.length > 0) {
|
|
3098
|
-
const limitedCreds = uncoveredByCredentials.slice(0, pathLimit);
|
|
3099
|
-
output += '## ๐ Untested Credential Access Paths\n\n';
|
|
3100
|
-
output += '**CRITICAL: These paths access credentials without test coverage**\n\n';
|
|
3101
|
-
for (const p of limitedCreds) {
|
|
3102
|
-
output += `### ${p.table}.${p.field}\n`;
|
|
3103
|
-
output += `- **Entry Point:** ${p.entryPoint.name} @ ${p.entryPoint.file}:${p.entryPoint.line}\n`;
|
|
3104
|
-
output += `- **Accessor:** ${p.accessor.name} @ ${p.accessor.file}:${p.accessor.line}\n`;
|
|
3105
|
-
output += `- **Depth:** ${p.depth}\n\n`;
|
|
3106
|
-
}
|
|
3107
|
-
if (uncoveredByCredentials.length > pathLimit) {
|
|
3108
|
-
output += `*... and ${uncoveredByCredentials.length - pathLimit} more credential paths*\n\n`;
|
|
3109
|
-
}
|
|
3110
|
-
}
|
|
3111
|
-
if (uncoveredByFinancial.length > 0) {
|
|
3112
|
-
const limitedFinancial = uncoveredByFinancial.slice(0, pathLimit);
|
|
3113
|
-
output += '## ๐ฐ Untested Financial Data Paths\n\n';
|
|
3114
|
-
for (const p of limitedFinancial) {
|
|
3115
|
-
output += `- **${p.table}.${p.field}**: ${p.entryPoint.name} โ ${p.accessor.name}\n`;
|
|
3116
|
-
}
|
|
3117
|
-
if (uncoveredByFinancial.length > pathLimit) {
|
|
3118
|
-
output += `\n*... and ${uncoveredByFinancial.length - pathLimit} more*\n`;
|
|
3119
|
-
}
|
|
3120
|
-
output += '\n';
|
|
3121
|
-
}
|
|
3122
|
-
if (uncoveredByHealth.length > 0) {
|
|
3123
|
-
const limitedHealth = uncoveredByHealth.slice(0, pathLimit);
|
|
3124
|
-
output += '## ๐ฅ Untested Health Data Paths\n\n';
|
|
3125
|
-
for (const p of limitedHealth) {
|
|
3126
|
-
output += `- **${p.table}.${p.field}**: ${p.entryPoint.name} โ ${p.accessor.name}\n`;
|
|
3127
|
-
}
|
|
3128
|
-
if (uncoveredByHealth.length > pathLimit) {
|
|
3129
|
-
output += `\n*... and ${uncoveredByHealth.length - pathLimit} more*\n`;
|
|
3130
|
-
}
|
|
3131
|
-
output += '\n';
|
|
3132
|
-
}
|
|
3133
|
-
if (uncoveredByPii.length > 0) {
|
|
3134
|
-
const limitedPii = uncoveredByPii.slice(0, pathLimit);
|
|
3135
|
-
output += '## ๐ค Untested PII Access Paths\n\n';
|
|
3136
|
-
for (const p of limitedPii) {
|
|
3137
|
-
output += `- **${p.table}.${p.field}**: ${p.entryPoint.name} โ ${p.accessor.name}\n`;
|
|
3138
|
-
}
|
|
3139
|
-
if (uncoveredByPii.length > pathLimit) {
|
|
3140
|
-
output += `\n*... and ${uncoveredByPii.length - pathLimit} more*\n`;
|
|
3141
|
-
}
|
|
3142
|
-
output += '\n';
|
|
3143
|
-
}
|
|
3144
|
-
// Recommendations
|
|
3145
|
-
if (result.uncoveredPaths.length > 0) {
|
|
3146
|
-
output += '## ๐ก Recommendations\n\n';
|
|
3147
|
-
if (uncoveredByCredentials.length > 0) {
|
|
3148
|
-
output += `1. **CRITICAL:** ${uncoveredByCredentials.length} credential access paths need tests\n`;
|
|
3149
|
-
}
|
|
3150
|
-
if (uncoveredByFinancial.length > 0) {
|
|
3151
|
-
output += `2. ${uncoveredByFinancial.length} financial data paths need tests\n`;
|
|
3152
|
-
}
|
|
3153
|
-
if (result.summary.coveragePercent < 50) {
|
|
3154
|
-
output += `3. Overall coverage is ${result.summary.coveragePercent}% - consider adding integration tests\n`;
|
|
3155
|
-
}
|
|
3156
|
-
}
|
|
3157
|
-
else {
|
|
3158
|
-
output += '## โ All Sensitive Data Paths Tested\n\n';
|
|
3159
|
-
output += 'Great job! All sensitive data access paths are covered by tests.\n';
|
|
3160
|
-
}
|
|
3161
|
-
return { content: [{ type: 'text', text: output }] };
|
|
3162
|
-
}
|
|
3163
|
-
catch (error) {
|
|
3164
|
-
return {
|
|
3165
|
-
content: [{ type: 'text', text: `Error: ${error}` }],
|
|
3166
|
-
isError: true,
|
|
3167
|
-
};
|
|
3168
|
-
}
|
|
3169
|
-
}
|
|
3170
|
-
default:
|
|
3171
|
-
return {
|
|
3172
|
-
content: [{ type: 'text', text: `Unknown action: ${action}` }],
|
|
3173
|
-
isError: true,
|
|
3174
|
-
};
|
|
3175
|
-
}
|
|
3176
|
-
}
|
|
3177
|
-
//# sourceMappingURL=server.js.map
|