codebase-context 1.4.1 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/README.md +193 -45
  2. package/dist/analyzers/generic/index.d.ts +0 -1
  3. package/dist/analyzers/generic/index.d.ts.map +1 -1
  4. package/dist/analyzers/generic/index.js +0 -13
  5. package/dist/analyzers/generic/index.js.map +1 -1
  6. package/dist/constants/codebase-context.d.ts +2 -0
  7. package/dist/constants/codebase-context.d.ts.map +1 -1
  8. package/dist/constants/codebase-context.js +2 -0
  9. package/dist/constants/codebase-context.js.map +1 -1
  10. package/dist/constants/git-patterns.d.ts +12 -0
  11. package/dist/constants/git-patterns.d.ts.map +1 -0
  12. package/dist/constants/git-patterns.js +11 -0
  13. package/dist/constants/git-patterns.js.map +1 -0
  14. package/dist/core/analyzer-registry.d.ts.map +1 -1
  15. package/dist/core/analyzer-registry.js +3 -1
  16. package/dist/core/analyzer-registry.js.map +1 -1
  17. package/dist/core/indexer.d.ts +2 -0
  18. package/dist/core/indexer.d.ts.map +1 -1
  19. package/dist/core/indexer.js +179 -34
  20. package/dist/core/indexer.js.map +1 -1
  21. package/dist/core/manifest.d.ts +39 -0
  22. package/dist/core/manifest.d.ts.map +1 -0
  23. package/dist/core/manifest.js +86 -0
  24. package/dist/core/manifest.js.map +1 -0
  25. package/dist/core/reranker.d.ts +23 -0
  26. package/dist/core/reranker.d.ts.map +1 -0
  27. package/dist/core/reranker.js +120 -0
  28. package/dist/core/reranker.js.map +1 -0
  29. package/dist/core/search-quality.d.ts +10 -0
  30. package/dist/core/search-quality.d.ts.map +1 -0
  31. package/dist/core/search-quality.js +64 -0
  32. package/dist/core/search-quality.js.map +1 -0
  33. package/dist/core/search.d.ts +26 -2
  34. package/dist/core/search.d.ts.map +1 -1
  35. package/dist/core/search.js +508 -69
  36. package/dist/core/search.js.map +1 -1
  37. package/dist/embeddings/transformers.d.ts.map +1 -1
  38. package/dist/embeddings/transformers.js +17 -7
  39. package/dist/embeddings/transformers.js.map +1 -1
  40. package/dist/embeddings/types.d.ts.map +1 -1
  41. package/dist/embeddings/types.js +3 -0
  42. package/dist/embeddings/types.js.map +1 -1
  43. package/dist/index.d.ts +1 -1
  44. package/dist/index.d.ts.map +1 -1
  45. package/dist/index.js +460 -55
  46. package/dist/index.js.map +1 -1
  47. package/dist/memory/git-memory.d.ts +9 -0
  48. package/dist/memory/git-memory.d.ts.map +1 -0
  49. package/dist/memory/git-memory.js +51 -0
  50. package/dist/memory/git-memory.js.map +1 -0
  51. package/dist/memory/store.d.ts +16 -0
  52. package/dist/memory/store.d.ts.map +1 -1
  53. package/dist/memory/store.js +40 -1
  54. package/dist/memory/store.js.map +1 -1
  55. package/dist/patterns/semantics.d.ts +4 -0
  56. package/dist/patterns/semantics.d.ts.map +1 -0
  57. package/dist/patterns/semantics.js +24 -0
  58. package/dist/patterns/semantics.js.map +1 -0
  59. package/dist/preflight/evidence-lock.d.ts +50 -0
  60. package/dist/preflight/evidence-lock.d.ts.map +1 -0
  61. package/dist/preflight/evidence-lock.js +130 -0
  62. package/dist/preflight/evidence-lock.js.map +1 -0
  63. package/dist/preflight/query-scope.d.ts +3 -0
  64. package/dist/preflight/query-scope.d.ts.map +1 -0
  65. package/dist/preflight/query-scope.js +40 -0
  66. package/dist/preflight/query-scope.js.map +1 -0
  67. package/dist/resources/uri.d.ts +5 -0
  68. package/dist/resources/uri.d.ts.map +1 -0
  69. package/dist/resources/uri.js +15 -0
  70. package/dist/resources/uri.js.map +1 -0
  71. package/dist/storage/lancedb.d.ts +1 -0
  72. package/dist/storage/lancedb.d.ts.map +1 -1
  73. package/dist/storage/lancedb.js +24 -3
  74. package/dist/storage/lancedb.js.map +1 -1
  75. package/dist/storage/types.d.ts +5 -0
  76. package/dist/storage/types.d.ts.map +1 -1
  77. package/dist/storage/types.js.map +1 -1
  78. package/dist/types/index.d.ts +20 -3
  79. package/dist/types/index.d.ts.map +1 -1
  80. package/dist/utils/chunking.js +2 -2
  81. package/dist/utils/chunking.js.map +1 -1
  82. package/dist/utils/git-dates.d.ts +1 -0
  83. package/dist/utils/git-dates.d.ts.map +1 -1
  84. package/dist/utils/git-dates.js +20 -0
  85. package/dist/utils/git-dates.js.map +1 -1
  86. package/dist/utils/usage-tracker.d.ts.map +1 -1
  87. package/dist/utils/usage-tracker.js +3 -8
  88. package/dist/utils/usage-tracker.js.map +1 -1
  89. package/package.json +17 -9
package/dist/index.js CHANGED
@@ -16,9 +16,16 @@ import { analyzerRegistry } from './core/analyzer-registry.js';
16
16
  import { AngularAnalyzer } from './analyzers/angular/index.js';
17
17
  import { GenericAnalyzer } from './analyzers/generic/index.js';
18
18
  import { InternalFileGraph } from './utils/usage-tracker.js';
19
+ import { getFileCommitDates } from './utils/git-dates.js';
19
20
  import { IndexCorruptedError } from './errors/index.js';
20
21
  import { CODEBASE_CONTEXT_DIRNAME, MEMORY_FILENAME, INTELLIGENCE_FILENAME, KEYWORD_INDEX_FILENAME, VECTOR_DB_DIRNAME } from './constants/codebase-context.js';
21
- import { appendMemoryFile, readMemoriesFile, filterMemories, applyUnfilteredLimit } from './memory/store.js';
22
+ import { appendMemoryFile, readMemoriesFile, filterMemories, applyUnfilteredLimit, withConfidence } from './memory/store.js';
23
+ import { parseGitLogLineToMemory } from './memory/git-memory.js';
24
+ import { buildEvidenceLock } from './preflight/evidence-lock.js';
25
+ import { shouldIncludePatternConflictCategory } from './preflight/query-scope.js';
26
+ import { isComplementaryPatternCategory, isComplementaryPatternConflict, shouldSkipLegacyTestingFrameworkCategory } from './patterns/semantics.js';
27
+ import { CONTEXT_RESOURCE_URI, isContextResourceUri } from './resources/uri.js';
28
+ import { assessSearchQuality } from './core/search-quality.js';
22
29
  analyzerRegistry.register(new AngularAnalyzer());
23
30
  analyzerRegistry.register(new GenericAnalyzer());
24
31
  // Resolve root path with validation
@@ -44,9 +51,7 @@ const PATHS = {
44
51
  keywordIndex: path.join(ROOT_PATH, CODEBASE_CONTEXT_DIRNAME, KEYWORD_INDEX_FILENAME),
45
52
  vectorDb: path.join(ROOT_PATH, CODEBASE_CONTEXT_DIRNAME, VECTOR_DB_DIRNAME)
46
53
  };
47
- // Legacy paths for migration
48
54
  const LEGACY_PATHS = {
49
- // Pre-v1.5
50
55
  intelligence: path.join(ROOT_PATH, '.codebase-intelligence.json'),
51
56
  keywordIndex: path.join(ROOT_PATH, '.codebase-index.json'),
52
57
  vectorDb: path.join(ROOT_PATH, '.codebase-index')
@@ -110,12 +115,14 @@ async function migrateToNewStructure() {
110
115
  return false;
111
116
  }
112
117
  }
118
+ // Read version from package.json so it never drifts
119
+ const PKG_VERSION = JSON.parse(await fs.readFile(new URL('../package.json', import.meta.url), 'utf-8')).version;
113
120
  const indexState = {
114
121
  status: 'idle'
115
122
  };
116
123
  const server = new Server({
117
124
  name: 'codebase-context',
118
- version: '1.4.0'
125
+ version: PKG_VERSION
119
126
  }, {
120
127
  capabilities: {
121
128
  tools: {},
@@ -127,6 +134,8 @@ const TOOLS = [
127
134
  name: 'search_codebase',
128
135
  description: 'Search the indexed codebase using natural language queries. Returns code summaries with file locations. ' +
129
136
  'Supports framework-specific queries and architectural layer filtering. ' +
137
+ 'When intent is "edit", "refactor", or "migrate", returns a preflight card with risk level, ' +
138
+ 'patterns to use/avoid, impact candidates, related memories, and an evidence lock score — all in one call. ' +
130
139
  'Use the returned filePath with other tools to read complete file contents.',
131
140
  inputSchema: {
132
141
  type: 'object',
@@ -135,6 +144,13 @@ const TOOLS = [
135
144
  type: 'string',
136
145
  description: 'Natural language search query'
137
146
  },
147
+ intent: {
148
+ type: 'string',
149
+ enum: ['explore', 'edit', 'refactor', 'migrate'],
150
+ description: 'Search intent. Use "explore" (default) for read-only browsing. ' +
151
+ 'Use "edit", "refactor", or "migrate" to get a preflight card with risk assessment, ' +
152
+ 'patterns to prefer/avoid, affected files, relevant team memories, and ready-to-edit evidence checks.'
153
+ },
138
154
  limit: {
139
155
  type: 'number',
140
156
  description: 'Maximum number of results to return (default: 5)',
@@ -289,8 +305,9 @@ const TOOLS = [
289
305
  properties: {
290
306
  type: {
291
307
  type: 'string',
292
- enum: ['convention', 'decision', 'gotcha'],
293
- description: 'Type of memory being recorded'
308
+ enum: ['convention', 'decision', 'gotcha', 'failure'],
309
+ description: 'Type of memory being recorded. Use "failure" for things that were tried and failed — ' +
310
+ 'prevents repeating the same mistakes.'
294
311
  },
295
312
  category: {
296
313
  type: 'string',
@@ -325,7 +342,7 @@ const TOOLS = [
325
342
  type: {
326
343
  type: 'string',
327
344
  description: 'Filter by memory type',
328
- enum: ['convention', 'decision', 'gotcha']
345
+ enum: ['convention', 'decision', 'gotcha', 'failure']
329
346
  },
330
347
  query: {
331
348
  type: 'string',
@@ -341,7 +358,7 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
341
358
  // MCP Resources - Proactive context injection
342
359
  const RESOURCES = [
343
360
  {
344
- uri: 'codebase://context',
361
+ uri: CONTEXT_RESOURCE_URI,
345
362
  name: 'Codebase Intelligence',
346
363
  description: 'Automatic codebase context: libraries used, team patterns, and conventions. ' +
347
364
  'Read this BEFORE generating code to follow team standards.',
@@ -389,16 +406,35 @@ async function generateCodebaseContext() {
389
406
  }
390
407
  // Pattern consensus
391
408
  if (intelligence.patterns && Object.keys(intelligence.patterns).length > 0) {
409
+ const patterns = intelligence.patterns;
392
410
  lines.push("## YOUR Codebase's Actual Patterns (Not Generic Best Practices)");
393
411
  lines.push('');
394
412
  lines.push('These patterns were detected by analyzing your actual code.');
395
413
  lines.push('This is what YOUR team does in practice, not what tutorials recommend.');
396
414
  lines.push('');
397
- for (const [category, data] of Object.entries(intelligence.patterns)) {
415
+ for (const [category, data] of Object.entries(patterns)) {
416
+ if (shouldSkipLegacyTestingFrameworkCategory(category, patterns)) {
417
+ continue;
418
+ }
398
419
  const patternData = data;
399
420
  const primary = patternData.primary;
421
+ const alternatives = patternData.alsoDetected ?? [];
400
422
  if (!primary)
401
423
  continue;
424
+ if (isComplementaryPatternCategory(category, [primary.name, ...alternatives.map((alt) => alt.name)].filter(Boolean))) {
425
+ const secondary = alternatives[0];
426
+ if (secondary) {
427
+ const categoryName = category
428
+ .replace(/([A-Z])/g, ' $1')
429
+ .trim()
430
+ .replace(/^./, (str) => str.toUpperCase());
431
+ lines.push(`### ${categoryName}: **${primary.name}** (${primary.frequency}) + **${secondary.name}** (${secondary.frequency})`);
432
+ lines.push(' → Computed and effect are complementary Signals primitives and are commonly used together.');
433
+ lines.push(' → Treat this as balanced usage, not a hard split decision.');
434
+ lines.push('');
435
+ continue;
436
+ }
437
+ }
402
438
  const percentage = parseInt(primary.frequency);
403
439
  const categoryName = category
404
440
  .replace(/([A-Z])/g, ' $1')
@@ -411,16 +447,16 @@ async function generateCodebaseContext() {
411
447
  else if (percentage >= 80) {
412
448
  lines.push(`### ${categoryName}: **${primary.name}** (${primary.frequency} - strong consensus)`);
413
449
  lines.push(` → Your team strongly prefers ${primary.name}`);
414
- if (patternData.alsoDetected?.length) {
415
- const alt = patternData.alsoDetected[0];
450
+ if (alternatives.length) {
451
+ const alt = alternatives[0];
416
452
  lines.push(` → Minority pattern: ${alt.name} (${alt.frequency}) - avoid for new code`);
417
453
  }
418
454
  }
419
455
  else if (percentage >= 60) {
420
456
  lines.push(`### ${categoryName}: **${primary.name}** (${primary.frequency} - majority)`);
421
457
  lines.push(` → Most code uses ${primary.name}, but not unanimous`);
422
- if (patternData.alsoDetected?.length) {
423
- lines.push(` → Also detected: ${patternData.alsoDetected[0].name} (${patternData.alsoDetected[0].frequency})`);
458
+ if (alternatives.length) {
459
+ lines.push(` → Also detected: ${alternatives[0].name} (${alternatives[0].frequency})`);
424
460
  }
425
461
  }
426
462
  else {
@@ -428,8 +464,8 @@ async function generateCodebaseContext() {
428
464
  lines.push(`### ${categoryName}: ⚠️ NO TEAM CONSENSUS`);
429
465
  lines.push(` Your codebase is split between multiple approaches:`);
430
466
  lines.push(` - ${primary.name} (${primary.frequency})`);
431
- if (patternData.alsoDetected?.length) {
432
- for (const alt of patternData.alsoDetected.slice(0, 2)) {
467
+ if (alternatives.length) {
468
+ for (const alt of alternatives.slice(0, 2)) {
433
469
  lines.push(` - ${alt.name} (${alt.frequency})`);
434
470
  }
435
471
  }
@@ -450,12 +486,12 @@ async function generateCodebaseContext() {
450
486
  }
451
487
  server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
452
488
  const uri = request.params.uri;
453
- if (uri === 'codebase://context') {
489
+ if (isContextResourceUri(uri)) {
454
490
  const content = await generateCodebaseContext();
455
491
  return {
456
492
  contents: [
457
493
  {
458
- uri,
494
+ uri: CONTEXT_RESOURCE_URI,
459
495
  mimeType: 'text/plain',
460
496
  text: content
461
497
  }
@@ -464,13 +500,51 @@ server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
464
500
  }
465
501
  throw new Error(`Unknown resource: ${uri}`);
466
502
  });
467
- async function performIndexing() {
503
+ /**
504
+ * Extract memories from conventional git commits (refactor:, migrate:, fix:, revert:).
505
+ * Scans last 90 days. Deduplicates via content hash. Zero friction alternative to manual memory.
506
+ */
507
+ async function extractGitMemories() {
508
+ // Quick check: skip if not a git repo
509
+ if (!(await fileExists(path.join(ROOT_PATH, '.git'))))
510
+ return 0;
511
+ const { execSync } = await import('child_process');
512
+ let log;
513
+ try {
514
+ // Format: ISO-date<TAB>hash subject (e.g. "2026-01-15T10:00:00+00:00\tabc1234 fix: race condition")
515
+ log = execSync('git log --format="%aI\t%h %s" --since="90 days ago" --no-merges', {
516
+ cwd: ROOT_PATH,
517
+ encoding: 'utf-8',
518
+ timeout: 5000
519
+ }).trim();
520
+ }
521
+ catch {
522
+ // Git not available or command failed — silently skip
523
+ return 0;
524
+ }
525
+ if (!log)
526
+ return 0;
527
+ const lines = log.split('\n').filter(Boolean);
528
+ let added = 0;
529
+ for (const line of lines) {
530
+ const parsedMemory = parseGitLogLineToMemory(line);
531
+ if (!parsedMemory)
532
+ continue;
533
+ const result = await appendMemoryFile(PATHS.memory, parsedMemory);
534
+ if (result.status === 'added')
535
+ added++;
536
+ }
537
+ return added;
538
+ }
539
+ async function performIndexing(incrementalOnly) {
468
540
  indexState.status = 'indexing';
469
- console.error(`Indexing: ${ROOT_PATH}`);
541
+ const mode = incrementalOnly ? 'incremental' : 'full';
542
+ console.error(`Indexing (${mode}): ${ROOT_PATH}`);
470
543
  try {
471
544
  let lastLoggedProgress = { phase: '', percentage: -1 };
472
545
  const indexer = new CodebaseIndexer({
473
546
  rootPath: ROOT_PATH,
547
+ incrementalOnly,
474
548
  onProgress: (progress) => {
475
549
  // Only log when phase or percentage actually changes (prevents duplicate logs)
476
550
  const shouldLog = progress.phase !== lastLoggedProgress.phase ||
@@ -487,6 +561,16 @@ async function performIndexing() {
487
561
  indexState.lastIndexed = new Date();
488
562
  indexState.stats = stats;
489
563
  console.error(`Complete: ${stats.indexedFiles} files, ${stats.totalChunks} chunks in ${(stats.duration / 1000).toFixed(2)}s`);
564
+ // Auto-extract memories from git history (non-blocking, best-effort)
565
+ try {
566
+ const gitMemories = await extractGitMemories();
567
+ if (gitMemories > 0) {
568
+ console.error(`[git-memory] Extracted ${gitMemories} new memor${gitMemories === 1 ? 'y' : 'ies'} from git history`);
569
+ }
570
+ }
571
+ catch {
572
+ // Git memory extraction is optional — never fail indexing over it
573
+ }
490
574
  }
491
575
  catch (error) {
492
576
  indexState.status = 'error';
@@ -509,7 +593,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
509
593
  try {
510
594
  switch (name) {
511
595
  case 'search_codebase': {
512
- const { query, limit, filters } = args;
596
+ const { query, limit, filters, intent } = args;
513
597
  if (indexState.status === 'indexing') {
514
598
  return {
515
599
  content: [
@@ -539,8 +623,13 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
539
623
  }
540
624
  const searcher = new CodebaseSearcher(ROOT_PATH);
541
625
  let results;
626
+ const searchProfile = intent && ['explore', 'edit', 'refactor', 'migrate'].includes(intent)
627
+ ? intent
628
+ : 'explore';
542
629
  try {
543
- results = await searcher.search(query, limit || 5, filters);
630
+ results = await searcher.search(query, limit || 5, filters, {
631
+ profile: searchProfile
632
+ });
544
633
  }
545
634
  catch (error) {
546
635
  if (error instanceof IndexCorruptedError) {
@@ -550,7 +639,9 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
550
639
  console.error('[Auto-Heal] Success. Retrying search...');
551
640
  const freshSearcher = new CodebaseSearcher(ROOT_PATH);
552
641
  try {
553
- results = await freshSearcher.search(query, limit || 5, filters);
642
+ results = await freshSearcher.search(query, limit || 5, filters, {
643
+ profile: searchProfile
644
+ });
554
645
  }
555
646
  catch (retryError) {
556
647
  return {
@@ -585,36 +676,305 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
585
676
  throw error; // Propagate unexpected errors
586
677
  }
587
678
  }
588
- // Load memories for keyword matching
679
+ // Load memories for keyword matching, enriched with confidence
589
680
  const allMemories = await readMemoriesFile(PATHS.memory);
590
- const findRelatedMemories = (queryTerms) => {
591
- return allMemories.filter((m) => {
592
- const searchText = `${m.memory} ${m.reason}`.toLowerCase();
593
- return queryTerms.some((term) => searchText.includes(term));
594
- });
595
- };
681
+ const allMemoriesWithConf = withConfidence(allMemories);
596
682
  const queryTerms = query.toLowerCase().split(/\s+/);
597
- const relatedMemories = findRelatedMemories(queryTerms);
683
+ const relatedMemories = allMemoriesWithConf
684
+ .filter((m) => {
685
+ const searchText = `${m.memory} ${m.reason}`.toLowerCase();
686
+ return queryTerms.some((term) => searchText.includes(term));
687
+ })
688
+ .sort((a, b) => b.effectiveConfidence - a.effectiveConfidence);
689
+ // Load intelligence data for enrichment (all intents, not just preflight)
690
+ let intelligence = null;
691
+ try {
692
+ const intelligenceContent = await fs.readFile(PATHS.intelligence, 'utf-8');
693
+ intelligence = JSON.parse(intelligenceContent);
694
+ }
695
+ catch {
696
+ /* graceful degradation — intelligence file may not exist yet */
697
+ }
698
+ // Build reverse import map from intelligence graph
699
+ const reverseImports = new Map();
700
+ if (intelligence?.internalFileGraph?.imports) {
701
+ for (const [file, deps] of Object.entries(intelligence.internalFileGraph.imports)) {
702
+ for (const dep of deps) {
703
+ if (!reverseImports.has(dep))
704
+ reverseImports.set(dep, []);
705
+ reverseImports.get(dep).push(file);
706
+ }
707
+ }
708
+ }
709
+ // Load git dates for lastModified enrichment
710
+ let gitDates = null;
711
+ try {
712
+ gitDates = await getFileCommitDates(ROOT_PATH);
713
+ }
714
+ catch {
715
+ /* not a git repo */
716
+ }
717
+ // Enrich a search result with relationship data
718
+ function enrichResult(r) {
719
+ const rPath = r.filePath;
720
+ // importedBy: files that import this result (reverse lookup)
721
+ const importedBy = [];
722
+ for (const [dep, importers] of reverseImports) {
723
+ if (dep.endsWith(rPath) || rPath.endsWith(dep)) {
724
+ importedBy.push(...importers);
725
+ }
726
+ }
727
+ // imports: files this result depends on (forward lookup)
728
+ const imports = [];
729
+ if (intelligence?.internalFileGraph?.imports) {
730
+ for (const [file, deps] of Object.entries(intelligence.internalFileGraph.imports)) {
731
+ if (file.endsWith(rPath) || rPath.endsWith(file)) {
732
+ imports.push(...deps);
733
+ }
734
+ }
735
+ }
736
+ // testedIn: heuristic — same basename with .spec/.test extension
737
+ const testedIn = [];
738
+ const baseName = path.basename(rPath).replace(/\.[^.]+$/, '');
739
+ if (intelligence?.internalFileGraph?.imports) {
740
+ for (const file of Object.keys(intelligence.internalFileGraph.imports)) {
741
+ const fileBase = path.basename(file);
742
+ if ((fileBase.includes('.spec.') || fileBase.includes('.test.')) &&
743
+ fileBase.startsWith(baseName)) {
744
+ testedIn.push(file);
745
+ }
746
+ }
747
+ }
748
+ // lastModified: from git dates
749
+ let lastModified;
750
+ if (gitDates) {
751
+ // Try matching by relative path (git dates use repo-relative forward-slash paths)
752
+ const relPath = path.relative(ROOT_PATH, rPath).replace(/\\/g, '/');
753
+ const date = gitDates.get(relPath);
754
+ if (date) {
755
+ lastModified = date.toISOString();
756
+ }
757
+ }
758
+ // Only return if we have at least one piece of data
759
+ if (importedBy.length === 0 &&
760
+ imports.length === 0 &&
761
+ testedIn.length === 0 &&
762
+ !lastModified) {
763
+ return undefined;
764
+ }
765
+ return {
766
+ ...(importedBy.length > 0 && { importedBy }),
767
+ ...(imports.length > 0 && { imports }),
768
+ ...(testedIn.length > 0 && { testedIn }),
769
+ ...(lastModified && { lastModified })
770
+ };
771
+ }
772
+ const searchQuality = assessSearchQuality(query, results);
773
+ // Compose preflight card for edit/refactor/migrate intents
774
+ let preflight = undefined;
775
+ const preflightIntents = ['edit', 'refactor', 'migrate'];
776
+ if (intent && preflightIntents.includes(intent) && intelligence) {
777
+ try {
778
+ // --- Avoid / Prefer patterns ---
779
+ const avoidPatterns = [];
780
+ const preferredPatterns = [];
781
+ const patterns = intelligence.patterns || {};
782
+ for (const [category, data] of Object.entries(patterns)) {
783
+ // Primary pattern = preferred if Rising or Stable
784
+ if (data.primary) {
785
+ const p = data.primary;
786
+ if (p.trend === 'Rising' || p.trend === 'Stable') {
787
+ preferredPatterns.push({
788
+ pattern: p.name,
789
+ category,
790
+ adoption: p.frequency,
791
+ trend: p.trend,
792
+ guidance: p.guidance,
793
+ ...(p.canonicalExample && { example: p.canonicalExample.file })
794
+ });
795
+ }
796
+ }
797
+ // Also-detected patterns that are Declining = avoid
798
+ if (data.alsoDetected) {
799
+ for (const alt of data.alsoDetected) {
800
+ if (alt.trend === 'Declining') {
801
+ avoidPatterns.push({
802
+ pattern: alt.name,
803
+ category,
804
+ adoption: alt.frequency,
805
+ trend: 'Declining',
806
+ guidance: alt.guidance
807
+ });
808
+ }
809
+ }
810
+ }
811
+ }
812
+ // --- Impact candidates (files importing the result files) ---
813
+ const impactCandidates = [];
814
+ const resultPaths = results.map((r) => r.filePath);
815
+ if (intelligence.internalFileGraph?.imports) {
816
+ const allImports = intelligence.internalFileGraph.imports;
817
+ for (const [file, deps] of Object.entries(allImports)) {
818
+ if (deps.some((dep) => resultPaths.some((rp) => dep.endsWith(rp) || rp.endsWith(dep)))) {
819
+ if (!resultPaths.some((rp) => file.endsWith(rp) || rp.endsWith(file))) {
820
+ impactCandidates.push(file);
821
+ }
822
+ }
823
+ }
824
+ }
825
+ // --- Risk level (based on circular deps + impact breadth) ---
826
+ let riskLevel = 'low';
827
+ let cycleCount = 0;
828
+ if (intelligence.internalFileGraph) {
829
+ try {
830
+ const graph = InternalFileGraph.fromJSON(intelligence.internalFileGraph, ROOT_PATH);
831
+ // Use directory prefixes as scope (not full file paths)
832
+ // findCycles(scope) filters files by startsWith, so a full path would only match itself
833
+ const scopes = new Set(resultPaths.map((rp) => {
834
+ const lastSlash = rp.lastIndexOf('/');
835
+ return lastSlash > 0 ? rp.substring(0, lastSlash + 1) : rp;
836
+ }));
837
+ for (const scope of scopes) {
838
+ const cycles = graph.findCycles(scope);
839
+ cycleCount += cycles.length;
840
+ }
841
+ }
842
+ catch {
843
+ // Graph reconstruction failed — skip cycle check
844
+ }
845
+ }
846
+ if (cycleCount > 0 || impactCandidates.length > 10) {
847
+ riskLevel = 'high';
848
+ }
849
+ else if (impactCandidates.length > 3) {
850
+ riskLevel = 'medium';
851
+ }
852
+ // --- Golden files (exemplar code) ---
853
+ const goldenFiles = (intelligence.goldenFiles || []).slice(0, 3).map((g) => ({
854
+ file: g.file,
855
+ score: g.score
856
+ }));
857
+ // --- Confidence (index freshness) ---
858
+ let confidence = 'stale';
859
+ if (intelligence.generatedAt) {
860
+ const indexAge = Date.now() - new Date(intelligence.generatedAt).getTime();
861
+ const hoursOld = indexAge / (1000 * 60 * 60);
862
+ if (hoursOld < 24) {
863
+ confidence = 'fresh';
864
+ }
865
+ else if (hoursOld < 168) {
866
+ confidence = 'aging';
867
+ }
868
+ }
869
+ // --- Failure memories (1.5x relevance boost) ---
870
+ const failureWarnings = relatedMemories
871
+ .filter((m) => m.type === 'failure' && !m.stale)
872
+ .map((m) => ({
873
+ memory: m.memory,
874
+ reason: m.reason,
875
+ confidence: m.effectiveConfidence
876
+ }))
877
+ .slice(0, 3);
878
+ const preferredPatternsForOutput = preferredPatterns.slice(0, 5);
879
+ const avoidPatternsForOutput = avoidPatterns.slice(0, 5);
880
+ // --- Pattern conflicts (split decisions within categories) ---
881
+ const patternConflicts = [];
882
+ const hasUnitTestFramework = Boolean(patterns.unitTestFramework?.primary);
883
+ for (const [cat, data] of Object.entries(patterns)) {
884
+ if (shouldSkipLegacyTestingFrameworkCategory(cat, patterns))
885
+ continue;
886
+ if (!shouldIncludePatternConflictCategory(cat, query))
887
+ continue;
888
+ if (!data.primary || !data.alsoDetected?.length)
889
+ continue;
890
+ const primaryFreq = parseFloat(data.primary.frequency) || 100;
891
+ if (primaryFreq >= 80)
892
+ continue;
893
+ for (const alt of data.alsoDetected) {
894
+ const altFreq = parseFloat(alt.frequency) || 0;
895
+ if (altFreq >= 20) {
896
+ if (isComplementaryPatternConflict(cat, data.primary.name, alt.name))
897
+ continue;
898
+ if (hasUnitTestFramework && cat === 'testingFramework')
899
+ continue;
900
+ patternConflicts.push({
901
+ category: cat,
902
+ primary: { name: data.primary.name, adoption: data.primary.frequency },
903
+ alternative: { name: alt.name, adoption: alt.frequency }
904
+ });
905
+ }
906
+ }
907
+ }
908
+ const evidenceLock = buildEvidenceLock({
909
+ results,
910
+ preferredPatterns: preferredPatternsForOutput,
911
+ relatedMemories,
912
+ failureWarnings,
913
+ patternConflicts
914
+ });
915
+ // Bump risk if there are active failure memories for this area
916
+ if (failureWarnings.length > 0 && riskLevel === 'low') {
917
+ riskLevel = 'medium';
918
+ }
919
+ // If evidence triangulation is weak, avoid claiming low risk
920
+ if (evidenceLock.status === 'block' && riskLevel === 'low') {
921
+ riskLevel = 'medium';
922
+ }
923
+ // If epistemic stress says abstain, bump risk
924
+ if (evidenceLock.epistemicStress?.abstain && riskLevel === 'low') {
925
+ riskLevel = 'medium';
926
+ }
927
+ preflight = {
928
+ intent,
929
+ riskLevel,
930
+ confidence,
931
+ evidenceLock,
932
+ ...(preferredPatternsForOutput.length > 0 && {
933
+ preferredPatterns: preferredPatternsForOutput
934
+ }),
935
+ ...(avoidPatternsForOutput.length > 0 && {
936
+ avoidPatterns: avoidPatternsForOutput
937
+ }),
938
+ ...(goldenFiles.length > 0 && { goldenFiles }),
939
+ ...(impactCandidates.length > 0 && {
940
+ impactCandidates: impactCandidates.slice(0, 10)
941
+ }),
942
+ ...(cycleCount > 0 && { circularDependencies: cycleCount }),
943
+ ...(failureWarnings.length > 0 && { failureWarnings })
944
+ };
945
+ }
946
+ catch {
947
+ // Preflight construction failed — skip preflight, don't fail the search
948
+ }
949
+ }
598
950
  return {
599
951
  content: [
600
952
  {
601
953
  type: 'text',
602
954
  text: JSON.stringify({
603
955
  status: 'success',
604
- results: results.map((r) => ({
605
- summary: r.summary,
606
- snippet: r.snippet,
607
- filePath: `${r.filePath}:${r.startLine}-${r.endLine}`,
608
- score: r.score,
609
- relevanceReason: r.relevanceReason,
610
- componentType: r.componentType,
611
- layer: r.layer,
612
- framework: r.framework,
613
- trend: r.trend,
614
- patternWarning: r.patternWarning
615
- })),
956
+ ...(preflight && { preflight }),
957
+ searchQuality,
958
+ results: results.map((r) => {
959
+ const relationships = enrichResult(r);
960
+ return {
961
+ summary: r.summary,
962
+ snippet: r.snippet,
963
+ filePath: `${r.filePath}:${r.startLine}-${r.endLine}`,
964
+ score: r.score,
965
+ relevanceReason: r.relevanceReason,
966
+ componentType: r.componentType,
967
+ layer: r.layer,
968
+ framework: r.framework,
969
+ trend: r.trend,
970
+ patternWarning: r.patternWarning,
971
+ ...(relationships && { relationships })
972
+ };
973
+ }),
616
974
  totalResults: results.length,
617
- ...(relatedMemories.length > 0 && { relatedMemories })
975
+ ...(relatedMemories.length > 0 && {
976
+ relatedMemories: relatedMemories.slice(0, 5)
977
+ })
618
978
  }, null, 2)
619
979
  }
620
980
  ]
@@ -635,7 +995,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
635
995
  totalFiles: indexState.stats.totalFiles,
636
996
  indexedFiles: indexState.stats.indexedFiles,
637
997
  totalChunks: indexState.stats.totalChunks,
638
- duration: `${(indexState.stats.duration / 1000).toFixed(2)}s`
998
+ duration: `${(indexState.stats.duration / 1000).toFixed(2)}s`,
999
+ incremental: indexState.stats.incremental
639
1000
  }
640
1001
  : undefined,
641
1002
  progress: progress
@@ -657,10 +1018,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
657
1018
  const { reason, incrementalOnly } = args;
658
1019
  const mode = incrementalOnly ? 'incremental' : 'full';
659
1020
  console.error(`Refresh requested (${mode}): ${reason || 'Manual trigger'}`);
660
- // TODO: When incremental indexing is implemented (Phase 2),
661
- // use `incrementalOnly` to only re-index changed files.
662
- // For now, always do full re-index but acknowledge the intention.
663
- performIndexing();
1021
+ performIndexing(incrementalOnly);
664
1022
  return {
665
1023
  content: [
666
1024
  {
@@ -669,12 +1027,9 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
669
1027
  status: 'started',
670
1028
  mode,
671
1029
  message: incrementalOnly
672
- ? 'Incremental re-indexing requested. Check status with get_indexing_status.'
1030
+ ? 'Incremental re-indexing started. Only changed files will be re-embedded.'
673
1031
  : 'Full re-indexing started. Check status with get_indexing_status.',
674
- reason,
675
- note: incrementalOnly
676
- ? 'Incremental mode requested. Full re-index for now; true incremental indexing coming in Phase 2.'
677
- : undefined
1032
+ reason
678
1033
  }, null, 2)
679
1034
  }
680
1035
  ]
@@ -828,6 +1183,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
828
1183
  result.stateManagement = intelligence.patterns?.stateManagement;
829
1184
  }
830
1185
  else if (category === 'testing') {
1186
+ result.unitTestFramework = intelligence.patterns?.unitTestFramework;
1187
+ result.e2eFramework = intelligence.patterns?.e2eFramework;
831
1188
  result.testingFramework = intelligence.patterns?.testingFramework;
832
1189
  result.testMocking = intelligence.patterns?.testMocking;
833
1190
  }
@@ -857,6 +1214,47 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
857
1214
  catch (_error) {
858
1215
  // No memory file yet, that's fine - don't fail the whole request
859
1216
  }
1217
+ // Detect pattern conflicts: primary < 80% and any alternative > 20%
1218
+ const conflicts = [];
1219
+ const patternsData = intelligence.patterns || {};
1220
+ const hasUnitTestFramework = Boolean(patternsData.unitTestFramework?.primary);
1221
+ for (const [cat, data] of Object.entries(patternsData)) {
1222
+ if (shouldSkipLegacyTestingFrameworkCategory(cat, patternsData))
1223
+ continue;
1224
+ if (category && category !== 'all' && cat !== category)
1225
+ continue;
1226
+ if (!data.primary || !data.alsoDetected?.length)
1227
+ continue;
1228
+ const primaryFreq = parseFloat(data.primary.frequency) || 100;
1229
+ if (primaryFreq >= 80)
1230
+ continue;
1231
+ for (const alt of data.alsoDetected) {
1232
+ const altFreq = parseFloat(alt.frequency) || 0;
1233
+ if (altFreq < 20)
1234
+ continue;
1235
+ if (isComplementaryPatternConflict(cat, data.primary.name, alt.name))
1236
+ continue;
1237
+ if (hasUnitTestFramework && cat === 'testingFramework')
1238
+ continue;
1239
+ conflicts.push({
1240
+ category: cat,
1241
+ primary: {
1242
+ name: data.primary.name,
1243
+ adoption: data.primary.frequency,
1244
+ trend: data.primary.trend
1245
+ },
1246
+ alternative: {
1247
+ name: alt.name,
1248
+ adoption: alt.frequency,
1249
+ trend: alt.trend
1250
+ },
1251
+ note: `Split decision: ${data.primary.frequency} ${data.primary.name} (${data.primary.trend || 'unknown'}) vs ${alt.frequency} ${alt.name} (${alt.trend || 'unknown'})`
1252
+ });
1253
+ }
1254
+ }
1255
+ if (conflicts.length > 0) {
1256
+ result.conflicts = conflicts;
1257
+ }
860
1258
  return {
861
1259
  content: [{ type: 'text', text: JSON.stringify(result, null, 2) }]
862
1260
  };
@@ -1099,19 +1497,26 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1099
1497
  }
1100
1498
  const filtered = filterMemories(allMemories, { category, type, query });
1101
1499
  const limited = applyUnfilteredLimit(filtered, { category, type, query }, 20);
1500
+ // Enrich with confidence decay
1501
+ const enriched = withConfidence(limited.memories);
1502
+ const staleCount = enriched.filter((m) => m.stale).length;
1102
1503
  return {
1103
1504
  content: [
1104
1505
  {
1105
1506
  type: 'text',
1106
1507
  text: JSON.stringify({
1107
1508
  status: 'success',
1108
- count: limited.memories.length,
1509
+ count: enriched.length,
1109
1510
  totalCount: limited.totalCount,
1110
1511
  truncated: limited.truncated,
1512
+ ...(staleCount > 0 && {
1513
+ staleCount,
1514
+ staleNote: `${staleCount} memor${staleCount === 1 ? 'y' : 'ies'} below 30% confidence. Consider reviewing or removing.`
1515
+ }),
1111
1516
  message: limited.truncated
1112
1517
  ? 'Showing 20 most recent. Use filters (category/type/query) for targeted results.'
1113
1518
  : undefined,
1114
- memories: limited.memories
1519
+ memories: enriched
1115
1520
  }, null, 2)
1116
1521
  }
1117
1522
  ]