smart-context-mcp 1.11.0 → 1.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -56,7 +56,7 @@ Restart your AI client. Done.
56
56
  # Check installed version
57
57
  npm list -g smart-context-mcp
58
58
 
59
- # Should show: smart-context-mcp@1.11.0 (or later)
59
+ # Should show: smart-context-mcp@1.13.0 (or later)
60
60
 
61
61
  # Update to latest version
62
62
  npm update -g smart-context-mcp
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "smart-context-mcp",
3
3
  "mcpName": "io.github.Arrayo/smart-context-mcp",
4
- "version": "1.11.0",
4
+ "version": "1.13.0",
5
5
  "description": "MCP server that reduces agent token usage by 90% with intelligent context compression, task checkpoint persistence, and workflow-aware agent guidance.",
6
6
  "author": "Francisco Caballero Portero <fcp1978@hotmail.com>",
7
7
  "type": "module",
package/server.json CHANGED
@@ -6,12 +6,12 @@
6
6
  "url": "https://github.com/Arrayo/smart-context-mcp",
7
7
  "source": "github"
8
8
  },
9
- "version": "1.11.0",
9
+ "version": "1.13.0",
10
10
  "packages": [
11
11
  {
12
12
  "registryType": "npm",
13
13
  "identifier": "smart-context-mcp",
14
- "version": "1.11.0",
14
+ "version": "1.13.0",
15
15
  "transport": {
16
16
  "type": "stdio"
17
17
  },
@@ -3,7 +3,7 @@ import path from 'node:path';
3
3
  import { execFile as execFileCallback } from 'node:child_process';
4
4
  import { promisify } from 'node:util';
5
5
  import { projectRoot } from './utils/paths.js';
6
- import { loadIndex, buildIndex as buildIndexCore } from './index.js';
6
+ import { loadIndex, buildIndexIncremental, persistIndex } from './index.js';
7
7
 
8
8
  const execFile = promisify(execFileCallback);
9
9
 
@@ -83,23 +83,28 @@ export const ensureIndexReady = async (options = {}) => {
83
83
  }
84
84
 
85
85
  log('Building search index...');
86
-
86
+
87
87
  try {
88
- const buildPromise = buildIndexCore({ root, incremental: true });
88
+ const buildPromise = (async () => {
89
+ const { index, stats } = buildIndexIncremental(root);
90
+ await persistIndex(index, root);
91
+ return { stats, fileCount: Object.keys(index.files).length, version: index.version };
92
+ })();
93
+
89
94
  const result = await Promise.race([
90
95
  buildPromise,
91
- timeout(timeoutMs, 'Index build timeout')
96
+ timeout(timeoutMs, 'Index build timeout'),
92
97
  ]);
93
-
98
+
94
99
  saveIndexMetadata({
95
100
  builtAt: Date.now(),
96
101
  gitHead: getGitHead(root),
97
- fileCount: result?.files?.length || 0,
98
- version: result?.version
102
+ fileCount: result.fileCount,
103
+ version: result.version,
99
104
  }, root);
100
-
105
+
101
106
  log('Index ready');
102
- return { status: 'built', cached: false, fileCount: result?.files?.length || 0 };
107
+ return { status: 'built', cached: false, fileCount: result.fileCount };
103
108
  } catch (error) {
104
109
  log(`Index build failed: ${error.message}`);
105
110
  return { status: 'fallback', error: error.message };
package/src/server.js CHANGED
@@ -39,7 +39,7 @@ export const asTextResult = (result) => ({
39
39
  content: [
40
40
  {
41
41
  type: 'text',
42
- text: JSON.stringify(result, null, 2),
42
+ text: JSON.stringify(result),
43
43
  },
44
44
  ],
45
45
  });
@@ -61,7 +61,7 @@ export const createDevctxServer = () => {
61
61
 
62
62
  server.prompt(
63
63
  'use-devctx',
64
- 'Force the agent to use devctx tools for the current task. Use this prompt at the start of your message to ensure devctx is used instead of native tools.',
64
+ 'Force the agent to use devctx tools for the current task.',
65
65
  {},
66
66
  async () => ({
67
67
  messages: [
@@ -69,7 +69,7 @@ export const createDevctxServer = () => {
69
69
  role: 'user',
70
70
  content: {
71
71
  type: 'text',
72
- text: 'Use devctx: smart_turn(start) → smart_context/smart_search → smart_read → smart_turn(end)',
72
+ text: 'Use devctx MCP tools for this task. Start with smart_context(task) for multi-file context. Use smart_read(outline) → smart_read(symbol) cascade for individual files. Never use native Read on large files.',
73
73
  },
74
74
  },
75
75
  ],
@@ -78,7 +78,7 @@ export const createDevctxServer = () => {
78
78
 
79
79
  server.prompt(
80
80
  'devctx-workflow',
81
- 'Complete devctx workflow template with all recommended steps. Includes session start, context building, file reading, and session end.',
81
+ 'Complete devctx workflow for complex tasks with session continuity.',
82
82
  {},
83
83
  async () => ({
84
84
  messages: [
@@ -86,15 +86,7 @@ export const createDevctxServer = () => {
86
86
  role: 'user',
87
87
  content: {
88
88
  type: 'text',
89
- text: `Follow this devctx workflow:
90
-
91
- 1. smart_turn(start) - Start session and recover previous context
92
- 2. smart_context(task) - Build complete context for the task
93
- 3. smart_search(query) - Search for specific patterns if needed
94
- 4. smart_read(file) - Read files with appropriate mode (outline/signatures/symbol)
95
- 5. smart_turn(end) - Save checkpoint for next session
96
-
97
- Use devctx tools instead of native Read/Grep/Shell when possible.`,
89
+ text: 'Follow devctx workflow: 1) smart_turn(start) to recover session 2) smart_context(task) for curated context (replaces search+read cycle) 3) smart_read(symbol) only for specific functions not covered by smart_context 4) smart_turn(end) to checkpoint. Never skip to smart_read(full) — use the cascade: outline → signatures → symbol → full.',
98
90
  },
99
91
  },
100
92
  ],
@@ -103,7 +95,7 @@ Use devctx tools instead of native Read/Grep/Shell when possible.`,
103
95
 
104
96
  server.prompt(
105
97
  'devctx-preflight',
106
- 'Preflight checklist before starting work. Ensures index is built and session is initialized.',
98
+ 'Preflight: build index and initialize session before work.',
107
99
  {},
108
100
  async () => ({
109
101
  messages: [
@@ -111,13 +103,7 @@ Use devctx tools instead of native Read/Grep/Shell when possible.`,
111
103
  role: 'user',
112
104
  content: {
113
105
  type: 'text',
114
- text: `Preflight checklist:
115
-
116
- 1. build_index(incremental=true) - Build/update symbol index
117
- 2. smart_turn(start) - Initialize session and recover context
118
- 3. Proceed with your task using devctx tools
119
-
120
- This ensures optimal performance and context recovery.`,
106
+ text: 'Preflight: 1) build_index(incremental=true) 2) smart_turn(start) 3) Proceed with devctx tools.',
121
107
  },
122
108
  },
123
109
  ],
@@ -126,7 +112,7 @@ This ensures optimal performance and context recovery.`,
126
112
 
127
113
  server.tool(
128
114
  'smart_read',
129
- 'Read a file with token-efficient modes. PREFER outline/signatures/symbol — full mode saves 0 tokens (same content as native Read, capped at 12k chars). Mode guide: outline (~90% savings): file structure, exports, top-level symbols — use for orientation, code review, deciding what to read next. signatures (~85% savings): function/method signatures only — use when you need parameter names and return types without bodies. symbol: extract one or more functions/classes/methods by name (string or array for batch) — use when you know exactly what to read; add context=true to include callers, tests, and referenced types from the dependency graph (returns graphCoverage: full|partial|none). range: specific line range with line numbers — use only when you need exact lines. full: raw file content, no compression, no savings — only use when the exact byte-for-byte content is required (e.g. config files, lock files). maxTokens: token budget — auto-selects the most detailed mode that fits (full → outline → signatures → truncated). Responses are cached in memory per session and invalidated by file mtime; cached=true when served from cache. Every response includes a unified confidence block: { parser, truncated, cached, graphCoverage? }. Supports JS/TS, Python, Go, Rust, Java, C#, Kotlin, PHP, Swift, shell, Terraform, Dockerfile, SQL, JSON, TOML, YAML.',
115
+ 'Read a file with token-efficient modes. ALWAYS prefer outline/signatures/symbol over full. Reading cascade: outline → signatures → symbol → range → full (last resort). Mode guide: outline (~90% savings): file structure, exports, top-level symbols — use first for orientation. signatures (~85% savings): function signatures with parameters and return types — use when you need the API surface. symbol: extract specific functions/classes by name (string or array) — use when you know what to read; add context=true for callers, tests, and dependencies. range: specific line range — use only when you need exact lines. full: raw content, no savings — only for config/lock files. maxTokens: token budget — auto-cascades to fit (outline → signatures → truncated). Supports JS/TS, Python, Go, Rust, Java, C#, Kotlin, PHP, Swift, shell, Terraform, Dockerfile, SQL, JSON, TOML, YAML.',
130
116
  {
131
117
  filePath: z.string(),
132
118
  mode: z.enum(['full', 'outline', 'signatures', 'range', 'symbol']).optional(),
@@ -160,7 +146,7 @@ This ensures optimal performance and context recovery.`,
160
146
 
161
147
  server.tool(
162
148
  'smart_search',
163
- 'Search code across the project using ripgrep (with filesystem fallback). Returns grouped, ranked results. Optional intent (implementation/debug/tests/config/docs/explore) adjusts ranking: tests boosts test files, config boosts config files, docs reduces penalty on READMEs. Includes a unified confidence block: { level, indexFreshness } plus retrievalConfidence and provenance metadata.',
149
+ 'Search code across the project using ripgrep (with filesystem fallback). Returns grouped, ranked results. Optional intent (implementation/debug/tests/config/docs/explore) adjusts ranking. Use instead of native Grep for ranked, deduplicated results with index boosting.',
164
150
  {
165
151
  query: z.string(),
166
152
  cwd: z.string().optional(),
@@ -171,7 +157,7 @@ This ensures optimal performance and context recovery.`,
171
157
 
172
158
  server.tool(
173
159
  'smart_context',
174
- 'Get curated context for a task in one call. Combines smart_search + smart_read + graph expansion. Returns relevant files, evidence for why each file was included, related tests, dependencies, symbol previews from the index, and symbol details — optimized for tokens. Includes a unified confidence block: { indexFreshness, graphCoverage } indicating index state and how complete the relational context is. Replaces the manual search → read → read cycle. Optional intent override, token budget, diff mode (pass diff=true for HEAD or diff="main" to scope context to changed files only), detail mode (minimal=index+signatures+snippets, balanced=default, deep=full content), include array to control which fields are returned (["content","graph","hints","symbolDetail"]), and prefetch=true to enable intelligent context prediction based on historical patterns (reduces round-trips by 40-60%).',
160
+ 'PREFERRED for multi-file tasks. Gets curated context in one call — replaces the manual search → read → read cycle. Combines search + graph expansion + selective reading. Returns relevant files with symbols and content, optimized for tokens. Options: intent, maxTokens (budget), diff (true for HEAD or branch name), detail (minimal/balanced/deep), include (content/graph/hints/symbolDetail), prefetch (true for predictive loading). Call this FIRST before individual smart_read/smart_search calls.',
175
161
  {
176
162
  task: z.string(),
177
163
  intent: z.enum(['implementation', 'debug', 'tests', 'config', 'docs', 'explore']).optional(),
@@ -188,7 +174,7 @@ This ensures optimal performance and context recovery.`,
188
174
 
189
175
  server.tool(
190
176
  'smart_shell',
191
- 'Run a diagnostic shell command from an allowlist. Allowed: pwd, ls, find, rg, git (status/diff/show/log/branch/rev-parse), npm/pnpm/yarn/bun (test/run/lint/build/typecheck/check). Blocks shell operators, pipes, and unsafe commands. For large diffs: output is split by file (up to 8 files, 60 lines each) with a hint to run git show -- <file> for the full body of any truncated file; prefer git diff --stat first to see which files changed, then git show -- <file> per file for targeted reading. Includes a unified confidence block: { blocked, timedOut }.',
177
+ 'Run a diagnostic shell command from an allowlist. Allowed: pwd, ls, find, rg, git (status/diff/show/log/branch/rev-parse), npm/pnpm/yarn/bun (test/run/lint/build/typecheck/check). Blocks shell operators, pipes, and unsafe commands. For large diffs: output is split by file (up to 8 files, 60 lines each); prefer git diff --stat first, then git show -- <file> per file.',
192
178
  {
193
179
  command: z.string(),
194
180
  },
@@ -1,12 +1,35 @@
1
1
  import { encodingForModel } from 'js-tiktoken';
2
2
 
3
- const fallbackModel = 'gpt-4o-mini';
4
- const encoder = encodingForModel(fallbackModel);
3
+ const CLAUDE_ALIASES = new Set(['claude', 'anthropic']);
5
4
 
6
- export const countTokens = (text = '') => {
7
- if (!text) {
8
- return 0;
5
+ // js-tiktoken does not ship Claude's tokenizer; gpt-4o (o200k_base) is the
6
+ // closest available encoding. Accuracy for Claude models: ±15-20%.
7
+ const CLAUDE_FALLBACK = 'gpt-4o';
8
+ const DEFAULT_MODEL = 'gpt-4o-mini';
9
+
10
+ const resolveModel = () => {
11
+ const requested = (process.env.DEVCTX_TOKEN_MODEL || '').toLowerCase().trim();
12
+ if (!requested) return DEFAULT_MODEL;
13
+ if (CLAUDE_ALIASES.has(requested) || requested.startsWith('claude')) {
14
+ return CLAUDE_FALLBACK;
15
+ }
16
+ return requested;
17
+ };
18
+
19
+ const buildEncoder = () => {
20
+ const model = resolveModel();
21
+ try {
22
+ return encodingForModel(model);
23
+ } catch {
24
+ return encodingForModel(DEFAULT_MODEL);
9
25
  }
26
+ };
10
27
 
28
+ // Encoder is initialised once; if the env var changes at runtime the process
29
+ // must be restarted (acceptable for a CLI/MCP server).
30
+ const encoder = buildEncoder();
31
+
32
+ export const countTokens = (text = '') => {
33
+ if (!text) return 0;
11
34
  return encoder.encode(String(text)).length;
12
35
  };
@@ -15,7 +15,6 @@ import { predictContextFiles, recordContextAccess } from '../context-patterns.js
15
15
  import { recordToolUsage } from '../usage-feedback.js';
16
16
  import { recordDecision, DECISION_REASONS, EXPECTED_BENEFITS } from '../decision-explainer.js';
17
17
  import { recordDevctxOperation } from '../missed-opportunities.js';
18
- import { buildMetricsDisplay } from '../utils/metrics-display.js';
19
18
  import { createProgressReporter } from '../streaming.js';
20
19
  import {
21
20
  getDetailedDiff,
@@ -35,7 +34,6 @@ import {
35
34
  } from '../utils/query-extraction.js';
36
35
  import {
37
36
  dedupeEvidence,
38
- formatReasonIncluded,
39
37
  buildSymbolPreviews,
40
38
  attachSymbolEvidence,
41
39
  computeStaticUtility,
@@ -226,29 +224,6 @@ const getSymbolSignatureLimit = (item, detailMode, readMode) => {
226
224
  const getSymbolSignatures = (entries, maxItems = 10) =>
227
225
  entries.filter((entry) => entry.signature).slice(0, maxItems).map((entry) => entry.signature);
228
226
 
229
- const serializeEvidencePayload = (item) => {
230
- const evidence = dedupeEvidence(item.evidence ?? []);
231
- if (evidence.length === 0) return [];
232
-
233
- const limit = item.role === 'primary' ? 2 : 1;
234
- const preferred = item.role === 'primary'
235
- ? evidence
236
- : [
237
- evidence.find((entry) => ['testOf', 'dependencyOf', 'dependentOf'].includes(entry.type)),
238
- evidence[0],
239
- ].filter(Boolean);
240
-
241
- return uniqueList(preferred)
242
- .slice(0, limit)
243
- .map((entry) => ({
244
- type: entry.type,
245
- ...(entry.via ? { via: entry.via } : {}),
246
- ...(entry.query && item.role === 'primary' ? { query: entry.query } : {}),
247
- ...(entry.ref && item.role === 'primary' ? { ref: entry.ref } : {}),
248
- ...(Array.isArray(entry.symbols) && entry.symbols.length > 0 ? { symbols: entry.symbols.slice(0, 2) } : {}),
249
- }));
250
- };
251
-
252
227
  const shouldIncludeSymbolNames = (item, symbolPreviews, readMode) => {
253
228
  if (item.role === 'primary') return true;
254
229
  if (readMode === 'full') return true;
@@ -273,14 +248,10 @@ const buildContextItemPayload = (item, index, detailMode, readMode = 'index-only
273
248
  const symbolSignatures = shouldIncludeSymbolSignatures(item, symbolPreviews)
274
249
  ? getSymbolSignatures(fileSymbolEntries, getSymbolSignatureLimit(item, detailMode, readMode))
275
250
  : [];
276
- const evidence = serializeEvidencePayload(item);
277
251
 
278
252
  return {
279
253
  file: item.rel,
280
254
  role: item.role,
281
- readMode,
282
- reasonIncluded: formatReasonIncluded(item.evidence),
283
- evidence,
284
255
  ...(fileSymbols.length > 0 ? { symbols: fileSymbols } : {}),
285
256
  ...(symbolSignatures.length > 0 ? { symbolSignatures } : {}),
286
257
  ...(symbolPreviews.length > 0 ? { symbolPreviews } : {}),
@@ -669,16 +640,9 @@ export const smartContext = async ({
669
640
 
670
641
  const filtered = filterFoundSymbols(symbolResult.content, symbolCandidates);
671
642
  if (filtered) {
672
- const symbolEvidence = dedupeEvidence([{
673
- type: 'symbolDetail',
674
- symbols: symbolCandidates.slice(0, 3),
675
- }]);
676
643
  const symbolPayload = {
677
644
  file: topPrimary.rel,
678
645
  role: 'symbolDetail',
679
- readMode: 'symbol',
680
- reasonIncluded: formatReasonIncluded(symbolEvidence),
681
- evidence: symbolEvidence,
682
646
  content: filtered,
683
647
  };
684
648
  const symbolTokens = countTokens(JSON.stringify(symbolPayload));
@@ -692,7 +656,6 @@ export const smartContext = async ({
692
656
  const existing = context[existingIdx];
693
657
  const signaturesOnly = {
694
658
  ...existing,
695
- readMode: 'signatures-only',
696
659
  content: '(omitted — see symbolDetail)',
697
660
  };
698
661
  const oldTokens = countTokens(JSON.stringify(existing));
@@ -747,7 +710,6 @@ export const smartContext = async ({
747
710
 
748
711
  const contentTokens = countTokens(context.map((c) => c.content).join('\n'));
749
712
  const previewTokens = context.reduce((sum, item) => sum + countTokens(JSON.stringify(item.symbolPreviews ?? [])), 0);
750
- const indexOnlyItems = context.filter((item) => item.readMode === 'index-only').length;
751
713
  const contentItems = context.filter((item) => typeof item.content === 'string' && item.content.length > 0).length;
752
714
  const primaryItem = context.find((item) => item.role === 'primary');
753
715
 
@@ -816,17 +778,6 @@ export const smartContext = async ({
816
778
  };
817
779
 
818
780
  const filesIncluded = new Set(context.map((c) => c.file)).size;
819
- const metricsDisplay = buildMetricsDisplay({
820
- tool: 'smart_context',
821
- target: task,
822
- metrics: {
823
- rawTokens: totalRawTokens,
824
- compressedTokens: totalCompressedTokens,
825
- savedTokens,
826
- },
827
- startTime: enableProgress ? startTime : null,
828
- filesCount: filesIncluded,
829
- });
830
781
 
831
782
  if (progress) {
832
783
  progress.complete({
@@ -845,28 +796,20 @@ export const smartContext = async ({
845
796
  confidence: { indexFreshness, graphCoverage: graphCov },
846
797
  context,
847
798
  ...(includeSet.has('graph') ? { graph: graphSummary, graphCoverage: graphCov } : {}),
848
- metrics: {
849
- contentTokens,
850
- totalTokens: 0,
799
+ stats: {
851
800
  filesIncluded,
852
801
  filesEvaluated: expanded.size,
853
- savingsPct,
854
802
  detailMode,
855
- include: [...includeSet],
856
- previewTokens,
857
- indexOnlyItems,
858
- contentItems,
859
- primaryReadMode: primaryItem?.readMode ?? null,
803
+ totalTokens: countTokens(context.map((c) => c.content || '').join('')),
860
804
  ...(prefetchResult ? {
861
805
  prefetch: {
862
806
  enabled: true,
863
807
  confidence: prefetchResult.confidence || 0,
864
808
  predictedFiles: prefetchResult.predicted?.length || 0,
865
- matchedPattern: prefetchResult.matchedPattern || null
866
- }
867
- } : {})
809
+ matchedPattern: prefetchResult.matchedPattern || null,
810
+ },
811
+ } : {}),
868
812
  },
869
- metricsDisplay,
870
813
  ...(includeSet.has('hints') ? { hints } : {}),
871
814
  };
872
815
 
@@ -875,7 +818,5 @@ export const smartContext = async ({
875
818
  result.diffSummary = diffSummary;
876
819
  }
877
820
 
878
- result.metrics.totalTokens = countTokens(JSON.stringify(result));
879
-
880
821
  return result;
881
822
  };
@@ -105,15 +105,25 @@ const formatDeclarationName = (name) => {
105
105
 
106
106
  const collectVariableNames = (declarationList) => declarationList.declarations.map((declaration) => formatDeclarationName(declaration.name));
107
107
 
108
- const formatTopLevelStatement = (statement, sourceFile) => {
108
+ const getFunctionSignature = (statement, sourceFile) => {
109
+ const body = statement.body;
110
+ if (!body) return statement.getText(sourceFile).split('\n')[0];
111
+ const fullText = statement.getText(sourceFile);
112
+ const bodyOffset = body.getStart(sourceFile) - statement.getStart(sourceFile);
113
+ const sig = fullText.slice(0, bodyOffset).replace(/\s+$/, '');
114
+ return sig.length > 120 ? `${sig.slice(0, 120)}...` : sig;
115
+ };
116
+
117
+ const formatTopLevelStatement = (statement, sourceFile, mode = 'outline') => {
109
118
  const exported = statement.modifiers?.some((modifier) => modifier.kind === ts.SyntaxKind.ExportKeyword) ?? false;
110
119
  const prefix = exported ? 'export ' : '';
111
120
 
112
121
  if (ts.isImportDeclaration(statement)) {
113
- return formatImport(statement);
122
+ return null;
114
123
  }
115
124
 
116
125
  if (ts.isFunctionDeclaration(statement)) {
126
+ if (mode === 'signatures') return getFunctionSignature(statement, sourceFile);
117
127
  return `${prefix}function ${getNodeName(statement)}()`;
118
128
  }
119
129
 
@@ -143,7 +153,8 @@ const formatTopLevelStatement = (statement, sourceFile) => {
143
153
  }
144
154
 
145
155
  if (ts.isExportAssignment(statement)) {
146
- return `export default ${statement.expression.getText(sourceFile)}`;
156
+ const text = statement.expression.getText(sourceFile);
157
+ return `export default ${text.length > 60 ? `${text.slice(0, 60)}...` : text}`;
147
158
  }
148
159
 
149
160
  return statement.getText(sourceFile).split('\n')[0];
@@ -248,7 +259,8 @@ export const summarizeCode = (fullPath, content, mode) => {
248
259
  const sourceFile = parseSource(fullPath, content);
249
260
  const topLevel = sourceFile.statements.flatMap((statement) => {
250
261
  if (isIIFE(statement)) return extractIIFEMembers(statement, sourceFile);
251
- return [formatTopLevelStatement(statement, sourceFile)];
262
+ const formatted = formatTopLevelStatement(statement, sourceFile, mode);
263
+ return formatted !== null ? [formatted] : [];
252
264
  });
253
265
  const hooks = collectHooks(sourceFile);
254
266
 
@@ -4,7 +4,6 @@ import { countTokens } from '../tokenCounter.js';
4
4
  export const smartReadBatch = async ({ files, maxTokens }) => {
5
5
  const results = [];
6
6
  let totalTokens = 0;
7
- let totalRawTokens = 0;
8
7
  let filesSkipped = 0;
9
8
 
10
9
  for (const item of files) {
@@ -40,13 +39,11 @@ export const smartReadBatch = async ({ files, maxTokens }) => {
40
39
  parser: readResult.parser,
41
40
  truncated: readResult.truncated,
42
41
  content: readResult.content,
43
- ...(readResult.confidence ? { confidence: readResult.confidence } : {}),
44
42
  ...(readResult.indexHint !== undefined ? { indexHint: readResult.indexHint } : {}),
45
43
  ...(readResult.chosenMode ? { chosenMode: readResult.chosenMode, budgetApplied: true } : {}),
46
44
  });
47
45
 
48
46
  totalTokens += itemTokens;
49
- totalRawTokens += readResult.metrics?.rawTokens ?? 0;
50
47
  } catch (err) {
51
48
  results.push({
52
49
  filePath: item.path,
@@ -56,17 +53,12 @@ export const smartReadBatch = async ({ files, maxTokens }) => {
56
53
  }
57
54
  }
58
55
 
59
- const totalSavingsPct = totalRawTokens > 0
60
- ? Math.max(0, Math.round(((totalRawTokens - totalTokens) / totalRawTokens) * 100))
61
- : 0;
62
-
63
56
  return {
64
57
  results,
65
58
  metrics: {
66
59
  totalTokens,
67
60
  filesRead: results.length,
68
61
  filesSkipped,
69
- totalSavingsPct,
70
62
  },
71
63
  };
72
64
  };
@@ -12,7 +12,6 @@ import { countTokens } from '../tokenCounter.js';
12
12
  import { recordToolUsage } from '../usage-feedback.js';
13
13
  import { recordDecision, DECISION_REASONS, EXPECTED_BENEFITS } from '../decision-explainer.js';
14
14
  import { recordDevctxOperation } from '../missed-opportunities.js';
15
- import { buildMetricsDisplay } from '../utils/metrics-display.js';
16
15
  import { createProgressReporter } from '../streaming.js';
17
16
 
18
17
  const execFile = promisify(execFileCb);
@@ -563,16 +562,6 @@ export const smartRead = async ({ filePath, mode = 'outline', startLine, endLine
563
562
  context: `${lineCount} lines, ${metrics.rawTokens} tokens → ${metrics.compressedTokens} tokens`,
564
563
  });
565
564
 
566
- const confidence = { parser, truncated, cached: cacheHit && !contextResult };
567
- if (contextResult) confidence.graphCoverage = contextResult.graphCoverage;
568
-
569
- const metricsDisplay = buildMetricsDisplay({
570
- tool: 'smart_read',
571
- target: path.relative(effectiveRoot, fullPath),
572
- metrics,
573
- startTime: enableProgress ? startTime : null,
574
- });
575
-
576
565
  if (progress) {
577
566
  progress.complete({
578
567
  file: path.relative(effectiveRoot, fullPath),
@@ -588,12 +577,7 @@ export const smartRead = async ({ filePath, mode = 'outline', startLine, endLine
588
577
  parser,
589
578
  truncated,
590
579
  content: compressedText,
591
- confidence,
592
- metrics,
593
- metricsDisplay,
594
580
  };
595
-
596
- if (cacheHit && !contextResult) result.cached = true;
597
581
  if (mode === 'symbol') result.indexHint = indexHintUsed;
598
582
  if (validBudget && effectiveMode !== mode) {
599
583
  result.chosenMode = effectiveMode;
@@ -12,7 +12,6 @@ import { recordToolUsage } from '../usage-feedback.js';
12
12
  import { recordDecision, DECISION_REASONS, EXPECTED_BENEFITS } from '../decision-explainer.js';
13
13
  import { recordDevctxOperation } from '../missed-opportunities.js';
14
14
  import { IGNORED_DIRS, IGNORED_FILE_NAMES, IGNORED_FILE_PATTERNS } from '../config/ignored-paths.js';
15
- import { buildMetricsDisplay } from '../utils/metrics-display.js';
16
15
  import { createProgressReporter } from '../streaming.js';
17
16
  import { ensureIndexReady } from '../index-manager.js';
18
17
 
@@ -506,16 +505,6 @@ export const smartSearch = async ({ query, cwd = '.', intent, _testForceWalk = f
506
505
  else if (usedFallback) retrievalConfidence = provenance?.skippedItemsTotal > 0 ? 'low' : 'medium';
507
506
  else if (provenance?.skippedItemsTotal > 0) retrievalConfidence = 'low';
508
507
 
509
- const confidence = { level: retrievalConfidence, indexFreshness };
510
-
511
- const metricsDisplay = buildMetricsDisplay({
512
- tool: 'smart_search',
513
- target: query,
514
- metrics,
515
- startTime: enableProgress ? startTime : null,
516
- filesCount: groups.length,
517
- });
518
-
519
508
  if (progress) {
520
509
  progress.complete({
521
510
  query,
@@ -528,23 +517,17 @@ export const smartSearch = async ({ query, cwd = '.', intent, _testForceWalk = f
528
517
 
529
518
  const result = {
530
519
  query,
531
- root,
532
- engine,
533
- retrievalConfidence,
534
520
  indexFreshness,
535
- sourceBreakdown: breakdown,
536
- confidence,
537
521
  ...(validIntent ? { intent: validIntent } : {}),
538
522
  ...(indexHits ? { indexBoosted: indexHits.size } : {}),
539
523
  totalMatches: dedupedMatches.length,
540
524
  matchedFiles: groups.length,
541
525
  topFiles: groups.slice(0, 10).map((group) => ({ file: group.file, count: group.count, score: group.score })),
542
526
  matches: compressedText,
543
- metrics,
544
- metricsDisplay,
545
527
  };
546
528
 
547
- if (provenance) result.provenance = provenance;
529
+ if (provenance?.fallbackReason) result.searchMode = provenance.fallbackReason;
530
+ if (retrievalConfidence !== 'high') result.retrievalConfidence = retrievalConfidence;
548
531
 
549
532
  return result;
550
533
  };
@@ -7,10 +7,13 @@ import { pickRelevantLines, truncate, uniqueLines } from '../utils/text.js';
7
7
  import { recordToolUsage } from '../usage-feedback.js';
8
8
  import { recordDecision, DECISION_REASONS, EXPECTED_BENEFITS } from '../decision-explainer.js';
9
9
  import { recordDevctxOperation } from '../missed-opportunities.js';
10
- import { buildMetricsDisplay } from '../utils/metrics-display.js';
11
-
12
10
  const execFile = promisify(execFileCallback);
13
11
  const isShellDisabled = () => process.env.DEVCTX_SHELL_DISABLED === 'true';
12
+ const DEFAULT_TIMEOUT_MS = 15000;
13
+ const getTimeoutMs = () => {
14
+ const env = parseInt(process.env.DEVCTX_SHELL_TIMEOUT_MS, 10);
15
+ return Number.isFinite(env) && env > 0 ? env : DEFAULT_TIMEOUT_MS;
16
+ };
14
17
  const allowedCommands = new Set(['pwd', 'ls', 'find', 'rg', 'git', 'npm', 'pnpm', 'yarn', 'bun']);
15
18
  const allowedGitSubcommands = new Set(['status', 'diff', 'show', 'log', 'branch', 'rev-parse', 'blame']);
16
19
  const allowedPackageManagerSubcommands = new Set(['test', 'run', 'lint', 'build', 'typecheck', 'check']);
@@ -237,8 +240,6 @@ const buildBlockedResult = async (command, message) => {
237
240
  exitCode: 126,
238
241
  blocked: true,
239
242
  output: message,
240
- confidence: { blocked: true, timedOut: false },
241
- metrics,
242
243
  };
243
244
  };
244
245
 
@@ -273,16 +274,17 @@ export const smartShell = async ({ command }) => {
273
274
  }
274
275
 
275
276
  const resolvedFile = file === 'rg' ? rgPath : file;
277
+ const timeoutMs = getTimeoutMs();
276
278
  const execution = await execFile(resolvedFile, args, {
277
279
  cwd: projectRoot,
278
280
  maxBuffer: 1024 * 1024 * 10,
279
- timeout: 15000,
281
+ timeout: timeoutMs,
280
282
  }).then(
281
283
  ({ stdout, stderr }) => ({ stdout, stderr, code: 0 }),
282
284
  (error) => ({
283
285
  stdout: error.stdout ?? '',
284
286
  stderr: error.killed
285
- ? `Command timed out after 15s: ${command}`
287
+ ? `Command timed out after ${timeoutMs / 1000}s: ${command}`
286
288
  : (error.stderr ?? error.message ?? ''),
287
289
  code: Number.isInteger(error.code) ? error.code : 1,
288
290
  timedOut: !!error.killed,
@@ -331,24 +333,13 @@ export const smartShell = async ({ command }) => {
331
333
  context: `${outputLines} lines → ${compressedText.split('\n').length} lines (relevant only)`,
332
334
  });
333
335
 
334
- const metricsDisplay = buildMetricsDisplay({
335
- tool: 'smart_shell',
336
- target: command,
337
- metrics,
338
- startTime: null,
339
- });
340
-
341
336
  const result = {
342
337
  command,
343
338
  exitCode: execution.code,
344
339
  blocked: false,
345
340
  output: compressedText,
346
- confidence: { blocked: false, timedOut: !!execution.timedOut },
347
- metrics,
348
- metricsDisplay,
341
+ ...(execution.timedOut ? { timedOut: true } : {}),
349
342
  };
350
343
 
351
- if (execution.timedOut) result.timedOut = true;
352
-
353
344
  return result;
354
345
  };
@@ -13,6 +13,9 @@ import { smartContext } from './smart-context.js';
13
13
  import { smartMetrics } from './smart-metrics.js';
14
14
  import { smartSummary } from './smart-summary.js';
15
15
 
16
+ const isStorageUnhealthy = (health) =>
17
+ health && health.status !== 'ok' && health.status !== null && health.status !== undefined;
18
+
16
19
  const DEFAULT_START_MAX_TOKENS = 400;
17
20
  const DEFAULT_END_MAX_TOKENS = 500;
18
21
  const DEFAULT_END_EVENT = 'milestone';
@@ -129,10 +132,6 @@ const classifyContinuity = ({ prompt, summaryResult }) => {
129
132
  state: 'resume',
130
133
  shouldReuseContext: true,
131
134
  reason: 'A persisted session was found and no prompt terms were available for comparison.',
132
- sharedTerms: [],
133
- promptTermCount: 0,
134
- summaryTermCount: 0,
135
- matchScore: 1,
136
135
  };
137
136
  }
138
137
 
@@ -147,10 +146,6 @@ const classifyContinuity = ({ prompt, summaryResult }) => {
147
146
  state: 'aligned',
148
147
  shouldReuseContext: true,
149
148
  reason: 'Prompt terms align with persisted task context.',
150
- sharedTerms: sharedTerms.slice(0, 8),
151
- promptTermCount: promptTerms.length,
152
- summaryTermCount: summaryTerms.length,
153
- matchScore,
154
149
  };
155
150
  }
156
151
 
@@ -159,10 +154,6 @@ const classifyContinuity = ({ prompt, summaryResult }) => {
159
154
  state: 'possible_shift',
160
155
  shouldReuseContext: true,
161
156
  reason: 'Prompt partially overlaps the persisted context; review before continuing.',
162
- sharedTerms: sharedTerms.slice(0, 8),
163
- promptTermCount: promptTerms.length,
164
- summaryTermCount: summaryTerms.length,
165
- matchScore,
166
157
  };
167
158
  }
168
159
 
@@ -170,10 +161,6 @@ const classifyContinuity = ({ prompt, summaryResult }) => {
170
161
  state: 'context_mismatch',
171
162
  shouldReuseContext: false,
172
163
  reason: 'Prompt terms do not align with the persisted session summary.',
173
- sharedTerms: [],
174
- promptTermCount: promptTerms.length,
175
- summaryTermCount: summaryTerms.length,
176
- matchScore,
177
164
  };
178
165
  };
179
166
 
@@ -342,7 +329,7 @@ const buildStartRecommendedPath = ({
342
329
  autoCreated,
343
330
  isolatedSession,
344
331
  nextTools: [...new Set(nextTools)],
345
- steps,
332
+ instructions: steps.map((s) => `${s.tool}: ${s.instruction}`).join(' | '),
346
333
  };
347
334
  };
348
335
 
@@ -389,7 +376,7 @@ const buildEndRecommendedPath = ({ event, checkpoint, mutationSafety, workflow }
389
376
  : 'checkpointed',
390
377
  checkpointEvent: event,
391
378
  nextTools: [...new Set(nextTools)],
392
- steps,
379
+ instructions: steps.map((s) => `${s.tool}: ${s.instruction}`).join(' | '),
393
380
  };
394
381
  };
395
382
 
@@ -579,7 +566,7 @@ const startTurn = async ({
579
566
  ...(summaryResult.candidates ? { candidates: summaryResult.candidates } : {}),
580
567
  ...(summaryResult.recommendedSessionId ? { recommendedSessionId: summaryResult.recommendedSessionId } : {}),
581
568
  ...(metrics ? { metrics: summarizeMetrics(metrics) } : {}),
582
- storageHealth: summaryResult.storageHealth ?? metrics?.storageHealth ?? null,
569
+ ...(isStorageUnhealthy(summaryResult.storageHealth ?? metrics?.storageHealth) ? { storageHealth: summaryResult.storageHealth ?? metrics?.storageHealth } : {}),
583
570
  recommendedPath,
584
571
  message: mutationSafety?.blocked
585
572
  ? mutationSafety.message
@@ -694,7 +681,7 @@ const endTurn = async ({
694
681
  checkpoint,
695
682
  ...(workflow ? { workflow } : {}),
696
683
  ...(metrics ? { metrics: summarizeMetrics(metrics) } : {}),
697
- storageHealth: checkpoint.storageHealth ?? metrics?.storageHealth ?? null,
684
+ ...(isStorageUnhealthy(checkpoint.storageHealth ?? metrics?.storageHealth) ? { storageHealth: checkpoint.storageHealth ?? metrics?.storageHealth } : {}),
698
685
  recommendedPath,
699
686
  message: mutationSafety?.blocked ? mutationSafety.message : checkpoint.message,
700
687
  }, {
@@ -77,9 +77,9 @@ export const attachSafetyMetadata = (
77
77
 
78
78
  return {
79
79
  ...result,
80
- ...(mutationSafety ? { mutationSafety } : {}),
81
- repoSafety,
82
- sideEffectsSuppressed: Boolean(sideEffectsSuppressed),
80
+ ...(mutationSafety?.blocked ? { mutationSafety } : {}),
81
+ ...(repoSafety && (mutationSafety?.blocked || sideEffectsSuppressed) ? { repoSafety } : {}),
82
+ ...(sideEffectsSuppressed ? { sideEffectsSuppressed: true } : {}),
83
83
  ...(degraded ? { degradedMode: degraded } : {}),
84
84
  };
85
85
  };
package/src/utils/text.js CHANGED
@@ -8,6 +8,7 @@ export const truncate = (text = '', maxChars = 4000) => {
8
8
 
9
9
  export const uniqueLines = (text = '') => {
10
10
  const seen = new Set();
11
+ let prevEmpty = false;
11
12
 
12
13
  return text
13
14
  .split('\n')
@@ -15,9 +16,13 @@ export const uniqueLines = (text = '') => {
15
16
  const key = line.trim();
16
17
 
17
18
  if (!key) {
19
+ if (prevEmpty) return false;
20
+ prevEmpty = true;
18
21
  return true;
19
22
  }
20
23
 
24
+ prevEmpty = false;
25
+
21
26
  if (seen.has(key)) {
22
27
  return false;
23
28
  }