context-mode 1.0.111 → 1.0.112
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +2 -2
- package/.claude-plugin/plugin.json +1 -1
- package/.openclaw-plugin/index.ts +3 -2
- package/.openclaw-plugin/openclaw.plugin.json +1 -1
- package/.openclaw-plugin/package.json +1 -1
- package/README.md +152 -34
- package/bin/statusline.mjs +144 -127
- package/build/adapters/base.d.ts +8 -5
- package/build/adapters/base.js +8 -18
- package/build/adapters/claude-code/index.d.ts +24 -3
- package/build/adapters/claude-code/index.js +44 -11
- package/build/adapters/codex/hooks.d.ts +10 -5
- package/build/adapters/codex/hooks.js +10 -5
- package/build/adapters/codex/index.d.ts +17 -5
- package/build/adapters/codex/index.js +337 -37
- package/build/adapters/codex/paths.d.ts +1 -0
- package/build/adapters/codex/paths.js +12 -0
- package/build/adapters/cursor/index.d.ts +6 -0
- package/build/adapters/cursor/index.js +83 -2
- package/build/adapters/detect.d.ts +1 -1
- package/build/adapters/detect.js +29 -6
- package/build/adapters/omp/index.d.ts +65 -0
- package/build/adapters/omp/index.js +182 -0
- package/build/adapters/omp/plugin.d.ts +75 -0
- package/build/adapters/omp/plugin.js +220 -0
- package/build/adapters/openclaw/mcp-tools.d.ts +54 -0
- package/build/adapters/openclaw/mcp-tools.js +198 -0
- package/build/adapters/openclaw/plugin.d.ts +130 -0
- package/build/adapters/openclaw/plugin.js +629 -0
- package/build/adapters/openclaw/workspace-router.d.ts +29 -0
- package/build/adapters/openclaw/workspace-router.js +64 -0
- package/build/adapters/opencode/plugin.d.ts +145 -0
- package/build/adapters/opencode/plugin.js +457 -0
- package/build/adapters/pi/extension.d.ts +26 -0
- package/build/adapters/pi/extension.js +552 -0
- package/build/adapters/pi/index.d.ts +57 -0
- package/build/adapters/pi/index.js +173 -0
- package/build/adapters/pi/mcp-bridge.d.ts +113 -0
- package/build/adapters/pi/mcp-bridge.js +251 -0
- package/build/adapters/types.d.ts +11 -6
- package/build/cli.js +186 -170
- package/build/db-base.d.ts +15 -2
- package/build/db-base.js +50 -5
- package/build/executor.d.ts +2 -0
- package/build/executor.js +15 -2
- package/build/runPool.d.ts +36 -0
- package/build/runPool.js +51 -0
- package/build/runtime.js +64 -5
- package/build/search/auto-memory.js +6 -4
- package/build/security.js +30 -10
- package/build/server.d.ts +23 -1
- package/build/server.js +652 -174
- package/build/session/analytics.d.ts +404 -1
- package/build/session/analytics.js +1347 -42
- package/build/session/db.d.ts +114 -5
- package/build/session/db.js +275 -27
- package/build/session/event-emit.d.ts +48 -0
- package/build/session/event-emit.js +101 -0
- package/build/session/extract.d.ts +1 -0
- package/build/session/extract.js +79 -12
- package/build/session/purge.d.ts +111 -0
- package/build/session/purge.js +138 -0
- package/build/store.d.ts +7 -0
- package/build/store.js +69 -6
- package/build/util/claude-config.d.ts +26 -0
- package/build/util/claude-config.js +91 -0
- package/build/util/hook-config.d.ts +4 -0
- package/build/util/hook-config.js +39 -0
- package/cli.bundle.mjs +411 -208
- package/configs/antigravity/GEMINI.md +0 -3
- package/configs/claude-code/CLAUDE.md +1 -4
- package/configs/codex/AGENTS.md +1 -4
- package/configs/codex/config.toml +3 -0
- package/configs/codex/hooks.json +8 -0
- package/configs/cursor/context-mode.mdc +0 -3
- package/configs/gemini-cli/GEMINI.md +0 -3
- package/configs/jetbrains-copilot/copilot-instructions.md +0 -3
- package/configs/kilo/AGENTS.md +0 -3
- package/configs/kiro/KIRO.md +0 -3
- package/configs/omp/SYSTEM.md +85 -0
- package/configs/omp/mcp.json +7 -0
- package/configs/openclaw/AGENTS.md +0 -3
- package/configs/opencode/AGENTS.md +0 -3
- package/configs/pi/AGENTS.md +0 -3
- package/configs/qwen-code/QWEN.md +1 -4
- package/configs/vscode-copilot/copilot-instructions.md +0 -3
- package/configs/zed/AGENTS.md +0 -3
- package/hooks/codex/posttooluse.mjs +9 -2
- package/hooks/codex/precompact.mjs +69 -0
- package/hooks/codex/sessionstart.mjs +13 -9
- package/hooks/codex/stop.mjs +1 -2
- package/hooks/codex/userpromptsubmit.mjs +1 -2
- package/hooks/core/routing.mjs +237 -18
- package/hooks/cursor/afteragentresponse.mjs +1 -1
- package/hooks/cursor/hooks.json +31 -0
- package/hooks/cursor/posttooluse.mjs +1 -1
- package/hooks/cursor/sessionstart.mjs +5 -5
- package/hooks/cursor/stop.mjs +1 -1
- package/hooks/ensure-deps.mjs +12 -13
- package/hooks/gemini-cli/aftertool.mjs +1 -1
- package/hooks/gemini-cli/beforeagent.mjs +1 -1
- package/hooks/gemini-cli/precompress.mjs +3 -2
- package/hooks/gemini-cli/sessionstart.mjs +9 -9
- package/hooks/jetbrains-copilot/posttooluse.mjs +1 -1
- package/hooks/jetbrains-copilot/precompact.mjs +3 -2
- package/hooks/jetbrains-copilot/sessionstart.mjs +9 -9
- package/hooks/kiro/agentspawn.mjs +5 -5
- package/hooks/kiro/posttooluse.mjs +2 -2
- package/hooks/kiro/userpromptsubmit.mjs +1 -1
- package/hooks/posttooluse.mjs +45 -0
- package/hooks/precompact.mjs +17 -0
- package/hooks/pretooluse.mjs +23 -0
- package/hooks/routing-block.mjs +0 -12
- package/hooks/run-hook.mjs +16 -3
- package/hooks/session-db.bundle.mjs +27 -18
- package/hooks/session-extract.bundle.mjs +2 -2
- package/hooks/session-helpers.mjs +101 -64
- package/hooks/sessionstart.mjs +51 -2
- package/hooks/vscode-copilot/posttooluse.mjs +1 -1
- package/hooks/vscode-copilot/precompact.mjs +3 -2
- package/hooks/vscode-copilot/sessionstart.mjs +9 -9
- package/openclaw.plugin.json +1 -1
- package/package.json +14 -8
- package/server.bundle.mjs +349 -147
- package/skills/UPSTREAM-CREDITS.md +0 -51
- package/skills/context-mode-ops/SKILL.md +0 -299
- package/skills/context-mode-ops/agent-teams.md +0 -198
- package/skills/context-mode-ops/communication.md +0 -224
- package/skills/context-mode-ops/marketing.md +0 -124
- package/skills/context-mode-ops/release.md +0 -214
- package/skills/context-mode-ops/review-pr.md +0 -269
- package/skills/context-mode-ops/tdd.md +0 -329
- package/skills/context-mode-ops/triage-issue.md +0 -266
- package/skills/context-mode-ops/validation.md +0 -307
- package/skills/diagnose/SKILL.md +0 -122
- package/skills/diagnose/scripts/hitl-loop.template.sh +0 -41
- package/skills/grill-me/SKILL.md +0 -15
- package/skills/grill-with-docs/ADR-FORMAT.md +0 -47
- package/skills/grill-with-docs/CONTEXT-FORMAT.md +0 -77
- package/skills/grill-with-docs/SKILL.md +0 -93
- package/skills/improve-codebase-architecture/DEEPENING.md +0 -37
- package/skills/improve-codebase-architecture/INTERFACE-DESIGN.md +0 -44
- package/skills/improve-codebase-architecture/LANGUAGE.md +0 -53
- package/skills/improve-codebase-architecture/SKILL.md +0 -76
- package/skills/tdd/SKILL.md +0 -114
- package/skills/tdd/deep-modules.md +0 -33
- package/skills/tdd/interface-design.md +0 -31
- package/skills/tdd/mocking.md +0 -59
- package/skills/tdd/refactoring.md +0 -10
- package/skills/tdd/tests.md +0 -61
package/build/server.js
CHANGED
|
@@ -2,29 +2,33 @@
|
|
|
2
2
|
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
3
3
|
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
4
4
|
import { createRequire } from "node:module";
|
|
5
|
-
import { createHash } from "node:crypto";
|
|
6
5
|
import { existsSync, unlinkSync, readdirSync, readFileSync, writeFileSync, renameSync, rmSync, mkdirSync, cpSync, statSync, symlinkSync, lstatSync } from "node:fs";
|
|
7
|
-
import { execSync } from "node:child_process";
|
|
6
|
+
import { execSync, spawnSync } from "node:child_process";
|
|
8
7
|
import { join, dirname, resolve, sep, isAbsolute } from "node:path";
|
|
9
8
|
import { fileURLToPath } from "node:url";
|
|
10
9
|
import { homedir, tmpdir, cpus } from "node:os";
|
|
11
10
|
import { request as httpsRequest } from "node:https";
|
|
12
11
|
import { z } from "zod";
|
|
13
12
|
import { PolyglotExecutor } from "./executor.js";
|
|
14
|
-
import { runPool } from "./
|
|
13
|
+
import { runPool } from "./runPool.js";
|
|
15
14
|
import { ContentStore, cleanupStaleDBs, cleanupStaleContentDBs } from "./store.js";
|
|
16
15
|
import { composeFetchCacheKey } from "./fetch-cache.js";
|
|
17
16
|
import { readBashPolicies, evaluateCommandDenyOnly, extractShellCommands, readToolDenyPatterns, evaluateFilePath, } from "./security.js";
|
|
18
17
|
import { detectRuntimes, getRuntimeSummary, getAvailableLanguages, hasBunRuntime, } from "./runtime.js";
|
|
19
18
|
import { classifyNonZeroExit } from "./exit-classify.js";
|
|
20
19
|
import { startLifecycleGuard } from "./lifecycle.js";
|
|
21
|
-
import {
|
|
20
|
+
import { hashProjectDirCanonical, hashProjectDirLegacy, resolveContentStorePath, resolveSessionDbPath, SessionDB } from "./session/db.js";
|
|
21
|
+
import { purgeSession } from "./session/purge.js";
|
|
22
|
+
import { emitCacheHitEvent, emitIndexWriteEvent, emitSandboxExecuteEvent, } from "./session/event-emit.js";
|
|
22
23
|
import { persistToolCallCounter, restoreSessionStats } from "./session/persist-tool-calls.js";
|
|
23
24
|
import { searchAllSources } from "./search/unified.js";
|
|
24
25
|
import { buildNodeCommand } from "./adapters/types.js";
|
|
25
26
|
import { detectPlatform, getSessionDirSegments } from "./adapters/detect.js";
|
|
27
|
+
import { resolveCodexConfigDir } from "./adapters/codex/paths.js";
|
|
28
|
+
import { getHookScriptPaths } from "./util/hook-config.js";
|
|
29
|
+
import { resolveClaudeConfigDir } from "./util/claude-config.js";
|
|
26
30
|
import { loadDatabase } from "./db-base.js";
|
|
27
|
-
import { AnalyticsEngine, formatReport, getLifetimeStats, OPUS_INPUT_PRICE_PER_TOKEN } from "./session/analytics.js";
|
|
31
|
+
import { AnalyticsEngine, formatReport, getConversationStats, getLifetimeStats, getMultiAdapterLifetimeStats, getRealBytesStats, OPUS_INPUT_PRICE_PER_TOKEN } from "./session/analytics.js";
|
|
28
32
|
const __pkg_dir = dirname(fileURLToPath(import.meta.url));
|
|
29
33
|
const VERSION = (() => {
|
|
30
34
|
for (const rel of ["../package.json", "./package.json"]) {
|
|
@@ -106,9 +110,34 @@ let _detectedAdapter = null;
|
|
|
106
110
|
// Tracks the ctx_insight dashboard child so shutdown can terminate it.
|
|
107
111
|
// See ctx_insight handler + shutdown() in main().
|
|
108
112
|
let _insightChild = null;
|
|
113
|
+
/**
|
|
114
|
+
* Resolve the Claude Code config root, honoring `CLAUDE_CONFIG_DIR` (incl.
|
|
115
|
+
* leading `~`) before falling back to `~/.claude`. Mirrors
|
|
116
|
+
* `hooks/session-helpers.mjs::resolveConfigDir` and
|
|
117
|
+
* `ClaudeCodeAdapter.getConfigDir` so the pre-detection path agrees with
|
|
118
|
+
* hooks/adapter on where Claude Code session data lives. See issue #453.
|
|
119
|
+
*
|
|
120
|
+
* Issue #460 round-3: delegates to the canonical util so empty/whitespace
|
|
121
|
+
* env values fall back instead of poisoning downstream `join()` calls.
|
|
122
|
+
*/
|
|
123
|
+
function resolveClaudeConfigRoot() {
|
|
124
|
+
return resolveClaudeConfigDir();
|
|
125
|
+
}
|
|
126
|
+
async function getDiagnosticAdapter() {
|
|
127
|
+
if (_detectedAdapter)
|
|
128
|
+
return _detectedAdapter;
|
|
129
|
+
try {
|
|
130
|
+
const { getAdapter } = await import("./adapters/detect.js");
|
|
131
|
+
const signal = detectPlatform();
|
|
132
|
+
return await getAdapter(signal.platform);
|
|
133
|
+
}
|
|
134
|
+
catch {
|
|
135
|
+
return null;
|
|
136
|
+
}
|
|
137
|
+
}
|
|
109
138
|
/**
|
|
110
139
|
* Get the platform-specific sessions directory from the detected adapter.
|
|
111
|
-
* Falls back to
|
|
140
|
+
* Falls back to the detected platform config root before adapter detection.
|
|
112
141
|
*/
|
|
113
142
|
function getSessionDir() {
|
|
114
143
|
if (_detectedAdapter)
|
|
@@ -116,18 +145,27 @@ function getSessionDir() {
|
|
|
116
145
|
// Pre-detection path (race window before MCP `initialize` completes):
|
|
117
146
|
// call detectPlatform() (sync, env-var-based) and look up segments via
|
|
118
147
|
// getSessionDirSegments() (sync map, no adapter instantiation). This keeps
|
|
119
|
-
// non-Claude platforms from spilling sessions into ~/.claude/.
|
|
148
|
+
// non-Claude platforms from spilling sessions into ~/.claude/. For Claude
|
|
149
|
+
// Code/Codex (single-segment roots), reroute through their config-dir
|
|
150
|
+
// contracts so the pre-detection window does not split-state with hooks.
|
|
120
151
|
try {
|
|
121
152
|
const signal = detectPlatform();
|
|
122
153
|
const segments = getSessionDirSegments(signal.platform);
|
|
123
154
|
if (segments) {
|
|
124
|
-
|
|
155
|
+
let root = join(homedir(), ...segments);
|
|
156
|
+
if (segments.length === 1 && segments[0] === ".claude") {
|
|
157
|
+
root = resolveClaudeConfigRoot();
|
|
158
|
+
}
|
|
159
|
+
else if (segments.length === 1 && segments[0] === ".codex") {
|
|
160
|
+
root = resolveCodexConfigDir();
|
|
161
|
+
}
|
|
162
|
+
const dir = join(root, "context-mode", "sessions");
|
|
125
163
|
mkdirSync(dir, { recursive: true });
|
|
126
164
|
return dir;
|
|
127
165
|
}
|
|
128
166
|
}
|
|
129
|
-
catch { /* fall through to
|
|
130
|
-
const dir = join(
|
|
167
|
+
catch { /* fall through to claude fallback */ }
|
|
168
|
+
const dir = join(resolveClaudeConfigRoot(), "context-mode", "sessions");
|
|
131
169
|
mkdirSync(dir, { recursive: true });
|
|
132
170
|
return dir;
|
|
133
171
|
}
|
|
@@ -161,23 +199,17 @@ function resolveProjectPath(filePath) {
|
|
|
161
199
|
return isAbsolute(filePath) ? filePath : resolve(getProjectDir(), filePath);
|
|
162
200
|
}
|
|
163
201
|
/**
|
|
164
|
-
*
|
|
165
|
-
*
|
|
166
|
-
*
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
const projectDir = getProjectDir();
|
|
170
|
-
const normalized = projectDir.replace(/\\/g, "/");
|
|
171
|
-
return createHash("sha256").update(normalized).digest("hex").slice(0, 16);
|
|
172
|
-
}
|
|
173
|
-
/**
|
|
174
|
-
* Resolve the per-project SessionDB path the way 4742160 originally did
|
|
175
|
-
* for `persistToolCallCounter`. Centralized so the write-back, the
|
|
176
|
-
* restore-on-startup, and any future SessionDB consumer all hash to the
|
|
177
|
-
* same file under worktree isolation.
|
|
202
|
+
* Resolve the per-project SessionDB path. Delegates to
|
|
203
|
+
* {@link resolveSessionDbPath} so casing-only variants of the same
|
|
204
|
+
* physical worktree on macOS / Windows hit ONE DB, not two — and any
|
|
205
|
+
* pre-existing legacy raw-casing DB gets migrated in place on first
|
|
206
|
+
* resolve. Linux is a no-op.
|
|
178
207
|
*/
|
|
179
208
|
function getSessionDbPath() {
|
|
180
|
-
return
|
|
209
|
+
return resolveSessionDbPath({
|
|
210
|
+
projectDir: getProjectDir(),
|
|
211
|
+
sessionsDir: getSessionDir(),
|
|
212
|
+
});
|
|
181
213
|
}
|
|
182
214
|
/**
|
|
183
215
|
* Compute a per-project, per-platform persistent path for the ContentStore.
|
|
@@ -189,12 +221,14 @@ function getSessionDbPath() {
|
|
|
189
221
|
* ~/.cursor/context-mode/content/87c28c41ddb64d38.db
|
|
190
222
|
*/
|
|
191
223
|
function getStorePath() {
|
|
192
|
-
const hash = hashProjectDir();
|
|
193
224
|
// Derive content dir from session dir: .../sessions/ → .../content/
|
|
194
|
-
const
|
|
195
|
-
const dir = join(dirname(sessDir), "content");
|
|
225
|
+
const dir = join(dirname(getSessionDir()), "content");
|
|
196
226
|
mkdirSync(dir, { recursive: true });
|
|
197
|
-
|
|
227
|
+
// Delegate to resolveContentStorePath: same case-fold + one-shot legacy
|
|
228
|
+
// rename behavior as resolveSessionDbPath. On macOS / Windows, an
|
|
229
|
+
// existing legacy raw-casing FTS5 db (with -wal/-shm sidecars) is
|
|
230
|
+
// migrated in place on first call. On Linux it's a no-op.
|
|
231
|
+
return resolveContentStorePath({ projectDir: getProjectDir(), contentDir: dir });
|
|
198
232
|
}
|
|
199
233
|
function getStore() {
|
|
200
234
|
if (!_store) {
|
|
@@ -202,6 +236,21 @@ function getStore() {
|
|
|
202
236
|
// Server just opens whatever DB exists (or creates new if hook deleted it).
|
|
203
237
|
const dbPath = getStorePath();
|
|
204
238
|
_store = new ContentStore(dbPath);
|
|
239
|
+
// Wire deny-policy hook: store re-checks the Read deny list before
|
|
240
|
+
// re-reading any file_path during auto-refresh. Catches policy edits
|
|
241
|
+
// made after a file was originally indexed. See #442 round-3.
|
|
242
|
+
_store.setDenyChecker((filePath) => {
|
|
243
|
+
try {
|
|
244
|
+
const projectDir = getProjectDir();
|
|
245
|
+
const denyGlobs = readToolDenyPatterns("Read", projectDir);
|
|
246
|
+
const r = evaluateFilePath(filePath, denyGlobs, process.platform === "win32", projectDir);
|
|
247
|
+
return r.denied;
|
|
248
|
+
}
|
|
249
|
+
catch {
|
|
250
|
+
// Fail-closed for refresh: skip on error rather than re-read.
|
|
251
|
+
return true;
|
|
252
|
+
}
|
|
253
|
+
});
|
|
205
254
|
// One-time startup cleanup: remove stale content DBs (>14 days)
|
|
206
255
|
try {
|
|
207
256
|
const contentDir = dirname(getStorePath());
|
|
@@ -310,11 +359,15 @@ function healCacheMidSession() {
|
|
|
310
359
|
return;
|
|
311
360
|
_cacheHealDone = true;
|
|
312
361
|
try {
|
|
313
|
-
|
|
362
|
+
// Issue #460 round-3: honor $CLAUDE_CONFIG_DIR so users who relocate
|
|
363
|
+
// their CC config root don't have plugin cache healing operate against
|
|
364
|
+
// the wrong tree (and silently miss dangling-symlink cleanup).
|
|
365
|
+
const claudeRoot = resolveClaudeConfigDir();
|
|
366
|
+
const ipPath = resolve(claudeRoot, "plugins", "installed_plugins.json");
|
|
314
367
|
if (!existsSync(ipPath))
|
|
315
368
|
return;
|
|
316
369
|
const ip = JSON.parse(readFileSync(ipPath, "utf-8"));
|
|
317
|
-
const cacheRoot = resolve(
|
|
370
|
+
const cacheRoot = resolve(claudeRoot, "plugins", "cache");
|
|
318
371
|
// Plugin root: build/ for tsc, plugin root for bundle
|
|
319
372
|
const pluginRoot = existsSync(resolve(__pkg_dir, "package.json")) ? __pkg_dir : dirname(__pkg_dir);
|
|
320
373
|
for (const [key, entries] of Object.entries((ip.plugins ?? {}))) {
|
|
@@ -368,11 +421,34 @@ function trackResponse(toolName, response) {
|
|
|
368
421
|
// setImmediate keeps this off the response hot path; the helper itself
|
|
369
422
|
// is best-effort (never throws).
|
|
370
423
|
setImmediate(() => persistToolCallCounter(getSessionDbPath(), toolName, bytes));
|
|
424
|
+
// D2 Phase 5/7 — sandbox-execute event emission. Tracks the bytes the
|
|
425
|
+
// user actually saw from sandboxed runs so getRealBytesStats() can
|
|
426
|
+
// replace the conservative `events × 256` estimate. Best-effort and
|
|
427
|
+
// off the hot path, same shape as persistToolCallCounter above.
|
|
428
|
+
if (toolName === "ctx_execute"
|
|
429
|
+
|| toolName === "ctx_execute_file"
|
|
430
|
+
|| toolName === "ctx_batch_execute") {
|
|
431
|
+
setImmediate(() => emitSandboxExecuteEvent({
|
|
432
|
+
sessionDbPath: getSessionDbPath(),
|
|
433
|
+
toolName,
|
|
434
|
+
bytesReturned: bytes,
|
|
435
|
+
}));
|
|
436
|
+
}
|
|
371
437
|
return response;
|
|
372
438
|
}
|
|
373
|
-
function trackIndexed(bytes) {
|
|
439
|
+
function trackIndexed(bytes, source = "unknown") {
|
|
374
440
|
sessionStats.bytesIndexed += bytes;
|
|
375
441
|
persistStats();
|
|
442
|
+
// D2 Phase 5/7 — index-write event emission. `bytes_avoided` because
|
|
443
|
+
// these are bytes that would have flooded context if the user had
|
|
444
|
+
// Read'd the source instead of indexing.
|
|
445
|
+
if (bytes > 0) {
|
|
446
|
+
setImmediate(() => emitIndexWriteEvent({
|
|
447
|
+
sessionDbPath: getSessionDbPath(),
|
|
448
|
+
source,
|
|
449
|
+
bytesAvoided: bytes,
|
|
450
|
+
}));
|
|
451
|
+
}
|
|
376
452
|
}
|
|
377
453
|
// ─────────────────────────────────────────────────────────
|
|
378
454
|
// Stats persistence — written after every tool call so
|
|
@@ -805,7 +881,7 @@ export async function runBatchCommands(commands, opts, executor) {
|
|
|
805
881
|
// ─────────────────────────────────────────────────────────
|
|
806
882
|
server.registerTool("ctx_execute", {
|
|
807
883
|
title: "Execute Code",
|
|
808
|
-
description: `MANDATORY: Use for any command where output exceeds 20 lines. Execute code in a sandboxed subprocess. Only stdout enters context — raw data stays in the subprocess.${bunNote} Available: ${langList}.\n\nPREFER THIS OVER BASH for: API calls (gh, curl, aws), test runners (npm test, pytest), git queries (git log, git diff), data processing, and ANY CLI command that may produce large output. Bash should only be used for file mutations, git writes, and navigation.\n\nTHINK IN CODE: When you need to analyze, count, filter, compare, or process data — write code that does the work and console.log() only the answer. Do NOT read raw data into context to process mentally. Program the analysis, don't compute it in your reasoning. Write robust, pure JavaScript (no npm dependencies). Use only Node.js built-ins (fs, path, child_process). Always wrap in try/catch. Handle null/undefined. Works on both Node.js and Bun
|
|
884
|
+
description: `MANDATORY: Use for any command where output exceeds 20 lines. Execute code in a sandboxed subprocess. Only stdout enters context — raw data stays in the subprocess.${bunNote} Available: ${langList}.\n\nPREFER THIS OVER BASH for: API calls (gh, curl, aws), test runners (npm test, pytest), git queries (git log, git diff), data processing, and ANY CLI command that may produce large output. Bash should only be used for file mutations, git writes, and navigation.\n\nTHINK IN CODE: When you need to analyze, count, filter, compare, or process data — write code that does the work and console.log() only the answer. Do NOT read raw data into context to process mentally. Program the analysis, don't compute it in your reasoning. Write robust, pure JavaScript (no npm dependencies). Use only Node.js built-ins (fs, path, child_process). Always wrap in try/catch. Handle null/undefined. Works on both Node.js and Bun.`,
|
|
809
885
|
inputSchema: z.object({
|
|
810
886
|
language: z
|
|
811
887
|
.enum([
|
|
@@ -1097,7 +1173,7 @@ function intentSearch(stdout, intent, source, maxResults = 5) {
|
|
|
1097
1173
|
// ─────────────────────────────────────────────────────────
|
|
1098
1174
|
server.registerTool("ctx_execute_file", {
|
|
1099
1175
|
title: "Execute File Processing",
|
|
1100
|
-
description: "Read a file and process it without loading contents into context. The file is read into a FILE_CONTENT variable inside the sandbox. Only your printed summary enters context.\n\nPREFER THIS OVER Read/cat for: log files, data files (CSV, JSON, XML), large source files for analysis, and any file where you need to extract specific information rather than read the entire content.\n\nTHINK IN CODE: Write code that processes FILE_CONTENT and console.log() only the answer. Don't read files into context to analyze mentally. Write robust, pure JavaScript — no npm deps, try/catch, null-safe. Node.js + Bun compatible
|
|
1176
|
+
description: "Read a file and process it without loading contents into context. The file is read into a FILE_CONTENT variable inside the sandbox. Only your printed summary enters context.\n\nPREFER THIS OVER Read/cat for: log files, data files (CSV, JSON, XML), large source files for analysis, and any file where you need to extract specific information rather than read the entire content.\n\nTHINK IN CODE: Write code that processes FILE_CONTENT and console.log() only the answer. Don't read files into context to analyze mentally. Write robust, pure JavaScript — no npm deps, try/catch, null-safe. Node.js + Bun compatible.",
|
|
1101
1177
|
inputSchema: z.object({
|
|
1102
1178
|
path: z
|
|
1103
1179
|
.string()
|
|
@@ -1132,7 +1208,7 @@ server.registerTool("ctx_execute_file", {
|
|
|
1132
1208
|
}),
|
|
1133
1209
|
}, async ({ path, language, code, timeout, intent }) => {
|
|
1134
1210
|
// Security: check file path against Read deny patterns
|
|
1135
|
-
const pathDenied = checkFilePathDenyPolicy(path, "
|
|
1211
|
+
const pathDenied = checkFilePathDenyPolicy(path, "ctx_execute_file");
|
|
1136
1212
|
if (pathDenied)
|
|
1137
1213
|
return pathDenied;
|
|
1138
1214
|
// Security: check code parameter against Bash deny patterns
|
|
@@ -1267,6 +1343,15 @@ server.registerTool("ctx_index", {
|
|
|
1267
1343
|
isError: true,
|
|
1268
1344
|
});
|
|
1269
1345
|
}
|
|
1346
|
+
// Apply Read deny-policy to prevent indexing sensitive files into the
|
|
1347
|
+
// FTS5 store, which would otherwise be queryable via ctx_search and
|
|
1348
|
+
// exfiltrate content into the model's context (issue #442). Mirrors the
|
|
1349
|
+
// check ctx_execute_file already performs.
|
|
1350
|
+
if (path) {
|
|
1351
|
+
const pathDenied = checkFilePathDenyPolicy(path, "ctx_index");
|
|
1352
|
+
if (pathDenied)
|
|
1353
|
+
return pathDenied;
|
|
1354
|
+
}
|
|
1270
1355
|
try {
|
|
1271
1356
|
const resolvedPath = path ? resolveProjectPath(path) : undefined;
|
|
1272
1357
|
// Track the raw bytes being indexed (content or file)
|
|
@@ -1343,8 +1428,7 @@ server.registerTool("ctx_search", {
|
|
|
1343
1428
|
"Pass ALL search questions as queries array in ONE call. " +
|
|
1344
1429
|
"File-backed sources are auto-refreshed when the source file changes.\n\n" +
|
|
1345
1430
|
"TIPS: 2-4 specific terms per query. Use 'source' to scope results.\n\n" +
|
|
1346
|
-
"SESSION STATE: If skills, roles, or decisions were set earlier in this conversation, they are still active. Do not discard or contradict them
|
|
1347
|
-
"When reporting results — terse like caveman. Technical substance exact. Only fluff die. Pattern: [thing] [action] [reason]. [next step].",
|
|
1431
|
+
"SESSION STATE: If skills, roles, or decisions were set earlier in this conversation, they are still active. Do not discard or contradict them.",
|
|
1348
1432
|
inputSchema: z.object({
|
|
1349
1433
|
queries: z.preprocess(coerceJsonArray, z
|
|
1350
1434
|
.array(z.string())
|
|
@@ -1438,14 +1522,15 @@ server.registerTool("ctx_search", {
|
|
|
1438
1522
|
if (sort === "timeline") {
|
|
1439
1523
|
try {
|
|
1440
1524
|
const sessionsDir = getSessionDir();
|
|
1441
|
-
const
|
|
1525
|
+
const projectDir = getProjectDir();
|
|
1526
|
+
const dbFile = resolveSessionDbPath({ projectDir, sessionsDir });
|
|
1442
1527
|
if (existsSync(dbFile)) {
|
|
1443
1528
|
timelineDB = new SessionDB({ dbPath: dbFile });
|
|
1444
1529
|
}
|
|
1445
1530
|
}
|
|
1446
1531
|
catch { /* SessionDB unavailable — search ContentStore + auto-memory only */ }
|
|
1447
1532
|
}
|
|
1448
|
-
const configDir = _detectedAdapter?.getConfigDir() ?? (
|
|
1533
|
+
const configDir = _detectedAdapter?.getConfigDir() ?? resolveClaudeConfigRoot();
|
|
1449
1534
|
try {
|
|
1450
1535
|
for (const q of queryList) {
|
|
1451
1536
|
if (totalSize > MAX_TOTAL) {
|
|
@@ -1551,17 +1636,151 @@ function resolveGfmPluginPath() {
|
|
|
1551
1636
|
// Subprocess code that fetches a URL, detects Content-Type, and outputs a
|
|
1552
1637
|
// __CM_CT__:<type> marker on the first line so the handler can route to the
|
|
1553
1638
|
// appropriate indexing strategy. HTML is converted to markdown via Turndown.
|
|
1554
|
-
function buildFetchCode(url, outputPath) {
|
|
1639
|
+
export function buildFetchCode(url, outputPath) {
|
|
1555
1640
|
const turndownPath = JSON.stringify(resolveTurndownPath());
|
|
1556
1641
|
const gfmPath = JSON.stringify(resolveGfmPluginPath());
|
|
1557
1642
|
const escapedOutputPath = JSON.stringify(outputPath);
|
|
1643
|
+
// Embed classifyIp into the subprocess so the connect-time DNS lookup is
|
|
1644
|
+
// re-validated with the same policy as ssrfGuard. Without this, an attacker
|
|
1645
|
+
// can serve a public IP for the parent's pre-flight ssrfGuard lookup and
|
|
1646
|
+
// then a blocked IP (e.g. 169.254.169.254 IMDS) for the subprocess fetch's
|
|
1647
|
+
// own lookup — classic DNS rebinding across the parent/child boundary.
|
|
1648
|
+
const classifyIpSrc = classifyIp.toString();
|
|
1649
|
+
const strictMode = process.env.CTX_FETCH_STRICT === "1";
|
|
1558
1650
|
return `
|
|
1559
1651
|
const TurndownService = require(${turndownPath});
|
|
1560
1652
|
const { gfm } = require(${gfmPath});
|
|
1561
1653
|
const fs = require('fs');
|
|
1654
|
+
const dns = require('node:dns');
|
|
1655
|
+
const dnsPromises = require('node:dns/promises');
|
|
1562
1656
|
const url = ${JSON.stringify(url)};
|
|
1563
1657
|
const outputPath = ${escapedOutputPath};
|
|
1564
1658
|
|
|
1659
|
+
// Strip proxy env vars from this subprocess only. A configured outbound
|
|
1660
|
+
// proxy (HTTP_PROXY / HTTPS_PROXY / ALL_PROXY) would route fetch through
|
|
1661
|
+
// an arbitrary target — DNS resolution happens at the proxy and the
|
|
1662
|
+
// in-subprocess DNS rebinding guard never sees the rebound IP. The
|
|
1663
|
+
// sandbox fetch path has no legitimate need for an upstream proxy.
|
|
1664
|
+
delete process.env.HTTP_PROXY;
|
|
1665
|
+
delete process.env.HTTPS_PROXY;
|
|
1666
|
+
delete process.env.ALL_PROXY;
|
|
1667
|
+
delete process.env.http_proxy;
|
|
1668
|
+
delete process.env.https_proxy;
|
|
1669
|
+
delete process.env.all_proxy;
|
|
1670
|
+
delete process.env.npm_config_proxy;
|
|
1671
|
+
delete process.env.npm_config_https_proxy;
|
|
1672
|
+
|
|
1673
|
+
${classifyIpSrc}
|
|
1674
|
+
|
|
1675
|
+
const STRICT = ${JSON.stringify(strictMode)};
|
|
1676
|
+
|
|
1677
|
+
// SSRF rebinding defense: every dns.lookup call inside this subprocess
|
|
1678
|
+
// (including the one undici performs to connect the fetch socket) is
|
|
1679
|
+
// re-validated against the same policy ssrfGuard runs in the parent.
|
|
1680
|
+
// Even if a hostname rebinds between the parent's pre-flight check and
|
|
1681
|
+
// the subprocess's actual connect, the connect-time lookup re-classifies
|
|
1682
|
+
// every returned record and aborts before TCP if any verdict is "block".
|
|
1683
|
+
const _origLookup = dns.lookup;
|
|
1684
|
+
dns.lookup = function patchedLookup(hostname, options, callback) {
|
|
1685
|
+
if (typeof options === 'function') { callback = options; options = {}; }
|
|
1686
|
+
if (typeof options === 'number') { options = { family: options }; }
|
|
1687
|
+
const wantAll = options && options.all;
|
|
1688
|
+
const opts = Object.assign({}, options || {}, { all: true, verbatim: true });
|
|
1689
|
+
_origLookup(hostname, opts, function(err, records) {
|
|
1690
|
+
if (err) return callback(err);
|
|
1691
|
+
if (!Array.isArray(records)) {
|
|
1692
|
+
records = [{ address: records, family: (options && options.family) || 4 }];
|
|
1693
|
+
}
|
|
1694
|
+
for (var i = 0; i < records.length; i++) {
|
|
1695
|
+
var verdict = classifyIp(records[i].address);
|
|
1696
|
+
if (verdict === 'block' || (STRICT && verdict === 'private')) {
|
|
1697
|
+
return callback(new Error(
|
|
1698
|
+
'SSRF blocked at connect-time: ' + hostname +
|
|
1699
|
+
' resolves to ' + records[i].address +
|
|
1700
|
+
' (' + verdict + ')'
|
|
1701
|
+
));
|
|
1702
|
+
}
|
|
1703
|
+
}
|
|
1704
|
+
if (wantAll) callback(null, records);
|
|
1705
|
+
else callback(null, records[0].address, records[0].family);
|
|
1706
|
+
});
|
|
1707
|
+
};
|
|
1708
|
+
|
|
1709
|
+
// dns/promises is a separate function reference. Patching dns.lookup does
// NOT affect dnsPromises.lookup. Today undici's connect path uses callback
// dns.lookup so default fetch is covered, but the invariant is fragile —
// any future undici switch (or user code calling dnsPromises.lookup
// directly) would bypass the guard. Patch both to keep the contract.
const _origPromisesLookup = dnsPromises.lookup;
dnsPromises.lookup = async function patchedPromisesLookup(hostname, options) {
  // Fix: mirror the callback-side patch's numeric-options normalization.
  // dnsPromises.lookup(host, 4) is a legal family shorthand; without this,
  // Object.assign({}, 4) produced {} and silently dropped the caller's
  // family preference on the forwarded lookup.
  if (typeof options === 'number') { options = { family: options }; }
  // Force all/verbatim so every record is visible to the classifier.
  const opts = Object.assign({}, options || {}, { all: true, verbatim: true });
  const records = await _origPromisesLookup(hostname, opts);
  const list = Array.isArray(records) ? records : [records];
  for (let i = 0; i < list.length; i++) {
    const verdict = classifyIp(list[i].address);
    if (verdict === 'block' || (STRICT && verdict === 'private')) {
      throw new Error(
        'SSRF blocked at connect-time: ' + hostname +
        ' resolves to ' + list[i].address + ' (' + verdict + ')'
      );
    }
  }
  // Reply in the shape the caller asked for: full list with { all: true },
  // otherwise the promise API's single { address, family } record.
  return options && options.all
    ? list
    : { address: list[0].address, family: list[0].family };
};
|
|
1732
|
+
|
|
1733
|
+
// dns.resolve4 / dns.resolve6 use a different code path (no getaddrinfo,
// no /etc/hosts) than dns.lookup — they must be patched separately or the
// guard is trivially bypassed by any caller using dns.resolve* directly.
['resolve4', 'resolve6'].forEach(function patchResolve(name) {
  const _origResolve = dns[name];
  // Wrapper preserves both legal call shapes:
  // (hostname, callback) and (hostname, options, callback).
  dns[name] = function patchedResolve(hostname, options, cb) {
    if (typeof options === 'function') { cb = options; options = undefined; }
    _origResolve.call(dns, hostname, options || {}, function(err, addrs) {
      if (err) return cb(err);
      // With { ttl: true } each record is an { address, ttl } object rather
      // than a bare string — pick the right field before classifying.
      var withTtl = options && options.ttl;
      for (var i = 0; i < addrs.length; i++) {
        var ip = withTtl ? addrs[i].address : addrs[i];
        var v = classifyIp(ip);
        // Abort on the FIRST blocked record; a clean subset is not enough,
        // since the connecting socket may pick any record from the answer.
        if (v === 'block' || (STRICT && v === 'private')) {
          return cb(new Error(
            'SSRF blocked at connect-time: ' + hostname +
            ' resolves to ' + ip + ' (' + v + ')'
          ));
        }
      }
      // All records classified clean — pass the answer through untouched.
      cb(null, addrs);
    });
  };
});
|
|
1757
|
+
|
|
1758
|
+
// Generic dns.resolve is a polymorphic, rrtype-driven dispatcher. Node's
// internal implementation delegates A/AAAA queries to captured originals of
// dns.resolve4/dns.resolve6 — NOT to the exported references patched above —
// so the wrapper itself must be hooked or it silently bypasses the guard.
// Address-record answers are classified like everywhere else; non-address
// rrtypes (CNAME/MX/TXT/SRV/...) pass through untouched.
const _origResolveGeneric = dns.resolve;
dns.resolve = function patchedResolveGeneric(hostname, rrtype, cb) {
  // (hostname, cb) shape defaults rrtype to 'A', matching Node's behavior.
  if (typeof rrtype === 'function') { cb = rrtype; rrtype = 'A'; }
  _origResolveGeneric.call(dns, hostname, rrtype, function onRecords(err, records) {
    if (err) return cb(err);
    const isAddressAnswer = (rrtype === 'A' || rrtype === 'AAAA') && Array.isArray(records);
    if (isAddressAnswer) {
      // A/AAAA answers are arrays of bare IP strings.
      for (const ip of records) {
        const v = classifyIp(ip);
        if (v === 'block' || (STRICT && v === 'private')) {
          return cb(new Error(
            'SSRF blocked at connect-time: ' + hostname +
            ' resolves to ' + ip + ' (' + v + ')'
          ));
        }
      }
    }
    cb(null, records);
  });
};
|
|
1783
|
+
|
|
1565
1784
|
function emit(ct, content) {
|
|
1566
1785
|
// Write content to file to bypass executor stdout truncation (100KB limit).
|
|
1567
1786
|
// Only the content-type marker goes to stdout.
|
|
@@ -1569,8 +1788,60 @@ function emit(ct, content) {
|
|
|
1569
1788
|
console.log('__CM_CT__:' + ct);
|
|
1570
1789
|
}
|
|
1571
1790
|
|
|
1791
|
+
// Manual redirect handling: a 3xx Location header can rebind the subprocess
// fetch to an alternate host the parent's pre-flight ssrfGuard never saw.
// Even with the connect-time DNS patch, a redirect target that is a literal
// IP (e.g. http://169.254.169.254/) skips getaddrinfo entirely. Walk the
// chain manually so every hop runs through classifyIp before the next fetch.
const MAX_REDIRECTS = 5;
async function fetchWithManualRedirect(initialUrl) {
  let target = initialUrl;
  for (let hop = 0; hop <= MAX_REDIRECTS; hop++) {
    const resp = await fetch(target, { redirect: 'manual' });
    // Anything outside [300, 400) is a terminal response — hand it back.
    const isRedirect = resp.status >= 300 && resp.status < 400;
    if (!isRedirect) return resp;
    const location = resp.headers.get('location') || resp.headers.get('Location');
    if (!location) return resp;
    if (hop === MAX_REDIRECTS) {
      throw new Error('SSRF blocked: redirect chain exceeded ' + MAX_REDIRECTS + ' hops');
    }
    // Resolve relative Locations against the current hop's URL.
    let nextParsed;
    try {
      nextParsed = new URL(location, target);
    } catch (e) {
      throw new Error('SSRF blocked: invalid redirect Location: ' + location);
    }
    const scheme = nextParsed.protocol;
    if (scheme !== 'http:' && scheme !== 'https:') {
      throw new Error('SSRF blocked: redirect to non-http(s) scheme ' + scheme);
    }
    // If the redirect target is a literal IP, classify it directly — no DNS
    // lookup will fire and the connect-time guard would never see it.
    // (Strip IPv6 URL brackets before classification.)
    const hostname = nextParsed.hostname.replace(/^\[|\]$/g, '');
    const looksLikeIpLiteral = /^[0-9.]+$/.test(hostname) || hostname.includes(':');
    if (looksLikeIpLiteral) {
      const verdict = classifyIp(hostname);
      if (verdict === 'block' || (STRICT && verdict === 'private')) {
        throw new Error('SSRF blocked: redirect to ' + hostname + ' (' + verdict + ')');
      }
    } else {
      // Hostname target: resolve and classify every record. The patched
      // dns.lookup also fires on the next fetch's connect, but checking
      // here gives a clearer error and short-circuits before TCP setup.
      const records = await dnsPromises.lookup(hostname, { all: true, verbatim: true });
      for (const rec of records) {
        const verdict = classifyIp(rec.address);
        if (verdict === 'block' || (STRICT && verdict === 'private')) {
          throw new Error(
            'SSRF blocked: redirect target ' + hostname +
            ' resolves to ' + rec.address + ' (' + verdict + ')'
          );
        }
      }
    }
    target = nextParsed.toString();
  }
  throw new Error('SSRF blocked: redirect chain exceeded ' + MAX_REDIRECTS + ' hops');
}
|
|
1842
|
+
|
|
1572
1843
|
async function main() {
|
|
1573
|
-
const resp = await
|
|
1844
|
+
const resp = await fetchWithManualRedirect(url);
|
|
1574
1845
|
if (!resp.ok) { console.error("HTTP " + resp.status); process.exit(1); }
|
|
1575
1846
|
const contentType = resp.headers.get('content-type') || '';
|
|
1576
1847
|
|
|
@@ -1699,7 +1970,14 @@ async function ssrfGuard(rawUrl) {
|
|
|
1699
1970
|
*
|
|
1700
1971
|
* Exported (via the function name) so SSRF tests can exercise the matcher directly.
|
|
1701
1972
|
*/
|
|
1702
|
-
export function classifyIp(
|
|
1973
|
+
export function classifyIp(rawIp) {
|
|
1974
|
+
// RFC 6874 zone identifiers (`fe80::1%eth0`, URL-encoded `%25eth0`) must
|
|
1975
|
+
// be stripped BEFORE any prefix/equality classification. Without the strip,
|
|
1976
|
+
// a loopback `::1%eth0` no longer matches `lower === "::1"` and falls
|
|
1977
|
+
// through to "public" — silently bypassing the SSRF guard. Strip first,
|
|
1978
|
+
// classify second.
|
|
1979
|
+
const pctIdx = rawIp.indexOf("%");
|
|
1980
|
+
const ip = pctIdx === -1 ? rawIp : rawIp.slice(0, pctIdx);
|
|
1703
1981
|
const lower = ip.toLowerCase();
|
|
1704
1982
|
// IPv6 takes priority — check for `:` first so IPv4-mapped addresses
|
|
1705
1983
|
// (`::ffff:127.0.0.1`) don't get incorrectly routed through the IPv4 parser.
|
|
@@ -1857,8 +2135,7 @@ server.registerTool("ctx_fetch_and_index", {
|
|
|
1857
2135
|
" ✅ Use concurrency: 4-8 for: library docs sweep, multi-changelog scan, competitive pricing pages, multi-region docs, GitHub raw file pulls.\n" +
|
|
1858
2136
|
" ❌ Single URL → use the legacy {url, source} shape (concurrency irrelevant).\n" +
|
|
1859
2137
|
" Example: requests: [{url: 'https://react.dev/...', source: 'react'}, {url: 'https://vuejs.org/...', source: 'vue'}], concurrency: 5.\n" +
|
|
1860
|
-
"
|
|
1861
|
-
"When reporting results — terse like caveman. Technical substance exact. Only fluff die. Pattern: [thing] [action] [reason]. [next step].",
|
|
2138
|
+
" Fetches parallelize up to your concurrency setting; FTS5 indexing serializes the writes after (SQLite single-writer rule).",
|
|
1862
2139
|
inputSchema: z.object({
|
|
1863
2140
|
url: z.string().optional().describe("Single URL to fetch and index (legacy single-shape)"),
|
|
1864
2141
|
source: z
|
|
@@ -1930,6 +2207,16 @@ server.registerTool("ctx_fetch_and_index", {
|
|
|
1930
2207
|
if (v.kind === "cached") {
|
|
1931
2208
|
sessionStats.cacheHits++;
|
|
1932
2209
|
sessionStats.cacheBytesSaved += v.estimatedBytes;
|
|
2210
|
+
// D2 Phase 5/7 — cache-hit event emission. `bytes_avoided` is the
|
|
2211
|
+
// size of the cached payload that would have re-entered context
|
|
2212
|
+
// had the TTL window missed. Best-effort, off the hot path.
|
|
2213
|
+
const cachedBytes = v.estimatedBytes;
|
|
2214
|
+
const cachedLabel = v.label;
|
|
2215
|
+
setImmediate(() => emitCacheHitEvent({
|
|
2216
|
+
sessionDbPath: getSessionDbPath(),
|
|
2217
|
+
source: cachedLabel,
|
|
2218
|
+
bytesAvoided: cachedBytes,
|
|
2219
|
+
}));
|
|
1933
2220
|
finalized.push({ kind: "cached", label: v.label, chunkCount: v.chunkCount, ageStr: v.ageStr });
|
|
1934
2221
|
}
|
|
1935
2222
|
else if (v.kind === "fetch_error") {
|
|
@@ -2018,8 +2305,8 @@ server.registerTool("ctx_fetch_and_index", {
|
|
|
2018
2305
|
const cappedNote = capped
|
|
2019
2306
|
? ` cap=${effectiveConcurrency}/${cpus().length}cpu`
|
|
2020
2307
|
: "";
|
|
2021
|
-
//
|
|
2022
|
-
//
|
|
2308
|
+
// Status line: counts + sections + size, with singular/plural agreement
|
|
2309
|
+
// (count=1 → "1 error" not "1 errors") so the line stays grammatical.
|
|
2023
2310
|
const fmt = (n, sing, plur) => `${n} ${n === 1 ? sing : plur}`;
|
|
2024
2311
|
const headerLine = `fetched ${batch.length} c=${effectiveConcurrency}${cappedNote}. ` +
|
|
2025
2312
|
`ok=${fetchedCount} cache=${cachedCount} err=${errorCount}. ` +
|
|
@@ -2052,8 +2339,7 @@ server.registerTool("ctx_batch_execute", {
|
|
|
2052
2339
|
" ❌ Keep concurrency: 1 for: npm test, build, lint, image processing (CPU-bound), or commands sharing state (ports, lock files, same-repo writes).\n" +
|
|
2053
2340
|
" Example: [gh issue view 1, gh issue view 2, gh issue view 3] → concurrency: 3.\n" +
|
|
2054
2341
|
" Speedup depends on workload — applies to I/O wait, not CPU work.\n\n" +
|
|
2055
|
-
"THINK IN CODE — NON-NEGOTIABLE: When commands produce data you need to analyze, count, filter, compare, or transform — add a processing command that runs JavaScript and console.log() ONLY the answer. NEVER pull raw output into context to reason over. Concurrency parallelizes the FETCH; THINK IN CODE owns the PROCESSING. One programmed analysis replaces ten read-and-reason rounds. Pure JavaScript, Node.js built-ins (fs, path, child_process), try/catch, null-safe
|
|
2056
|
-
"When reporting results — terse like caveman. Technical substance exact. Only fluff die. Pattern: [thing] [action] [reason]. [next step].",
|
|
2342
|
+
"THINK IN CODE — NON-NEGOTIABLE: When commands produce data you need to analyze, count, filter, compare, or transform — add a processing command that runs JavaScript and console.log() ONLY the answer. NEVER pull raw output into context to reason over. Concurrency parallelizes the FETCH; THINK IN CODE owns the PROCESSING. One programmed analysis replaces ten read-and-reason rounds. Pure JavaScript, Node.js built-ins (fs, path, child_process), try/catch, null-safe.",
|
|
2057
2343
|
inputSchema: z.object({
|
|
2058
2344
|
commands: z.preprocess(coerceCommandsArray, z
|
|
2059
2345
|
.array(z.object({
|
|
@@ -2203,9 +2489,16 @@ server.registerTool("ctx_stats", {
|
|
|
2203
2489
|
// ONE call, ONE source — AnalyticsEngine.queryAll()
|
|
2204
2490
|
let text;
|
|
2205
2491
|
try {
|
|
2206
|
-
const
|
|
2207
|
-
|
|
2208
|
-
|
|
2492
|
+
const projectDir = getProjectDir();
|
|
2493
|
+
// Canonical hash + migration-aware path. The downstream
|
|
2494
|
+
// getConversationStats / getRealBytesStats reconstruct the DB
|
|
2495
|
+
// filename from worktreeHash; pass the SAME canonical hash that
|
|
2496
|
+
// resolveSessionDbPath used so they hit the same file.
|
|
2497
|
+
const dbHash = hashProjectDirCanonical(projectDir);
|
|
2498
|
+
const sessionDbPath = resolveSessionDbPath({
|
|
2499
|
+
projectDir,
|
|
2500
|
+
sessionsDir: getSessionDir(),
|
|
2501
|
+
});
|
|
2209
2502
|
if (existsSync(sessionDbPath)) {
|
|
2210
2503
|
const Database = loadDatabase();
|
|
2211
2504
|
const sdb = new Database(sessionDbPath, { readonly: true });
|
|
@@ -2217,8 +2510,43 @@ server.registerTool("ctx_stats", {
|
|
|
2217
2510
|
// Lifetime stats span every project's SessionDB + auto-memory dir
|
|
2218
2511
|
// (Bugs #3/#4); failures are absorbed inside getLifetimeStats so a
|
|
2219
2512
|
// corrupt sidecar can never break ctx_stats.
|
|
2220
|
-
|
|
2221
|
-
|
|
2513
|
+
// B3b Slice 3.1: scope to active adapter via getSessionDir() so
|
|
2514
|
+
// non-Claude platforms (Cursor, OpenCode, JetBrains, ...) read
|
|
2515
|
+
// from THEIR sessions dir — not the hardcoded ~/.claude/ default.
|
|
2516
|
+
// Mirrors the statusline contract at src/server.ts:540.
|
|
2517
|
+
const lifetime = getLifetimeStats({ sessionsDir: getSessionDir() });
|
|
2518
|
+
// B3b Slices 3.2-3.6: cross-adapter aggregation so the renderer
|
|
2519
|
+
// can show "Where it came from" + the "across N AI tools"
|
|
2520
|
+
// headline. Best-effort — failures absorbed so a corrupt
|
|
2521
|
+
// sidecar in any adapter dir cannot break ctx_stats.
|
|
2522
|
+
let multiAdapter;
|
|
2523
|
+
try {
|
|
2524
|
+
multiAdapter = getMultiAdapterLifetimeStats();
|
|
2525
|
+
}
|
|
2526
|
+
catch { /* never block ctx_stats */ }
|
|
2527
|
+
// F1: wire conversation + realBytes opts so formatReport renders the
|
|
2528
|
+
// narrative 5-section "kitap gibi" layout (timeline, ladder, receipt,
|
|
2529
|
+
// example cost, auto-memory). Without these, formatReport falls back
|
|
2530
|
+
// to the legacy active-session header. Best-effort — failures absorbed.
|
|
2531
|
+
// Resolve session_id: prefer env (CLAUDE_SESSION_ID), else most-recent
|
|
2532
|
+
// UUID session_id from session_events in this DB.
|
|
2533
|
+
let conversation;
|
|
2534
|
+
let realBytes;
|
|
2535
|
+
try {
|
|
2536
|
+
let sid = process.env.CLAUDE_SESSION_ID;
|
|
2537
|
+
if (!sid) {
|
|
2538
|
+
const row = sdb.prepare("SELECT session_id FROM session_events WHERE session_id LIKE '________-____-____-____-____________' ORDER BY created_at DESC LIMIT 1").get();
|
|
2539
|
+
sid = row?.session_id;
|
|
2540
|
+
}
|
|
2541
|
+
if (sid) {
|
|
2542
|
+
conversation = getConversationStats({ sessionId: sid, sessionsDir: getSessionDir(), worktreeHash: dbHash });
|
|
2543
|
+
const convReal = getRealBytesStats({ sessionId: sid, sessionsDir: getSessionDir(), worktreeHash: dbHash });
|
|
2544
|
+
const lifeReal = getRealBytesStats({ sessionsDir: getSessionDir() });
|
|
2545
|
+
realBytes = { conversation: convReal, lifetime: lifeReal };
|
|
2546
|
+
}
|
|
2547
|
+
}
|
|
2548
|
+
catch { /* never block ctx_stats */ }
|
|
2549
|
+
text = formatReport(report, VERSION, _latestVersion, { lifetime, mcpUsage, multiAdapter, conversation, realBytes });
|
|
2222
2550
|
}
|
|
2223
2551
|
finally {
|
|
2224
2552
|
sdb.close();
|
|
@@ -2229,8 +2557,13 @@ server.registerTool("ctx_stats", {
|
|
|
2229
2557
|
// Lifetime still meaningful (other projects, auto-memory) so include it.
|
|
2230
2558
|
const engine = new AnalyticsEngine(createMinimalDb());
|
|
2231
2559
|
const report = engine.queryAll(sessionStats);
|
|
2232
|
-
const lifetime = getLifetimeStats();
|
|
2233
|
-
|
|
2560
|
+
const lifetime = getLifetimeStats({ sessionsDir: getSessionDir() });
|
|
2561
|
+
let multiAdapter;
|
|
2562
|
+
try {
|
|
2563
|
+
multiAdapter = getMultiAdapterLifetimeStats();
|
|
2564
|
+
}
|
|
2565
|
+
catch { /* never block ctx_stats */ }
|
|
2566
|
+
text = formatReport(report, VERSION, _latestVersion, { lifetime, multiAdapter });
|
|
2234
2567
|
}
|
|
2235
2568
|
}
|
|
2236
2569
|
catch {
|
|
@@ -2239,10 +2572,15 @@ server.registerTool("ctx_stats", {
|
|
|
2239
2572
|
const report = engine.queryAll(sessionStats);
|
|
2240
2573
|
let lifetime;
|
|
2241
2574
|
try {
|
|
2242
|
-
lifetime = getLifetimeStats();
|
|
2575
|
+
lifetime = getLifetimeStats({ sessionsDir: getSessionDir() });
|
|
2576
|
+
}
|
|
2577
|
+
catch { /* never block ctx_stats */ }
|
|
2578
|
+
let multiAdapter;
|
|
2579
|
+
try {
|
|
2580
|
+
multiAdapter = getMultiAdapterLifetimeStats();
|
|
2243
2581
|
}
|
|
2244
2582
|
catch { /* never block ctx_stats */ }
|
|
2245
|
-
text = formatReport(report, VERSION, _latestVersion, lifetime ? { lifetime } : undefined);
|
|
2583
|
+
text = formatReport(report, VERSION, _latestVersion, (lifetime || multiAdapter) ? { lifetime, multiAdapter } : undefined);
|
|
2246
2584
|
}
|
|
2247
2585
|
return trackResponse("ctx_stats", {
|
|
2248
2586
|
content: [{ type: "text", text }],
|
|
@@ -2323,13 +2661,30 @@ server.registerTool("ctx_doctor", {
|
|
|
2323
2661
|
catch { /* best effort */ }
|
|
2324
2662
|
}
|
|
2325
2663
|
}
|
|
2326
|
-
//
|
|
2327
|
-
const
|
|
2328
|
-
if (
|
|
2329
|
-
|
|
2664
|
+
// Hooks
|
|
2665
|
+
const diagnosticAdapter = await getDiagnosticAdapter();
|
|
2666
|
+
if (diagnosticAdapter) {
|
|
2667
|
+
for (const result of diagnosticAdapter.validateHooks(pluginRoot)) {
|
|
2668
|
+
const prefix = result.status === "pass" ? "[OK]" : result.status === "warn" ? "[WARN]" : "[FAIL]";
|
|
2669
|
+
const fix = result.fix ? ` — fix: ${result.fix}` : "";
|
|
2670
|
+
lines.push(`${prefix} ${result.check}: ${result.message}${fix}`);
|
|
2671
|
+
}
|
|
2672
|
+
const hookScriptPaths = getHookScriptPaths(diagnosticAdapter, pluginRoot);
|
|
2673
|
+
if (hookScriptPaths.length === 0) {
|
|
2674
|
+
lines.push("[OK] Hook scripts: no direct .mjs script paths to verify");
|
|
2675
|
+
}
|
|
2676
|
+
for (const scriptPath of hookScriptPaths) {
|
|
2677
|
+
const hookPath = resolve(pluginRoot, scriptPath);
|
|
2678
|
+
if (existsSync(hookPath)) {
|
|
2679
|
+
lines.push(`[OK] Hook script: PASS — ${hookPath}`);
|
|
2680
|
+
}
|
|
2681
|
+
else {
|
|
2682
|
+
lines.push(`[FAIL] Hook script: FAIL — not found at ${hookPath}`);
|
|
2683
|
+
}
|
|
2684
|
+
}
|
|
2330
2685
|
}
|
|
2331
2686
|
else {
|
|
2332
|
-
lines.push(
|
|
2687
|
+
lines.push("[WARN] Hooks: adapter detection unavailable");
|
|
2333
2688
|
}
|
|
2334
2689
|
// Version
|
|
2335
2690
|
lines.push(`[OK] Version: v${VERSION}`);
|
|
@@ -2355,16 +2710,11 @@ server.registerTool("ctx_upgrade", {
|
|
|
2355
2710
|
const sessDir = getSessionDir();
|
|
2356
2711
|
const insightCacheDir = join(dirname(sessDir), "insight-cache");
|
|
2357
2712
|
if (existsSync(insightCacheDir)) {
|
|
2358
|
-
// Kill any running insight server first
|
|
2359
|
-
|
|
2360
|
-
|
|
2361
|
-
|
|
2362
|
-
|
|
2363
|
-
else {
|
|
2364
|
-
execSync("lsof -ti:4747 | xargs kill 2>/dev/null", { stdio: "pipe" });
|
|
2365
|
-
}
|
|
2366
|
-
}
|
|
2367
|
-
catch { /* no process to kill */ }
|
|
2713
|
+
// Kill any running insight server first via the shared helper —
|
|
2714
|
+
// this is locale-independent on Windows (PR #469) and isolates per-pid
|
|
2715
|
+
// failures. We ignore the structured result: cache cleanup is
|
|
2716
|
+
// best-effort and must never block ctx_upgrade.
|
|
2717
|
+
killProcessOnPort(4747);
|
|
2368
2718
|
rmSync(insightCacheDir, { recursive: true, force: true });
|
|
2369
2719
|
}
|
|
2370
2720
|
}
|
|
@@ -2380,13 +2730,11 @@ server.registerTool("ctx_upgrade", {
|
|
|
2380
2730
|
// Inline fallback: neither CLI file exists (e.g. marketplace installs).
|
|
2381
2731
|
// Generate a self-contained node -e script that performs the upgrade.
|
|
2382
2732
|
const repoUrl = "https://github.com/mksglu/context-mode.git";
|
|
2383
|
-
const copyDirs = ["build", "hooks", "skills", "scripts", ".claude-plugin"];
|
|
2384
|
-
const copyFiles = ["start.mjs", "server.bundle.mjs", "cli.bundle.mjs", "package.json"];
|
|
2385
2733
|
// Write inline script to a temp .mjs file — avoids quote-escaping issues
|
|
2386
2734
|
// across cmd.exe, PowerShell, and bash (node -e '...' breaks on Windows).
|
|
2387
2735
|
const scriptLines = [
|
|
2388
2736
|
`import{execFileSync}from"node:child_process";`,
|
|
2389
|
-
`import{cpSync,rmSync,existsSync,mkdtempSync}from"node:fs";`,
|
|
2737
|
+
`import{cpSync,rmSync,existsSync,mkdtempSync,readFileSync,writeFileSync}from"node:fs";`,
|
|
2390
2738
|
`import{join}from"node:path";`,
|
|
2391
2739
|
`import{tmpdir}from"node:os";`,
|
|
2392
2740
|
`const P=${JSON.stringify(pluginRoot)};`,
|
|
@@ -2398,9 +2746,11 @@ server.registerTool("ctx_upgrade", {
|
|
|
2398
2746
|
`execFileSync(process.platform==="win32"?"npm.cmd":"npm",["install"],{cwd:T,stdio:"inherit",shell:process.platform==="win32"});`,
|
|
2399
2747
|
`execFileSync(process.platform==="win32"?"npm.cmd":"npm",["run","build"],{cwd:T,stdio:"inherit",shell:process.platform==="win32"});`,
|
|
2400
2748
|
`console.log("- [x] Built from source");`,
|
|
2401
|
-
|
|
2402
|
-
...
|
|
2403
|
-
`
|
|
2749
|
+
`const pkg=JSON.parse(readFileSync(join(T,"package.json"),"utf8"));`,
|
|
2750
|
+
`const items=[...(Array.isArray(pkg.files)?pkg.files:[]),"src","package.json"];`,
|
|
2751
|
+
`for(const item of items){const from=join(T,item);const to=join(P,item);if(existsSync(from)){rmSync(to,{recursive:true,force:true});cpSync(from,to,{recursive:true,force:true});}}`,
|
|
2752
|
+
`writeFileSync(join(P,".mcp.json"),JSON.stringify({mcpServers:{"context-mode":{command:"node",args:["\${CLAUDE_PLUGIN_ROOT}/start.mjs"]}}},null,2)+"\\n");`,
|
|
2753
|
+
`console.log("- [x] Copied package files");`,
|
|
2404
2754
|
`execFileSync(process.platform==="win32"?"npm.cmd":"npm",["install","--production"],{cwd:P,stdio:"inherit",shell:process.platform==="win32"});`,
|
|
2405
2755
|
`console.log("- [x] Installed production dependencies");`,
|
|
2406
2756
|
`console.log("## context-mode upgrade complete");`,
|
|
@@ -2462,76 +2812,40 @@ server.registerTool("ctx_purge", {
|
|
|
2462
2812
|
}],
|
|
2463
2813
|
});
|
|
2464
2814
|
}
|
|
2465
|
-
|
|
2466
|
-
//
|
|
2815
|
+
// Close the persistent FTS5 content store handle BEFORE delegating to
|
|
2816
|
+
// purgeSession so the store's lock is released on Windows. The handle
|
|
2817
|
+
// is recreated lazily on the next getStore() call.
|
|
2818
|
+
let storePathForPurge;
|
|
2819
|
+
try {
|
|
2820
|
+
storePathForPurge = getStorePath();
|
|
2821
|
+
}
|
|
2822
|
+
catch { /* best effort — store path may be unresolvable on fresh install */ }
|
|
2467
2823
|
if (_store) {
|
|
2468
|
-
let storeFound = false;
|
|
2469
2824
|
try {
|
|
2470
2825
|
_store.cleanup();
|
|
2471
|
-
storeFound = true;
|
|
2472
2826
|
}
|
|
2473
2827
|
catch { /* best effort */ }
|
|
2474
2828
|
_store = null;
|
|
2475
|
-
if (storeFound)
|
|
2476
|
-
deleted.push("knowledge base (FTS5)");
|
|
2477
|
-
}
|
|
2478
|
-
else {
|
|
2479
|
-
const dbPath = getStorePath();
|
|
2480
|
-
let found = false;
|
|
2481
|
-
for (const suffix of ["", "-wal", "-shm"]) {
|
|
2482
|
-
try {
|
|
2483
|
-
unlinkSync(dbPath + suffix);
|
|
2484
|
-
found = true;
|
|
2485
|
-
}
|
|
2486
|
-
catch { /* file may not exist */ }
|
|
2487
|
-
}
|
|
2488
|
-
if (found)
|
|
2489
|
-
deleted.push("knowledge base (FTS5)");
|
|
2490
|
-
}
|
|
2491
|
-
// 2. Wipe legacy shared content DB (~/.context-mode/content/<hash>.db)
|
|
2492
|
-
try {
|
|
2493
|
-
const legacyPath = join(homedir(), ".context-mode", "content", `${hashProjectDir()}.db`);
|
|
2494
|
-
for (const suffix of ["", "-wal", "-shm"]) {
|
|
2495
|
-
try {
|
|
2496
|
-
unlinkSync(legacyPath + suffix);
|
|
2497
|
-
}
|
|
2498
|
-
catch { /* ignore */ }
|
|
2499
|
-
}
|
|
2500
|
-
}
|
|
2501
|
-
catch { /* best effort */ }
|
|
2502
|
-
// 3. Wipe session events DB (analytics, metadata, resume snapshots)
|
|
2503
|
-
try {
|
|
2504
|
-
const dbHash = hashProjectDir();
|
|
2505
|
-
const worktreeSuffix = getWorktreeSuffix();
|
|
2506
|
-
const sessDir = getSessionDir();
|
|
2507
|
-
const sessDbPath = join(sessDir, `${dbHash}${worktreeSuffix}.db`);
|
|
2508
|
-
const eventsPath = join(sessDir, `${dbHash}${worktreeSuffix}-events.md`);
|
|
2509
|
-
const cleanupFlag = join(sessDir, `${dbHash}${worktreeSuffix}.cleanup`);
|
|
2510
|
-
let sessDbFound = false;
|
|
2511
|
-
for (const suffix of ["", "-wal", "-shm"]) {
|
|
2512
|
-
try {
|
|
2513
|
-
unlinkSync(sessDbPath + suffix);
|
|
2514
|
-
sessDbFound = true;
|
|
2515
|
-
}
|
|
2516
|
-
catch { /* ignore */ }
|
|
2517
|
-
}
|
|
2518
|
-
if (sessDbFound)
|
|
2519
|
-
deleted.push("session events DB");
|
|
2520
|
-
let eventsFound = false;
|
|
2521
|
-
try {
|
|
2522
|
-
unlinkSync(eventsPath);
|
|
2523
|
-
eventsFound = true;
|
|
2524
|
-
}
|
|
2525
|
-
catch { /* ignore */ }
|
|
2526
|
-
if (eventsFound)
|
|
2527
|
-
deleted.push("session events markdown");
|
|
2528
|
-
try {
|
|
2529
|
-
unlinkSync(cleanupFlag);
|
|
2530
|
-
}
|
|
2531
|
-
catch { /* ignore */ }
|
|
2532
2829
|
}
|
|
2533
|
-
|
|
2534
|
-
//
|
|
2830
|
+
// FTS5 store: pass contentDir so purgeSession sweeps BOTH canonical
|
|
2831
|
+
// and legacy raw-casing variants (dual-hash, mirrors session events).
|
|
2832
|
+
// storePath is also passed for the rare case where the resolver picked
|
|
2833
|
+
// an absolute path that differs from the dual-hash pair (e.g. caller
|
|
2834
|
+
// pre-migrated). Both paths are de-duped during unlink.
|
|
2835
|
+
const contentDir = storePathForPurge ? dirname(storePathForPurge) : undefined;
|
|
2836
|
+
const { deleted } = purgeSession({
|
|
2837
|
+
projectDir: getProjectDir(),
|
|
2838
|
+
sessionsDir: getSessionDir(),
|
|
2839
|
+
storePath: storePathForPurge,
|
|
2840
|
+
contentDir,
|
|
2841
|
+
legacyContentDir: join(homedir(), ".context-mode", "content"),
|
|
2842
|
+
// hashProjectDirLegacy mirrors the deployed (≤ v1.0.111) raw-casing
|
|
2843
|
+
// hash that named files under ~/.context-mode/content/. Using the
|
|
2844
|
+
// legacy hash here is correct: that pre-pre-legacy directory was
|
|
2845
|
+
// never migrated and still uses raw casing.
|
|
2846
|
+
contentHash: hashProjectDirLegacy(getProjectDir()),
|
|
2847
|
+
});
|
|
2848
|
+
// Reset in-memory session stats
|
|
2535
2849
|
sessionStats.calls = {};
|
|
2536
2850
|
sessionStats.bytesReturned = {};
|
|
2537
2851
|
sessionStats.bytesIndexed = 0;
|
|
@@ -2554,6 +2868,173 @@ server.registerTool("ctx_purge", {
|
|
|
2554
2868
|
}],
|
|
2555
2869
|
});
|
|
2556
2870
|
});
|
|
2871
|
+
// Hard upper bound on every helper-internal spawnSync call. Caps tail-latency
// when an external binary hangs (xdg-open waiting for an X11 session, lsof
// stalling on /proc, taskkill blocking on an unresponsive process, etc.) so
// the MCP tool surfaces a diagnostic instead of blocking the agent loop.
// 5s is comfortably above the 99th-percentile completion of every command we
// invoke; anything past that is hung.
const HELPER_SPAWN_TIMEOUT_MS = 5000;

// Returns the argv attempts for opening `url` on `platform`, in fall-back order.
// Pure data — no I/O.
export function browserOpenArgv(url, platform) {
  switch (platform) {
    case "darwin":
      return [{ cmd: "open", args: [url] }];
    case "win32":
      // `start` is a cmd.exe builtin; the empty title arg ("") prevents the URL
      // from being consumed as the window title.
      return [{ cmd: "cmd", args: ["/c", "start", "", url] }];
    default:
      // linux/bsd: try xdg-open, then sensible-browser (Debian/Ubuntu).
      return [
        { cmd: "xdg-open", args: [url] },
        { cmd: "sensible-browser", args: [url] },
      ];
  }
}

// Opens a browser synchronously, waiting for each attempt to complete.
// Returns a structured result so callers can surface auto-open failures
// to the user instead of falsely reporting success. `runner` is injectable
// for testing; it defaults to spawnSync.
export function openBrowserSync(url, platform = process.platform, runner = spawnSync) {
  const failures = [];
  for (const attempt of browserOpenArgv(url, platform)) {
    const { cmd, args } = attempt;
    try {
      const outcome = runner(cmd, args, { stdio: "ignore", timeout: HELPER_SPAWN_TIMEOUT_MS });
      // Only a clean zero exit counts as success; a signal-kill (status
      // === null) or non-zero status falls through to the next candidate.
      const succeeded = !outcome.error && outcome.status === 0;
      if (succeeded) {
        return { ok: true, method: cmd };
      }
      const reason = outcome.error?.message ?? `status=${outcome.status === null ? "signaled" : outcome.status}`;
      failures.push(`${cmd}: ${reason}`);
    } catch (e) {
      failures.push(`${cmd}: ${e instanceof Error ? e.message : String(e)}`);
    }
  }
  return { ok: false, method: "none", reason: failures.join("; ") };
}
|
|
2916
|
+
// Kills any process listening on `port`. Returns a structured result so
|
|
2917
|
+
// the caller can distinguish between (a) port was free, (b) kill succeeded,
|
|
2918
|
+
// (c) kill failed (perms, missing binary, or per-pid failure mid-loop).
|
|
2919
|
+
//
|
|
2920
|
+
// On Windows the netstat parser is locale-independent: the STATE column
|
|
2921
|
+
// ("LISTENING" / "ESTABLISHED" / ...) is translated on non-English Windows
|
|
2922
|
+
// (Windows-FR shows "À l'écoute", Windows-DE "ABHÖREN", etc.), but the REMOTE
|
|
2923
|
+
// ADDRESS column is not. A listening TCP socket always has remote
|
|
2924
|
+
// "0.0.0.0:0" (IPv4) or "[::]:0" (IPv6); a connected one has a real
|
|
2925
|
+
// addr:port. We therefore key off the remote column instead of the state
|
|
2926
|
+
// string. This also rules out the pre-fix bug where matching only the local
|
|
2927
|
+
// port number cross-matched a remote :port from an outbound connection and
|
|
2928
|
+
// taskkill'd an unrelated process.
|
|
2929
|
+
/**
 * Kill every process LISTENING on local TCP `port` (best-effort; never throws).
 *
 * @param {number} port - TCP port to reclaim; must be an integer in [1, 65535].
 * @param {string} [platform] - Injectable for tests; defaults to process.platform.
 * @param {Function} [runner] - spawnSync-compatible runner, injectable for tests.
 * @returns {{ killedPids: string[], attemptedPids: string[], errors: string[] }}
 *   Empty attemptedPids with empty errors means the port was already free;
 *   attempts with zero kills means every kill failed (details in `errors`).
 */
export function killProcessOnPort(port, platform = process.platform, runner = spawnSync) {
    const result = { killedPids: [], attemptedPids: [], errors: [] };
    if (!Number.isInteger(port) || port < 1 || port > 65535) {
        result.errors.push(`invalid port: ${port}`);
        return result;
    }
    try {
        if (platform === "win32") {
            const r = runner("netstat", ["-ano"], {
                encoding: "utf-8",
                stdio: ["ignore", "pipe", "ignore"],
                timeout: HELPER_SPAWN_TIMEOUT_MS,
            });
            if (r.error) {
                result.errors.push(`netstat: ${r.error.message}`);
                return result;
            }
            if (r.status !== 0 || typeof r.stdout !== "string")
                return result;
            const portSuffix = `:${port}`;
            const pids = new Set();
            for (const rawLine of r.stdout.split(/\r?\n/)) {
                const line = rawLine.trim();
                if (!line)
                    continue;
                const tokens = line.split(/\s+/);
                // netstat -ano LISTENING row (en-US): "TCP 0.0.0.0:4747 0.0.0.0:0 LISTENING 1234"
                // The STATE column is locale-translated and may itself contain spaces
                // (Windows-FR `À l'écoute` splits into two tokens), so we cannot index
                // STATE by position. PID is always the trailing column; PROTO/LOCAL/
                // REMOTE are the first three. We anchor on those + a remote-wildcard
                // check that's locale-independent.
                if (tokens.length < 5)
                    continue;
                const proto = tokens[0];
                const local = tokens[1];
                const remote = tokens[2];
                const pid = tokens[tokens.length - 1];
                if (proto !== "TCP")
                    continue;
                // Anchor on ":PORT" (colon included) so :14747 never matches :4747.
                if (!local.endsWith(portSuffix))
                    continue;
                // Listening sockets carry a wildcard remote; anything else is a
                // connection (and matching it would kill an unrelated process).
                if (remote !== "0.0.0.0:0" && remote !== "[::]:0")
                    continue;
                if (!/^\d+$/.test(pid))
                    continue;
                pids.add(pid);
            }
            for (const pid of pids) {
                result.attemptedPids.push(pid);
                try {
                    const k = runner("taskkill", ["/F", "/PID", pid], {
                        stdio: "ignore",
                        timeout: HELPER_SPAWN_TIMEOUT_MS,
                    });
                    if (k.error || k.status !== 0) {
                        result.errors.push(`taskkill ${pid}: ${k.error?.message ?? `status=${k.status}`}`);
                    }
                    else {
                        result.killedPids.push(pid);
                    }
                }
                catch (e) {
                    result.errors.push(`taskkill ${pid}: ${e instanceof Error ? e.message : String(e)}`);
                }
            }
        }
        else {
            // FIX: the previous `lsof -ti :PORT` matched ANY socket whose local
            // OR remote port equals PORT — an unrelated client with an outbound
            // connection to remote :PORT would match and get killed, the exact
            // cross-match bug the win32 branch guards against with its
            // remote-wildcard check. Restrict to TCP sockets in LISTEN state
            // instead; -nP skips DNS/service-name resolution so the probe
            // cannot stall on lookups.
            const r = runner("lsof", ["-nP", "-t", `-iTCP:${port}`, "-sTCP:LISTEN"], {
                encoding: "utf-8",
                stdio: ["ignore", "pipe", "ignore"],
                timeout: HELPER_SPAWN_TIMEOUT_MS,
            });
            if (r.error) {
                // ENOENT (lsof not installed) is a real diagnostic; surface it.
                result.errors.push(`lsof: ${r.error.message}`);
                return result;
            }
            // lsof exits 1 with empty stdout when the port is free — not an error.
            if (r.status !== 0 || typeof r.stdout !== "string")
                return result;
            // Dedupe: a dual-stack (IPv4+IPv6) listener can report its PID twice.
            const pids = [...new Set(r.stdout.split(/\r?\n/).filter(p => /^\d+$/.test(p)))];
            for (const pid of pids) {
                result.attemptedPids.push(pid);
                try {
                    const k = runner("kill", [pid], {
                        stdio: "ignore",
                        timeout: HELPER_SPAWN_TIMEOUT_MS,
                    });
                    if (k.error || k.status !== 0) {
                        result.errors.push(`kill ${pid}: ${k.error?.message ?? `status=${k.status}`}`);
                    }
                    else {
                        result.killedPids.push(pid);
                    }
                }
                catch (e) {
                    result.errors.push(`kill ${pid}: ${e instanceof Error ? e.message : String(e)}`);
                }
            }
        }
    }
    catch (e) {
        // Catch-all so the caller never has to wrap this in try/catch; the
        // result object is the whole error channel.
        result.errors.push(e instanceof Error ? e.message : String(e));
    }
    return result;
}
|
|
2557
3038
|
// ── ctx-insight: analytics dashboard ──────────────────────────────────────────
|
|
2558
3039
|
server.registerTool("ctx_insight", {
|
|
2559
3040
|
title: "Open Insight Dashboard",
|
|
@@ -2562,7 +3043,7 @@ server.registerTool("ctx_insight", {
|
|
|
2562
3043
|
"parallel work patterns, project focus, and actionable insights. " +
|
|
2563
3044
|
"First run installs dependencies (~30s). Subsequent runs open instantly.",
|
|
2564
3045
|
inputSchema: z.object({
|
|
2565
|
-
port: z.coerce.number().optional().describe("Port to serve on (default: 4747)"),
|
|
3046
|
+
port: z.coerce.number().int().min(1).max(65535).optional().describe("Port to serve on (default: 4747)"),
|
|
2566
3047
|
sessionDir: z.string().optional().describe("Override INSIGHT_SESSION_DIR: directory containing context-mode session .db files"),
|
|
2567
3048
|
contentDir: z.string().optional().describe("Override INSIGHT_CONTENT_DIR: directory containing context-mode content/index .db files"),
|
|
2568
3049
|
insightSessionDir: z.string().optional().describe("Alias for sessionDir / INSIGHT_SESSION_DIR"),
|
|
@@ -2656,34 +3137,39 @@ server.registerTool("ctx_insight", {
|
|
|
2656
3137
|
if (portOccupied && sourceUpdated) {
|
|
2657
3138
|
// Source was updated but stale server is running on port — kill it so fresh code runs
|
|
2658
3139
|
steps.push("Killing stale dashboard server (source updated)...");
|
|
2659
|
-
|
|
2660
|
-
|
|
2661
|
-
|
|
2662
|
-
|
|
2663
|
-
|
|
2664
|
-
|
|
2665
|
-
|
|
2666
|
-
|
|
3140
|
+
const kill = killProcessOnPort(port);
|
|
3141
|
+
if (kill.attemptedPids.length > 0 && kill.killedPids.length === 0) {
|
|
3142
|
+
// Tried to kill, every attempt failed (perms, race, missing binary).
|
|
3143
|
+
// Surface so the agent doesn't loop on the same port forever.
|
|
3144
|
+
return trackResponse("ctx_insight", {
|
|
3145
|
+
content: [{
|
|
3146
|
+
type: "text",
|
|
3147
|
+
text: `Could not free port ${port} (kill failed for ${kill.attemptedPids.join(", ")}: ${kill.errors.join("; ")}). Try ctx_insight({ port: ${port + 1} }) or stop the process manually.`,
|
|
3148
|
+
}],
|
|
3149
|
+
});
|
|
2667
3150
|
}
|
|
2668
|
-
|
|
2669
|
-
|
|
3151
|
+
if (kill.errors.length > 0 && kill.attemptedPids.length === 0) {
|
|
3152
|
+
// Couldn't even probe the port (e.g. lsof not installed).
|
|
3153
|
+
return trackResponse("ctx_insight", {
|
|
3154
|
+
content: [{
|
|
3155
|
+
type: "text",
|
|
3156
|
+
text: `Cannot reclaim port ${port}: ${kill.errors.join("; ")}. Stop the process manually or pick another port.`,
|
|
3157
|
+
}],
|
|
3158
|
+
});
|
|
3159
|
+
}
|
|
3160
|
+
await new Promise(r => setTimeout(r, 500)); // Wait for port to free
|
|
3161
|
+
steps.push(`Stale server killed (${kill.killedPids.length} pid${kill.killedPids.length === 1 ? "" : "s"}).`);
|
|
2670
3162
|
}
|
|
2671
3163
|
else if (portOccupied) {
|
|
2672
3164
|
// Source unchanged, server is running fine — just open browser
|
|
2673
3165
|
steps.push("Dashboard already running.");
|
|
2674
3166
|
const url = `http://localhost:${port}`;
|
|
2675
|
-
const
|
|
2676
|
-
|
|
2677
|
-
|
|
2678
|
-
|
|
2679
|
-
else if (platform === "win32")
|
|
2680
|
-
execSync(`start "" "${url}"`, { stdio: "pipe" });
|
|
2681
|
-
else
|
|
2682
|
-
execSync(`xdg-open "${url}" 2>/dev/null || sensible-browser "${url}" 2>/dev/null`, { stdio: "pipe" });
|
|
2683
|
-
}
|
|
2684
|
-
catch { /* browser open is best-effort */ }
|
|
3167
|
+
const open = openBrowserSync(url);
|
|
3168
|
+
const tail = open.ok
|
|
3169
|
+
? ""
|
|
3170
|
+
: ` (auto-open failed: ${open.reason}; navigate manually)`;
|
|
2685
3171
|
return trackResponse("ctx_insight", {
|
|
2686
|
-
content: [{ type: "text", text: `Dashboard already running at
|
|
3172
|
+
content: [{ type: "text", text: `Dashboard already running at ${url}${tail}` }],
|
|
2687
3173
|
});
|
|
2688
3174
|
}
|
|
2689
3175
|
// Kill any previous insight child this MCP spawned (e.g. re-invocation).
|
|
@@ -2739,17 +3225,9 @@ server.registerTool("ctx_insight", {
|
|
|
2739
3225
|
}
|
|
2740
3226
|
// Open browser (cross-platform)
|
|
2741
3227
|
const url = `http://localhost:${port}`;
|
|
2742
|
-
const
|
|
2743
|
-
|
|
2744
|
-
|
|
2745
|
-
execSync(`open "${url}"`, { stdio: "pipe" });
|
|
2746
|
-
else if (platform === "win32")
|
|
2747
|
-
execSync(`start "" "${url}"`, { stdio: "pipe" });
|
|
2748
|
-
else
|
|
2749
|
-
execSync(`xdg-open "${url}" 2>/dev/null || sensible-browser "${url}" 2>/dev/null`, { stdio: "pipe" });
|
|
2750
|
-
}
|
|
2751
|
-
catch { /* browser open is best-effort */ }
|
|
2752
|
-
steps.push(`Dashboard running at ${url}`);
|
|
3228
|
+
const open = openBrowserSync(url);
|
|
3229
|
+
const openTail = open.ok ? "" : ` (auto-open failed: ${open.reason}; navigate manually)`;
|
|
3230
|
+
steps.push(`Dashboard running at ${url}${openTail}`);
|
|
2753
3231
|
return trackResponse("ctx_insight", {
|
|
2754
3232
|
content: [{
|
|
2755
3233
|
type: "text",
|