grepmax 0.15.3 → 0.15.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/add.js +54 -0
- package/dist/commands/watch.js +30 -4
- package/dist/lib/daemon/daemon.js +124 -40
- package/dist/lib/daemon/ipc-handler.js +10 -2
- package/dist/lib/index/ignore-patterns.js +1 -0
- package/dist/lib/utils/daemon-client.js +27 -3
- package/package.json +1 -1
- package/plugins/grepmax/.claude-plugin/plugin.json +1 -1
- package/plugins/grepmax/hooks/stop.js +9 -6
package/dist/commands/add.js
CHANGED
@@ -43,6 +43,8 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.add = void 0;
+const fs = __importStar(require("node:fs"));
+const os = __importStar(require("node:os"));
 const path = __importStar(require("node:path"));
 const commander_1 = require("commander");
 const grammar_loader_1 = require("../lib/index/grammar-loader");

@@ -56,10 +58,41 @@ const project_marker_1 = require("../lib/utils/project-marker");
 const project_registry_1 = require("../lib/utils/project-registry");
 const project_root_1 = require("../lib/utils/project-root");
 const watcher_launcher_1 = require("../lib/utils/watcher-launcher");
+function getBlockedRoots() {
+    const home = os.homedir();
+    return new Set([
+        home,
+        path.dirname(home),
+        "/",
+        "/tmp",
+        "/private",
+        "/private/tmp",
+        "/private/var",
+        "/var",
+        "/usr",
+        "/opt",
+        "/etc",
+        "/System",
+        "/Library",
+        "/Applications",
+    ].map((p) => path.resolve(p)));
+}
+function logBlockedAttempt(reason, attempted, extra) {
+    var _a, _b;
+    try {
+        const logPath = path.join(os.homedir(), ".gmax", "logs", "blocked-add.log");
+        fs.mkdirSync(path.dirname(logPath), { recursive: true });
+        const entry = Object.assign({ ts: new Date().toISOString(), reason,
+            attempted, cwd: process.cwd(), pid: process.pid, ppid: process.ppid, argv: process.argv, env_claude_session: (_a = process.env.CLAUDE_SESSION_ID) !== null && _a !== void 0 ? _a : null, env_claude_project_dir: (_b = process.env.CLAUDE_PROJECT_DIR) !== null && _b !== void 0 ? _b : null }, extra);
+        fs.appendFileSync(logPath, `${JSON.stringify(entry)}\n`);
+    }
+    catch (_c) { }
+}
 exports.add = new commander_1.Command("add")
     .description("Add a project to the gmax index")
     .argument("[dir]", "Directory to add (defaults to current directory)")
     .option("--no-index", "Register the project without indexing it")
+    .option("--force", "Allow adding a parent of existing projects (will REMOVE absorbed children)")
     .addHelpText("after", `
 Examples:
   gmax add                Add the current directory
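The guard resolves every list entry and every candidate through `path.resolve` before the `Set` lookup, so trailing slashes and `..` segments cannot sidestep it. Note that `path.resolve` is purely lexical (it does not follow symlinks), which is presumably why macOS's `/tmp` symlink and its `/private/tmp` target are both listed explicitly. A standalone sketch of that normalize-then-lookup comparison (illustrative only, not the gmax code path itself):

// Mirrors the shape of getBlockedRoots(); plain Node.js.
const path = require("node:path");
const os = require("node:os");

const blocked = new Set(["/", "/tmp", "/private/tmp", os.homedir()].map((p) => path.resolve(p)));

for (const candidate of ["/tmp/", "/private/tmp/../../tmp", os.homedir() + "/"]) {
    // path.resolve strips trailing slashes and collapses ".." lexically
    console.log(candidate, "→", blocked.has(path.resolve(candidate)) ? "BLOCKED" : "ok");
}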
@@ -73,6 +106,16 @@ Examples:
     const targetDir = dir ? path.resolve(dir) : process.cwd();
     const projectRoot = (_a = (0, project_root_1.findProjectRoot)(targetDir)) !== null && _a !== void 0 ? _a : targetDir;
     const projectName = path.basename(projectRoot);
+    const blocked = getBlockedRoots();
+    if (blocked.has(path.resolve(projectRoot))) {
+        logBlockedAttempt("blocked_root", projectRoot);
+        console.error(`Refusing to add ${projectRoot}: this path is blocked from indexing.\n` +
+            `(Blocked: home, /, /Users, /tmp, /private, /var, /usr, /opt, /etc, /System, /Library, /Applications.)\n` +
+            `Pick a specific project subdirectory instead.\n` +
+            `Diagnostic logged to ~/.gmax/logs/blocked-add.log (cwd=${process.cwd()} ppid=${process.ppid}).`);
+        process.exitCode = 1;
+        return;
+    }
     // Check if already registered
     const existing = (0, project_registry_1.getProject)(projectRoot);
     if (existing) {

@@ -90,6 +133,17 @@ Examples:
     // If this is a parent of existing projects, absorb them
     const children = (0, project_registry_1.getChildProjects)(projectRoot);
     if (children.length > 0) {
+        if (!opts.force) {
+            logBlockedAttempt("would_absorb_children", projectRoot, {
+                children: children.map((c) => ({ name: c.name, root: c.root })),
+            });
+            console.error(`Refusing to add ${projectRoot}: would absorb ${children.length} existing project(s):\n` +
+                `${children.map((c) => ` - ${c.name} (${c.root})`).join("\n")}\n` +
+                `Re-run with --force to proceed (this will REMOVE the listed projects and their indexed data).\n` +
+                `Diagnostic logged to ~/.gmax/logs/blocked-add.log (cwd=${process.cwd()} ppid=${process.ppid}).`);
+            process.exitCode = 1;
+            return;
+        }
         const names = children.map((c) => c.name).join(", ");
         console.log(`Absorbing ${children.length} sub-project(s): ${names}`);
         const { ensureDaemonRunning: checkDaemon, sendStreamingCommand: sendCmd } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/daemon-client")));
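Both refusal paths append one JSON object per line to ~/.gmax/logs/blocked-add.log, so the diagnostics can be inspected with a line-by-line parse. A minimal reader (the field names below match the entry built in logBlockedAttempt; nothing else is assumed):

// Standalone sketch: read the JSONL diagnostics written by logBlockedAttempt().
const fs = require("node:fs");
const os = require("node:os");
const path = require("node:path");

const logPath = path.join(os.homedir(), ".gmax", "logs", "blocked-add.log");
if (fs.existsSync(logPath)) {
    const entries = fs.readFileSync(logPath, "utf8")
        .split("\n")
        .filter(Boolean)          // drop the trailing empty line
        .map((line) => JSON.parse(line));
    for (const e of entries) {
        // reason is "blocked_root" or "would_absorb_children";
        // ppid/argv identify which caller triggered the attempt
        console.log(`${e.ts} ${e.reason} attempted=${e.attempted} ppid=${e.ppid}`);
    }
}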
package/dist/commands/watch.js
CHANGED
@@ -81,7 +81,7 @@ exports.watch = new commander_1.Command("watch")
     // Skip spawn if daemon already running at the same version.
     // If version mismatches (e.g. after npm install -g), shut down the old
     // daemon so we can start a fresh one with the new code.
-    const { isDaemonRunning, sendDaemonCommand } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/daemon-client")));
+    const { isDaemonRunning, isDaemonHeartbeatFresh, sendDaemonCommand } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/daemon-client")));
     if (yield isDaemonRunning()) {
         const cliVersion = JSON.parse(fs.readFileSync(path.join(__dirname, "../../package.json"), "utf-8")).version;
         const resp = yield sendDaemonCommand({ cmd: "ping" });

@@ -89,10 +89,23 @@ exports.watch = new commander_1.Command("watch")
             process.exit(0);
         }
         console.log(`Daemon version mismatch (${resp.version} → ${cliVersion}), restarting...`);
-        yield sendDaemonCommand({ cmd: "shutdown" });
+        yield sendDaemonCommand({
+            cmd: "shutdown",
+            reason: "version-mismatch",
+            from_pid: process.pid,
+            from_ppid: process.ppid,
+            from_version: cliVersion,
+            from_argv: process.argv.slice(0, 4),
+        });
         // Brief wait for old daemon to release socket/lock
         yield new Promise((r) => setTimeout(r, 2000));
     }
+    else if (isDaemonHeartbeatFresh()) {
+        // Ping failed but daemon.lock mtime is fresh — another daemon is
+        // alive but too busy to answer (e.g. mid-index). Don't spawn a
+        // competitor; the startup code would only kill the busy peer.
+        process.exit(0);
+    }
     const logFile = path.join(config_1.PATHS.logsDir, "daemon.log");
     const out = (0, log_rotate_1.openRotatedLog)(logFile);
     const child = (0, node_child_process_1.spawn)(process.argv[0], [process.argv[1], "watch", "--daemon"], {

@@ -126,7 +139,7 @@ exports.watch = new commander_1.Command("watch")
     process.on("SIGTERM", () => daemon.shutdown().then(() => (0, exit_1.gracefulExit)()));
     process.on("uncaughtException", (err) => {
         console.error("[daemon] uncaughtException:", err);
-        daemon.shutdown().then(() => (0, exit_1.gracefulExit)());
+        daemon.shutdown().then(() => (0, exit_1.gracefulExit)(1));
     });
     process.on("unhandledRejection", (reason) => {
         console.error("[daemon] unhandledRejection:", reason);

@@ -298,7 +311,20 @@ exports.watch
     let stoppedDaemon = false;
     // Try shutting down daemon first
     if (yield isDaemonRunning()) {
-        yield sendDaemonCommand({ cmd: "shutdown" });
+        let parentCmd = "?";
+        try {
+            const { execSync } = yield Promise.resolve().then(() => __importStar(require("node:child_process")));
+            parentCmd = execSync(`ps -o command= -p ${process.ppid}`, { encoding: "utf8" }).trim();
+        }
+        catch (_b) { }
+        yield sendDaemonCommand({
+            cmd: "shutdown",
+            reason: "gmax-watch-stop",
+            from_pid: process.pid,
+            from_ppid: process.ppid,
+            from_argv: process.argv.slice(0, 4),
+            from_parent_cmd: parentCmd,
+        });
         console.log("Daemon stopped.");
         stoppedDaemon = true;
     }
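The diagnostic block added to `gmax watch stop` records who asked for the shutdown: `ps -o command= -p <pid>` prints only the command column with no header row, so the trimmed output is the parent's full command line. A standalone sketch of that capture (POSIX ps; works on macOS and Linux):

// Capture the parent process's command line for shutdown diagnostics.
const { execSync } = require("node:child_process");

let parentCmd = "?";
try {
    parentCmd = execSync(`ps -o command= -p ${process.ppid}`, { encoding: "utf8" }).trim();
} catch {
    // ps exits non-zero when the PID no longer exists; keep the "?" default
}
console.log(`invoked by: ${parentCmd}`);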
package/dist/lib/daemon/daemon.js
CHANGED

@@ -71,9 +71,9 @@ const server_1 = require("../llm/server");
 const ipc_handler_1 = require("./ipc-handler");
 const logger_1 = require("../utils/logger");
 const daemon_client_1 = require("../utils/daemon-client");
-const watcher_store_2 = require("../utils/watcher-store");
 const index_config_1 = require("../index/index-config");
 const log_rotate_1 = require("../utils/log-rotate");
+const pool_1 = require("../workers/pool");
 const node_child_process_1 = require("node:child_process");
 const http = __importStar(require("node:http"));
 const IDLE_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes

@@ -104,26 +104,8 @@ class Daemon {
     start() {
         return __awaiter(this, void 0, void 0, function* () {
             process.title = "gmax-daemon";
-            // 0. Singleton enforcement:
-            try {
-                const pidStr = fs.readFileSync(config_1.PATHS.daemonPidFile, "utf-8").trim();
-                const existingPid = parseInt(pidStr, 10);
-                if (existingPid && existingPid !== process.pid && (0, watcher_store_2.isProcessRunning)(existingPid)) {
-                    (0, logger_1.log)("daemon", `found existing daemon PID:${existingPid}, checking socket...`);
-                    const responsive = yield (0, daemon_client_1.isDaemonRunning)();
-                    if (responsive) {
-                        (0, logger_1.log)("daemon", "existing daemon is responsive — exiting");
-                        process.exit(0);
-                    }
-                    // Unresponsive but alive — kill it
-                    (0, logger_1.log)("daemon", `existing daemon PID:${existingPid} unresponsive — killing`);
-                    yield (0, process_1.killProcess)(existingPid);
-                    (0, logger_1.log)("daemon", `killed stale daemon PID:${existingPid}`);
-                }
-            }
-            catch (_a) {
-                // No PID file or unreadable — proceed normally
-            }
+            // 0. Singleton enforcement: find and kill ALL stale daemon/worker processes
+            yield this.killStaleProcesses();
             // 1. Acquire exclusive lock — kernel-enforced, atomic, auto-released on death
             fs.mkdirSync(path.dirname(config_1.PATHS.daemonLockFile), { recursive: true });
             fs.writeFileSync(config_1.PATHS.daemonLockFile, "", { flag: "a" }); // ensure file exists

@@ -158,7 +140,7 @@ class Daemon {
             try {
                 fs.unlinkSync(config_1.PATHS.daemonSocket);
             }
-            catch (
+            catch (_a) { }
             this.server = net.createServer((conn) => {
                 (0, logger_1.debug)("daemon", "client connected");
                 let buf = "";
@@ -259,13 +241,25 @@ class Daemon {
                 console.error(`[daemon] Failed to watch ${path.basename(p.root)}:`, err);
             }
         }
-        // 8b. Index pending/error projects in the background
+        // 8b. Index pending/error projects in the background, serialized to avoid
+        // racing on shared LanceDB table creation (only one ensureTable() may win the
+        // first createTable; the rest crash with "Table 'chunks' already exists").
+        // Re-check shuttingDown each iteration: shutdown's pendingLocks drain is a
+        // snapshot, so a new project op kicked off after the snapshot would race
+        // with vectorDb.close() and fail with "VectorDB connection is closed".
         const pending = allProjects.filter((p) => (p.status === "pending" || p.status === "error") && fs.existsSync(p.root));
-
-
-
-
-
+        void (() => __awaiter(this, void 0, void 0, function* () {
+            for (const p of pending) {
+                if (this.shuttingDown)
+                    return;
+                try {
+                    yield this.indexPendingProject(p.root);
+                }
+                catch (err) {
+                    console.error(`[daemon] Failed to index pending ${path.basename(p.root)}:`, err);
+                }
+            }
+        }))();
         // 9. Heartbeat + refresh lockfile mtime to prevent stale detection
         this.heartbeatInterval = setInterval(() => {
             (0, watcher_store_1.heartbeat)(process.pid);
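The change in step 8b is the shape of the kick-off, not the indexing itself: instead of firing every indexPendingProject() at once, where all of them race to create the shared LanceDB "chunks" table, a single fire-and-forget async IIFE awaits one project at a time. A reduced sketch of the pattern (indexOne is a stand-in, not the real method):

// Serialized background work: one async IIFE, items awaited sequentially.
async function indexOne(root) { /* stand-in for indexPendingProject */ }

function startBackgroundIndexing(pending, isShuttingDown) {
    void (async () => {
        for (const p of pending) {
            if (isShuttingDown()) return;   // re-check per iteration; the shutdown
                                            // drain only snapshots earlier work
            try {
                await indexOne(p.root);     // only the first call creates the table
            } catch (err) {
                console.error(`failed to index ${p.root}`, err);
            }
        }
    })();   // void: deliberately not awaited, so start() can keep going
}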
@@ -560,6 +554,11 @@ class Daemon {
         return __awaiter(this, void 0, void 0, function* () {
             yield this.withProjectLock(root, () => __awaiter(this, void 0, void 0, function* () {
                 var _a;
+                // Bail if shutdown raced ahead of us between IIFE iteration and lock
+                // acquisition — otherwise we'd start writing to a DB that shutdown is
+                // about to close, leaving the project status as "error".
+                if (this.shuttingDown)
+                    return;
                 if (!this.vectorDb || !this.metaCache)
                     return;
                 const name = path.basename(root);
@@ -968,14 +967,92 @@ class Daemon {
     }
     stopMlxServer() {
         var _a;
-
-
-
-
+        // The spawned process is `uv`, which forks `python` then exits. Killing the
+        // recorded PID alone leaves python orphaned (the orphan source for port 8100
+        // collisions across daemon restarts). Always also kill whoever owns the port.
+        if ((_a = this.mlxChild) === null || _a === void 0 ? void 0 : _a.pid) {
+            try {
+                process.kill(-this.mlxChild.pid, "SIGTERM");
+            }
+            catch (_b) {
+                try {
+                    process.kill(this.mlxChild.pid, "SIGTERM");
+                }
+                catch (_c) { }
+            }
             console.log(`[daemon] Stopped MLX embed server (PID: ${this.mlxChild.pid})`);
+            this.mlxChild = null;
+        }
+        const port = parseInt(process.env.MLX_EMBED_PORT || "8100", 10);
+        const portOwner = this.getPortPid(port);
+        if (portOwner) {
+            try {
+                process.kill(portOwner, "SIGTERM");
+                console.log(`[daemon] Killed orphan MLX on port ${port} (PID: ${portOwner})`);
+            }
+            catch (_d) { }
+        }
+    }
+    /**
+     * Find and kill all stale gmax-daemon and gmax-worker processes.
+     * Uses pgrep to scan by process title rather than relying solely on
+     * the PID file, which becomes stale when a daemon is orphaned through
+     * the lock-compromise path.
+     */
+    killStaleProcesses() {
+        return __awaiter(this, void 0, void 0, function* () {
+            // 1. Check for other daemon processes
+            const daemonPids = this.findProcessesByTitle("gmax-daemon")
+                .filter((pid) => pid !== process.pid);
+            const workerPids = this.findProcessesByTitle("gmax-worker");
+            if (daemonPids.length === 0 && workerPids.length === 0) {
+                (0, logger_1.log)("daemon", "No stale processes found");
+                return;
+            }
+            for (const pid of daemonPids) {
+                (0, logger_1.log)("daemon", `found daemon PID:${pid}, checking liveness...`);
+                // A busy daemon (mid-index, compaction, big LMDB write) can block the
+                // event loop long enough to miss a ping. Two independent liveness
+                // probes — if either says "alive", defer to the running peer instead
+                // of killing its workers mid-flight.
+                // 1. daemon.lock mtime (refreshed by heartbeat every 60s)
+                // 2. socket ping with a generous 10s timeout
+                const heartbeatFresh = (0, daemon_client_1.isDaemonHeartbeatFresh)();
+                const responsive = yield (0, daemon_client_1.isDaemonRunning)({ timeoutMs: 10000 });
+                if (heartbeatFresh || responsive) {
+                    (0, logger_1.log)("daemon", `existing daemon PID:${pid} is alive (heartbeat=${heartbeatFresh} ping=${responsive}) — exiting`);
+                    process.exit(0);
+                }
+                (0, logger_1.log)("daemon", `stale daemon PID:${pid} unresponsive and heartbeat stale — killing`);
+                yield (0, process_1.killProcess)(pid);
+                (0, logger_1.log)("daemon", `killed stale daemon PID:${pid}`);
+            }
+            // 2. Kill orphaned workers from previous daemon instances.
+            // Safe because this runs before the new daemon's worker pool is initialized.
+            for (const pid of workerPids) {
+                (0, logger_1.log)("daemon", `killing orphaned worker PID:${pid}`);
+                yield (0, process_1.killProcess)(pid);
+            }
+            (0, logger_1.log)("daemon", `Cleaned up ${daemonPids.length} stale daemon(s), ${workerPids.length} orphaned worker(s)`);
+        });
+    }
+    findProcessesByTitle(title) {
+        try {
+            const out = (0, node_child_process_1.execSync)(`pgrep -x "${title}"`, {
+                timeout: 5000,
+                encoding: "utf-8",
+            }).trim();
+            if (!out)
+                return [];
+            return out
+                .split("\n")
+                .map((s) => parseInt(s.trim(), 10))
+                .filter((n) => Number.isFinite(n) && n > 0);
+        }
+        catch (_a) {
+            // pgrep exits 1 when no processes match — not an error
+            return [];
         }
-        catch (_b) { }
-        this.mlxChild = null;
     }
     shutdown() {
         return __awaiter(this, void 0, void 0, function* () {
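The `process.kill(-pid)` form works when the child leads its own process group (in Node, a child spawned with `detached: true`); signalling the negative PID then reaches anything the child forked before exiting, which is exactly the uv-forks-python case described above, with a single-PID fallback when group kill fails. `getPortPid` is not part of this diff; it presumably resolves a listening port to its owning PID (e.g. something like `lsof -ti`), but that is an assumption. A standalone sketch of the group-kill pattern:

// Group-kill pattern. `detached: true` makes the child the leader of a new
// process group, so kill(-pid) signals the whole group, including the
// backgrounded sleep the shell forks here.
const { spawn } = require("node:child_process");

const child = spawn("sh", ["-c", "sleep 60 & wait"], {
    detached: true,
    stdio: "ignore",
});

setTimeout(() => {
    try {
        process.kill(-child.pid, "SIGTERM");   // whole group
    } catch {
        process.kill(child.pid, "SIGTERM");    // fallback: single PID
    }
}, 1000);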
@@ -1009,6 +1086,13 @@ class Daemon {
             catch (_e) { }
             // Stop MLX embed server if we started it
             this.stopMlxServer();
+            // Destroy worker pool to prevent orphaned child processes
+            if ((0, pool_1.isWorkerPoolInitialized)()) {
+                try {
+                    yield (0, pool_1.destroyWorkerPool)();
+                }
+                catch (_f) { }
+            }
             // Stop poll intervals
             for (const interval of this.pollIntervals.values()) {
                 clearInterval(interval);

@@ -1019,7 +1103,7 @@ class Daemon {
                 try {
                     yield sub.unsubscribe();
                 }
-                catch (
+                catch (_g) { }
             }
             this.subscriptions.clear();
             // Close server + socket + PID file + lock

@@ -1027,16 +1111,16 @@ class Daemon {
             try {
                 fs.unlinkSync(config_1.PATHS.daemonSocket);
             }
-            catch (
+            catch (_h) { }
             try {
                 fs.unlinkSync(config_1.PATHS.daemonPidFile);
             }
-            catch (
+            catch (_j) { }
             if (this.releaseLock) {
                 try {
                     yield this.releaseLock();
                 }
-                catch (
+                catch (_k) { }
                 this.releaseLock = null;
             }
             // Unregister all

@@ -1049,11 +1133,11 @@ class Daemon {
             try {
                 yield ((_c = this.metaCache) === null || _c === void 0 ? void 0 : _c.close());
             }
-            catch (
+            catch (_l) { }
             try {
                 yield ((_d = this.vectorDb) === null || _d === void 0 ? void 0 : _d.close());
             }
-            catch (
+            catch (_m) { }
             console.log("[daemon] Shutdown complete");
         });
     }
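The new shutdown step only destroys the pool if one was ever created. pool.js itself is not shown in this diff, but the isWorkerPoolInitialized() guard implies a lazy singleton: calling a bare getter during shutdown would construct the pool just to tear it down. A hypothetical module shape consistent with the two calls the diff does show (everything besides isWorkerPoolInitialized/destroyWorkerPool, including createPool and pool.destroy, is an illustrative assumption):

// Hypothetical sketch of ../workers/pool as a lazy singleton.
let pool = null;

function createPool() {
    // stand-in: the real pool presumably spawns gmax-worker child processes
    return { destroy: async () => { /* terminate children */ } };
}
function getWorkerPool() {
    if (!pool) pool = createPool();   // lazy init on first use
    return pool;
}
function isWorkerPoolInitialized() {
    return pool !== null;             // lets shutdown skip an un-created pool
}
async function destroyWorkerPool() {
    if (!pool) return;                // nothing to tear down
    await pool.destroy();
    pool = null;
}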
package/dist/lib/daemon/ipc-handler.js
CHANGED

@@ -81,6 +81,7 @@ function writeDone(conn, data) {
  */
 function handleCommand(daemon, cmd, conn) {
     return __awaiter(this, void 0, void 0, function* () {
+        var _a, _b, _c, _d, _e;
         try {
             (0, logger_1.debug)("daemon", `ipc cmd=${cmd.cmd}${cmd.root ? ` root=${cmd.root}` : ""}`);
             switch (cmd.cmd) {

@@ -108,10 +109,17 @@ function handleCommand(daemon, cmd, conn) {
                     projects: daemon.listProjects(),
                     diskPressure: daemon.getDiskPressure(),
                 };
-            case "shutdown":
-
+            case "shutdown": {
+                const reason = String((_a = cmd.reason) !== null && _a !== void 0 ? _a : "unknown");
+                const fromPid = (_b = cmd.from_pid) !== null && _b !== void 0 ? _b : "?";
+                const fromPpid = (_c = cmd.from_ppid) !== null && _c !== void 0 ? _c : "?";
+                const fromVer = (_d = cmd.from_version) !== null && _d !== void 0 ? _d : "?";
+                const fromArgv = Array.isArray(cmd.from_argv) ? cmd.from_argv.join(" ") : "?";
+                const fromParentCmd = (_e = cmd.from_parent_cmd) !== null && _e !== void 0 ? _e : "?";
+                console.log(`[daemon] shutdown command received via IPC: reason=${reason} from_pid=${fromPid} from_ppid=${fromPpid} from_version=${fromVer} from_argv=[${fromArgv}] from_parent_cmd=[${fromParentCmd}]`);
                 setImmediate(() => daemon.shutdown());
                 return { ok: true };
+            }
             // --- Streaming commands (daemon manages connection) ---
             case "add": {
                 const root = String(cmd.root || "");
package/dist/lib/utils/daemon-client.js
CHANGED

@@ -44,11 +44,17 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.sendDaemonCommand = sendDaemonCommand;
 exports.isDaemonRunning = isDaemonRunning;
+exports.isDaemonHeartbeatFresh = isDaemonHeartbeatFresh;
 exports.ensureDaemonRunning = ensureDaemonRunning;
 exports.sendStreamingCommand = sendStreamingCommand;
+const fs = __importStar(require("node:fs"));
 const net = __importStar(require("node:net"));
 const config_1 = require("../../config");
 const DEFAULT_TIMEOUT_MS = 5000;
+// A live daemon refreshes daemon.lock mtime every 60s (HEARTBEAT_INTERVAL_MS).
+// Treat mtime younger than 2.5x that as proof of life, even if a ping times
+// out — a busy daemon with a blocked event loop can still be heartbeating.
+const HEARTBEAT_FRESH_THRESHOLD_MS = 150000;
 /**
  * Send a JSON command to the daemon over the Unix domain socket.
  * Returns the parsed response, or {ok: false, error} on failure.

@@ -100,14 +106,32 @@ function sendDaemonCommand(cmd, opts) {
     });
 }
 /**
- * Check if the daemon is running by sending a ping.
+ * Check if the daemon is running by sending a ping. Pass a larger timeoutMs
+ * when a busy daemon is plausible (e.g. before killing what might be a live
+ * peer) — the default 2s is tight enough that a daemon blocking the event
+ * loop mid-index can miss it.
  */
-function isDaemonRunning() {
+function isDaemonRunning(opts) {
     return __awaiter(this, void 0, void 0, function* () {
-
+        var _a;
+        const resp = yield sendDaemonCommand({ cmd: "ping" }, { timeoutMs: (_a = opts === null || opts === void 0 ? void 0 : opts.timeoutMs) !== null && _a !== void 0 ? _a : 2000 });
         return resp.ok === true;
     });
 }
+/**
+ * Lock-file-based liveness probe. A running daemon refreshes daemon.lock's
+ * mtime every 60s via its heartbeat loop; a fresh mtime means the daemon is
+ * alive even if its socket ping times out under load.
+ */
+function isDaemonHeartbeatFresh() {
+    try {
+        const stats = fs.statSync(config_1.PATHS.daemonLockFile);
+        return Date.now() - stats.mtimeMs < HEARTBEAT_FRESH_THRESHOLD_MS;
+    }
+    catch (_a) {
+        return false;
+    }
+}
 /**
  * Ensure the daemon is running — start it if needed, poll up to 5s.
  * Returns true if daemon is ready, false if it couldn't be started.
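Callers now have two complementary probes: the socket ping answers "is it responsive?", the lock-file mtime answers "is it alive at all?". Condensed, the spawn gate in `gmax watch` composes them like this (a sketch of the logic, not the literal compiled code):

// Decide whether to spawn a new daemon, deferring to a live-but-busy peer.
const { isDaemonRunning, isDaemonHeartbeatFresh } = require("./daemon-client");

async function shouldSpawnDaemon() {
    if (await isDaemonRunning()) return false;   // responsive peer (2s ping)
    if (isDaemonHeartbeatFresh()) return false;  // no answer, but daemon.lock
                                                 // mtime < 150s: alive, just busy
    return true;                                 // dead or absent: safe to spawn
}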
package/package.json
CHANGED

-  "version": "0.15.3",
+  "version": "0.15.5",

package/plugins/grepmax/hooks/stop.js
CHANGED

@@ -1,6 +1,9 @@
-
-
-
-
-
-
+// Intentionally a no-op.
+//
+// Previously this ran `gmax watch stop` on every Claude SessionEnd. With
+// multiple concurrent Claude sessions sharing one daemon, that meant *any*
+// session ending killed the daemon for every *other* session — silently
+// breaking their search/index and forcing repeated daemon restarts.
+//
+// The daemon's own 30-minute idle timeout handles cleanup when nothing is
+// using it, so SessionEnd has no work to do here.