grepmax 0.7.39 → 0.7.41

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -58,6 +58,7 @@ const project_root_1 = require("../lib/utils/project-root");
58
58
  const watcher_registry_1 = require("../lib/utils/watcher-registry");
59
59
  const IDLE_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes
60
60
  const IDLE_CHECK_INTERVAL_MS = 60 * 1000; // check every minute
61
+ const MAX_LOG_BYTES = 5 * 1024 * 1024; // 5 MB — rotate log when exceeded
61
62
  exports.watch = new commander_1.Command("watch")
62
63
  .description("Start background file watcher for live reindexing")
63
64
  .option("-b, --background", "Run watcher in background and exit")
@@ -84,6 +85,15 @@ exports.watch = new commander_1.Command("watch")
84
85
  fs.mkdirSync(logDir, { recursive: true });
85
86
  const safeName = projectName.replace(/[^a-zA-Z0-9._-]/g, "_");
86
87
  const logFile = path.join(logDir, `watch-${safeName}.log`);
88
+ // Rotate log if it exceeds MAX_LOG_BYTES
89
+ try {
90
+ const logStat = fs.statSync(logFile);
91
+ if (logStat.size > MAX_LOG_BYTES) {
92
+ const prev = `${logFile}.prev`;
93
+ fs.renameSync(logFile, prev);
94
+ }
95
+ }
96
+ catch (_c) { }
87
97
  const out = fs.openSync(logFile, "a");
88
98
  const child = (0, node_child_process_1.spawn)(process.argv[0], [process.argv[1], ...args], {
89
99
  detached: true,
@@ -48,7 +48,7 @@ exports.computeRetryAction = computeRetryAction;
48
48
  const fs = __importStar(require("node:fs"));
49
49
  const cache_check_1 = require("../utils/cache-check");
50
50
  const file_utils_1 = require("../utils/file-utils");
51
- function processBatchCore(batch, metaCache, pool) {
51
+ function processBatchCore(batch, metaCache, pool, signal) {
52
52
  return __awaiter(this, void 0, void 0, function* () {
53
53
  let reindexed = 0;
54
54
  const changedIds = [];
@@ -57,6 +57,8 @@ function processBatchCore(batch, metaCache, pool) {
57
57
  const metaUpdates = new Map();
58
58
  const metaDeletes = [];
59
59
  for (const [absPath, event] of batch) {
60
+ if (signal === null || signal === void 0 ? void 0 : signal.aborted)
61
+ break;
60
62
  if (event === "unlink") {
61
63
  deletes.push(absPath);
62
64
  metaDeletes.push(absPath);
@@ -74,7 +76,7 @@ function processBatchCore(batch, metaCache, pool) {
74
76
  const result = yield pool.processFile({
75
77
  path: absPath,
76
78
  absolutePath: absPath,
77
- });
79
+ }, signal);
78
80
  const metaEntry = {
79
81
  hash: result.hash,
80
82
  mtimeMs: result.mtimeMs,
@@ -55,6 +55,7 @@ const logger_1 = require("../utils/logger");
55
55
  const lock_1 = require("../utils/lock");
56
56
  const pool_1 = require("../workers/pool");
57
57
  const llm_client_1 = require("../workers/summarize/llm-client");
58
+ const watcher_batch_1 = require("./watcher-batch");
58
59
  // Chokidar ignored — must exclude heavy directories to keep FD count low.
59
60
  // On macOS, chokidar uses FSEvents (single FD) but falls back to fs.watch()
60
61
  // (one FD per directory) if FSEvents isn't available or for some subdirs.
@@ -85,6 +86,8 @@ function startWatcher(opts) {
85
86
  let processing = false;
86
87
  let closed = false;
87
88
  let consecutiveLockFailures = 0;
89
+ let currentBatchAc = null;
90
+ let summarizationAc = null;
88
91
  const MAX_RETRIES = 5;
89
92
  const watcher = (0, chokidar_1.watch)(projectRoot, {
90
93
  ignored: exports.WATCHER_IGNORE_PATTERNS,
@@ -112,19 +115,19 @@ function startWatcher(opts) {
112
115
  })();
113
116
  const BATCH_TIMEOUT_MS = Math.max(Math.ceil(taskTimeoutMs * 1.5), 120000);
114
117
  const processBatch = () => __awaiter(this, void 0, void 0, function* () {
115
- var _a;
116
118
  if (closed || processing || pending.size === 0)
117
119
  return;
118
120
  processing = true;
121
+ const batchAc = new AbortController();
122
+ currentBatchAc = batchAc;
119
123
  const batchTimeout = setTimeout(() => {
120
- if (processing) {
121
- console.error(`[${wtag}] Batch processing timed out after 120s, resetting`);
122
- processing = false;
123
- }
124
+ (0, logger_1.log)(wtag, `Batch timed out after ${BATCH_TIMEOUT_MS}ms, aborting`);
125
+ batchAc.abort();
124
126
  }, BATCH_TIMEOUT_MS);
125
127
  const batch = new Map(pending);
126
128
  pending.clear();
127
- (0, logger_1.log)(wtag, `Processing ${batch.size} changed files`);
129
+ const filenames = [...batch.keys()].map((p) => path.basename(p));
130
+ (0, logger_1.log)(wtag, `Processing ${batch.size} changed files: ${filenames.join(", ")}`);
128
131
  const start = Date.now();
129
132
  let reindexed = 0;
130
133
  const changedIds = [];
@@ -140,6 +143,8 @@ function startWatcher(opts) {
140
143
  const metaUpdates = new Map();
141
144
  const metaDeletes = [];
142
145
  for (const [absPath, event] of batch) {
146
+ if (batchAc.signal.aborted)
147
+ break;
143
148
  if (event === "unlink") {
144
149
  deletes.push(absPath);
145
150
  metaDeletes.push(absPath);
@@ -159,7 +164,7 @@ function startWatcher(opts) {
159
164
  const result = yield pool.processFile({
160
165
  path: absPath,
161
166
  absolutePath: absPath,
162
- });
167
+ }, batchAc.signal);
163
168
  const metaEntry = {
164
169
  hash: result.hash,
165
170
  mtimeMs: result.mtimeMs,
@@ -188,6 +193,8 @@ function startWatcher(opts) {
188
193
  reindexed++;
189
194
  }
190
195
  catch (err) {
196
+ if (batchAc.signal.aborted)
197
+ break;
191
198
  const code = err === null || err === void 0 ? void 0 : err.code;
192
199
  if (code === "ENOENT") {
193
200
  deletes.push(absPath);
@@ -243,23 +250,16 @@ function startWatcher(opts) {
243
250
  consecutiveLockFailures++;
244
251
  }
245
252
  console.error(`[${wtag}] Batch processing failed:`, err);
246
- let dropped = 0;
247
- for (const [absPath, event] of batch) {
248
- const count = ((_a = retryCount.get(absPath)) !== null && _a !== void 0 ? _a : 0) + 1;
249
- if (count >= MAX_RETRIES) {
250
- retryCount.delete(absPath);
251
- dropped++;
252
- }
253
- else if (!pending.has(absPath)) {
253
+ const { requeued, dropped, backoffMs } = (0, watcher_batch_1.computeRetryAction)(batch, retryCount, MAX_RETRIES, isLockError, consecutiveLockFailures, DEBOUNCE_MS);
254
+ for (const [absPath, event] of requeued) {
255
+ if (!pending.has(absPath)) {
254
256
  pending.set(absPath, event);
255
- retryCount.set(absPath, count);
256
257
  }
257
258
  }
258
259
  if (dropped > 0) {
259
260
  console.warn(`[${wtag}] Dropped ${dropped} file(s) after ${MAX_RETRIES} failed retries`);
260
261
  }
261
262
  if (pending.size > 0) {
262
- const backoffMs = Math.min(DEBOUNCE_MS * Math.pow(2, consecutiveLockFailures), 30000);
263
263
  if (debounceTimer)
264
264
  clearTimeout(debounceTimer);
265
265
  debounceTimer = setTimeout(() => processBatch(), backoffMs);
@@ -267,18 +267,24 @@ function startWatcher(opts) {
267
267
  }
268
268
  finally {
269
269
  clearTimeout(batchTimeout);
270
+ currentBatchAc = null;
270
271
  processing = false;
271
272
  // Process any events that came in while we were processing
272
273
  if (pending.size > 0) {
273
274
  scheduleBatch();
274
275
  }
275
276
  }
276
- // Fire-and-forget summarization doesn't block the next batch
277
+ // Cancel previous summarization before starting a new one
278
+ summarizationAc === null || summarizationAc === void 0 ? void 0 : summarizationAc.abort();
279
+ summarizationAc = new AbortController();
280
+ const sumSignal = summarizationAc.signal;
277
281
  if (changedIds.length > 0) {
278
282
  (() => __awaiter(this, void 0, void 0, function* () {
279
283
  try {
280
284
  const table = yield vectorDb.ensureTable();
281
285
  for (const id of changedIds) {
286
+ if (sumSignal.aborted)
287
+ break;
282
288
  const escaped = (0, filter_builder_1.escapeSqlString)(id);
283
289
  const rows = yield table
284
290
  .query()
@@ -298,13 +304,15 @@ function startWatcher(opts) {
298
304
  file: String(r.path || ""),
299
305
  },
300
306
  ]);
307
+ if (sumSignal.aborted)
308
+ break;
301
309
  if (summaries === null || summaries === void 0 ? void 0 : summaries[0]) {
302
310
  yield vectorDb.updateRows([id], "summary", [summaries[0]]);
303
311
  }
304
312
  }
305
313
  }
306
314
  catch (_a) {
307
- // Summarizer unavailable — skip
315
+ // Summarizer unavailable or aborted — skip
308
316
  }
309
317
  }))();
310
318
  }
@@ -339,6 +347,8 @@ function startWatcher(opts) {
339
347
  return {
340
348
  close: () => __awaiter(this, void 0, void 0, function* () {
341
349
  closed = true;
350
+ currentBatchAc === null || currentBatchAc === void 0 ? void 0 : currentBatchAc.abort();
351
+ summarizationAc === null || summarizationAc === void 0 ? void 0 : summarizationAc.abort();
342
352
  if (debounceTimer)
343
353
  clearTimeout(debounceTimer);
344
354
  clearInterval(ftsInterval);
@@ -317,9 +317,8 @@ class WorkerPool {
317
317
  }
318
318
  this.dispatch();
319
319
  }
320
- processFile(input) {
321
- // ProcessFile doesn't currently use cancellation, but we could add it later
322
- return this.enqueue("processFile", input);
320
+ processFile(input, signal) {
321
+ return this.enqueue("processFile", input, signal);
323
322
  }
324
323
  encodeQuery(text, signal) {
325
324
  return this.enqueue("encodeQuery", { text }, signal);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "grepmax",
3
- "version": "0.7.39",
3
+ "version": "0.7.41",
4
4
  "author": "Robert Owens <robowens@me.com>",
5
5
  "homepage": "https://github.com/reowens/grepmax",
6
6
  "bugs": {
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "grepmax",
3
- "version": "0.7.39",
3
+ "version": "0.7.41",
4
4
  "description": "Semantic code search for Claude Code. Automatically indexes your project and provides intelligent search capabilities.",
5
5
  "author": {
6
6
  "name": "Robert Owens",