gitnexus 1.6.4-rc.15 → 1.6.4-rc.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -156,6 +156,7 @@ gitnexus analyze --embeddings # Enable embedding generation (slower, better s
156
156
  gitnexus analyze --skip-agents-md # Preserve custom AGENTS.md/CLAUDE.md gitnexus section edits
157
157
  gitnexus analyze --verbose # Log skipped files when parsers are unavailable
158
158
  gitnexus analyze --max-file-size 1024 # Skip files larger than N KB (default: 512, cap: 32768)
159
+ gitnexus analyze --worker-timeout 60 # Increase worker idle timeout for slow parses
159
160
  gitnexus mcp # Start MCP server (stdio) — serves all indexed repos
160
161
  gitnexus serve # Start local HTTP server (multi-repo) for web UI
161
162
  gitnexus index # Register an existing .gitnexus/ folder into the global registry
@@ -323,6 +324,21 @@ npx gitnexus analyze
323
324
 
324
325
  Values above **32768 KB (32 MB)** are clamped to the tree-sitter parser ceiling; invalid values fall back to the 512 KB default with a one-time warning. When an override is active, `analyze` prints the effective threshold in its startup banner (e.g. `GITNEXUS_MAX_FILE_SIZE: effective threshold 2048KB (default 512KB)`).
325
326
 
327
+ ### Analyze reports a worker timeout
328
+
329
+ Worker parse timeouts are recoverable. GitNexus retries stalled worker jobs with backoff, splits large jobs to isolate slow files, and falls back to the sequential parser when needed. If a large repository needs more time per worker job, use either:
330
+
331
+ ```bash
332
+ # CLI flag, in seconds
333
+ npx gitnexus analyze --worker-timeout 60
334
+
335
+ # Environment variable, in milliseconds
336
+ export GITNEXUS_WORKER_SUB_BATCH_TIMEOUT_MS=60000
337
+ npx gitnexus analyze
338
+ ```
339
+
340
+ For repositories with very large source files, `GITNEXUS_WORKER_SUB_BATCH_MAX_BYTES` controls the worker job byte budget. The default is **8388608 bytes (8 MB)**.
341
+
326
342
  ## Privacy
327
343
 
328
344
  - All processing happens locally on your machine
@@ -45,5 +45,7 @@ export interface AnalyzeOptions {
45
45
  * `GITNEXUS_MAX_FILE_SIZE` for the rest of the pipeline.
46
46
  */
47
47
  maxFileSize?: string;
48
+ /** Override worker sub-batch idle timeout in seconds. */
49
+ workerTimeout?: string;
48
50
  }
49
51
  export declare const analyzeCommand: (inputPath?: string, options?: AnalyzeOptions) => Promise<void>;
@@ -55,6 +55,15 @@ export const analyzeCommand = async (inputPath, options) => {
55
55
  if (options?.maxFileSize) {
56
56
  process.env.GITNEXUS_MAX_FILE_SIZE = options.maxFileSize;
57
57
  }
58
+ if (options?.workerTimeout) {
59
+ const workerTimeoutSeconds = Number(options.workerTimeout);
60
+ if (!Number.isFinite(workerTimeoutSeconds) || workerTimeoutSeconds < 1) {
61
+ console.error(' --worker-timeout must be at least 1 second.\n');
62
+ process.exitCode = 1;
63
+ return;
64
+ }
65
+ process.env.GITNEXUS_WORKER_SUB_BATCH_TIMEOUT_MS = String(Math.round(workerTimeoutSeconds * 1000));
66
+ }
58
67
  console.log('\n GitNexus Analyzer\n');
59
68
  let repoPath;
60
69
  if (inputPath) {
package/dist/cli/index.js CHANGED
@@ -30,9 +30,12 @@ program
30
30
  'Leaves `-r <name>` ambiguous for the two paths; use -r <path> to disambiguate.')
31
31
  .option('-v, --verbose', 'Enable verbose ingestion warnings (default: false)')
32
32
  .option('--max-file-size <kb>', 'Skip files larger than this (KB). Default: 512. Hard cap: 32768 (tree-sitter limit).')
33
+ .option('--worker-timeout <seconds>', 'Worker sub-batch idle timeout before retry/fallback. Default: 30.')
33
34
  .addHelpText('after', '\nEnvironment variables:\n' +
34
35
  ' GITNEXUS_NO_GITIGNORE=1 Skip .gitignore parsing (still reads .gitnexusignore)\n' +
35
36
  ' GITNEXUS_MAX_FILE_SIZE=N Override large-file skip threshold (KB). Default 512, max 32768.\n' +
37
+ ' GITNEXUS_WORKER_SUB_BATCH_TIMEOUT_MS=N Worker idle timeout in milliseconds. Default 30000.\n' +
38
+ ' GITNEXUS_WORKER_SUB_BATCH_MAX_BYTES=N Worker job byte budget. Default 8388608.\n' +
36
39
  '\nTip: `.gitnexusignore` supports `.gitignore`-style negation. Add e.g.\n' +
37
40
  ' `!__tests__/` to index a directory that is auto-filtered by default (#771).')
38
41
  .action(createLazyAction(() => import('./analyze.js'), 'analyzeCommand'));
@@ -555,6 +555,13 @@ export const processParsing = async (graph, files, symbolTable, astCache,
555
555
  * Pass `undefined` if no consumer needs cross-phase access.
556
556
  */
557
557
  scopeTreeCache, onFileProgress, workerPool) => {
558
+ let lastProgress = 0;
559
+ const reportProgress = onFileProgress
560
+ ? (current, total, detail) => {
561
+ lastProgress = Math.max(lastProgress, current);
562
+ onFileProgress(lastProgress, total, detail);
563
+ }
564
+ : undefined;
558
565
  if (workerPool) {
559
566
  if (scopeTreeCache !== undefined && process.env.PROF_SCOPE_RESOLUTION === '1') {
560
567
  // Trees can't cross MessageChannels, so worker-parsed files land
@@ -564,13 +571,15 @@ scopeTreeCache, onFileProgress, workerPool) => {
564
571
  console.warn(`[scope-resolution prof] worker pool engaged for ${files.length} files — cross-phase tree cache will be empty; scope-resolution re-parses.`);
565
572
  }
566
573
  try {
567
- return await processParsingWithWorkers(graph, files, symbolTable, astCache, workerPool, onFileProgress);
574
+ return await processParsingWithWorkers(graph, files, symbolTable, astCache, workerPool, reportProgress);
568
575
  }
569
576
  catch (err) {
570
- console.warn('Worker pool parsing failed, falling back to sequential:', err instanceof Error ? err.message : err);
577
+ const message = err instanceof Error ? err.message : String(err);
578
+ console.warn('Worker pool parsing stopped; continuing with sequential parser:', message);
579
+ reportProgress?.(lastProgress, files.length, `Sequential fallback after worker issue: ${message}`);
571
580
  }
572
581
  }
573
582
  // Fallback: sequential parsing (no pre-extracted data)
574
- await processParsingSequential(graph, files, symbolTable, astCache, scopeTreeCache, onFileProgress);
583
+ await processParsingSequential(graph, files, symbolTable, astCache, scopeTreeCache, reportProgress);
575
584
  return null;
576
585
  };
@@ -452,7 +452,7 @@ const processBatch = (files, onProgress) => {
452
452
  }
453
453
  let totalProcessed = 0;
454
454
  let lastReported = 0;
455
- const PROGRESS_INTERVAL = 100; // report every 100 files
455
+ const PROGRESS_INTERVAL = Math.max(1, Math.min(100, Math.ceil(files.length / 10)));
456
456
  const onFileProcessed = onProgress
457
457
  ? () => {
458
458
  totalProcessed++;
@@ -519,6 +519,9 @@ const processBatch = (files, onProgress) => {
519
519
  }
520
520
  }
521
521
  }
522
+ if (onProgress && totalProcessed !== lastReported) {
523
+ onProgress(totalProcessed);
524
+ }
522
525
  return result;
523
526
  };
524
527
  const ROUTE_HTTP_METHODS = new Set([
@@ -1,8 +1,7 @@
1
1
  export interface WorkerPool {
2
2
  /**
3
- * Dispatch items across workers. Items are split into chunks (one per worker),
4
- * each worker processes its chunk via sub-batches to limit peak memory,
5
- * and results are concatenated back in order.
3
+ * Dispatch items across workers. Items are split into bounded jobs, each job
4
+ * is committed independently, and stalled jobs are split/retried locally.
6
5
  */
7
6
  dispatch<TInput, TResult>(items: TInput[], onProgress?: (filesProcessed: number) => void): Promise<TResult[]>;
8
7
  /** Terminate all workers. Must be called when done. */
@@ -10,7 +9,15 @@ export interface WorkerPool {
10
9
  /** Number of workers in the pool */
11
10
  readonly size: number;
12
11
  }
12
+ export interface WorkerPoolOptions {
13
+ subBatchSize?: number;
14
+ subBatchMaxBytes?: number;
15
+ subBatchIdleTimeoutMs?: number;
16
+ maxTimeoutRetries?: number;
17
+ timeoutBackoffFactor?: number;
18
+ }
19
+ export declare function resolveWorkerPoolOptions(options?: WorkerPoolOptions): Required<WorkerPoolOptions>;
13
20
  /**
14
21
  * Create a pool of worker threads.
15
22
  */
16
- export declare const createWorkerPool: (workerUrl: URL, poolSize?: number) => WorkerPool;
23
+ export declare const createWorkerPool: (workerUrl: URL, poolSize?: number, options?: WorkerPoolOptions) => WorkerPool;
@@ -7,13 +7,83 @@ import { fileURLToPath } from 'node:url';
7
7
  * Keeps structured-clone memory bounded per sub-batch.
8
8
  */
9
9
  const SUB_BATCH_SIZE = 1500;
10
- /** Per sub-batch timeout. If a single sub-batch takes longer than this,
11
- * likely a pathological file (e.g. minified 50MB JS). Fail fast. */
12
- const SUB_BATCH_TIMEOUT_MS = 30_000;
10
+ const SUB_BATCH_MAX_BYTES = 8 * 1024 * 1024;
11
+ const DEFAULT_SUB_BATCH_IDLE_TIMEOUT_MS = 30_000;
12
+ const DEFAULT_TIMEOUT_RETRIES = 1;
13
+ const DEFAULT_TIMEOUT_BACKOFF_FACTOR = 2;
14
+ function positiveInteger(value) {
15
+ const parsed = typeof value === 'string' ? Number(value) : value;
16
+ return typeof parsed === 'number' && Number.isFinite(parsed) && parsed > 0
17
+ ? Math.floor(parsed)
18
+ : undefined;
19
+ }
20
+ function nonNegativeInteger(value) {
21
+ const parsed = typeof value === 'string' ? Number(value) : value;
22
+ return typeof parsed === 'number' && Number.isFinite(parsed) && parsed >= 0
23
+ ? Math.floor(parsed)
24
+ : undefined;
25
+ }
26
+ export function resolveWorkerPoolOptions(options = {}) {
27
+ return {
28
+ subBatchSize: positiveInteger(options.subBatchSize) ?? SUB_BATCH_SIZE,
29
+ subBatchMaxBytes: positiveInteger(options.subBatchMaxBytes) ??
30
+ positiveInteger(process.env.GITNEXUS_WORKER_SUB_BATCH_MAX_BYTES) ??
31
+ SUB_BATCH_MAX_BYTES,
32
+ subBatchIdleTimeoutMs: positiveInteger(options.subBatchIdleTimeoutMs) ??
33
+ positiveInteger(process.env.GITNEXUS_WORKER_SUB_BATCH_TIMEOUT_MS) ??
34
+ DEFAULT_SUB_BATCH_IDLE_TIMEOUT_MS,
35
+ maxTimeoutRetries: nonNegativeInteger(options.maxTimeoutRetries) ?? DEFAULT_TIMEOUT_RETRIES,
36
+ timeoutBackoffFactor: positiveInteger(options.timeoutBackoffFactor) ?? DEFAULT_TIMEOUT_BACKOFF_FACTOR,
37
+ };
38
+ }
39
+ function estimateItemBytes(item) {
40
+ if (typeof item !== 'object' || item === null)
41
+ return 0;
42
+ const content = item.content;
43
+ return typeof content === 'string' ? Buffer.byteLength(content, 'utf8') : 0;
44
+ }
45
+ function itemPath(item) {
46
+ if (typeof item !== 'object' || item === null)
47
+ return undefined;
48
+ const path = item.path;
49
+ return typeof path === 'string' ? path : undefined;
50
+ }
51
+ function createJobs(items, maxItems, maxBytes, timeoutMs) {
52
+ const jobs = [];
53
+ let startIndex = 0;
54
+ let batch = [];
55
+ let batchBytes = 0;
56
+ const flush = () => {
57
+ if (batch.length === 0)
58
+ return;
59
+ jobs.push({
60
+ startIndex,
61
+ items: batch,
62
+ estimatedBytes: batchBytes,
63
+ attempt: 0,
64
+ splitDepth: 0,
65
+ timeoutMs,
66
+ });
67
+ startIndex += batch.length;
68
+ batch = [];
69
+ batchBytes = 0;
70
+ };
71
+ for (const item of items) {
72
+ const itemBytes = estimateItemBytes(item);
73
+ const wouldExceedItems = batch.length >= maxItems;
74
+ const wouldExceedBytes = batch.length > 0 && batchBytes + itemBytes > maxBytes;
75
+ if (wouldExceedItems || wouldExceedBytes)
76
+ flush();
77
+ batch.push(item);
78
+ batchBytes += itemBytes;
79
+ }
80
+ flush();
81
+ return jobs;
82
+ }
13
83
  /**
14
84
  * Create a pool of worker threads.
15
85
  */
16
- export const createWorkerPool = (workerUrl, poolSize) => {
86
+ export const createWorkerPool = (workerUrl, poolSize, options) => {
17
87
  // Validate worker script exists before spawning to prevent uncaught
18
88
  // MODULE_NOT_FOUND crashes in worker threads (e.g. when running from src/ via vitest)
19
89
  const workerPath = fileURLToPath(workerUrl);
@@ -21,102 +91,228 @@ export const createWorkerPool = (workerUrl, poolSize) => {
21
91
  throw new Error(`Worker script not found: ${workerPath}`);
22
92
  }
23
93
  const size = poolSize ?? Math.min(8, Math.max(1, os.cpus().length - 1));
94
+ const poolOptions = resolveWorkerPoolOptions(options);
24
95
  const workers = [];
96
+ let poolBroken = false;
97
+ let poolFailure;
25
98
  for (let i = 0; i < size; i++) {
26
99
  workers.push(new Worker(workerUrl));
27
100
  }
28
101
  const dispatch = (items, onProgress) => {
102
+ if (poolBroken) {
103
+ const reason = poolFailure ? `: ${poolFailure.message}` : '';
104
+ return Promise.reject(new Error(`Worker pool is unavailable after a previous failure${reason}`));
105
+ }
29
106
  if (items.length === 0)
30
107
  return Promise.resolve([]);
31
- const chunkSize = Math.ceil(items.length / size);
32
- const chunks = [];
33
- for (let i = 0; i < items.length; i += chunkSize) {
34
- chunks.push(items.slice(i, i + chunkSize));
35
- }
36
- const workerProgress = new Array(chunks.length).fill(0);
37
- const promises = chunks.map((chunk, i) => {
38
- const worker = workers[i];
39
- return new Promise((resolve, reject) => {
108
+ if (workers.length === 0)
109
+ return Promise.reject(new Error('Worker pool has no active workers'));
110
+ const jobs = createJobs(items, poolOptions.subBatchSize, poolOptions.subBatchMaxBytes, poolOptions.subBatchIdleTimeoutMs);
111
+ return new Promise((resolve, reject) => {
112
+ const results = [];
113
+ const inFlightProgress = new Array(workers.length).fill(0);
114
+ let completedFiles = 0;
115
+ let activeWorkers = 0;
116
+ let stopped = false;
117
+ let maxReported = 0;
118
+ const reportProgress = () => {
119
+ if (!onProgress)
120
+ return;
121
+ const inFlight = inFlightProgress.reduce((sum, value) => sum + value, 0);
122
+ const next = Math.min(items.length, Math.max(maxReported, completedFiles + inFlight));
123
+ if (next === maxReported)
124
+ return;
125
+ maxReported = next;
126
+ onProgress(next);
127
+ };
128
+ const replaceWorker = async (workerIndex) => {
129
+ const worker = workers[workerIndex];
130
+ await worker?.terminate().catch(() => undefined);
131
+ if (!stopped)
132
+ workers[workerIndex] = new Worker(workerUrl);
133
+ };
134
+ const fail = async (err) => {
135
+ poolBroken = true;
136
+ poolFailure = err;
137
+ if (stopped)
138
+ return;
139
+ stopped = true;
140
+ await Promise.all(workers.map((worker) => worker.terminate().catch(() => undefined)));
141
+ reject(err);
142
+ };
143
+ const maybeDone = () => {
144
+ if (stopped)
145
+ return;
146
+ if (jobs.length === 0 && activeWorkers === 0) {
147
+ stopped = true;
148
+ results.sort((a, b) => a.startIndex - b.startIndex);
149
+ if (onProgress && maxReported < items.length)
150
+ onProgress(items.length);
151
+ resolve(results.map((result) => result.data));
152
+ }
153
+ };
154
+ const requeueAfterTimeout = (workerIndex, job, lastProgress) => {
155
+ const nextTimeout = Math.ceil(job.timeoutMs * poolOptions.timeoutBackoffFactor);
156
+ if (job.items.length > 1) {
157
+ const midpoint = Math.ceil(job.items.length / 2);
158
+ const firstItems = job.items.slice(0, midpoint);
159
+ const secondItems = job.items.slice(midpoint);
160
+ const first = {
161
+ startIndex: job.startIndex,
162
+ items: firstItems,
163
+ estimatedBytes: firstItems.reduce((sum, item) => sum + estimateItemBytes(item), 0),
164
+ attempt: job.attempt,
165
+ splitDepth: job.splitDepth + 1,
166
+ timeoutMs: nextTimeout,
167
+ };
168
+ const second = {
169
+ startIndex: job.startIndex + midpoint,
170
+ items: secondItems,
171
+ estimatedBytes: secondItems.reduce((sum, item) => sum + estimateItemBytes(item), 0),
172
+ attempt: job.attempt,
173
+ splitDepth: job.splitDepth + 1,
174
+ timeoutMs: nextTimeout,
175
+ };
176
+ console.warn(`Worker ${workerIndex} parse job idle timeout after ${job.timeoutMs / 1000}s ` +
177
+ `(${job.items.length} items, ${job.estimatedBytes} bytes, last progress: ${lastProgress}). ` +
178
+ `Splitting into ${first.items.length}/${second.items.length} item jobs with ` +
179
+ `${nextTimeout / 1000}s timeout.`);
180
+ // Preserve intuitive retry order; final result order is still enforced by startIndex sort.
181
+ jobs.unshift(first, second);
182
+ return true;
183
+ }
184
+ const nextAttempt = job.attempt + 1;
185
+ if (nextAttempt <= poolOptions.maxTimeoutRetries) {
186
+ console.warn(`Worker ${workerIndex} parse job idle timeout after ${job.timeoutMs / 1000}s ` +
187
+ `(single item, attempt ${nextAttempt}/${poolOptions.maxTimeoutRetries + 1}). ` +
188
+ `Retrying with ${nextTimeout / 1000}s timeout.`);
189
+ jobs.unshift({
190
+ ...job,
191
+ attempt: nextAttempt,
192
+ timeoutMs: nextTimeout,
193
+ });
194
+ return true;
195
+ }
196
+ void fail(new Error(`Worker ${workerIndex} parse job idle timeout after ${job.timeoutMs / 1000}s ` +
197
+ `(single item${itemPath(job.items[0]) ? `: ${itemPath(job.items[0])}` : ''}, ` +
198
+ `${job.estimatedBytes} bytes, last progress: ${lastProgress}). ` +
199
+ `Analyze will retry through sequential fallback. Increase with ` +
200
+ `--worker-timeout or GITNEXUS_WORKER_SUB_BATCH_TIMEOUT_MS.`));
201
+ return false;
202
+ };
203
+ const runWorker = (workerIndex) => {
204
+ if (stopped)
205
+ return;
206
+ const job = jobs.shift();
207
+ if (!job) {
208
+ maybeDone();
209
+ return;
210
+ }
211
+ activeWorkers++;
212
+ inFlightProgress[workerIndex] = 0;
213
+ const worker = workers[workerIndex];
40
214
  let settled = false;
41
- let subBatchTimer = null;
215
+ let waitingForFlush = false;
216
+ let idleTimer = null;
217
+ let lastProgress = 0;
42
218
  const cleanup = () => {
43
- if (subBatchTimer)
44
- clearTimeout(subBatchTimer);
219
+ if (idleTimer)
220
+ clearTimeout(idleTimer);
45
221
  worker.removeListener('message', handler);
46
222
  worker.removeListener('error', errorHandler);
47
223
  worker.removeListener('exit', exitHandler);
48
224
  };
49
- const resetSubBatchTimer = () => {
50
- if (subBatchTimer)
51
- clearTimeout(subBatchTimer);
52
- subBatchTimer = setTimeout(() => {
225
+ const finishJob = () => {
226
+ activeWorkers--;
227
+ inFlightProgress[workerIndex] = 0;
228
+ runWorker(workerIndex);
229
+ maybeDone();
230
+ };
231
+ const resetIdleTimer = () => {
232
+ if (idleTimer)
233
+ clearTimeout(idleTimer);
234
+ idleTimer = setTimeout(async () => {
53
235
  if (!settled) {
54
236
  settled = true;
55
237
  cleanup();
56
- reject(new Error(`Worker ${i} sub-batch timed out after ${SUB_BATCH_TIMEOUT_MS / 1000}s (chunk: ${chunk.length} items).`));
238
+ activeWorkers--;
239
+ inFlightProgress[workerIndex] = 0;
240
+ const shouldContinue = requeueAfterTimeout(workerIndex, job, lastProgress);
241
+ if (!shouldContinue)
242
+ return;
243
+ await replaceWorker(workerIndex);
244
+ reportProgress();
245
+ runWorker(workerIndex);
246
+ maybeDone();
57
247
  }
58
- }, SUB_BATCH_TIMEOUT_MS);
59
- };
60
- let subBatchIdx = 0;
61
- const sendNextSubBatch = () => {
62
- const start = subBatchIdx * SUB_BATCH_SIZE;
63
- if (start >= chunk.length) {
64
- worker.postMessage({ type: 'flush' });
65
- return;
66
- }
67
- const subBatch = chunk.slice(start, start + SUB_BATCH_SIZE);
68
- subBatchIdx++;
69
- resetSubBatchTimer();
70
- worker.postMessage({ type: 'sub-batch', files: subBatch });
248
+ }, job.timeoutMs);
71
249
  };
72
250
  const handler = (msg) => {
73
- if (settled)
251
+ if (settled || stopped)
74
252
  return;
75
253
  if (msg.type === 'progress') {
76
- workerProgress[i] = msg.filesProcessed;
77
- if (onProgress) {
78
- const total = workerProgress.reduce((a, b) => a + b, 0);
79
- onProgress(total);
80
- }
254
+ const bounded = Math.min(job.items.length, Math.max(0, msg.filesProcessed));
255
+ inFlightProgress[workerIndex] = bounded;
256
+ lastProgress = bounded;
257
+ resetIdleTimer();
258
+ reportProgress();
81
259
  }
82
260
  else if (msg.type === 'warning') {
261
+ resetIdleTimer();
83
262
  console.warn(msg.message);
84
263
  }
85
264
  else if (msg.type === 'sub-batch-done') {
86
- sendNextSubBatch();
265
+ waitingForFlush = true;
266
+ resetIdleTimer();
267
+ worker.postMessage({ type: 'flush' });
87
268
  }
88
269
  else if (msg.type === 'error') {
89
270
  settled = true;
90
271
  cleanup();
91
- reject(new Error(`Worker ${i} error: ${msg.error}`));
272
+ void fail(new Error(`Worker ${workerIndex} error: ${msg.error}`));
92
273
  }
93
274
  else if (msg.type === 'result') {
275
+ if (!waitingForFlush) {
276
+ settled = true;
277
+ cleanup();
278
+ void fail(new Error(`Worker ${workerIndex} protocol error: result before flush`));
279
+ return;
280
+ }
94
281
  settled = true;
95
282
  cleanup();
96
- resolve(msg.data);
283
+ results.push({ startIndex: job.startIndex, data: msg.data });
284
+ completedFiles += job.items.length;
285
+ reportProgress();
286
+ finishJob();
97
287
  }
98
288
  };
99
289
  const errorHandler = (err) => {
100
290
  if (!settled) {
101
291
  settled = true;
102
292
  cleanup();
103
- reject(err);
293
+ void fail(err);
104
294
  }
105
295
  };
106
296
  const exitHandler = (code) => {
107
297
  if (!settled) {
108
298
  settled = true;
109
299
  cleanup();
110
- reject(new Error(`Worker ${i} exited with code ${code}. Likely OOM or native addon failure.`));
300
+ void fail(new Error(`Worker ${workerIndex} exited with code ${code}. Likely OOM or native addon failure.`));
111
301
  }
112
302
  };
113
303
  worker.on('message', handler);
114
304
  worker.once('error', errorHandler);
115
305
  worker.once('exit', exitHandler);
116
- sendNextSubBatch();
117
- });
306
+ resetIdleTimer();
307
+ if (stopped) {
308
+ cleanup();
309
+ return;
310
+ }
311
+ worker.postMessage({ type: 'sub-batch', files: job.items });
312
+ };
313
+ for (let i = 0; i < workers.length; i++)
314
+ runWorker(i);
118
315
  });
119
- return Promise.all(promises);
120
316
  };
121
317
  const terminate = async () => {
122
318
  await Promise.all(workers.map((w) => w.terminate()));
@@ -140,7 +140,8 @@ export async function runFullAnalysis(repoPath, options, callbacks) {
140
140
  const pipelineResult = await runPipelineFromRepo(repoPath, (p) => {
141
141
  const phaseLabel = PHASE_LABELS[p.phase] || p.phase;
142
142
  const scaled = Math.round(p.percent * 0.6);
143
- progress(p.phase, scaled, phaseLabel);
143
+ const message = p.detail ? `${p.message || phaseLabel} (${p.detail})` : p.message || phaseLabel;
144
+ progress(p.phase, scaled, message);
144
145
  });
145
146
  // ── Phase 2: LadybugDB (60–85%) ──────────────────────────────────
146
147
  progress('lbug', 60, 'Loading into LadybugDB...');
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gitnexus",
3
- "version": "1.6.4-rc.15",
3
+ "version": "1.6.4-rc.17",
4
4
  "description": "Graph-powered code intelligence for AI agents. Index any codebase, query via MCP or CLI.",
5
5
  "author": "Abhigyan Patwari",
6
6
  "license": "PolyForm-Noncommercial-1.0.0",
@@ -47,7 +47,7 @@
47
47
  "test:integration": "vitest run test/integration",
48
48
  "test:watch": "vitest",
49
49
  "test:coverage": "vitest run --coverage",
50
- "postinstall": "node scripts/patch-tree-sitter-swift.cjs && node scripts/build-tree-sitter-proto.cjs",
50
+ "postinstall": "node scripts/patch-tree-sitter-swift.cjs && node scripts/build-tree-sitter-dart.cjs && node scripts/build-tree-sitter-proto.cjs",
51
51
  "prepare": "node scripts/build.js",
52
52
  "prepack": "node scripts/build.js"
53
53
  },
@@ -88,7 +88,7 @@
88
88
  "optionalDependencies": {
89
89
  "node-addon-api": "^8.0.0",
90
90
  "node-gyp-build": "^4.8.0",
91
- "tree-sitter-dart": "git+https://github.com/UserNobody14/tree-sitter-dart.git#80e23c07b64494f7e21090bb3450223ef0b192f4",
91
+ "tree-sitter-dart": "file:./vendor/tree-sitter-dart",
92
92
  "tree-sitter-kotlin": "^0.3.8",
93
93
  "tree-sitter-proto": "file:./vendor/tree-sitter-proto",
94
94
  "tree-sitter-swift": "^0.6.0"
@@ -0,0 +1,42 @@
1
+ #!/usr/bin/env node
2
+ const fs = require('fs');
3
+ const path = require('path');
4
+ const { execSync } = require('child_process');
5
+
6
+ const dartDir = path.join(__dirname, '..', 'node_modules', 'tree-sitter-dart');
7
+ const bindingGyp = path.join(dartDir, 'binding.gyp');
8
+ const bindingNode = path.join(dartDir, 'build', 'Release', 'tree_sitter_dart_binding.node');
9
+
10
+ try {
11
+ if (!fs.existsSync(bindingGyp) || fs.existsSync(bindingNode)) {
12
+ process.exit(0);
13
+ }
14
+
15
+ try {
16
+ require.resolve('node-addon-api');
17
+ require.resolve('node-gyp-build');
18
+ } catch (resolveErr) {
19
+ console.warn(
20
+ '[tree-sitter-dart] Skipping build: hoisted build deps not resolvable (%s).',
21
+ resolveErr.message,
22
+ );
23
+ console.warn(
24
+ '[tree-sitter-dart] Dart parsing will be unavailable. Install without --no-optional and with scripts enabled to build.',
25
+ );
26
+ process.exit(0);
27
+ }
28
+
29
+ console.log('[tree-sitter-dart] Building native binding...');
30
+ execSync('npx node-gyp rebuild', {
31
+ cwd: dartDir,
32
+ stdio: 'pipe',
33
+ timeout: 180000,
34
+ });
35
+ console.log('[tree-sitter-dart] Native binding built successfully');
36
+ } catch (err) {
37
+ console.warn('[tree-sitter-dart] Could not build native binding:', err.message);
38
+ console.warn(
39
+ '[tree-sitter-dart] Dart parsing will be unavailable. Non-Dart functionality is unaffected.',
40
+ );
41
+ process.exit(0);
42
+ }
@@ -0,0 +1,18 @@
1
+ # tree-sitter-dart
2
+ This is a tree-sitter grammar written for the Dart programming language. We attempt to adhere as closely as possible to the Dart language spec. It was initially started from a copy of the tree-sitter Java grammar, which is why a few relics may remain. To simplify the syntax tree, many items were inlined using tree-sitter's "underscore" method of writing rules.
3
+
4
+ # Getting Started
5
+ - Go to the project directory
6
+ - run `npm install` (first time)
7
+ - run `npm run build_init` (first time) or `npm run build` (subsequent times)
8
+ - run `npm run test`
9
+
10
+ # To test a single highlight file
11
+ - run `tree-sitter highlight test/highlight/types.dart`
12
+
13
+ # To test a single test file
14
+ - run `tree-sitter test -f 'testcasefilter'`
15
+ - for example `tree-sitter test -f 'dart string literals'`
16
+
17
+ # To show the output of a parse for a sample file (for example while debugging highlight issues)
18
+ - run `tree-sitter parse path/to/file.dart`
@@ -0,0 +1,31 @@
1
+ {
2
+ "targets": [
3
+ {
4
+ "target_name": "tree_sitter_dart_binding",
5
+ "dependencies": [
6
+ "<!(node -p \"require('node-addon-api').targets\"):node_addon_api_except",
7
+ ],
8
+ "include_dirs": [
9
+ "src",
10
+ ],
11
+ "sources": [
12
+ "bindings/node/binding.cc",
13
+ "src/parser.c",
14
+ # NOTE: if your language has an external scanner, add it here.
15
+ "src/scanner.c",
16
+ ],
17
+ "conditions": [
18
+ ["OS!='win'", {
19
+ "cflags_c": [
20
+ "-std=c11",
21
+ ],
22
+ }, { # OS == "win"
23
+ "cflags_c": [
24
+ "/std:c11",
25
+ "/utf-8",
26
+ ],
27
+ }],
28
+ ],
29
+ }
30
+ ]
31
+ }