@softerist/heuristic-mcp 3.0.12 → 3.0.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -24,13 +24,51 @@ console.info = (...args) => console.error('[INFO]', ...args);
 console.warn = (...args) => console.error('[WARN]', ...args);

 import { RESULT_BATCH_SIZE, DEFAULT_INFERENCE_BATCH_SIZE } from './constants.js';
-const workerId = Number.isInteger(workerData.workerId) ? workerData.workerId : null;
-const workerLabel = workerId === null ? '[Worker]' : `[Worker ${workerId}]`;
-const workerThreads = Number.isFinite(workerData.numThreads) ? workerData.numThreads : 1;
-const logInfo = (...args) => {
-  console.info(...args);
-};
-let nativeBackendConfigured = false;
+const workerId = Number.isInteger(workerData.workerId) ? workerData.workerId : null;
+const workerLabel = workerId === null ? '[Worker]' : `[Worker ${workerId}]`;
+const workerThreads = Number.isFinite(workerData.numThreads) ? workerData.numThreads : 1;
+const explicitGcEnabled = workerData.enableExplicitGc !== false;
+const failFastEmbeddingErrors = workerData.failFastEmbeddingErrors === true;
+const FAIL_FAST_CONSECUTIVE_ERROR_LIMIT = 8;
+const logInfo = (...args) => {
+  console.info(...args);
+};
+let nativeBackendConfigured = false;
+
+function maybeRunGc() {
+  if (!explicitGcEnabled || typeof global.gc !== 'function') return;
+  global.gc();
+}
+
+function createFailFastState(scope) {
+  if (!failFastEmbeddingErrors) return null;
+  return { scope, consecutiveFailures: 0 };
+}
+
+function noteEmbeddingSuccess(failFastState) {
+  if (!failFastState) return;
+  failFastState.consecutiveFailures = 0;
+}
+
+function noteEmbeddingFailure(failFastState, err) {
+  if (!failFastState) return;
+  failFastState.consecutiveFailures += 1;
+
+  if (failFastState.consecutiveFailures >= FAIL_FAST_CONSECUTIVE_ERROR_LIMIT) {
+    const message =
+      `${failFastState.scope}: fail-fast breaker tripped after ` +
+      `${failFastState.consecutiveFailures} consecutive embedding failures (${err?.message || err})`;
+    console.warn(`${workerLabel} ${message}`);
+    throw new Error(message);
+  }
+
+  if (workerData.verbose) {
+    console.warn(
+      `${workerLabel} ${failFastState.scope}: embedding failure ` +
+      `${failFastState.consecutiveFailures}/${FAIL_FAST_CONSECUTIVE_ERROR_LIMIT}`
+    );
+  }
+}

 function ensureNativeBackend() {
   if (nativeBackendConfigured) return;
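The hunk above adds two new workerData switches: enableExplicitGc (explicit GC stays on unless it is set to false) and failFastEmbeddingErrors (an opt-in breaker that aborts after 8 consecutive embedding failures). Below is a minimal, hypothetical sketch of how a host thread might pass these flags when spawning the worker; the worker file name and model id are assumptions, only the workerData keys come from the diff.

import { Worker } from 'node:worker_threads';

// Hypothetical spawn site; './embedding-worker.js' is a placeholder path.
const worker = new Worker(new URL('./embedding-worker.js', import.meta.url), {
  // --expose-gc makes global.gc available so maybeRunGc() can actually run.
  execArgv: ['--expose-gc'],
  workerData: {
    workerId: 0,
    numThreads: 4,
    embeddingModel: 'Xenova/all-MiniLM-L6-v2', // assumption: any feature-extraction model id
    embeddingDimension: null,                  // no MRL slicing
    inferenceBatchSize: 4,
    verbose: false,
    enableExplicitGc: true,         // default behaviour; false skips all manual GC calls
    failFastEmbeddingErrors: true,  // opt in to the 8-failure breaker
  },
});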
@@ -134,7 +172,7 @@ const embeddingDimension = workerData.embeddingDimension || null;
 // Use a promise to handle concurrent calls to initializeEmbedder safely
 let embedderPromise = null;

-async function initializeEmbedder() {
+async function initializeEmbedder() {
   if (!embedderPromise) {
     const modelLoadStart = Date.now();

@@ -145,11 +183,16 @@ async function initializeEmbedder() {

     embedderPromise = (async () => {
       try {
-        ensureNativeBackend();
-        const model = await pipeline('feature-extraction', workerData.embeddingModel, {
-          quantized: true,
-          dtype: 'fp32',
-        });
+        ensureNativeBackend();
+        const model = await pipeline('feature-extraction', workerData.embeddingModel, {
+          quantized: true,
+          dtype: 'fp32',
+          session_options: {
+            numThreads: workerThreads,
+            intraOpNumThreads: workerThreads,
+            interOpNumThreads: 1,
+          },
+        });
         const loadSeconds = ((Date.now() - modelLoadStart) / 1000).toFixed(1);
         logInfo(
           `${workerLabel} Embedding model ready: ${workerData.embeddingModel} (${loadSeconds}s)`
@@ -161,17 +204,48 @@ async function initializeEmbedder() {
       }
     })();
   }
-  return embedderPromise;
-}
-
-/**
- * Legacy Protocol: Process chunks with optimized single-text embedding
- * Streams results in batches.
- */
-async function processChunks(chunks, batchId) {
-  const embedder = await initializeEmbedder();
-  let results = [];
-  let transferList = [];
+  return embedderPromise;
+}
+
+function isFatalRuntimeEmbeddingError(err) {
+  const message = String(err?.message || err || '').toLowerCase();
+  return (
+    message.includes('exception is pending') ||
+    message.includes('invalid embedding output') ||
+    message.includes("cannot read properties of undefined (reading 'data')") ||
+    message.includes("cannot read properties of null (reading 'data')")
+  );
+}
+
+function getEmbeddingTensor(output, { requireDimsForBatch = false, batchSize = null } = {}) {
+  const data = output?.data;
+  if (!data || typeof data.length !== 'number') {
+    throw new Error('Invalid embedding output: missing tensor data');
+  }
+  if (!requireDimsForBatch) {
+    return { data };
+  }
+
+  const dims = Array.isArray(output?.dims) ? output.dims : null;
+  const hiddenSize = Number.isInteger(dims?.[dims.length - 1]) ? dims[dims.length - 1] : null;
+  if (!hiddenSize || hiddenSize <= 0) {
+    throw new Error('Invalid embedding output: missing tensor dims');
+  }
+  if (Number.isInteger(batchSize) && batchSize > 0 && data.length < hiddenSize * batchSize) {
+    throw new Error('Invalid embedding output: tensor length mismatch');
+  }
+  return { data, hiddenSize };
+}
+
+/**
+ * Legacy Protocol: Process chunks with optimized single-text embedding
+ * Streams results in batches.
+ */
+async function processChunks(chunks, batchId) {
+  const embedder = await initializeEmbedder();
+  let results = [];
+  let transferList = [];
+  const failFastState = createFailFastState('legacy chunk embedding');

   const flush = (done = false) => {
     // Only flush intermediate results when we have enough for a batch
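The new getEmbeddingTensor() helper codifies the tensor layout the worker relies on: a batch result exposes a flat data array whose last dim is the hidden size, so chunk j occupies [j * hiddenSize, (j + 1) * hiddenSize). A self-contained illustration with a fake output object (not package code):

// Fake batch output shaped like the tensors the worker validates.
const batchSize = 3;
const hiddenSize = 4;
const fakeOutput = {
  dims: [batchSize, hiddenSize],
  data: Float32Array.from({ length: batchSize * hiddenSize }, (_, i) => i),
};

const hidden = fakeOutput.dims[fakeOutput.dims.length - 1];
for (let j = 0; j < batchSize; j++) {
  // Same slicing the worker performs before deep-copying each vector.
  const vector = new Float32Array(fakeOutput.data.subarray(j * hidden, (j + 1) * hidden));
  console.log(`chunk ${j}:`, Array.from(vector));
}
// With missing data/dims, or data.length < batchSize * hiddenSize, the worker now
// throws an 'Invalid embedding output: ...' error instead of emitting bad vectors.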
@@ -196,15 +270,16 @@ async function processChunks(chunks, batchId) {
   };

   for (const chunk of chunks) {
-    try {
-      const output = await embedder(chunk.text, {
-        pooling: 'mean',
-        normalize: true,
-      });
-      // CRITICAL: Deep copy to release ONNX tensor memory
-      let vector = new Float32Array(output.data);
-      // Apply MRL dimension slicing if configured
-      vector = sliceAndNormalize(vector, embeddingDimension);
+    try {
+      const output = await embedder(chunk.text, {
+        pooling: 'mean',
+        normalize: true,
+      });
+      // CRITICAL: Deep copy to release ONNX tensor memory
+      const { data } = getEmbeddingTensor(output);
+      let vector = new Float32Array(data);
+      // Apply MRL dimension slicing if configured
+      vector = sliceAndNormalize(vector, embeddingDimension);
       // Properly dispose tensor to release ONNX runtime memory
       if (typeof output.dispose === 'function')
         try {
@@ -214,34 +289,37 @@ async function processChunks(chunks, batchId) {
            console.warn(`${workerLabel} Failed to dispose tensor: ${disposeErr.message}`);
          }
        }
-      results.push({
-        file: chunk.file,
-        startLine: chunk.startLine,
-        endLine: chunk.endLine,
-        content: chunk.text,
-        vector,
-        success: true,
-      });
-      transferList.push(vector.buffer);
-    } catch (error) {
-      results.push({
-        file: chunk.file,
-        startLine: chunk.startLine,
-        endLine: chunk.endLine,
-        error: error.message,
-        success: false,
-      });
-    }
-    flush();
-  }
-
-  flush(true);
-
-  // Force GC if available to free massive tensor buffers immediately
-  if (typeof global.gc === 'function') {
-    global.gc();
-  }
-}
+      results.push({
+        file: chunk.file,
+        startLine: chunk.startLine,
+        endLine: chunk.endLine,
+        content: chunk.text,
+        vector,
+        success: true,
+      });
+      transferList.push(vector.buffer);
+      noteEmbeddingSuccess(failFastState);
+    } catch (error) {
+      results.push({
+        file: chunk.file,
+        startLine: chunk.startLine,
+        endLine: chunk.endLine,
+        error: error.message,
+        success: false,
+      });
+      noteEmbeddingFailure(failFastState, error);
+      if (isFatalRuntimeEmbeddingError(error)) {
+        throw error;
+      }
+    }
+    flush();
+  }
+
+  flush(true);
+
+  // Force GC if available to free massive tensor buffers immediately
+  maybeRunGc();
+}

 // =====================================================================
 // SHARED HELPER FUNCTIONS
@@ -356,8 +434,9 @@ function processFileMetadata(file, content, options) {
  * New Protocol: Process entire file (read, chunk, embed) in worker.
  * Returns results once processing is complete.
  */
-async function processFileTask(message) {
-  const embedder = await initializeEmbedder();
+async function processFileTask(message) {
+  const embedder = await initializeEmbedder();
+  const failFastState = createFailFastState(`file-task ${path.basename(message.file || '')}`);

   const file = message.file;
   const force = !!message.force;
@@ -409,33 +488,38 @@ async function processFileTask(message) {

   // Batch size for inference (balance between speed and memory)
   // Configurable via workerData, default 4 balances memory and throughput
-  const INFERENCE_BATCH_SIZE = Number.isInteger(workerData.inferenceBatchSize)
-    ? workerData.inferenceBatchSize
-    : DEFAULT_INFERENCE_BATCH_SIZE;
-
-  for (let i = 0; i < chunks.length; i += INFERENCE_BATCH_SIZE) {
+  const INFERENCE_BATCH_SIZE = Number.isInteger(workerData.inferenceBatchSize)
+    ? workerData.inferenceBatchSize
+    : DEFAULT_INFERENCE_BATCH_SIZE;
+  let processedSinceGc = 0;
+
+  for (let i = 0; i < chunks.length; i += INFERENCE_BATCH_SIZE) {
     const batchChunks = chunks.slice(i, i + INFERENCE_BATCH_SIZE);
     const batchTexts = batchChunks.map((c) => c.text);

-    try {
-      // Run inference on the batch
-      const output = await embedder(batchTexts, {
-        pooling: 'mean',
-        normalize: true,
-      });
-
-      // Output is a Tensor with shape [batch_size, hidden_size]
-      // data is a flat Float32Array
-      const hiddenSize = output.dims[output.dims.length - 1];
-
-      for (let j = 0; j < batchChunks.length; j++) {
-        const c = batchChunks[j];
-
-        // Slice the flat buffer to get this chunk's vector
-        // specific slice for this element
-        const start = j * hiddenSize;
-        const end = start + hiddenSize;
-        const vectorView = output.data.subarray(start, end);
+    try {
+      // Run inference on the batch
+      const output = await embedder(batchTexts, {
+        pooling: 'mean',
+        normalize: true,
+      });
+
+      // Output is a Tensor with shape [batch_size, hidden_size]
+      // data is a flat Float32Array
+      const { data, hiddenSize } = getEmbeddingTensor(output, {
+        requireDimsForBatch: true,
+        batchSize: batchChunks.length,
+      });
+
+      for (let j = 0; j < batchChunks.length; j++) {
+        const c = batchChunks[j];
+
+        // Slice the flat buffer to get this chunk's vector
+        // specific slice for this element
+        const start = j * hiddenSize;
+        const end = start + hiddenSize;
+        const vectorView =
+          typeof data.subarray === 'function' ? data.subarray(start, end) : data.slice(start, end);

         // Deep copy to ensure independent buffer for transfer
         let vector = new Float32Array(vectorView);
@@ -451,22 +535,29 @@ async function processFileTask(message) {
         transferList.push(vector.buffer);
       }
       // Properly dispose tensor to release ONNX runtime memory
-      if (typeof output.dispose === 'function')
-        try {
-          output.dispose();
-        } catch (disposeErr) {
-          if (workerData.verbose) {
-            console.warn(`${workerLabel} Failed to dispose tensor: ${disposeErr.message}`);
-          }
-        }
-    } catch (err) {
-      // Fallback: if batch fails (e.g. OOM), try one by one for this batch
-      console.warn(`${workerLabel} Batch inference failed (${err.name}), retrying individually: ${err.message}`);
-
-      for (const c of batchChunks) {
-        try {
-          const output = await embedder(c.text, { pooling: 'mean', normalize: true });
-          let vector = new Float32Array(output.data);
+      if (typeof output.dispose === 'function')
+        try {
+          output.dispose();
+        } catch (disposeErr) {
+          if (workerData.verbose) {
+            console.warn(`${workerLabel} Failed to dispose tensor: ${disposeErr.message}`);
+          }
+        }
+      noteEmbeddingSuccess(failFastState);
+    } catch (err) {
+      if (isFatalRuntimeEmbeddingError(err)) {
+        noteEmbeddingFailure(failFastState, err);
+        throw err;
+      }
+      // Fallback: if batch fails (e.g. OOM), try one by one for this batch
+      console.warn(`${workerLabel} Batch inference failed (${err.name}), retrying individually: ${err.message}`);
+      noteEmbeddingFailure(failFastState, err);
+
+      for (const c of batchChunks) {
+        try {
+          const output = await embedder(c.text, { pooling: 'mean', normalize: true });
+          const { data } = getEmbeddingTensor(output);
+          let vector = new Float32Array(data);
           // Apply MRL dimension slicing if configured
           vector = sliceAndNormalize(vector, embeddingDimension);
           // Properly dispose tensor to release ONNX runtime memory
@@ -478,27 +569,36 @@ async function processFileTask(message) {
                console.warn(`${workerLabel} Failed to dispose tensor: ${disposeErr.message}`);
              }
            }
-          results.push({
-            startLine: c.startLine,
-            endLine: c.endLine,
-            text: c.text,
-            vectorBuffer: vector.buffer,
-          });
-          transferList.push(vector.buffer);
-        } catch (innerErr) {
-          // Note: No tensor disposal needed - embedder() threw before returning a tensor
-          console.warn(`${workerLabel} Chunk embedding failed: ${innerErr.message}`);
-          // We omit this chunk from results, effectively skipping it
-        }
-      }
-    }
-
-    // Yield to event loop briefly between batches and trigger GC
-    if (chunks.length > INFERENCE_BATCH_SIZE) {
-      if (typeof global.gc === 'function') global.gc();
-      await new Promise((resolve) => setTimeout(resolve, 0));
-    }
-  }
+          results.push({
+            startLine: c.startLine,
+            endLine: c.endLine,
+            text: c.text,
+            vectorBuffer: vector.buffer,
+          });
+          transferList.push(vector.buffer);
+          noteEmbeddingSuccess(failFastState);
+        } catch (innerErr) {
+          // Note: No tensor disposal needed - embedder() threw before returning a tensor
+          console.warn(`${workerLabel} Chunk embedding failed: ${innerErr.message}`);
+          // We omit this chunk from results, effectively skipping it
+          noteEmbeddingFailure(failFastState, innerErr);
+          if (isFatalRuntimeEmbeddingError(innerErr)) {
+            throw innerErr;
+          }
+        }
+      }
+    }
+
+    // Yield to event loop briefly between batches and trigger GC
+    processedSinceGc += batchChunks.length;
+    if (chunks.length > INFERENCE_BATCH_SIZE) {
+      if (processedSinceGc >= 100) {
+        maybeRunGc();
+        processedSinceGc = 0;
+      }
+      await new Promise((resolve) => setTimeout(resolve, 0));
+    }
+  }

   return { status: 'indexed', hash, mtimeMs, size, callData, results, transferList };
 }
@@ -535,13 +635,13 @@ parentPort.on('message', async (message) => {

       // Clear references
       embedderPromise = null;
-    }
-
-    // Trigger garbage collection if available
-    if (typeof global.gc === 'function') {
-      const before = process.memoryUsage();
-      global.gc();
-      const after = process.memoryUsage();
+    }
+
+    // Trigger garbage collection if available
+    if (explicitGcEnabled && typeof global.gc === 'function') {
+      const before = process.memoryUsage();
+      global.gc();
+      const after = process.memoryUsage();
       logInfo(
         `${workerLabel} Post-unload GC: rss ${(before.rss / 1024 / 1024).toFixed(1)}MB -> ${(after.rss / 1024 / 1024).toFixed(1)}MB`
       );
@@ -572,9 +672,10 @@ parentPort.on('message', async (message) => {
   }

   // ---- Batch file processing ----
-  if (message.type === 'processFiles') {
-    const { files, batchId } = message;
-    const batchTransfer = [];
+  if (message.type === 'processFiles') {
+    const { files, batchId } = message;
+    const batchTransfer = [];
+    const failFastState = createFailFastState('cross-file batch embedding');

     // 1. Pre-process all files: Read, Stat, and Chunk
     // We do this first to gather a massive list of chunks for batched inference
@@ -659,13 +760,13 @@ parentPort.on('message', async (message) => {
         continue;
       }

-      const { hash, callData, chunks } = meta;
-      const chunkCount = chunks.length;
-
-      // Trigger GC every 100 files
-      if ((i + 1) % 100 === 0 && typeof global.gc === 'function') {
-        global.gc();
-      }
+      const { hash, callData, chunks } = meta;
+      const chunkCount = chunks.length;
+
+      // Trigger GC every 100 files
+      if ((i + 1) % 100 === 0) {
+        maybeRunGc();
+      }

       // Register chunks for batching
       if (chunks.length > 0) {
@@ -713,39 +814,52 @@ parentPort.on('message', async (message) => {
       const batchSlice = allPendingChunks.slice(i, i + INFERENCE_BATCH_SIZE);
       const batchTexts = batchSlice.map((c) => c.text);

-      try {
-        const output = await embedder(batchTexts, { pooling: 'mean', normalize: true });
-        const hiddenSize = output.dims[output.dims.length - 1];
-
-        for (let j = 0; j < batchSlice.length; j++) {
-          const start = j * hiddenSize;
-          const end = start + hiddenSize;
-          const vectorView = output.data.subarray(start, end);
-          // Deep copy the view to avoid WASM memory issues, then apply MRL slicing
-          const vector = sliceAndNormalize(new Float32Array(vectorView), embeddingDimension);
+      try {
+        const output = await embedder(batchTexts, { pooling: 'mean', normalize: true });
+        const { data, hiddenSize } = getEmbeddingTensor(output, {
+          requireDimsForBatch: true,
+          batchSize: batchSlice.length,
+        });
+
+        for (let j = 0; j < batchSlice.length; j++) {
+          const start = j * hiddenSize;
+          const end = start + hiddenSize;
+          const vectorView =
+            typeof data.subarray === 'function'
+              ? data.subarray(start, end)
+              : data.slice(start, end);
+          // Deep copy the view to avoid WASM memory issues, then apply MRL slicing
+          const vector = sliceAndNormalize(new Float32Array(vectorView), embeddingDimension);

           batchSlice[j].vectorBuffer = vector.buffer;
           batchTransfer.push(vector.buffer);
         }
         // Properly dispose tensor to release ONNX runtime memory
-        if (typeof output.dispose === 'function')
-          try {
-            output.dispose();
-          } catch (disposeErr) {
-            if (workerData.verbose) {
-              console.warn(`${workerLabel} Failed to dispose tensor: ${disposeErr.message}`);
-            }
-          }
-      } catch (err) {
-        console.warn(
-          `${workerLabel} Cross-file batch inference failed, retrying individually: ${err.message}`
-        );
-        // Fallback: individual embedding for this failed batch
-        for (const item of batchSlice) {
-          try {
-            const output = await embedder(item.text, { pooling: 'mean', normalize: true });
-            // Deep copy and apply MRL slicing
-            const vector = sliceAndNormalize(new Float32Array(output.data), embeddingDimension);
+        if (typeof output.dispose === 'function')
+          try {
+            output.dispose();
+          } catch (disposeErr) {
+            if (workerData.verbose) {
+              console.warn(`${workerLabel} Failed to dispose tensor: ${disposeErr.message}`);
+            }
+          }
+        noteEmbeddingSuccess(failFastState);
+      } catch (err) {
+        if (isFatalRuntimeEmbeddingError(err)) {
+          noteEmbeddingFailure(failFastState, err);
+          throw err;
+        }
+        console.warn(
+          `${workerLabel} Cross-file batch inference failed, retrying individually: ${err.message}`
+        );
+        noteEmbeddingFailure(failFastState, err);
+        // Fallback: individual embedding for this failed batch
+        for (const item of batchSlice) {
+          try {
+            const output = await embedder(item.text, { pooling: 'mean', normalize: true });
+            const { data } = getEmbeddingTensor(output);
+            // Deep copy and apply MRL slicing
+            const vector = sliceAndNormalize(new Float32Array(data), embeddingDimension);
             // Properly dispose tensor to release ONNX runtime memory
             if (typeof output.dispose === 'function')
               try {
@@ -755,13 +869,18 @@ parentPort.on('message', async (message) => {
                  console.warn(`${workerLabel} Failed to dispose tensor: ${disposeErr.message}`);
                }
              }
-            item.vectorBuffer = vector.buffer;
-            batchTransfer.push(vector.buffer);
-          } catch (innerErr) {
-            console.warn(`${workerLabel} Chunk embedding failed: ${innerErr.message}`);
-          }
-        }
-      }
+            item.vectorBuffer = vector.buffer;
+            batchTransfer.push(vector.buffer);
+            noteEmbeddingSuccess(failFastState);
+          } catch (innerErr) {
+            console.warn(`${workerLabel} Chunk embedding failed: ${innerErr.message}`);
+            noteEmbeddingFailure(failFastState, innerErr);
+            if (isFatalRuntimeEmbeddingError(innerErr)) {
+              throw innerErr;
+            }
+          }
+        }
+      }

       // Minimal yield to keep event loop breathing (optional, can be removed for max throughput)
       if (allPendingChunks.length > 50 && i % 50 === 0) {
@@ -825,10 +944,10 @@ parentPort.on('message', async (message) => {
       batchTransfer
     );

-    // Explicitly clear references and trigger GC
-    batchTransfer.length = 0;
-    if (global.gc) global.gc();
-    return;
+    // Explicitly clear references and trigger GC
+    batchTransfer.length = 0;
+    maybeRunGc();
+    return;
   }

   // ---- Legacy protocol: batch of chunks prepared by main thread ----
@@ -56,7 +56,7 @@ export class ProjectDetector {

     for (const [type, marker] of discoveredTypes) {
       this.detectedTypes.add(type);
-      console.error(`[Detector] Detected ${type} project (${marker})`);
+      console.info(`[Detector] Detected ${type} project (${marker})`);
     }

     return Array.from(this.detectedTypes);