gitnexus 1.6.4-rc.20 → 1.6.4-rc.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -47,5 +47,9 @@ export interface AnalyzeOptions {
47
47
  maxFileSize?: string;
48
48
  /** Override worker sub-batch idle timeout in seconds. */
49
49
  workerTimeout?: string;
50
+ embeddingThreads?: string;
51
+ embeddingBatchSize?: string;
52
+ embeddingSubBatchSize?: string;
53
+ embeddingDevice?: string;
50
54
  }
51
55
  export declare const analyzeCommand: (inputPath?: string, options?: AnalyzeOptions) => Promise<void>;
@@ -64,6 +64,32 @@ export const analyzeCommand = async (inputPath, options) => {
64
64
  }
65
65
  process.env.GITNEXUS_WORKER_SUB_BATCH_TIMEOUT_MS = String(Math.round(workerTimeoutSeconds * 1000));
66
66
  }
67
+ const setPositiveEnv = (optionName, envName, value) => {
68
+ if (value === undefined)
69
+ return true;
70
+ const parsed = Number(value);
71
+ if (!Number.isInteger(parsed) || parsed <= 0) {
72
+ console.error(` ${optionName} must be a positive integer.\n`);
73
+ process.exitCode = 1;
74
+ return false;
75
+ }
76
+ process.env[envName] = String(parsed);
77
+ return true;
78
+ };
79
+ if (!setPositiveEnv('--embedding-threads', 'GITNEXUS_EMBEDDING_THREADS', options?.embeddingThreads) ||
80
+ !setPositiveEnv('--embedding-batch-size', 'GITNEXUS_EMBEDDING_BATCH_SIZE', options?.embeddingBatchSize) ||
81
+ !setPositiveEnv('--embedding-sub-batch-size', 'GITNEXUS_EMBEDDING_SUB_BATCH_SIZE', options?.embeddingSubBatchSize)) {
82
+ return;
83
+ }
84
+ if (options?.embeddingDevice) {
85
+ const allowed = new Set(['auto', 'cpu', 'dml', 'cuda', 'wasm']);
86
+ if (!allowed.has(options.embeddingDevice)) {
87
+ console.error(' --embedding-device must be one of: auto, cpu, dml, cuda, wasm.\n');
88
+ process.exitCode = 1;
89
+ return;
90
+ }
91
+ process.env.GITNEXUS_EMBEDDING_DEVICE = options.embeddingDevice;
92
+ }
67
93
  console.log('\n GitNexus Analyzer\n');
68
94
  let repoPath;
69
95
  if (inputPath) {
@@ -0,0 +1 @@
1
+ export declare const doctorCommand: () => Promise<void>;
@@ -0,0 +1,31 @@
1
import { getRuntimeCapabilities, getRuntimeFingerprint } from '../core/platform/capabilities.js';
import { resolveEmbeddingConfig } from '../core/embeddings/config.js';
import { isHttpMode } from '../core/embeddings/http-client.js';
/**
 * `gitnexus doctor` — print a diagnostic report covering the runtime
 * fingerprint, the capability matrix, and the resolved embedding
 * configuration. Output-only; never mutates state.
 */
export const doctorCommand = async () => {
    const fp = getRuntimeFingerprint();
    const caps = getRuntimeCapabilities();
    const embedding = resolveEmbeddingConfig();
    // Single choke point for output so the report stays easy to redirect.
    const print = (line) => console.log(line);
    print('GitNexus Doctor\n');
    print('Runtime');
    print(` OS: ${fp.platform}/${fp.arch}`);
    print(` Node: ${fp.node}`);
    print(` GitNexus: ${fp.gitnexus}`);
    print(` LadybugDB: ${fp.ladybugdb ?? 'unknown'}`);
    print(` ONNX: ${fp.onnxruntime ?? 'unknown'}`);
    print('');
    print('Capabilities');
    print(` Graph store: ${caps.graph}`);
    print(` Full-text search:${caps.fts.padStart(10)}`);
    print(` VECTOR index: ${caps.vector}`);
    print(` Semantic mode: ${caps.semanticMode}`);
    print(` Exact scan limit:${String(caps.exactScanLimit).padStart(9)} chunks`);
    if (caps.reason) {
        print(` Note: ${caps.reason}`);
    }
    print('');
    print('Embeddings');
    print(` Backend: ${isHttpMode() ? 'http' : 'local'}`);
    print(` Device: ${embedding.device}`);
    print(` Threads: ${embedding.threads}`);
    print(` Batch: ${embedding.batchSize} nodes`);
    print(` Sub-batch: ${embedding.subBatchSize} chunks`);
};
package/dist/cli/index.js CHANGED
@@ -31,11 +31,17 @@ program
31
31
  .option('-v, --verbose', 'Enable verbose ingestion warnings (default: false)')
32
32
  .option('--max-file-size <kb>', 'Skip files larger than this (KB). Default: 512. Hard cap: 32768 (tree-sitter limit).')
33
33
  .option('--worker-timeout <seconds>', 'Worker sub-batch idle timeout before retry/fallback. Default: 30.')
34
+ .option('--embedding-threads <n>', 'Limit local ONNX embedding CPU threads')
35
+ .option('--embedding-batch-size <n>', 'Number of nodes per embedding batch')
36
+ .option('--embedding-sub-batch-size <n>', 'Number of chunks per embedding model call')
37
+ .option('--embedding-device <device>', 'Embedding device: auto, cpu, dml, cuda, or wasm')
34
38
  .addHelpText('after', '\nEnvironment variables:\n' +
35
39
  ' GITNEXUS_NO_GITIGNORE=1 Skip .gitignore parsing (still reads .gitnexusignore)\n' +
36
40
  ' GITNEXUS_MAX_FILE_SIZE=N Override large-file skip threshold (KB). Default 512, max 32768.\n' +
37
41
  ' GITNEXUS_WORKER_SUB_BATCH_TIMEOUT_MS=N Worker idle timeout in milliseconds. Default 30000.\n' +
38
42
  ' GITNEXUS_WORKER_SUB_BATCH_MAX_BYTES=N Worker job byte budget. Default 8388608.\n' +
43
+ ' GITNEXUS_EMBEDDING_THREADS=N Limit local ONNX CPU threads for --embeddings.\n' +
44
+ ' GITNEXUS_SEMANTIC_EXACT_SCAN_LIMIT=N Max embedding chunks for exact-scan fallback. Default 10000.\n' +
39
45
  '\nTip: `.gitnexusignore` supports `.gitignore`-style negation. Add e.g.\n' +
40
46
  ' `!__tests__/` to index a directory that is auto-filtered by default (#771).')
41
47
  .action(createLazyAction(() => import('./analyze.js'), 'analyzeCommand'));
@@ -63,6 +69,10 @@ program
63
69
  .command('status')
64
70
  .description('Show index status for current repo')
65
71
  .action(createLazyAction(() => import('./status.js'), 'statusCommand'));
72
+ program
73
+ .command('doctor')
74
+ .description('Show runtime platform capabilities and embedding configuration')
75
+ .action(createLazyAction(() => import('./doctor.js'), 'doctorCommand'));
66
76
  program
67
77
  .command('clean')
68
78
  .description('Delete GitNexus index for current repo')
@@ -0,0 +1,2 @@
1
+ import { type EmbeddingConfig } from './types.js';
2
+ export declare const resolveEmbeddingConfig: (overrides?: Partial<EmbeddingConfig>) => EmbeddingConfig;
@@ -0,0 +1,36 @@
1
+ import { defaultEmbeddingThreads } from '../platform/capabilities.js';
2
+ import { DEFAULT_EMBEDDING_CONFIG } from './types.js';
3
/**
 * Parse an environment-variable style setting as a positive integer.
 * Returns `fallback` when the value is absent; throws on anything that
 * is not a strictly positive whole number (so misconfiguration is loud).
 */
const parsePositiveInt = (name, value, fallback) => {
    if (value === undefined) {
        return fallback;
    }
    const n = Number(value);
    const isPositiveInteger = Number.isInteger(n) && n > 0;
    if (isPositiveInteger) {
        return n;
    }
    throw new Error(`${name} must be a positive integer, got "${value}"`);
};
12
/**
 * Validate an embedding device name. `undefined` passes through (meaning
 * "not configured"); any unrecognized string throws so typos surface early.
 */
const parseDevice = (value) => {
    if (value === undefined) {
        return undefined;
    }
    const knownDevices = ['auto', 'dml', 'cuda', 'cpu', 'wasm'];
    if (knownDevices.includes(value)) {
        return value;
    }
    throw new Error(`embedding device must be one of auto, dml, cuda, cpu, wasm; got "${value}"`);
};
24
/**
 * Build the effective embedding configuration.
 *
 * Per-field precedence: environment variable, then caller `overrides`,
 * then the compiled-in default (threads default to a host-derived value).
 * Throws if an env var holds an invalid integer or device name.
 */
export const resolveEmbeddingConfig = (overrides = {}) => {
    const env = process.env;
    const batchSize = parsePositiveInt('GITNEXUS_EMBEDDING_BATCH_SIZE', env.GITNEXUS_EMBEDDING_BATCH_SIZE, overrides.batchSize ?? DEFAULT_EMBEDDING_CONFIG.batchSize);
    const subBatchSize = parsePositiveInt('GITNEXUS_EMBEDDING_SUB_BATCH_SIZE', env.GITNEXUS_EMBEDDING_SUB_BATCH_SIZE, overrides.subBatchSize ?? DEFAULT_EMBEDDING_CONFIG.subBatchSize);
    const threads = parsePositiveInt('GITNEXUS_EMBEDDING_THREADS', env.GITNEXUS_EMBEDDING_THREADS, overrides.threads ?? defaultEmbeddingThreads());
    const device = parseDevice(env.GITNEXUS_EMBEDDING_DEVICE) ??
        overrides.device ??
        DEFAULT_EMBEDDING_CONFIG.device;
    return {
        ...DEFAULT_EMBEDDING_CONFIG,
        ...overrides,
        batchSize,
        subBatchSize,
        threads,
        device,
    };
};
@@ -20,6 +20,7 @@ import { join, dirname } from 'path';
20
20
  import { createRequire } from 'module';
21
21
  import { DEFAULT_EMBEDDING_CONFIG } from './types.js';
22
22
  import { isHttpMode, getHttpDimensions, httpEmbed } from './http-client.js';
23
+ import { resolveEmbeddingConfig } from './config.js';
23
24
  /**
24
25
  * Check whether the onnxruntime-node package that @huggingface/transformers
25
26
  * will actually load at runtime ships the CUDA execution provider.
@@ -120,13 +121,12 @@ export const initEmbedder = async (onProgress, config = {}, forceDevice) => {
120
121
  return initPromise;
121
122
  }
122
123
  isInitializing = true;
123
- const finalConfig = { ...DEFAULT_EMBEDDING_CONFIG, ...config };
124
- // On Windows, use DirectML for GPU acceleration (via DirectX12)
125
- // CUDA is only available on Linux x64 with onnxruntime-node
124
+ const finalConfig = resolveEmbeddingConfig(config);
125
+ // CUDA is probe-gated because ONNX Runtime can crash in native code when
126
+ // provider libraries are missing. DirectML stays opt-in for the same reason.
126
127
  // Probe for CUDA first — ONNX Runtime crashes (uncatchable native error)
127
128
  // if we attempt CUDA without the required shared libraries
128
- const isWindows = process.platform === 'win32';
129
- const gpuDevice = isWindows ? 'dml' : isCudaAvailable() ? 'cuda' : 'cpu';
129
+ const gpuDevice = isCudaAvailable() ? 'cuda' : 'cpu';
130
130
  const requestedDevice = forceDevice || (finalConfig.device === 'auto' ? gpuDevice : finalConfig.device);
131
131
  initPromise = (async () => {
132
132
  try {
@@ -176,7 +176,12 @@ export const initEmbedder = async (onProgress, config = {}, forceDevice) => {
176
176
  device: device,
177
177
  dtype: 'fp32',
178
178
  progress_callback: progressCallback,
179
- session_options: { logSeverityLevel: 3 },
179
+ session_options: {
180
+ logSeverityLevel: 3,
181
+ intraOpNumThreads: finalConfig.threads,
182
+ interOpNumThreads: 1,
183
+ executionMode: 'sequential',
184
+ },
180
185
  });
181
186
  currentDevice = device;
182
187
  if (isDev) {
@@ -37,6 +37,12 @@ export declare const batchInsertEmbeddings: (executeWithReusedStatement: (cypher
37
37
  embedding: number[];
38
38
  contentHash?: string;
39
39
  }>) => Promise<void>;
40
+ export interface EmbeddingPipelineResult {
41
+ nodesProcessed: number;
42
+ chunksProcessed: number;
43
+ vectorIndexReady: boolean;
44
+ semanticMode: 'vector-index' | 'exact-scan';
45
+ }
40
46
  /**
41
47
  * Run the embedding pipeline
42
48
  *
@@ -51,7 +57,7 @@ export declare const batchInsertEmbeddings: (executeWithReusedStatement: (cypher
51
57
  * and re-embedded; nodes not in the map are embedded fresh.
52
58
 
53
59
  */
54
- export declare const runEmbeddingPipeline: (executeQuery: (cypher: string) => Promise<any[]>, executeWithReusedStatement: (cypher: string, paramsList: Array<Record<string, any>>) => Promise<void>, onProgress: EmbeddingProgressCallback, config?: Partial<EmbeddingConfig>, skipNodeIds?: Set<string>, context?: EmbeddingContext, existingEmbeddings?: Map<string, string>) => Promise<void>;
60
+ export declare const runEmbeddingPipeline: (executeQuery: (cypher: string) => Promise<any[]>, executeWithReusedStatement: (cypher: string, paramsList: Array<Record<string, any>>) => Promise<void>, onProgress: EmbeddingProgressCallback, config?: Partial<EmbeddingConfig>, skipNodeIds?: Set<string>, context?: EmbeddingContext, existingEmbeddings?: Map<string, string>) => Promise<EmbeddingPipelineResult>;
55
61
  /**
56
62
  * Perform semantic search using the vector index with chunk deduplication
57
63
  */
@@ -13,10 +13,21 @@ import { initEmbedder, embedBatch, embedText, embeddingToArray, isEmbedderReady,
13
13
  import { generateEmbeddingText } from './text-generator.js';
14
14
  import { chunkNode, characterChunk } from './chunker.js';
15
15
  import { extractStructuralNames } from './structural-extractor.js';
16
- import { DEFAULT_EMBEDDING_CONFIG, EMBEDDABLE_LABELS, isShortLabel, LABEL_METHOD, LABELS_WITH_EXPORTED, STRUCTURAL_LABELS, collectBestChunks, } from './types.js';
16
+ import { EMBEDDABLE_LABELS, isShortLabel, LABEL_METHOD, LABELS_WITH_EXPORTED, STRUCTURAL_LABELS, collectBestChunks, } from './types.js';
17
+ import { resolveEmbeddingConfig } from './config.js';
18
+ import { rankExactEmbeddingRows } from './exact-search.js';
17
19
  import { EMBEDDING_TABLE_NAME, EMBEDDING_INDEX_NAME, CREATE_VECTOR_INDEX_QUERY, STALE_HASH_SENTINEL, } from '../lbug/schema.js';
18
20
  import { loadVectorExtension } from '../lbug/lbug-adapter.js';
21
+ import { getExactScanLimit } from '../platform/capabilities.js';
19
22
  const isDev = process.env.NODE_ENV === 'development';
23
+ const vectorUnavailableMessage = 'VECTOR extension is unavailable for this LadybugDB runtime; semantic search will use exact scan when embeddings exist.';
24
+ const ensureVectorExtensionAvailable = async () => {
25
+ const vectorReady = await loadVectorExtension();
26
+ if (!vectorReady) {
27
+ return false;
28
+ }
29
+ return true;
30
+ };
20
31
  /**
21
32
  * Bump this when the embedding text template changes in a way that should
22
33
  * invalidate existing vectors, such as metadata/header shape changes,
@@ -132,18 +143,17 @@ export const batchInsertEmbeddings = async (executeWithReusedStatement, updates)
132
143
 
133
144
  */
134
145
  const createVectorIndex = async (executeQuery) => {
135
- // Delegate to the adapter which tracks loaded state and handles DB reconnect resets.
136
- // If the optional VECTOR extension cannot be loaded, semantic search degrades gracefully.
137
- if (!(await loadVectorExtension())) {
138
- return;
139
- }
146
+ if (!(await ensureVectorExtensionAvailable()))
147
+ return false;
140
148
  try {
141
149
  await executeQuery(CREATE_VECTOR_INDEX_QUERY);
150
+ return true;
142
151
  }
143
152
  catch (error) {
144
153
  if (isDev) {
145
154
  console.warn('Vector index creation warning:', error);
146
155
  }
156
+ return false;
147
157
  }
148
158
  };
149
159
  /**
@@ -161,8 +171,12 @@ const createVectorIndex = async (executeQuery) => {
161
171
 
162
172
  */
163
173
  export const runEmbeddingPipeline = async (executeQuery, executeWithReusedStatement, onProgress, config = {}, skipNodeIds, context, existingEmbeddings) => {
164
- const finalConfig = { ...DEFAULT_EMBEDDING_CONFIG, ...config };
174
+ const finalConfig = resolveEmbeddingConfig(config);
175
+ let totalChunks = 0;
165
176
  try {
177
+ const vectorAvailable = await ensureVectorExtensionAvailable();
178
+ if (!vectorAvailable && isDev)
179
+ console.warn(vectorUnavailableMessage);
166
180
  // Phase 1: Load embedding model
167
181
  onProgress({
168
182
  phase: 'loading-model',
@@ -250,21 +264,25 @@ export const runEmbeddingPipeline = async (executeQuery, executeWithReusedStatem
250
264
  // Ensure the vector index exists even when no new nodes need embedding.
251
265
  // A prior crash or first-time incremental run may have left CodeEmbedding
252
266
  // rows without ever reaching index creation.
253
- await createVectorIndex(executeQuery);
267
+ const vectorIndexReady = await createVectorIndex(executeQuery);
254
268
  onProgress({
255
269
  phase: 'ready',
256
270
  percent: 100,
257
271
  nodesProcessed: 0,
258
272
  totalNodes: 0,
259
273
  });
260
- return;
274
+ return {
275
+ nodesProcessed: 0,
276
+ chunksProcessed: 0,
277
+ vectorIndexReady,
278
+ semanticMode: vectorIndexReady ? 'vector-index' : 'exact-scan',
279
+ };
261
280
  }
262
281
  // Phase 3: Chunk + embed nodes
263
282
  const batchSize = finalConfig.batchSize;
264
283
  const chunkSize = finalConfig.chunkSize;
265
284
  const overlap = finalConfig.overlap;
266
285
  let processedNodes = 0;
267
- let totalChunks = 0;
268
286
  onProgress({
269
287
  phase: 'embedding',
270
288
  percent: 20,
@@ -326,7 +344,7 @@ export const runEmbeddingPipeline = async (executeQuery, executeWithReusedStatem
326
344
  }
327
345
  }
328
346
  // Embed chunk texts in sub-batches to control memory
329
- const EMBED_SUB_BATCH = 8;
347
+ const EMBED_SUB_BATCH = finalConfig.subBatchSize;
330
348
  for (let si = 0; si < allTexts.length; si += EMBED_SUB_BATCH) {
331
349
  const subTexts = allTexts.slice(si, si + EMBED_SUB_BATCH);
332
350
  const subUpdates = allUpdates.slice(si, si + EMBED_SUB_BATCH);
@@ -366,7 +384,7 @@ export const runEmbeddingPipeline = async (executeQuery, executeWithReusedStatem
366
384
  if (isDev) {
367
385
  console.log('📇 Creating vector index...');
368
386
  }
369
- await createVectorIndex(executeQuery);
387
+ const vectorIndexReady = await createVectorIndex(executeQuery);
370
388
  onProgress({
371
389
  phase: 'ready',
372
390
  percent: 100,
@@ -376,6 +394,12 @@ export const runEmbeddingPipeline = async (executeQuery, executeWithReusedStatem
376
394
  if (isDev) {
377
395
  console.log(`✅ Embedding pipeline complete! (${totalChunks} chunks from ${totalNodes} nodes)`);
378
396
  }
397
+ return {
398
+ nodesProcessed: totalNodes,
399
+ chunksProcessed: totalChunks,
400
+ vectorIndexReady,
401
+ semanticMode: vectorIndexReady ? 'vector-index' : 'exact-scan',
402
+ };
379
403
  }
380
404
  catch (error) {
381
405
  const errorMessage = error instanceof Error ? error.message : 'Unknown error';
@@ -400,26 +424,63 @@ export const semanticSearch = async (executeQuery, query, k = 10, maxDistance =
400
424
  const queryEmbedding = await embedText(query);
401
425
  const queryVec = embeddingToArray(queryEmbedding);
402
426
  const queryVecStr = `[${queryVec.join(',')}]`;
403
- const bestChunks = await collectBestChunks(k, async (fetchLimit) => {
404
- const vectorQuery = `
405
- CALL QUERY_VECTOR_INDEX('${EMBEDDING_TABLE_NAME}', '${EMBEDDING_INDEX_NAME}',
406
- CAST(${queryVecStr} AS FLOAT[${queryVec.length}]), ${fetchLimit})
407
- YIELD node AS emb, distance
408
- WITH emb, distance
409
- WHERE distance < ${maxDistance}
410
- RETURN emb.nodeId AS nodeId, emb.chunkIndex AS chunkIndex,
411
- emb.startLine AS startLine, emb.endLine AS endLine, distance
412
- ORDER BY distance
413
- `;
414
- const embResults = await executeQuery(vectorQuery);
415
- return embResults.map((row) => ({
416
- nodeId: row.nodeId ?? row[0],
417
- chunkIndex: row.chunkIndex ?? row[1] ?? 0,
418
- startLine: row.startLine ?? row[2] ?? 0,
419
- endLine: row.endLine ?? row[3] ?? 0,
420
- distance: row.distance ?? row[4],
421
- }));
422
- });
427
+ let bestChunks = new Map();
428
+ if (await loadVectorExtension()) {
429
+ try {
430
+ bestChunks = await collectBestChunks(k, async (fetchLimit) => {
431
+ const vectorQuery = `
432
+ CALL QUERY_VECTOR_INDEX('${EMBEDDING_TABLE_NAME}', '${EMBEDDING_INDEX_NAME}',
433
+ CAST(${queryVecStr} AS FLOAT[${queryVec.length}]), ${fetchLimit})
434
+ YIELD node AS emb, distance
435
+ WITH emb, distance
436
+ WHERE distance < ${maxDistance}
437
+ RETURN emb.nodeId AS nodeId, emb.chunkIndex AS chunkIndex,
438
+ emb.startLine AS startLine, emb.endLine AS endLine, distance
439
+ ORDER BY distance
440
+ `;
441
+ const embResults = await executeQuery(vectorQuery);
442
+ return embResults.map((row) => ({
443
+ nodeId: row.nodeId ?? row[0],
444
+ chunkIndex: row.chunkIndex ?? row[1] ?? 0,
445
+ startLine: row.startLine ?? row[2] ?? 0,
446
+ endLine: row.endLine ?? row[3] ?? 0,
447
+ distance: row.distance ?? row[4],
448
+ }));
449
+ });
450
+ }
451
+ catch {
452
+ bestChunks = new Map();
453
+ }
454
+ }
455
+ if (bestChunks.size === 0) {
456
+ const countRows = await executeQuery(`MATCH (e:${EMBEDDING_TABLE_NAME}) RETURN count(e) AS cnt`);
457
+ const countRow = countRows[0];
458
+ const embeddingCount = Number(countRow?.cnt ?? countRow?.[0] ?? 0);
459
+ const exactLimit = getExactScanLimit();
460
+ if (embeddingCount > 0 && embeddingCount <= exactLimit) {
461
+ const rows = await executeQuery(`
462
+ MATCH (e:${EMBEDDING_TABLE_NAME})
463
+ RETURN e.nodeId AS nodeId, e.chunkIndex AS chunkIndex,
464
+ e.startLine AS startLine, e.endLine AS endLine, e.embedding AS embedding
465
+ `);
466
+ const exactRows = rows.map((row) => ({
467
+ nodeId: row.nodeId ?? row[0],
468
+ chunkIndex: row.chunkIndex ?? row[1] ?? 0,
469
+ startLine: row.startLine ?? row[2] ?? 0,
470
+ endLine: row.endLine ?? row[3] ?? 0,
471
+ embedding: row.embedding ?? row[4] ?? [],
472
+ }));
473
+ bestChunks = new Map(rankExactEmbeddingRows(exactRows, queryVec, k, maxDistance).map((row) => [
474
+ row.nodeId,
475
+ {
476
+ distance: row.distance,
477
+ chunkIndex: row.chunkIndex,
478
+ startLine: row.startLine,
479
+ endLine: row.endLine,
480
+ },
481
+ ]));
482
+ }
483
+ }
423
484
  if (bestChunks.size === 0) {
424
485
  return [];
425
486
  }
@@ -0,0 +1,15 @@
1
+ export interface ExactEmbeddingRow {
2
+ nodeId: string;
3
+ chunkIndex: number;
4
+ startLine: number;
5
+ endLine: number;
6
+ embedding: readonly number[];
7
+ }
8
+ export interface ExactSearchChunk {
9
+ nodeId: string;
10
+ chunkIndex: number;
11
+ startLine: number;
12
+ endLine: number;
13
+ distance: number;
14
+ }
15
+ export declare const rankExactEmbeddingRows: (rows: readonly ExactEmbeddingRow[], queryEmbedding: readonly number[], limit: number, maxDistance: number) => ExactSearchChunk[];
@@ -0,0 +1,27 @@
1
/**
 * Cosine distance (1 - cosine similarity) between two numeric vectors.
 * Only the shared prefix of mismatched-length vectors is compared; a
 * zero-norm operand yields the maximum distance of 1.
 */
const cosineDistance = (a, b) => {
    let dot = 0;
    let normA = 0;
    let normB = 0;
    const shared = Math.min(a.length, b.length);
    for (let i = 0; i < shared; i++) {
        const x = a[i] ?? 0;
        const y = b[i] ?? 0;
        dot += x * y;
        normA += x * x;
        normB += y * y;
    }
    if (normA === 0 || normB === 0) {
        return 1;
    }
    return 1 - dot / (Math.sqrt(normA) * Math.sqrt(normB));
};
/**
 * Rank stored embedding rows against a query vector without a vector index
 * (the exact-scan fallback path). Rows at or beyond `maxDistance` are
 * dropped; the closest `limit` chunks are returned in ascending distance.
 */
export const rankExactEmbeddingRows = (rows, queryEmbedding, limit, maxDistance) => {
    const scored = [];
    for (const row of rows) {
        const distance = cosineDistance(row.embedding, queryEmbedding);
        if (distance < maxDistance) {
            scored.push({
                nodeId: row.nodeId,
                chunkIndex: row.chunkIndex,
                startLine: row.startLine,
                endLine: row.endLine,
                distance,
            });
        }
    }
    scored.sort((x, y) => x.distance - y.distance);
    return scored.slice(0, limit);
};
@@ -102,6 +102,10 @@ export interface EmbeddingConfig {
102
102
  modelId: string;
103
103
  /** Number of nodes to embed in each batch */
104
104
  batchSize: number;
105
+ /** Number of chunks passed to one local/HTTP embedding call */
106
+ subBatchSize: number;
107
+ /** Maximum ONNX Runtime CPU threads for local inference */
108
+ threads: number;
105
109
  /** Embedding vector dimensions */
106
110
  dimensions: number;
107
111
  /** Device to use for inference: 'auto' tries GPU first (DirectML on Windows, CUDA on Linux), falls back to CPU */
@@ -147,6 +147,8 @@ export const CHUNKING_RULES = {
147
147
  export const DEFAULT_EMBEDDING_CONFIG = {
148
148
  modelId: 'Snowflake/snowflake-arctic-embed-xs',
149
149
  batchSize: 16,
150
+ subBatchSize: 8,
151
+ threads: 2,
150
152
  dimensions: 384,
151
153
  device: 'auto',
152
154
  maxSnippetLength: 500,
@@ -1051,7 +1051,7 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
1051
1051
  parentPort.postMessage({ type: 'warning', message });
1052
1052
  else
1053
1053
  console.warn(message);
1054
- });
1054
+ }, tree);
1055
1055
  if (parsedFile !== undefined)
1056
1056
  result.parsedFiles.push(parsedFile);
1057
1057
  // Pre-pass: extract heritage from query matches to build parentMap for buildTypeEnv.
@@ -137,8 +137,8 @@ export declare const getEmbeddingTableName: () => string;
137
137
  export declare const loadFTSExtension: (targetConn?: lbug.Connection, opts?: ExtensionEnsureOptions) => Promise<boolean>;
138
138
  /**
139
139
  * Load the VECTOR extension on the supplied connection (or the singleton
140
- * writable connection when none is given). See `loadFTSExtension` for the
141
- * policy / capability contract the same `ExtensionManager` owns both.
140
+ * writable connection when none is given). Returns false when VECTOR is
141
+ * unavailable so semantic search can fall back to exact scan.
142
142
  */
143
143
  export declare const loadVectorExtension: (targetConn?: lbug.Connection, opts?: ExtensionEnsureOptions) => Promise<boolean>;
144
144
  /**
@@ -8,6 +8,7 @@ import lbug from '@ladybugdb/core';
8
8
  import { NODE_TABLES, REL_TABLE_NAME, SCHEMA_QUERIES, EMBEDDING_TABLE_NAME, STALE_HASH_SENTINEL, } from './schema.js';
9
9
  import { streamAllCSVsToDisk } from './csv-generator.js';
10
10
  import { extensionManager } from './extension-loader.js';
11
+ import { isVectorExtensionSupportedByPlatform } from '../platform/capabilities.js';
11
12
  /**
12
13
  * Split a relationship CSV into per-label-pair files on disk.
13
14
  *
@@ -288,10 +289,9 @@ const doInitLbug = async (dbPath) => {
288
289
  }
289
290
  }
290
291
  }
291
- // Load query extensions once per core adapter session. Missing optional
292
- // extensions degrade search features but must not block analyze completion.
292
+ // FTS powers baseline search, so initialize it with the core DB. VECTOR is
293
+ // only required for semantic embeddings and is probed lazily there.
293
294
  await loadFTSExtension();
294
- await loadVectorExtension();
295
295
  currentDbPath = dbPath;
296
296
  return { db, conn };
297
297
  };
@@ -762,8 +762,9 @@ export const executeWithReusedStatement = async (cypher, paramsList) => {
762
762
  }
763
763
  }
764
764
  catch (e) {
765
- // Log the error and continue with next batch
766
- console.warn('Batch execution error:', e);
765
+ const msg = e instanceof Error ? e.message : String(e);
766
+ const queryPreview = cypher.replace(/\s+/g, ' ').slice(0, 120);
767
+ throw new Error(`Batch execution failed for rows ${i + 1}-${i + subBatch.length}: ${msg} (${queryPreview})`);
767
768
  }
768
769
  // Note: LadybugDB PreparedStatement doesn't require explicit close()
769
770
  }
@@ -1052,13 +1053,15 @@ export const loadFTSExtension = async (targetConn, opts = {}) => {
1052
1053
  };
1053
1054
  /**
1054
1055
  * Load the VECTOR extension on the supplied connection (or the singleton
1055
- * writable connection when none is given). See `loadFTSExtension` for the
1056
- * policy / capability contract the same `ExtensionManager` owns both.
1056
+ * writable connection when none is given). Returns false when VECTOR is
1057
+ * unavailable so semantic search can fall back to exact scan.
1057
1058
  */
1058
1059
  export const loadVectorExtension = async (targetConn, opts = {}) => {
1059
1060
  const useModuleState = targetConn === undefined;
1060
1061
  if (useModuleState && vectorExtensionLoaded)
1061
1062
  return true;
1063
+ if (!isVectorExtensionSupportedByPlatform())
1064
+ return false;
1062
1065
  const c = targetConn ?? conn;
1063
1066
  if (!c) {
1064
1067
  throw new Error('LadybugDB not initialized. Call initLbug first.');
@@ -16,7 +16,7 @@
16
16
  */
17
17
  import fs from 'fs/promises';
18
18
  import lbug from '@ladybugdb/core';
19
- import { loadFTSExtension, loadVectorExtension } from './lbug-adapter.js';
19
+ import { loadFTSExtension } from './lbug-adapter.js';
20
20
  const pool = new Map();
21
21
  const poolCloseListeners = new Set();
22
22
  /**
@@ -122,7 +122,6 @@ function closeOne(repoId) {
122
122
  // for the same dbPath reuse it instead of hitting a file lock.
123
123
  shared.refCount = 0;
124
124
  shared.ftsLoaded = false;
125
- shared.vectorLoaded = false;
126
125
  }
127
126
  else {
128
127
  shared.db.close().catch(() => { });
@@ -248,7 +247,7 @@ async function doInitLbug(repoId, dbPath) {
248
247
  false, // enableCompression (default)
249
248
  true);
250
249
  restoreStdout();
251
- shared = { db, refCount: 0, ftsLoaded: false, vectorLoaded: false };
250
+ shared = { db, refCount: 0, ftsLoaded: false };
252
251
  dbCache.set(dbPath, shared);
253
252
  break;
254
253
  }
@@ -290,9 +289,6 @@ async function doInitLbug(repoId, dbPath) {
290
289
  if (!shared.ftsLoaded) {
291
290
  shared.ftsLoaded = await loadFTSExtension(available[0], { policy: 'load-only' });
292
291
  }
293
- if (!shared.vectorLoaded) {
294
- shared.vectorLoaded = await loadVectorExtension(available[0], { policy: 'load-only' });
295
- }
296
292
  // Register pool entry only after all connections are pre-warmed and FTS is
297
293
  // loaded. Concurrent executeQuery calls see either "not initialized"
298
294
  // (and throw cleanly) or a fully ready pool — never a half-built one.
@@ -330,7 +326,7 @@ export async function initLbugWithDb(repoId, existingDb, dbPath) {
330
326
  // closeOne() respects the external flag and skips db.close().
331
327
  let shared = dbCache.get(dbPath);
332
328
  if (!shared) {
333
- shared = { db: existingDb, refCount: 0, ftsLoaded: false, vectorLoaded: false, external: true };
329
+ shared = { db: existingDb, refCount: 0, ftsLoaded: false, external: true };
334
330
  dbCache.set(dbPath, shared);
335
331
  }
336
332
  shared.refCount++;
@@ -350,9 +346,6 @@ export async function initLbugWithDb(repoId, existingDb, dbPath) {
350
346
  if (!shared.ftsLoaded) {
351
347
  shared.ftsLoaded = await loadFTSExtension(available[0], { policy: 'load-only' });
352
348
  }
353
- if (!shared.vectorLoaded) {
354
- shared.vectorLoaded = await loadVectorExtension(available[0], { policy: 'load-only' });
355
- }
356
349
  pool.set(repoId, {
357
350
  db: existingDb,
358
351
  available,
@@ -0,0 +1,24 @@
1
+ export type CapabilityStatus = 'available' | 'degraded' | 'unavailable';
2
+ export type SemanticSearchMode = 'vector-index' | 'exact-scan' | 'unavailable';
3
+ export interface RuntimeFingerprint {
4
+ platform: NodeJS.Platform;
5
+ arch: string;
6
+ node: string;
7
+ gitnexus: string;
8
+ ladybugdb?: string;
9
+ onnxruntime?: string;
10
+ }
11
+ export interface RuntimeCapabilities {
12
+ graph: CapabilityStatus;
13
+ fts: CapabilityStatus;
14
+ vector: CapabilityStatus;
15
+ semanticMode: SemanticSearchMode;
16
+ exactScanLimit: number;
17
+ reason?: string;
18
+ }
19
+ export declare const DEFAULT_EXACT_SCAN_LIMIT = 10000;
20
+ export declare const getExactScanLimit: () => number;
21
+ export declare const getRuntimeFingerprint: () => RuntimeFingerprint;
22
+ export declare const isVectorExtensionSupportedByPlatform: (platform?: NodeJS.Platform) => boolean;
23
+ export declare const getRuntimeCapabilities: () => RuntimeCapabilities;
24
+ export declare const defaultEmbeddingThreads: () => number;
@@ -0,0 +1,54 @@
1
import os from 'os';
import { createRequire } from 'module';
// CommonJS require shim so package.json metadata can be read from ESM.
const require = createRequire(import.meta.url);
/**
 * Best-effort version lookup for an installed dependency.
 * Returns undefined when the package (or its package.json) cannot be
 * resolved — e.g. optional native deps that are absent on this platform.
 */
const packageVersion = (name) => {
    try {
        const pkg = require(`${name}/package.json`);
        return pkg.version;
    }
    catch {
        return undefined;
    }
};
/**
 * Version of GitNexus itself, read from the bundled package.json relative
 * to this compiled module. Falls back to 'unknown' rather than throwing.
 */
const gitnexusVersion = () => {
    try {
        const pkg = require('../../../package.json');
        return pkg.version;
    }
    catch {
        return 'unknown';
    }
};
20
/**
 * Parse an env-var value as a positive integer, silently falling back on
 * absent or malformed input (capabilities probing must never throw).
 */
const parsePositiveInt = (value, fallback) => {
    if (value === undefined) {
        return fallback;
    }
    const n = Number(value);
    if (Number.isInteger(n) && n > 0) {
        return n;
    }
    return fallback;
};
/** Default cap on embedding rows scanned by the exact-scan fallback. */
export const DEFAULT_EXACT_SCAN_LIMIT = 10_000;
/** Effective exact-scan cap, overridable via GITNEXUS_SEMANTIC_EXACT_SCAN_LIMIT. */
export const getExactScanLimit = () => parsePositiveInt(process.env.GITNEXUS_SEMANTIC_EXACT_SCAN_LIMIT, DEFAULT_EXACT_SCAN_LIMIT);
28
/**
 * Snapshot of the host runtime used in diagnostics output: platform,
 * architecture, Node version, and resolvable dependency versions.
 */
export const getRuntimeFingerprint = () => {
    const fingerprint = {
        platform: process.platform,
        arch: process.arch,
        node: process.version,
        gitnexus: gitnexusVersion(),
        ladybugdb: packageVersion('@ladybugdb/core'),
        onnxruntime: packageVersion('onnxruntime-node'),
    };
    return fingerprint;
};
36
/**
 * LadybugDB's VECTOR extension is not available on Windows; every other
 * platform is treated as capable. Defaults to the current process platform.
 */
export const isVectorExtensionSupportedByPlatform = (platform = process.platform) => {
    return platform !== 'win32';
};
37
/**
 * Capability matrix for the current host. Graph storage and full-text
 * search are always on; VECTOR availability is platform-gated and decides
 * whether semantic search uses the index or the exact-scan fallback.
 */
export const getRuntimeCapabilities = () => {
    const vectorSupported = isVectorExtensionSupportedByPlatform();
    const vector = vectorSupported ? 'available' : 'unavailable';
    const semanticMode = vectorSupported ? 'vector-index' : 'exact-scan';
    const reason = vectorSupported
        ? undefined
        : 'LadybugDB VECTOR is disabled on this platform; semantic search uses exact scan when embeddings exist.';
    return {
        graph: 'available',
        fts: 'available',
        vector,
        semanticMode,
        exactScanLimit: getExactScanLimit(),
        reason,
    };
};
51
/**
 * Conservative ONNX thread default: half the available logical cores,
 * clamped to the range 1..4 so embedding never saturates the machine.
 */
export const defaultEmbeddingThreads = () => {
    const cores = typeof os.availableParallelism === 'function'
        ? os.availableParallelism()
        : os.cpus().length;
    const half = Math.floor(cores / 2) || 1;
    return Math.max(1, Math.min(4, half));
};
@@ -198,6 +198,7 @@ export async function runFullAnalysis(repoPath, options, callbacks) {
198
198
  // ── Phase 4: Embeddings (90–98%) ──────────────────────────────────
199
199
  const stats = await getLbugStats();
200
200
  let embeddingSkipped = true;
201
+ let semanticMode;
201
202
  if (shouldGenerateEmbeddings) {
202
203
  if (stats.nodes <= EMBEDDING_NODE_LIMIT) {
203
204
  embeddingSkipped = false;
@@ -219,7 +220,7 @@ export async function runFullAnalysis(repoPath, options, callbacks) {
219
220
  const { readServerMapping } = await import('./embeddings/server-mapping.js');
220
221
  const projectName = path.basename(repoPath);
221
222
  const serverName = await readServerMapping(projectName);
222
- await runEmbeddingPipeline(executeQuery, executeWithReusedStatement, (p) => {
223
+ const embeddingResult = await runEmbeddingPipeline(executeQuery, executeWithReusedStatement, (p) => {
223
224
  const scaled = 90 + Math.round((p.percent / 100) * 8);
224
225
  const label = p.phase === 'loading-model'
225
226
  ? httpMode
@@ -228,6 +229,14 @@ export async function runFullAnalysis(repoPath, options, callbacks) {
228
229
  : `Embedding ${p.nodesProcessed || 0}/${p.totalNodes || '?'}`;
229
230
  progress('embeddings', scaled, label);
230
231
  }, {}, cachedEmbeddingNodeIds.size > 0 ? cachedEmbeddingNodeIds : undefined, { repoName: projectName, serverName }, existingEmbeddings);
232
+ if (embeddingResult.semanticMode === 'exact-scan') {
233
+ semanticMode = 'exact-scan';
234
+ log('Semantic embeddings were generated without a VECTOR index; ' +
235
+ 'queries will use exact-scan fallback within the configured limit.');
236
+ }
237
+ else {
238
+ semanticMode = 'vector-index';
239
+ }
231
240
  }
232
241
  // ── Phase 5: Finalize (98–100%) ───────────────────────────────────
233
242
  progress('done', 98, 'Saving metadata...');
@@ -235,11 +244,20 @@ export async function runFullAnalysis(repoPath, options, callbacks) {
235
244
  let embeddingCount = 0;
236
245
  try {
237
246
  const embResult = await executeQuery(`MATCH (e:${EMBEDDING_TABLE_NAME}) RETURN count(e) AS cnt`);
238
- embeddingCount = embResult?.[0]?.cnt ?? 0;
247
+ const row = embResult?.[0];
248
+ embeddingCount = Number(row?.cnt ?? row?.[0] ?? 0);
239
249
  }
240
250
  catch {
241
251
  /* table may not exist if embeddings never ran */
242
252
  }
253
+ if (!embeddingSkipped && stats.nodes > 0 && embeddingCount === 0) {
254
+ throw new Error('Embedding generation completed without persisted embeddings. ' +
255
+ 'The index was not registered to avoid silently reporting embeddings: 0.');
256
+ }
257
+ const { getRuntimeCapabilities } = await import('./platform/capabilities.js');
258
+ const runtimeCapabilities = getRuntimeCapabilities();
259
+ const effectiveSemanticMode = semanticMode ??
260
+ (runtimeCapabilities.semanticMode === 'vector-index' ? 'vector-index' : 'exact-scan');
243
261
  const meta = {
244
262
  repoPath,
245
263
  lastCommit: currentCommit,
@@ -259,6 +277,16 @@ export async function runFullAnalysis(repoPath, options, callbacks) {
259
277
  processes: pipelineResult.processResult?.stats.totalProcesses,
260
278
  embeddings: embeddingCount,
261
279
  },
280
+ capabilities: {
281
+ graph: { provider: 'ladybugdb', status: runtimeCapabilities.graph },
282
+ fts: { provider: 'ladybugdb-fts', status: runtimeCapabilities.fts },
283
+ vectorSearch: {
284
+ provider: effectiveSemanticMode === 'vector-index' ? 'ladybugdb-vector' : 'exact-scan',
285
+ status: embeddingCount > 0 ? effectiveSemanticMode : 'unavailable',
286
+ exactScanLimit: runtimeCapabilities.exactScanLimit,
287
+ reason: runtimeCapabilities.reason,
288
+ },
289
+ },
262
290
  };
263
291
  await saveMeta(storagePath, meta);
264
292
  // Forward the --name alias and the registry-collision bypass bit.
@@ -8,6 +8,7 @@ import { pipeline, env } from '@huggingface/transformers';
8
8
  import os from 'os';
9
9
  import { join } from 'path';
10
10
  import { isHttpMode, getHttpDimensions, httpEmbedQuery, } from '../../core/embeddings/http-client.js';
11
+ import { resolveEmbeddingConfig } from '../../core/embeddings/config.js';
11
12
  import { silenceStdout, restoreStdout, realStderrWrite } from '../../core/lbug/pool-adapter.js';
12
13
  // Model config
13
14
  const MODEL_ID = 'Snowflake/snowflake-arctic-embed-xs';
@@ -37,11 +38,11 @@ export const initEmbedder = async () => {
37
38
  // when gitnexus is installed globally (e.g. /usr/lib/node_modules/).
38
39
  // Respect HF_HOME if set, otherwise fall back to ~/.cache/huggingface.
39
40
  env.cacheDir = process.env.HF_HOME ?? join(os.homedir(), '.cache', 'huggingface');
41
+ const embeddingConfig = resolveEmbeddingConfig();
40
42
  console.error('GitNexus: Loading embedding model (first search may take a moment)...');
41
- // Try GPU first (DirectML on Windows, CUDA on Linux), fall back to CPU
42
- const isWindows = process.platform === 'win32';
43
- const gpuDevice = isWindows ? 'dml' : 'cuda';
44
- const devicesToTry = [gpuDevice, 'cpu'];
43
+ const devicesToTry = embeddingConfig.device === 'dml' || embeddingConfig.device === 'cuda'
44
+ ? [embeddingConfig.device, 'cpu']
45
+ : ['cpu'];
45
46
  for (const device of devicesToTry) {
46
47
  try {
47
48
  // Silence stdout and stderr during model load — ONNX Runtime and transformers.js
@@ -55,6 +56,12 @@ export const initEmbedder = async () => {
55
56
  embedderInstance = await pipeline('feature-extraction', MODEL_ID, {
56
57
  device: device,
57
58
  dtype: 'fp32',
59
+ session_options: {
60
+ logSeverityLevel: 3,
61
+ intraOpNumThreads: embeddingConfig.threads,
62
+ interOpNumThreads: 1,
63
+ executionMode: 'sequential',
64
+ },
58
65
  });
59
66
  }
60
67
  finally {
@@ -18,7 +18,9 @@ import { listRegisteredRepos, cleanupOldKuzuFiles, } from '../../storage/repo-ma
18
18
  import { GroupService } from '../../core/group/service.js';
19
19
  import { resolveAtGroupMemberRepoPath } from '../../core/group/resolve-at-member.js';
20
20
  import { collectBestChunks } from '../../core/embeddings/types.js';
21
+ import { rankExactEmbeddingRows, } from '../../core/embeddings/exact-search.js';
21
22
  import { EMBEDDING_TABLE_NAME, EMBEDDING_INDEX_NAME } from '../../core/lbug/schema.js';
23
+ import { getExactScanLimit } from '../../core/platform/capabilities.js';
22
24
  import { PhaseTimer } from '../../core/search/phase-timer.js';
23
25
  import { checkStaleness, checkCwdMatch } from '../../core/git-staleness.js';
24
26
  // AI context generation is CLI-only (gitnexus analyze)
@@ -889,26 +891,59 @@ export class LocalBackend {
889
891
  const queryVec = await embedQuery(query);
890
892
  const dims = getEmbeddingDims();
891
893
  const queryVecStr = `[${queryVec.join(',')}]`;
892
- const bestChunks = await collectBestChunks(limit, async (fetchLimit) => {
893
- const vectorQuery = `
894
- CALL QUERY_VECTOR_INDEX('${EMBEDDING_TABLE_NAME}', '${EMBEDDING_INDEX_NAME}',
895
- CAST(${queryVecStr} AS FLOAT[${dims}]), ${fetchLimit})
896
- YIELD node AS emb, distance
897
- WITH emb, distance
898
- WHERE distance < 0.6
899
- RETURN emb.nodeId AS nodeId, emb.chunkIndex AS chunkIndex,
900
- emb.startLine AS startLine, emb.endLine AS endLine, distance
901
- ORDER BY distance
902
- `;
903
- const embResults = await executeQuery(repo.id, vectorQuery);
904
- return embResults.map((row) => ({
894
+ let bestChunks = new Map();
895
+ try {
896
+ bestChunks = await collectBestChunks(limit, async (fetchLimit) => {
897
+ const vectorQuery = `
898
+ CALL QUERY_VECTOR_INDEX('${EMBEDDING_TABLE_NAME}', '${EMBEDDING_INDEX_NAME}',
899
+ CAST(${queryVecStr} AS FLOAT[${dims}]), ${fetchLimit})
900
+ YIELD node AS emb, distance
901
+ WITH emb, distance
902
+ WHERE distance < 0.6
903
+ RETURN emb.nodeId AS nodeId, emb.chunkIndex AS chunkIndex,
904
+ emb.startLine AS startLine, emb.endLine AS endLine, distance
905
+ ORDER BY distance
906
+ `;
907
+ const embResults = await executeQuery(repo.id, vectorQuery);
908
+ return embResults.map((row) => ({
909
+ nodeId: row.nodeId ?? row[0],
910
+ chunkIndex: row.chunkIndex ?? row[1] ?? 0,
911
+ startLine: row.startLine ?? row[2] ?? 0,
912
+ endLine: row.endLine ?? row[3] ?? 0,
913
+ distance: row.distance ?? row[4],
914
+ }));
915
+ });
916
+ }
917
+ catch {
918
+ bestChunks = new Map();
919
+ }
920
+ if (bestChunks.size === 0) {
921
+ const embeddingCount = Number(tableCheck[0].cnt ?? tableCheck[0][0] ?? 0);
922
+ const exactLimit = getExactScanLimit();
923
+ if (embeddingCount > exactLimit)
924
+ return [];
925
+ const rows = await executeQuery(repo.id, `
926
+ MATCH (e:${EMBEDDING_TABLE_NAME})
927
+ RETURN e.nodeId AS nodeId, e.chunkIndex AS chunkIndex,
928
+ e.startLine AS startLine, e.endLine AS endLine, e.embedding AS embedding
929
+ `);
930
+ const exactRows = rows.map((row) => ({
905
931
  nodeId: row.nodeId ?? row[0],
906
932
  chunkIndex: row.chunkIndex ?? row[1] ?? 0,
907
933
  startLine: row.startLine ?? row[2] ?? 0,
908
934
  endLine: row.endLine ?? row[3] ?? 0,
909
- distance: row.distance ?? row[4],
935
+ embedding: row.embedding ?? row[4] ?? [],
910
936
  }));
911
- });
937
+ bestChunks = new Map(rankExactEmbeddingRows(exactRows, queryVec, limit, 0.6).map((row) => [
938
+ row.nodeId,
939
+ {
940
+ distance: row.distance,
941
+ chunkIndex: row.chunkIndex,
942
+ startLine: row.startLine,
943
+ endLine: row.endLine,
944
+ },
945
+ ]));
946
+ }
912
947
  if (bestChunks.size === 0)
913
948
  return [];
914
949
  const results = [];
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gitnexus",
3
- "version": "1.6.4-rc.20",
3
+ "version": "1.6.4-rc.22",
4
4
  "description": "Graph-powered code intelligence for AI agents. Index any codebase, query via MCP or CLI.",
5
5
  "author": "Abhigyan Patwari",
6
6
  "license": "PolyForm-Noncommercial-1.0.0",