gitnexus 1.4.5 → 1.4.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/eval-server.js +13 -5
- package/dist/cli/index.js +0 -0
- package/dist/cli/tool.d.ts +3 -2
- package/dist/cli/tool.js +48 -13
- package/dist/core/graph/types.d.ts +2 -2
- package/dist/core/ingestion/call-processor.d.ts +7 -2
- package/dist/core/ingestion/call-processor.js +308 -235
- package/dist/core/ingestion/call-routing.d.ts +17 -2
- package/dist/core/ingestion/call-routing.js +21 -0
- package/dist/core/ingestion/parsing-processor.d.ts +2 -1
- package/dist/core/ingestion/parsing-processor.js +37 -8
- package/dist/core/ingestion/pipeline.js +5 -1
- package/dist/core/ingestion/symbol-table.d.ts +19 -3
- package/dist/core/ingestion/symbol-table.js +41 -2
- package/dist/core/ingestion/tree-sitter-queries.d.ts +12 -12
- package/dist/core/ingestion/tree-sitter-queries.js +200 -0
- package/dist/core/ingestion/type-env.js +126 -18
- package/dist/core/ingestion/type-extractors/c-cpp.js +28 -3
- package/dist/core/ingestion/type-extractors/csharp.js +61 -7
- package/dist/core/ingestion/type-extractors/go.js +86 -10
- package/dist/core/ingestion/type-extractors/jvm.js +122 -23
- package/dist/core/ingestion/type-extractors/php.js +172 -7
- package/dist/core/ingestion/type-extractors/python.js +107 -21
- package/dist/core/ingestion/type-extractors/ruby.js +18 -3
- package/dist/core/ingestion/type-extractors/rust.js +61 -14
- package/dist/core/ingestion/type-extractors/shared.d.ts +13 -0
- package/dist/core/ingestion/type-extractors/shared.js +243 -4
- package/dist/core/ingestion/type-extractors/types.d.ts +57 -12
- package/dist/core/ingestion/type-extractors/typescript.js +52 -8
- package/dist/core/ingestion/utils.d.ts +25 -0
- package/dist/core/ingestion/utils.js +160 -1
- package/dist/core/ingestion/workers/parse-worker.d.ts +23 -7
- package/dist/core/ingestion/workers/parse-worker.js +73 -28
- package/dist/core/lbug/lbug-adapter.d.ts +2 -0
- package/dist/core/lbug/lbug-adapter.js +2 -0
- package/dist/core/lbug/schema.d.ts +1 -1
- package/dist/core/lbug/schema.js +1 -1
- package/dist/mcp/core/lbug-adapter.d.ts +22 -0
- package/dist/mcp/core/lbug-adapter.js +167 -23
- package/dist/mcp/local/local-backend.d.ts +1 -0
- package/dist/mcp/local/local-backend.js +25 -3
- package/dist/mcp/resources.js +11 -0
- package/dist/mcp/server.js +26 -4
- package/dist/mcp/tools.js +15 -5
- package/hooks/claude/gitnexus-hook.cjs +0 -0
- package/hooks/claude/pre-tool-use.sh +0 -0
- package/hooks/claude/session-start.sh +0 -0
- package/package.json +6 -5
- package/scripts/patch-tree-sitter-swift.cjs +0 -0
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
import { SupportedLanguages } from '../../../config/supported-languages.js';
|
|
2
|
+
import { type MixedChainStep } from '../utils.js';
|
|
2
3
|
import type { ConstructorBinding } from '../type-env.js';
|
|
4
|
+
import type { NodeLabel } from '../../graph/types.js';
|
|
3
5
|
interface ParsedNode {
|
|
4
6
|
id: string;
|
|
5
7
|
label: string;
|
|
@@ -21,7 +23,7 @@ interface ParsedRelationship {
|
|
|
21
23
|
id: string;
|
|
22
24
|
sourceId: string;
|
|
23
25
|
targetId: string;
|
|
24
|
-
type: 'DEFINES' | 'HAS_METHOD';
|
|
26
|
+
type: 'DEFINES' | 'HAS_METHOD' | 'HAS_PROPERTY';
|
|
25
27
|
confidence: number;
|
|
26
28
|
reason: string;
|
|
27
29
|
}
|
|
@@ -29,9 +31,10 @@ interface ParsedSymbol {
|
|
|
29
31
|
filePath: string;
|
|
30
32
|
name: string;
|
|
31
33
|
nodeId: string;
|
|
32
|
-
type:
|
|
34
|
+
type: NodeLabel;
|
|
33
35
|
parameterCount?: number;
|
|
34
36
|
returnType?: string;
|
|
37
|
+
declaredType?: string;
|
|
35
38
|
ownerId?: string;
|
|
36
39
|
}
|
|
37
40
|
export interface ExtractedImport {
|
|
@@ -57,13 +60,25 @@ export interface ExtractedCall {
|
|
|
57
60
|
/** Resolved type name of the receiver (e.g., 'User' for user.save() when user: User) */
|
|
58
61
|
receiverTypeName?: string;
|
|
59
62
|
/**
|
|
60
|
-
*
|
|
61
|
-
*
|
|
62
|
-
*
|
|
63
|
-
* `
|
|
63
|
+
* Unified mixed chain when the receiver is a chain of field accesses and/or method calls.
|
|
64
|
+
* Steps are ordered base-first (innermost to outermost). Examples:
|
|
65
|
+
* `svc.getUser().save()` → chain=[{kind:'call',name:'getUser'}], receiverName='svc'
|
|
66
|
+
* `user.address.save()` → chain=[{kind:'field',name:'address'}], receiverName='user'
|
|
67
|
+
* `svc.getUser().address.save()` → chain=[{kind:'call',name:'getUser'},{kind:'field',name:'address'}]
|
|
64
68
|
* Length is capped at MAX_CHAIN_DEPTH (3).
|
|
65
69
|
*/
|
|
66
|
-
|
|
70
|
+
receiverMixedChain?: MixedChainStep[];
|
|
71
|
+
}
|
|
72
|
+
export interface ExtractedAssignment {
|
|
73
|
+
filePath: string;
|
|
74
|
+
/** generateId of enclosing function, or generateId('File', filePath) for top-level */
|
|
75
|
+
sourceId: string;
|
|
76
|
+
/** Receiver text (e.g., 'user' from user.address = value) */
|
|
77
|
+
receiverText: string;
|
|
78
|
+
/** Property name being written (e.g., 'address') */
|
|
79
|
+
propertyName: string;
|
|
80
|
+
/** Resolved type name of the receiver if available from TypeEnv */
|
|
81
|
+
receiverTypeName?: string;
|
|
67
82
|
}
|
|
68
83
|
export interface ExtractedHeritage {
|
|
69
84
|
filePath: string;
|
|
@@ -93,6 +108,7 @@ export interface ParseWorkerResult {
|
|
|
93
108
|
symbols: ParsedSymbol[];
|
|
94
109
|
imports: ExtractedImport[];
|
|
95
110
|
calls: ExtractedCall[];
|
|
111
|
+
assignments: ExtractedAssignment[];
|
|
96
112
|
heritage: ExtractedHeritage[];
|
|
97
113
|
routes: ExtractedRoute[];
|
|
98
114
|
constructorBindings: FileConstructorBindings[];
|
|
@@ -28,7 +28,7 @@ try {
|
|
|
28
28
|
Kotlin = _require('tree-sitter-kotlin');
|
|
29
29
|
}
|
|
30
30
|
catch { }
|
|
31
|
-
import { getLanguageFromFilename, FUNCTION_NODE_TYPES, extractFunctionName, isBuiltInOrNoise, getDefinitionNodeFromCaptures, findEnclosingClassId, extractMethodSignature, countCallArguments, inferCallForm, extractReceiverName, extractReceiverNode,
|
|
31
|
+
import { getLanguageFromFilename, FUNCTION_NODE_TYPES, extractFunctionName, isBuiltInOrNoise, getDefinitionNodeFromCaptures, findEnclosingClassId, extractMethodSignature, countCallArguments, inferCallForm, extractReceiverName, extractReceiverNode, extractMixedChain, } from '../utils.js';
|
|
32
32
|
import { buildTypeEnv } from '../type-env.js';
|
|
33
33
|
import { isNodeExported } from '../export-detection.js';
|
|
34
34
|
import { detectFrameworkFromAST } from '../framework-detection.js';
|
|
@@ -37,6 +37,7 @@ import { generateId } from '../../../lib/utils.js';
|
|
|
37
37
|
import { extractNamedBindings } from '../named-binding-extraction.js';
|
|
38
38
|
import { appendKotlinWildcard } from '../resolvers/index.js';
|
|
39
39
|
import { callRouters } from '../call-routing.js';
|
|
40
|
+
import { extractPropertyDeclaredType } from '../type-extractors/shared.js';
|
|
40
41
|
// ============================================================================
|
|
41
42
|
// Worker-local parser + language map
|
|
42
43
|
// ============================================================================
|
|
@@ -162,6 +163,7 @@ const processBatch = (files, onProgress) => {
|
|
|
162
163
|
symbols: [],
|
|
163
164
|
imports: [],
|
|
164
165
|
calls: [],
|
|
166
|
+
assignments: [],
|
|
165
167
|
heritage: [],
|
|
166
168
|
routes: [],
|
|
167
169
|
constructorBindings: [],
|
|
@@ -759,6 +761,28 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
759
761
|
});
|
|
760
762
|
continue;
|
|
761
763
|
}
|
|
764
|
+
// Extract assignment sites (field write access)
|
|
765
|
+
if (captureMap['assignment'] && captureMap['assignment.receiver'] && captureMap['assignment.property']) {
|
|
766
|
+
const receiverText = captureMap['assignment.receiver'].text;
|
|
767
|
+
const propertyName = captureMap['assignment.property'].text;
|
|
768
|
+
if (receiverText && propertyName) {
|
|
769
|
+
const srcId = findEnclosingFunctionId(captureMap['assignment'], file.path)
|
|
770
|
+
|| generateId('File', file.path);
|
|
771
|
+
let receiverTypeName;
|
|
772
|
+
if (typeEnv) {
|
|
773
|
+
receiverTypeName = typeEnv.lookup(receiverText, captureMap['assignment']) ?? undefined;
|
|
774
|
+
}
|
|
775
|
+
result.assignments.push({
|
|
776
|
+
filePath: file.path,
|
|
777
|
+
sourceId: srcId,
|
|
778
|
+
receiverText,
|
|
779
|
+
propertyName,
|
|
780
|
+
...(receiverTypeName ? { receiverTypeName } : {}),
|
|
781
|
+
});
|
|
782
|
+
}
|
|
783
|
+
if (!captureMap['call'])
|
|
784
|
+
continue;
|
|
785
|
+
}
|
|
762
786
|
// Extract call sites
|
|
763
787
|
if (captureMap['call']) {
|
|
764
788
|
const callNameNode = captureMap['call.name'];
|
|
@@ -811,6 +835,7 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
811
835
|
nodeId,
|
|
812
836
|
type: 'Property',
|
|
813
837
|
...(propEnclosingClassId ? { ownerId: propEnclosingClassId } : {}),
|
|
838
|
+
...(item.declaredType ? { declaredType: item.declaredType } : {}),
|
|
814
839
|
});
|
|
815
840
|
const fileId = generateId('File', file.path);
|
|
816
841
|
const relId = generateId('DEFINES', `${fileId}->${nodeId}`);
|
|
@@ -824,10 +849,10 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
824
849
|
});
|
|
825
850
|
if (propEnclosingClassId) {
|
|
826
851
|
result.relationships.push({
|
|
827
|
-
id: generateId('
|
|
852
|
+
id: generateId('HAS_PROPERTY', `${propEnclosingClassId}->${nodeId}`),
|
|
828
853
|
sourceId: propEnclosingClassId,
|
|
829
854
|
targetId: nodeId,
|
|
830
|
-
type: '
|
|
855
|
+
type: 'HAS_PROPERTY',
|
|
831
856
|
confidence: 1.0,
|
|
832
857
|
reason: '',
|
|
833
858
|
});
|
|
@@ -844,26 +869,19 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
844
869
|
const callForm = inferCallForm(callNode, callNameNode);
|
|
845
870
|
let receiverName = callForm === 'member' ? extractReceiverName(callNameNode) : undefined;
|
|
846
871
|
let receiverTypeName = receiverName ? typeEnv.lookup(receiverName, callNode) : undefined;
|
|
847
|
-
let
|
|
848
|
-
// When the receiver is a
|
|
849
|
-
// extractReceiverName returns undefined
|
|
850
|
-
//
|
|
851
|
-
// We capture the base receiver name so processCallsFromExtracted can look it up
|
|
852
|
-
// from constructor bindings. receiverTypeName is intentionally left unset here —
|
|
853
|
-
// the chain resolver in processCallsFromExtracted needs the base type as input and
|
|
854
|
-
// produces the final receiver type as output.
|
|
872
|
+
let receiverMixedChain;
|
|
873
|
+
// When the receiver is a complex expression (call chain, field chain, or mixed),
|
|
874
|
+
// extractReceiverName returns undefined. Walk the receiver node to build a unified
|
|
875
|
+
// mixed chain for deferred resolution in processCallsFromExtracted.
|
|
855
876
|
if (callForm === 'member' && receiverName === undefined && !receiverTypeName) {
|
|
856
877
|
const receiverNode = extractReceiverNode(callNameNode);
|
|
857
|
-
if (receiverNode
|
|
858
|
-
const extracted =
|
|
859
|
-
if (extracted) {
|
|
860
|
-
|
|
861
|
-
// Set receiverName to the base object so Step 1 in processCallsFromExtracted
|
|
862
|
-
// can resolve it via constructor bindings to a base type for the chain.
|
|
878
|
+
if (receiverNode) {
|
|
879
|
+
const extracted = extractMixedChain(receiverNode);
|
|
880
|
+
if (extracted && extracted.chain.length > 0) {
|
|
881
|
+
receiverMixedChain = extracted.chain;
|
|
863
882
|
receiverName = extracted.baseReceiverName;
|
|
864
|
-
//
|
|
865
|
-
// and annotated parameters
|
|
866
|
-
// This sets a base type that chain resolution (Step 2) will use as input.
|
|
883
|
+
// Try the type environment immediately for the base receiver
|
|
884
|
+
// (covers explicitly-typed locals and annotated parameters).
|
|
867
885
|
if (receiverName) {
|
|
868
886
|
receiverTypeName = typeEnv.lookup(receiverName, callNode);
|
|
869
887
|
}
|
|
@@ -878,7 +896,7 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
878
896
|
...(callForm !== undefined ? { callForm } : {}),
|
|
879
897
|
...(receiverName !== undefined ? { receiverName } : {}),
|
|
880
898
|
...(receiverTypeName !== undefined ? { receiverTypeName } : {}),
|
|
881
|
-
...(
|
|
899
|
+
...(receiverMixedChain !== undefined ? { receiverMixedChain } : {}),
|
|
882
900
|
});
|
|
883
901
|
}
|
|
884
902
|
}
|
|
@@ -926,6 +944,21 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
926
944
|
const nodeLabel = getLabelFromCaptures(captureMap);
|
|
927
945
|
if (!nodeLabel)
|
|
928
946
|
continue;
|
|
947
|
+
// C/C++: @definition.function is broad and also matches inline class methods (inside
|
|
948
|
+
// a class/struct body). Those are already captured by @definition.method, so skip
|
|
949
|
+
// the duplicate Function entry to prevent double-indexing in globalIndex.
|
|
950
|
+
if ((language === SupportedLanguages.CPlusPlus || language === SupportedLanguages.C) &&
|
|
951
|
+
nodeLabel === 'Function') {
|
|
952
|
+
let ancestor = captureMap['definition.function']?.parent;
|
|
953
|
+
while (ancestor) {
|
|
954
|
+
if (ancestor.type === 'class_specifier' || ancestor.type === 'struct_specifier') {
|
|
955
|
+
break; // inside a class body — duplicate of @definition.method
|
|
956
|
+
}
|
|
957
|
+
ancestor = ancestor.parent;
|
|
958
|
+
}
|
|
959
|
+
if (ancestor)
|
|
960
|
+
continue; // found a class/struct ancestor → skip
|
|
961
|
+
}
|
|
929
962
|
const nameNode = captureMap['name'];
|
|
930
963
|
// Synthesize name for constructors without explicit @name capture (e.g. Swift init)
|
|
931
964
|
if (!nameNode && nodeLabel !== 'Constructor')
|
|
@@ -948,18 +981,27 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
948
981
|
: null;
|
|
949
982
|
let parameterCount;
|
|
950
983
|
let returnType;
|
|
984
|
+
let declaredType;
|
|
951
985
|
if (nodeLabel === 'Function' || nodeLabel === 'Method' || nodeLabel === 'Constructor') {
|
|
952
986
|
const sig = extractMethodSignature(definitionNode);
|
|
953
987
|
parameterCount = sig.parameterCount;
|
|
954
988
|
returnType = sig.returnType;
|
|
955
989
|
// Language-specific return type fallback (e.g. Ruby YARD @return [Type])
|
|
956
|
-
|
|
990
|
+
// Also upgrades uninformative AST types like PHP `array` with PHPDoc `@return User[]`
|
|
991
|
+
if ((!returnType || returnType === 'array' || returnType === 'iterable') && definitionNode) {
|
|
957
992
|
const tc = typeConfigs[language];
|
|
958
993
|
if (tc?.extractReturnType) {
|
|
959
|
-
|
|
994
|
+
const docReturn = tc.extractReturnType(definitionNode);
|
|
995
|
+
if (docReturn)
|
|
996
|
+
returnType = docReturn;
|
|
960
997
|
}
|
|
961
998
|
}
|
|
962
999
|
}
|
|
1000
|
+
else if (nodeLabel === 'Property' && definitionNode) {
|
|
1001
|
+
// Extract the declared type for property/field nodes.
|
|
1002
|
+
// Walk the definition node for type annotation children.
|
|
1003
|
+
declaredType = extractPropertyDeclaredType(definitionNode);
|
|
1004
|
+
}
|
|
963
1005
|
result.nodes.push({
|
|
964
1006
|
id: nodeId,
|
|
965
1007
|
label: nodeLabel,
|
|
@@ -990,6 +1032,7 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
990
1032
|
type: nodeLabel,
|
|
991
1033
|
...(parameterCount !== undefined ? { parameterCount } : {}),
|
|
992
1034
|
...(returnType !== undefined ? { returnType } : {}),
|
|
1035
|
+
...(declaredType !== undefined ? { declaredType } : {}),
|
|
993
1036
|
...(enclosingClassId ? { ownerId: enclosingClassId } : {}),
|
|
994
1037
|
});
|
|
995
1038
|
const fileId = generateId('File', file.path);
|
|
@@ -1002,13 +1045,14 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
1002
1045
|
confidence: 1.0,
|
|
1003
1046
|
reason: '',
|
|
1004
1047
|
});
|
|
1005
|
-
// ── HAS_METHOD: link
|
|
1048
|
+
// ── HAS_METHOD / HAS_PROPERTY: link member to enclosing class ──
|
|
1006
1049
|
if (enclosingClassId) {
|
|
1050
|
+
const memberEdgeType = nodeLabel === 'Property' ? 'HAS_PROPERTY' : 'HAS_METHOD';
|
|
1007
1051
|
result.relationships.push({
|
|
1008
|
-
id: generateId(
|
|
1052
|
+
id: generateId(memberEdgeType, `${enclosingClassId}->${nodeId}`),
|
|
1009
1053
|
sourceId: enclosingClassId,
|
|
1010
1054
|
targetId: nodeId,
|
|
1011
|
-
type:
|
|
1055
|
+
type: memberEdgeType,
|
|
1012
1056
|
confidence: 1.0,
|
|
1013
1057
|
reason: '',
|
|
1014
1058
|
});
|
|
@@ -1027,7 +1071,7 @@ const processFileGroup = (files, language, queryString, result, onFileProcessed)
|
|
|
1027
1071
|
/** Accumulated result across sub-batches */
|
|
1028
1072
|
let accumulated = {
|
|
1029
1073
|
nodes: [], relationships: [], symbols: [],
|
|
1030
|
-
imports: [], calls: [], heritage: [], routes: [], constructorBindings: [], skippedLanguages: {}, fileCount: 0,
|
|
1074
|
+
imports: [], calls: [], assignments: [], heritage: [], routes: [], constructorBindings: [], skippedLanguages: {}, fileCount: 0,
|
|
1031
1075
|
};
|
|
1032
1076
|
let cumulativeProcessed = 0;
|
|
1033
1077
|
const mergeResult = (target, src) => {
|
|
@@ -1036,6 +1080,7 @@ const mergeResult = (target, src) => {
|
|
|
1036
1080
|
target.symbols.push(...src.symbols);
|
|
1037
1081
|
target.imports.push(...src.imports);
|
|
1038
1082
|
target.calls.push(...src.calls);
|
|
1083
|
+
target.assignments.push(...src.assignments);
|
|
1039
1084
|
target.heritage.push(...src.heritage);
|
|
1040
1085
|
target.routes.push(...src.routes);
|
|
1041
1086
|
target.constructorBindings.push(...src.constructorBindings);
|
|
@@ -1061,7 +1106,7 @@ parentPort.on('message', (msg) => {
|
|
|
1061
1106
|
if (msg && msg.type === 'flush') {
|
|
1062
1107
|
parentPort.postMessage({ type: 'result', data: accumulated });
|
|
1063
1108
|
// Reset for potential reuse
|
|
1064
|
-
accumulated = { nodes: [], relationships: [], symbols: [], imports: [], calls: [], heritage: [], routes: [], constructorBindings: [], skippedLanguages: {}, fileCount: 0 };
|
|
1109
|
+
accumulated = { nodes: [], relationships: [], symbols: [], imports: [], calls: [], assignments: [], heritage: [], routes: [], constructorBindings: [], skippedLanguages: {}, fileCount: 0 };
|
|
1065
1110
|
cumulativeProcessed = 0;
|
|
1066
1111
|
return;
|
|
1067
1112
|
}
|
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
import lbug from '@ladybugdb/core';
|
|
2
2
|
import { KnowledgeGraph } from '../graph/types.js';
|
|
3
|
+
/** Expose the current Database for pool adapter reuse in tests. */
|
|
4
|
+
export declare const getDatabase: () => lbug.Database | null;
|
|
3
5
|
export declare const initLbug: (dbPath: string) => Promise<{
|
|
4
6
|
db: lbug.Database;
|
|
5
7
|
conn: lbug.Connection;
|
|
@@ -9,6 +9,8 @@ let db = null;
|
|
|
9
9
|
let conn = null;
|
|
10
10
|
let currentDbPath = null;
|
|
11
11
|
let ftsLoaded = false;
|
|
12
|
+
/** Expose the current Database for pool adapter reuse in tests. */
|
|
13
|
+
export const getDatabase = () => db;
|
|
12
14
|
// Global session lock for operations that touch module-level lbug globals.
|
|
13
15
|
// This guarantees no DB switch can happen while an operation is running.
|
|
14
16
|
let sessionLock = Promise.resolve();
|
|
@@ -11,7 +11,7 @@
|
|
|
11
11
|
export declare const NODE_TABLES: readonly ["File", "Folder", "Function", "Class", "Interface", "Method", "CodeElement", "Community", "Process", "Struct", "Enum", "Macro", "Typedef", "Union", "Namespace", "Trait", "Impl", "TypeAlias", "Const", "Static", "Property", "Record", "Delegate", "Annotation", "Constructor", "Template", "Module"];
|
|
12
12
|
export type NodeTableName = typeof NODE_TABLES[number];
|
|
13
13
|
export declare const REL_TABLE_NAME = "CodeRelation";
|
|
14
|
-
export declare const REL_TYPES: readonly ["CONTAINS", "DEFINES", "IMPORTS", "CALLS", "EXTENDS", "IMPLEMENTS", "HAS_METHOD", "OVERRIDES", "MEMBER_OF", "STEP_IN_PROCESS"];
|
|
14
|
+
export declare const REL_TYPES: readonly ["CONTAINS", "DEFINES", "IMPORTS", "CALLS", "EXTENDS", "IMPLEMENTS", "HAS_METHOD", "HAS_PROPERTY", "ACCESSES", "OVERRIDES", "MEMBER_OF", "STEP_IN_PROCESS"];
|
|
15
15
|
export type RelType = typeof REL_TYPES[number];
|
|
16
16
|
export declare const EMBEDDING_TABLE_NAME = "CodeEmbedding";
|
|
17
17
|
export declare const FILE_SCHEMA = "\nCREATE NODE TABLE File (\n id STRING,\n name STRING,\n filePath STRING,\n content STRING,\n PRIMARY KEY (id)\n)";
|
package/dist/core/lbug/schema.js
CHANGED
|
@@ -22,7 +22,7 @@ export const NODE_TABLES = [
|
|
|
22
22
|
// ============================================================================
|
|
23
23
|
export const REL_TABLE_NAME = 'CodeRelation';
|
|
24
24
|
// Valid relation types
|
|
25
|
-
export const REL_TYPES = ['CONTAINS', 'DEFINES', 'IMPORTS', 'CALLS', 'EXTENDS', 'IMPLEMENTS', 'HAS_METHOD', 'OVERRIDES', 'MEMBER_OF', 'STEP_IN_PROCESS'];
|
|
25
|
+
export const REL_TYPES = ['CONTAINS', 'DEFINES', 'IMPORTS', 'CALLS', 'EXTENDS', 'IMPLEMENTS', 'HAS_METHOD', 'HAS_PROPERTY', 'ACCESSES', 'OVERRIDES', 'MEMBER_OF', 'STEP_IN_PROCESS'];
|
|
26
26
|
// ============================================================================
|
|
27
27
|
// EMBEDDING TABLE
|
|
28
28
|
// ============================================================================
|
|
@@ -12,11 +12,29 @@
|
|
|
12
12
|
* @see https://docs.ladybugdb.com/concurrency — multiple Connections
|
|
13
13
|
* from the same Database is the officially supported concurrency pattern.
|
|
14
14
|
*/
|
|
15
|
+
import lbug from '@ladybugdb/core';
|
|
16
|
+
/** Saved real stdout.write — used to silence LadybugDB native output without race conditions */
|
|
17
|
+
export declare const realStdoutWrite: any;
|
|
15
18
|
/**
|
|
16
19
|
* Initialize (or reuse) a Database + connection pool for a specific repo.
|
|
17
20
|
* Retries on lock errors (e.g., when `gitnexus analyze` is running).
|
|
21
|
+
*
|
|
22
|
+
* Concurrent calls for the same repoId are deduplicated — the second caller
|
|
23
|
+
* awaits the first's in-progress init rather than starting a redundant one.
|
|
18
24
|
*/
|
|
19
25
|
export declare const initLbug: (repoId: string, dbPath: string) => Promise<void>;
|
|
26
|
+
/**
|
|
27
|
+
* Initialize a pool entry from a pre-existing Database object.
|
|
28
|
+
*
|
|
29
|
+
* Used in tests to avoid the writable→close→read-only cycle that crashes
|
|
30
|
+
* on macOS due to N-API destructor segfaults. The pool adapter reuses
|
|
31
|
+
* the core adapter's writable Database instead of opening a new read-only one.
|
|
32
|
+
*
|
|
33
|
+
* The Database is registered in the shared dbCache so closeOne() decrements
|
|
34
|
+
* the refCount correctly. If the Database is already cached (e.g. another
|
|
35
|
+
* repoId already injected it), the existing entry is reused.
|
|
36
|
+
*/
|
|
37
|
+
export declare function initLbugWithDb(repoId: string, existingDb: lbug.Database, dbPath: string): Promise<void>;
|
|
20
38
|
export declare const executeQuery: (repoId: string, cypher: string) => Promise<any[]>;
|
|
21
39
|
/**
|
|
22
40
|
* Execute a parameterized query on a specific repo's connection pool.
|
|
@@ -33,3 +51,7 @@ export declare const closeLbug: (repoId?: string) => Promise<void>;
|
|
|
33
51
|
* Check if a specific repo's pool is active
|
|
34
52
|
*/
|
|
35
53
|
export declare const isLbugReady: (repoId: string) => boolean;
|
|
54
|
+
/** Regex to detect write operations in user-supplied Cypher queries */
|
|
55
|
+
export declare const CYPHER_WRITE_RE: RegExp;
|
|
56
|
+
/** Check if a Cypher query contains write operations */
|
|
57
|
+
export declare function isWriteQuery(query: string): boolean;
|
|
@@ -22,12 +22,12 @@ const MAX_POOL_SIZE = 5;
|
|
|
22
22
|
const IDLE_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes
|
|
23
23
|
/** Max connections per repo (caps concurrent queries per repo) */
|
|
24
24
|
const MAX_CONNS_PER_REPO = 8;
|
|
25
|
-
/** Connections created eagerly on init */
|
|
26
|
-
const INITIAL_CONNS_PER_REPO = 2;
|
|
27
25
|
let idleTimer = null;
|
|
28
26
|
/** Saved real stdout.write — used to silence LadybugDB native output without race conditions */
|
|
29
|
-
const realStdoutWrite = process.stdout.write.bind(process.stdout);
|
|
27
|
+
export const realStdoutWrite = process.stdout.write.bind(process.stdout);
|
|
30
28
|
let stdoutSilenceCount = 0;
|
|
29
|
+
/** True while pre-warming connections — prevents watchdog from prematurely restoring stdout */
|
|
30
|
+
let preWarmActive = false;
|
|
31
31
|
/**
|
|
32
32
|
* Start the idle cleanup timer (runs every 60s)
|
|
33
33
|
*/
|
|
@@ -65,19 +65,42 @@ function evictLRU() {
|
|
|
65
65
|
}
|
|
66
66
|
}
|
|
67
67
|
/**
|
|
68
|
-
* Remove a repo from the pool and release its
|
|
69
|
-
*
|
|
70
|
-
*
|
|
71
|
-
* segfault on Linux/macOS. Pool databases are opened read-only, so
|
|
72
|
-
* there is no WAL to flush — just deleting the pool entry and letting
|
|
73
|
-
* the GC (or process exit) reclaim native resources is safe.
|
|
68
|
+
* Remove a repo from the pool, close its connections, and release its
|
|
69
|
+
* shared Database ref. Only closes the Database when no other repoIds
|
|
70
|
+
* reference it (refCount === 0).
|
|
74
71
|
*/
|
|
75
72
|
function closeOne(repoId) {
|
|
76
73
|
const entry = pool.get(repoId);
|
|
77
|
-
if (entry)
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
74
|
+
if (!entry)
|
|
75
|
+
return;
|
|
76
|
+
entry.closed = true;
|
|
77
|
+
// Close available connections — fire-and-forget with .catch() to prevent
|
|
78
|
+
// unhandled rejections. Native close() returns Promise<void> but can crash
|
|
79
|
+
// the N-API destructor on macOS/Windows; deferring to process exit lets
|
|
80
|
+
// dangerouslyIgnoreUnhandledErrors absorb the crash.
|
|
81
|
+
for (const conn of entry.available) {
|
|
82
|
+
conn.close().catch(() => { });
|
|
83
|
+
}
|
|
84
|
+
entry.available.length = 0;
|
|
85
|
+
// Checked-out connections can't be closed here — they're in-flight.
|
|
86
|
+
// The checkin() function detects entry.closed and closes them on return.
|
|
87
|
+
// Only close the Database when no other repoIds reference it.
|
|
88
|
+
// External databases (injected via initLbugWithDb) are never closed here —
|
|
89
|
+
// the core adapter owns them and handles their lifecycle.
|
|
90
|
+
const shared = dbCache.get(entry.dbPath);
|
|
91
|
+
if (shared) {
|
|
92
|
+
shared.refCount--;
|
|
93
|
+
if (shared.refCount === 0) {
|
|
94
|
+
if (shared.external) {
|
|
95
|
+
// External databases are owned by the core adapter — don't close
|
|
96
|
+
// or remove from cache. Keep the entry so future initLbug() calls
|
|
97
|
+
// for the same dbPath reuse it instead of hitting a file lock.
|
|
98
|
+
shared.refCount = 0;
|
|
99
|
+
}
|
|
100
|
+
else {
|
|
101
|
+
shared.db.close().catch(() => { });
|
|
102
|
+
dbCache.delete(entry.dbPath);
|
|
103
|
+
}
|
|
81
104
|
}
|
|
82
105
|
}
|
|
83
106
|
pool.delete(repoId);
|
|
@@ -97,6 +120,14 @@ function restoreStdout() {
|
|
|
97
120
|
process.stdout.write = realStdoutWrite;
|
|
98
121
|
}
|
|
99
122
|
}
|
|
123
|
+
// Safety watchdog: restore stdout if it gets stuck silenced (e.g. native crash
|
|
124
|
+
// inside createConnection before restoreStdout runs).
|
|
125
|
+
setInterval(() => {
|
|
126
|
+
if (stdoutSilenceCount > 0 && !preWarmActive) {
|
|
127
|
+
stdoutSilenceCount = 0;
|
|
128
|
+
process.stdout.write = realStdoutWrite;
|
|
129
|
+
}
|
|
130
|
+
}, 1000).unref();
|
|
100
131
|
function createConnection(db) {
|
|
101
132
|
silenceStdout();
|
|
102
133
|
try {
|
|
@@ -112,9 +143,14 @@ const QUERY_TIMEOUT_MS = 30_000;
|
|
|
112
143
|
const WAITER_TIMEOUT_MS = 15_000;
|
|
113
144
|
const LOCK_RETRY_ATTEMPTS = 3;
|
|
114
145
|
const LOCK_RETRY_DELAY_MS = 2000;
|
|
146
|
+
/** Deduplicates concurrent initLbug calls for the same repoId */
|
|
147
|
+
const initPromises = new Map();
|
|
115
148
|
/**
|
|
116
149
|
* Initialize (or reuse) a Database + connection pool for a specific repo.
|
|
117
150
|
* Retries on lock errors (e.g., when `gitnexus analyze` is running).
|
|
151
|
+
*
|
|
152
|
+
* Concurrent calls for the same repoId are deduplicated — the second caller
|
|
153
|
+
* awaits the first's in-progress init rather than starting a redundant one.
|
|
118
154
|
*/
|
|
119
155
|
export const initLbug = async (repoId, dbPath) => {
|
|
120
156
|
const existing = pool.get(repoId);
|
|
@@ -122,6 +158,27 @@ export const initLbug = async (repoId, dbPath) => {
|
|
|
122
158
|
existing.lastUsed = Date.now();
|
|
123
159
|
return;
|
|
124
160
|
}
|
|
161
|
+
// Deduplicate concurrent init calls for the same repoId —
|
|
162
|
+
// prevents double-init race when multiple parallel tool calls
|
|
163
|
+
// trigger initialization for the same repo simultaneously.
|
|
164
|
+
const pending = initPromises.get(repoId);
|
|
165
|
+
if (pending)
|
|
166
|
+
return pending;
|
|
167
|
+
const promise = doInitLbug(repoId, dbPath);
|
|
168
|
+
initPromises.set(repoId, promise);
|
|
169
|
+
try {
|
|
170
|
+
await promise;
|
|
171
|
+
}
|
|
172
|
+
finally {
|
|
173
|
+
initPromises.delete(repoId);
|
|
174
|
+
}
|
|
175
|
+
};
|
|
176
|
+
/**
|
|
177
|
+
* Internal init — creates DB, pre-warms connections, loads FTS, then registers pool.
|
|
178
|
+
* Pool entry is registered LAST so concurrent executeQuery calls see either
|
|
179
|
+
* "not initialized" (and throw) or a fully ready pool — never a half-built one.
|
|
180
|
+
*/
|
|
181
|
+
async function doInitLbug(repoId, dbPath) {
|
|
125
182
|
// Check if database exists
|
|
126
183
|
try {
|
|
127
184
|
await fs.stat(dbPath);
|
|
@@ -166,14 +223,22 @@ export const initLbug = async (repoId, dbPath) => {
|
|
|
166
223
|
}
|
|
167
224
|
shared.refCount++;
|
|
168
225
|
const db = shared.db;
|
|
169
|
-
// Pre-create
|
|
226
|
+
// Pre-create the full pool upfront so createConnection() (which silences
|
|
227
|
+
// stdout) is never called lazily during active query execution.
|
|
228
|
+
// Mark preWarmActive so the watchdog timer doesn't interfere.
|
|
229
|
+
preWarmActive = true;
|
|
170
230
|
const available = [];
|
|
171
|
-
|
|
172
|
-
|
|
231
|
+
try {
|
|
232
|
+
for (let i = 0; i < MAX_CONNS_PER_REPO; i++) {
|
|
233
|
+
available.push(createConnection(db));
|
|
234
|
+
}
|
|
173
235
|
}
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
236
|
+
finally {
|
|
237
|
+
preWarmActive = false;
|
|
238
|
+
}
|
|
239
|
+
// Load FTS extension once per shared Database.
|
|
240
|
+
// Done BEFORE pool registration so no concurrent checkout can grab
|
|
241
|
+
// the connection while the async FTS load is in progress.
|
|
177
242
|
if (!shared.ftsLoaded) {
|
|
178
243
|
try {
|
|
179
244
|
await available[0].query('LOAD EXTENSION fts');
|
|
@@ -183,7 +248,67 @@ export const initLbug = async (repoId, dbPath) => {
|
|
|
183
248
|
// Extension may not be installed — FTS queries will fail gracefully
|
|
184
249
|
}
|
|
185
250
|
}
|
|
186
|
-
|
|
251
|
+
// Register pool entry only after all connections are pre-warmed and FTS is
|
|
252
|
+
// loaded. Concurrent executeQuery calls see either "not initialized"
|
|
253
|
+
// (and throw cleanly) or a fully ready pool — never a half-built one.
|
|
254
|
+
pool.set(repoId, { db, available, checkedOut: 0, waiters: [], lastUsed: Date.now(), dbPath, closed: false });
|
|
255
|
+
ensureIdleTimer();
|
|
256
|
+
}
|
|
257
|
+
/**
 * Initialize a pool entry from a pre-existing Database object.
 *
 * Used in tests to avoid the writable→close→read-only cycle that crashes
 * on macOS due to N-API destructor segfaults. The pool adapter reuses
 * the core adapter's writable Database instead of opening a new read-only one.
 *
 * The Database is registered in the shared dbCache so closeOne() decrements
 * the refCount correctly. If the Database is already cached (e.g. another
 * repoId already injected it), the existing entry is reused.
 *
 * @param {string} repoId - key identifying the pool entry
 * @param {object} existingDb - already-open Database to reuse (never closed here)
 * @param {string} dbPath - filesystem path used as the dbCache key
 * @returns {Promise<void>}
 */
export async function initLbugWithDb(repoId, existingDb, dbPath) {
    const existing = pool.get(repoId);
    if (existing) {
        // Pool already initialized for this repo — just refresh LRU bookkeeping.
        existing.lastUsed = Date.now();
        return;
    }
    // Register in dbCache with external: true so other initLbug() calls
    // for the same dbPath reuse this Database instead of trying to open
    // a new one (which would fail with a file lock error).
    // closeOne() respects the external flag and skips db.close().
    let shared = dbCache.get(dbPath);
    if (!shared) {
        shared = { db: existingDb, refCount: 0, ftsLoaded: false, external: true };
        dbCache.set(dbPath, shared);
    }
    shared.refCount++;
    const available = [];
    // Mark preWarmActive so the watchdog timer doesn't interfere while
    // connections are created below.
    preWarmActive = true;
    try {
        for (let i = 0; i < MAX_CONNS_PER_REPO; i++) {
            available.push(createConnection(existingDb));
        }
    }
    finally {
        preWarmActive = false;
    }
    // Load the FTS extension once per shared Database, mirroring initLbug().
    // Previously this ran unconditionally and never set shared.ftsLoaded, so
    // every injected repoId (and any later initLbug for the same dbPath)
    // re-issued LOAD EXTENSION against the same Database.
    if (!shared.ftsLoaded) {
        try {
            await available[0].query('LOAD EXTENSION fts');
            shared.ftsLoaded = true;
        }
        catch {
            // Extension may already be loaded or not installed — FTS queries
            // will fail gracefully. Leave ftsLoaded false so a later init
            // can retry.
        }
    }
    // Register the pool entry only after pre-warm and the FTS load, so a
    // concurrent executeQuery sees either "not initialized" (clean throw)
    // or a fully ready pool — never a half-built one.
    pool.set(repoId, {
        db: existingDb,
        available,
        checkedOut: 0,
        waiters: [],
        lastUsed: Date.now(),
        dbPath,
        closed: false
    });
    ensureIdleTimer();
}
|
|
187
312
|
/**
|
|
188
313
|
* Checkout a connection from the pool.
|
|
189
314
|
* Returns an available connection, or creates a new one if under the cap.
|
|
@@ -195,11 +320,14 @@ function checkout(entry) {
|
|
|
195
320
|
entry.checkedOut++;
|
|
196
321
|
return Promise.resolve(entry.available.pop());
|
|
197
322
|
}
|
|
198
|
-
//
|
|
323
|
+
// Pool was pre-warmed to MAX_CONNS_PER_REPO during init. If we're here
|
|
324
|
+
// with fewer total connections, something leaked — surface the bug rather
|
|
325
|
+
// than silently creating a connection (which would silence stdout mid-query).
|
|
199
326
|
const totalConns = entry.available.length + entry.checkedOut;
|
|
200
327
|
if (totalConns < MAX_CONNS_PER_REPO) {
|
|
201
|
-
|
|
202
|
-
|
|
328
|
+
throw new Error(`Connection pool integrity error: expected ${MAX_CONNS_PER_REPO} ` +
|
|
329
|
+
`connections but found ${totalConns} (${entry.available.length} available, ` +
|
|
330
|
+
`${entry.checkedOut} checked out)`);
|
|
203
331
|
}
|
|
204
332
|
// At capacity — queue the caller with a timeout.
|
|
205
333
|
return new Promise((resolve, reject) => {
|
|
@@ -218,10 +346,17 @@ function checkout(entry) {
|
|
|
218
346
|
}
|
|
219
347
|
/**
|
|
220
348
|
* Return a connection to the pool after use.
|
|
349
|
+
* If the pool entry was closed while the connection was checked out (e.g.
|
|
350
|
+
* LRU eviction), close the orphaned connection instead of returning it.
|
|
221
351
|
* If there are queued waiters, hand the connection directly to the next one
|
|
222
352
|
* instead of putting it back in the available array (avoids race conditions).
|
|
223
353
|
*/
|
|
224
354
|
function checkin(entry, conn) {
|
|
355
|
+
if (entry.closed) {
|
|
356
|
+
// Pool entry was deleted during checkout — close the orphaned connection
|
|
357
|
+
conn.close().catch(() => { });
|
|
358
|
+
return;
|
|
359
|
+
}
|
|
225
360
|
if (entry.waiters.length > 0) {
|
|
226
361
|
// Hand directly to the next waiter — no intermediate available state
|
|
227
362
|
const waiter = entry.waiters.shift();
|
|
@@ -249,6 +384,9 @@ export const executeQuery = async (repoId, cypher) => {
|
|
|
249
384
|
if (!entry) {
|
|
250
385
|
throw new Error(`LadybugDB not initialized for repo "${repoId}". Call initLbug first.`);
|
|
251
386
|
}
|
|
387
|
+
if (isWriteQuery(cypher)) {
|
|
388
|
+
throw new Error('Write operations are not allowed. The pool adapter is read-only.');
|
|
389
|
+
}
|
|
252
390
|
entry.lastUsed = Date.now();
|
|
253
391
|
const conn = await checkout(entry);
|
|
254
392
|
try {
|
|
@@ -309,3 +447,9 @@ export const closeLbug = async (repoId) => {
|
|
|
309
447
|
* Check if a specific repo's pool is active
|
|
310
448
|
*/
|
|
311
449
|
/** Check whether a pool entry currently exists for the given repo. */
export function isLbugReady(repoId) {
    return pool.has(repoId);
}
|
|
450
|
+
/**
 * Conservative pattern of Cypher write keywords.
 * NOTE(review): matched anywhere in the query text, including inside string
 * literals, so a read query quoting a word like 'merge' is rejected too —
 * a safe false positive for a read-only guard.
 */
export const CYPHER_WRITE_RE = /\b(CREATE|DELETE|SET|MERGE|REMOVE|DROP|ALTER|COPY|DETACH)\b/i;
/**
 * Determine whether a user-supplied Cypher query contains a write operation.
 * @param {string} query - raw Cypher text
 * @returns {boolean} true when any write keyword is present
 */
export function isWriteQuery(query) {
    // exec() on a non-global regex is stateless and equivalent to test().
    return CYPHER_WRITE_RE.exec(query) !== null;
}
|