trellis 2.0.13 → 2.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/index.js +1 -1
- package/dist/embeddings/index.js +1 -1
- package/dist/{index-7gvjxt27.js → index-2917tjd8.js} +1 -1
- package/package.json +2 -10
- package/dist/transformers.node-bx3q9d7k.js +0 -33130
- package/src/cli/index.ts +0 -3356
- package/src/core/agents/harness.ts +0 -380
- package/src/core/agents/index.ts +0 -18
- package/src/core/agents/types.ts +0 -90
- package/src/core/index.ts +0 -118
- package/src/core/kernel/middleware.ts +0 -44
- package/src/core/kernel/trellis-kernel.ts +0 -593
- package/src/core/ontology/builtins.ts +0 -248
- package/src/core/ontology/index.ts +0 -34
- package/src/core/ontology/registry.ts +0 -209
- package/src/core/ontology/types.ts +0 -124
- package/src/core/ontology/validator.ts +0 -382
- package/src/core/persist/backend.ts +0 -74
- package/src/core/persist/sqlite-backend.ts +0 -298
- package/src/core/plugins/index.ts +0 -17
- package/src/core/plugins/registry.ts +0 -322
- package/src/core/plugins/types.ts +0 -126
- package/src/core/query/datalog.ts +0 -188
- package/src/core/query/engine.ts +0 -370
- package/src/core/query/index.ts +0 -34
- package/src/core/query/parser.ts +0 -481
- package/src/core/query/types.ts +0 -200
- package/src/core/store/eav-store.ts +0 -467
- package/src/decisions/auto-capture.ts +0 -136
- package/src/decisions/hooks.ts +0 -163
- package/src/decisions/index.ts +0 -261
- package/src/decisions/types.ts +0 -103
- package/src/embeddings/auto-embed.ts +0 -248
- package/src/embeddings/chunker.ts +0 -327
- package/src/embeddings/index.ts +0 -48
- package/src/embeddings/model.ts +0 -112
- package/src/embeddings/search.ts +0 -305
- package/src/embeddings/store.ts +0 -313
- package/src/embeddings/types.ts +0 -92
- package/src/engine.ts +0 -1125
- package/src/garden/cluster.ts +0 -330
- package/src/garden/garden.ts +0 -306
- package/src/garden/index.ts +0 -29
- package/src/git/git-exporter.ts +0 -286
- package/src/git/git-importer.ts +0 -329
- package/src/git/git-reader.ts +0 -189
- package/src/git/index.ts +0 -22
- package/src/identity/governance.ts +0 -211
- package/src/identity/identity.ts +0 -224
- package/src/identity/index.ts +0 -30
- package/src/identity/signing-middleware.ts +0 -97
- package/src/index.ts +0 -29
- package/src/links/index.ts +0 -49
- package/src/links/lifecycle.ts +0 -400
- package/src/links/parser.ts +0 -484
- package/src/links/ref-index.ts +0 -186
- package/src/links/resolver.ts +0 -314
- package/src/links/types.ts +0 -108
- package/src/mcp/index.ts +0 -22
- package/src/mcp/server.ts +0 -1278
- package/src/semantic/csharp-parser.ts +0 -493
- package/src/semantic/go-parser.ts +0 -585
- package/src/semantic/index.ts +0 -34
- package/src/semantic/java-parser.ts +0 -456
- package/src/semantic/python-parser.ts +0 -659
- package/src/semantic/ruby-parser.ts +0 -446
- package/src/semantic/rust-parser.ts +0 -784
- package/src/semantic/semantic-merge.ts +0 -210
- package/src/semantic/ts-parser.ts +0 -681
- package/src/semantic/types.ts +0 -175
- package/src/sync/http-transport.ts +0 -144
- package/src/sync/index.ts +0 -43
- package/src/sync/memory-transport.ts +0 -66
- package/src/sync/multi-repo.ts +0 -200
- package/src/sync/reconciler.ts +0 -237
- package/src/sync/sync-engine.ts +0 -258
- package/src/sync/types.ts +0 -104
- package/src/sync/ws-transport.ts +0 -145
- package/src/ui/client.html +0 -695
- package/src/ui/server.ts +0 -419
- package/src/vcs/blob-store.ts +0 -124
- package/src/vcs/branch.ts +0 -150
- package/src/vcs/checkpoint.ts +0 -64
- package/src/vcs/decompose.ts +0 -469
- package/src/vcs/diff.ts +0 -409
- package/src/vcs/engine-context.ts +0 -26
- package/src/vcs/index.ts +0 -23
- package/src/vcs/issue.ts +0 -800
- package/src/vcs/merge.ts +0 -425
- package/src/vcs/milestone.ts +0 -124
- package/src/vcs/ops.ts +0 -59
- package/src/vcs/types.ts +0 -213
- package/src/vcs/vcs-middleware.ts +0 -81
- package/src/watcher/fs-watcher.ts +0 -255
- package/src/watcher/index.ts +0 -9
- package/src/watcher/ingestion.ts +0 -116
package/src/links/parser.ts
DELETED
|
@@ -1,484 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Wiki-Link Parser
|
|
3
|
-
*
|
|
4
|
-
* Parses [[...]] references from markdown files and doc-comments
|
|
5
|
-
* in source code. Supports namespaced and bare syntax with smart
|
|
6
|
-
* namespace inference.
|
|
7
|
-
*
|
|
8
|
-
* @see TRL-11
|
|
9
|
-
*/
|
|
10
|
-
|
|
11
|
-
import type { EntityRef, RefContext, RefNamespace, RefSource } from './types.js';
|
|
12
|
-
|
|
13
|
-
// ---------------------------------------------------------------------------
// Main regex for [[...]] wiki-links
// ---------------------------------------------------------------------------

/**
 * Matches [[target]] or [[target|alias]] where target may include
 * namespace prefix, file paths, and #anchors.
 *
 * Captures:
 *   [1] = inner content (everything between [[ and ]])
 *
 * NOTE: the `g` flag makes `lastIndex` stateful; extractRefs clones this
 * regex per line rather than sharing the cursor across calls.
 */
const WIKI_LINK_RE = /\[\[([^\]]+)\]\]/g;
|
|
25
|
-
|
|
26
|
-
// ---------------------------------------------------------------------------
// Known file extensions for bare ref inference
// ---------------------------------------------------------------------------

// Lowercase extensions (no dot) that let a bare, slash-free ref like
// [[utils.ts]] be inferred as a `file` ref by inferNamespace.
const CODE_EXTENSIONS = new Set([
  'ts', 'tsx', 'js', 'jsx', 'mjs', 'cjs',
  'py', 'pyi',
  'go',
  'rs',
  'rb',
  'java',
  'cs',
  'md',
  'json', 'yaml', 'yml', 'toml',
  'css', 'scss', 'less',
  'html', 'vue', 'svelte',
]);
|
|
43
|
-
|
|
44
|
-
// ---------------------------------------------------------------------------
|
|
45
|
-
// Public API
|
|
46
|
-
// ---------------------------------------------------------------------------
|
|
47
|
-
|
|
48
|
-
/**
|
|
49
|
-
* Parse all [[...]] references from a file's content.
|
|
50
|
-
* Detects context (markdown vs doc-comment) based on file extension.
|
|
51
|
-
*/
|
|
52
|
-
export function parseFileRefs(content: string, filePath: string): EntityRef[] {
|
|
53
|
-
const ext = filePath.split('.').pop()?.toLowerCase() ?? '';
|
|
54
|
-
|
|
55
|
-
if (ext === 'md') {
|
|
56
|
-
return parseMarkdownRefs(content, filePath);
|
|
57
|
-
}
|
|
58
|
-
|
|
59
|
-
// Source code: extract refs from doc-comments only
|
|
60
|
-
return parseDocCommentRefs(content, filePath, ext);
|
|
61
|
-
}
|
|
62
|
-
|
|
63
|
-
/**
 * Parse all [[...]] references from markdown content.
 *
 * The whole file body is scanned with context 'markdown' (unlike source
 * files, where only doc-comment blocks are considered).
 */
export function parseMarkdownRefs(content: string, filePath: string): EntityRef[] {
  return extractRefs(content, filePath, 'markdown');
}
|
|
69
|
-
|
|
70
|
-
/**
|
|
71
|
-
* Parse [[...]] references from doc-comments in source code.
|
|
72
|
-
* Extracts comment blocks first, then scans them for wiki-links.
|
|
73
|
-
*/
|
|
74
|
-
export function parseDocCommentRefs(
|
|
75
|
-
content: string,
|
|
76
|
-
filePath: string,
|
|
77
|
-
ext?: string,
|
|
78
|
-
): EntityRef[] {
|
|
79
|
-
const fileExt = ext ?? (filePath.split('.').pop()?.toLowerCase() ?? '');
|
|
80
|
-
const commentBlocks = extractDocComments(content, fileExt);
|
|
81
|
-
const refs: EntityRef[] = [];
|
|
82
|
-
|
|
83
|
-
for (const block of commentBlocks) {
|
|
84
|
-
const blockRefs = extractRefs(block.text, filePath, block.context, block.startLine);
|
|
85
|
-
refs.push(...blockRefs);
|
|
86
|
-
}
|
|
87
|
-
|
|
88
|
-
return refs;
|
|
89
|
-
}
|
|
90
|
-
|
|
91
|
-
// ---------------------------------------------------------------------------
// Core extraction
// ---------------------------------------------------------------------------

/**
 * A contiguous comment/docstring region extracted from source code.
 */
interface CommentBlock {
  // Comment text with the language's comment markers stripped.
  text: string;
  // 0-based index of the line where the block begins in the source file;
  // used as a line offset when scanning the block for refs.
  startLine: number;
  // How the comment was embedded ('jsdoc', 'pydoc', 'rustdoc', 'godoc', 'comment').
  context: RefContext;
}
|
|
100
|
-
|
|
101
|
-
/**
|
|
102
|
-
* Extract all [[...]] wiki-links from a text block.
|
|
103
|
-
*/
|
|
104
|
-
function extractRefs(
|
|
105
|
-
text: string,
|
|
106
|
-
filePath: string,
|
|
107
|
-
context: RefContext,
|
|
108
|
-
lineOffset: number = 0,
|
|
109
|
-
): EntityRef[] {
|
|
110
|
-
const refs: EntityRef[] = [];
|
|
111
|
-
const lines = text.split('\n');
|
|
112
|
-
|
|
113
|
-
for (let i = 0; i < lines.length; i++) {
|
|
114
|
-
const line = lines[i];
|
|
115
|
-
let match: RegExpExecArray | null;
|
|
116
|
-
|
|
117
|
-
// Reset regex state for each line
|
|
118
|
-
const re = new RegExp(WIKI_LINK_RE.source, WIKI_LINK_RE.flags);
|
|
119
|
-
|
|
120
|
-
while ((match = re.exec(line)) !== null) {
|
|
121
|
-
const raw = match[1];
|
|
122
|
-
const col = match.index;
|
|
123
|
-
const lineNum = i + 1 + lineOffset;
|
|
124
|
-
|
|
125
|
-
const parsed = parseRefContent(raw);
|
|
126
|
-
if (!parsed) continue;
|
|
127
|
-
|
|
128
|
-
refs.push({
|
|
129
|
-
...parsed,
|
|
130
|
-
raw,
|
|
131
|
-
source: {
|
|
132
|
-
filePath,
|
|
133
|
-
line: lineNum,
|
|
134
|
-
col,
|
|
135
|
-
context,
|
|
136
|
-
},
|
|
137
|
-
});
|
|
138
|
-
}
|
|
139
|
-
}
|
|
140
|
-
|
|
141
|
-
return refs;
|
|
142
|
-
}
|
|
143
|
-
|
|
144
|
-
// ---------------------------------------------------------------------------
// Ref content parsing and namespace inference
// ---------------------------------------------------------------------------

/**
 * Structured form of a wiki-link's inner content, before source-location
 * metadata is attached.
 */
interface ParsedRef {
  // Explicit or inferred namespace (issue, file, symbol, ...).
  namespace: RefNamespace;
  // The referenced target (issue ID, path, etc.), without anchor or alias.
  target: string;
  // Optional "#anchor" portion (symbol name / heading).
  anchor?: string;
  // Optional display alias from "target|alias".
  alias?: string;
}
|
|
154
|
-
|
|
155
|
-
/**
|
|
156
|
-
* Parse the inner content of a [[...]] wiki-link.
|
|
157
|
-
*
|
|
158
|
-
* Supported forms:
|
|
159
|
-
* - "namespace:target" → explicit namespace
|
|
160
|
-
* - "namespace:target|alias" → explicit namespace + alias
|
|
161
|
-
* - "namespace:target#anchor" → explicit namespace + anchor
|
|
162
|
-
* - "target" → infer namespace
|
|
163
|
-
* - "target|alias" → infer namespace + alias
|
|
164
|
-
* - "path#anchor" → symbol ref
|
|
165
|
-
* - "path#anchor|alias" → symbol ref + alias
|
|
166
|
-
*/
|
|
167
|
-
export function parseRefContent(raw: string): ParsedRef | null {
|
|
168
|
-
if (!raw || !raw.trim()) return null;
|
|
169
|
-
|
|
170
|
-
// Split alias first: "content|alias"
|
|
171
|
-
let content: string;
|
|
172
|
-
let alias: string | undefined;
|
|
173
|
-
|
|
174
|
-
const pipeIdx = raw.indexOf('|');
|
|
175
|
-
if (pipeIdx !== -1) {
|
|
176
|
-
content = raw.substring(0, pipeIdx).trim();
|
|
177
|
-
alias = raw.substring(pipeIdx + 1).trim();
|
|
178
|
-
if (!alias) alias = undefined;
|
|
179
|
-
} else {
|
|
180
|
-
content = raw.trim();
|
|
181
|
-
}
|
|
182
|
-
|
|
183
|
-
if (!content) return null;
|
|
184
|
-
|
|
185
|
-
// Check for explicit namespace: "namespace:rest"
|
|
186
|
-
const colonIdx = content.indexOf(':');
|
|
187
|
-
if (colonIdx !== -1) {
|
|
188
|
-
const possibleNs = content.substring(0, colonIdx);
|
|
189
|
-
if (isValidNamespace(possibleNs)) {
|
|
190
|
-
const rest = content.substring(colonIdx + 1);
|
|
191
|
-
const { target, anchor } = splitAnchor(rest);
|
|
192
|
-
const ns = possibleNs as RefNamespace;
|
|
193
|
-
return { namespace: ns, target, anchor, alias };
|
|
194
|
-
}
|
|
195
|
-
}
|
|
196
|
-
|
|
197
|
-
// No explicit namespace — infer from content
|
|
198
|
-
const { target, anchor } = splitAnchor(content);
|
|
199
|
-
|
|
200
|
-
const inferred = inferNamespace(target, anchor);
|
|
201
|
-
if (!inferred) return null;
|
|
202
|
-
|
|
203
|
-
return { namespace: inferred, target, anchor, alias };
|
|
204
|
-
}
|
|
205
|
-
|
|
206
|
-
/**
|
|
207
|
-
* Split "path#anchor" into { target, anchor }.
|
|
208
|
-
*/
|
|
209
|
-
function splitAnchor(content: string): { target: string; anchor?: string } {
|
|
210
|
-
const hashIdx = content.indexOf('#');
|
|
211
|
-
if (hashIdx === -1) return { target: content };
|
|
212
|
-
return {
|
|
213
|
-
target: content.substring(0, hashIdx),
|
|
214
|
-
anchor: content.substring(hashIdx + 1) || undefined,
|
|
215
|
-
};
|
|
216
|
-
}
|
|
217
|
-
|
|
218
|
-
const VALID_NAMESPACES = new Set<string>([
|
|
219
|
-
'issue', 'file', 'symbol', 'identity', 'milestone', 'decision',
|
|
220
|
-
]);
|
|
221
|
-
|
|
222
|
-
function isValidNamespace(s: string): boolean {
|
|
223
|
-
return VALID_NAMESPACES.has(s);
|
|
224
|
-
}
|
|
225
|
-
|
|
226
|
-
/**
|
|
227
|
-
* Infer the namespace from bare ref content.
|
|
228
|
-
*
|
|
229
|
-
* Rules (in order):
|
|
230
|
-
* 1. TRL-\d+ pattern → issue
|
|
231
|
-
* 2. DEC-\d+ pattern → decision
|
|
232
|
-
* 3. Has anchor (#) → symbol
|
|
233
|
-
* 4. Contains '/' or has known file extension → file
|
|
234
|
-
* 5. Otherwise → null (namespace required)
|
|
235
|
-
*/
|
|
236
|
-
export function inferNamespace(target: string, anchor?: string): RefNamespace | null {
|
|
237
|
-
// Issue pattern: TRL-1, TRL-42, etc.
|
|
238
|
-
if (/^TRL-\d+$/i.test(target)) return 'issue';
|
|
239
|
-
|
|
240
|
-
// Decision pattern: DEC-1, DEC-42, etc.
|
|
241
|
-
if (/^DEC-\d+$/i.test(target)) return 'decision';
|
|
242
|
-
|
|
243
|
-
// Symbol: has anchor
|
|
244
|
-
if (anchor) return 'symbol';
|
|
245
|
-
|
|
246
|
-
// File path: contains slash or has known extension
|
|
247
|
-
if (target.includes('/')) return 'file';
|
|
248
|
-
const ext = target.split('.').pop()?.toLowerCase();
|
|
249
|
-
if (ext && CODE_EXTENSIONS.has(ext)) return 'file';
|
|
250
|
-
|
|
251
|
-
// Cannot infer — namespace required
|
|
252
|
-
return null;
|
|
253
|
-
}
|
|
254
|
-
|
|
255
|
-
// ---------------------------------------------------------------------------
|
|
256
|
-
// Doc-comment extraction
|
|
257
|
-
// ---------------------------------------------------------------------------
|
|
258
|
-
|
|
259
|
-
/**
|
|
260
|
-
* Extract doc-comment blocks from source code based on language.
|
|
261
|
-
*/
|
|
262
|
-
function extractDocComments(content: string, ext: string): CommentBlock[] {
|
|
263
|
-
switch (ext) {
|
|
264
|
-
case 'ts':
|
|
265
|
-
case 'tsx':
|
|
266
|
-
case 'js':
|
|
267
|
-
case 'jsx':
|
|
268
|
-
case 'mjs':
|
|
269
|
-
case 'cjs':
|
|
270
|
-
case 'java':
|
|
271
|
-
case 'cs':
|
|
272
|
-
return extractJSDocComments(content);
|
|
273
|
-
case 'py':
|
|
274
|
-
case 'pyi':
|
|
275
|
-
return extractPythonDocstrings(content);
|
|
276
|
-
case 'rs':
|
|
277
|
-
return extractRustDocComments(content);
|
|
278
|
-
case 'go':
|
|
279
|
-
return extractGoDocComments(content);
|
|
280
|
-
case 'rb':
|
|
281
|
-
return extractRubyDocComments(content);
|
|
282
|
-
default:
|
|
283
|
-
return [];
|
|
284
|
-
}
|
|
285
|
-
}
|
|
286
|
-
|
|
287
|
-
/**
 * Extract JSDoc-style comments: block comments and single-line // comments.
 *
 * Emits one CommentBlock per /* ... *​/ block (context 'jsdoc') and one per
 * standalone // line (context 'comment'). Lines inside a block are trimmed
 * before being joined, so original indentation within a block is lost.
 *
 * NOTE(review): a block opened with "/*" that never closes before EOF is
 * silently dropped — confirm intended.
 * NOTE(review): the single-line check requires length > 4, so the literal
 * "/**​/" is treated as a multi-line opener and will swallow following
 * lines until the next "*​/" — confirm intended.
 */
function extractJSDocComments(content: string): CommentBlock[] {
  const blocks: CommentBlock[] = [];
  const lines = content.split('\n');

  // State machine: inside/outside a /* ... */ block.
  let inBlock = false;
  let blockLines: string[] = [];
  let blockStart = 0;

  for (let i = 0; i < lines.length; i++) {
    const trimmed = lines[i].trim();

    // Block comment start: /** or /*
    if (!inBlock && (trimmed.startsWith('/**') || trimmed.startsWith('/*'))) {
      inBlock = true;
      blockStart = i;
      blockLines = [trimmed];

      // Single-line block comment: /** ... */ entirely on this line
      if (trimmed.endsWith('*/') && trimmed.length > 4) {
        blocks.push({
          text: stripBlockCommentMarkers(blockLines.join('\n')),
          startLine: blockStart,
          context: 'jsdoc',
        });
        inBlock = false;
        blockLines = [];
      }
      continue;
    }

    if (inBlock) {
      blockLines.push(trimmed);
      // Closing delimiter anywhere on the line ends the block.
      if (trimmed.includes('*/')) {
        blocks.push({
          text: stripBlockCommentMarkers(blockLines.join('\n')),
          startLine: blockStart,
          context: 'jsdoc',
        });
        inBlock = false;
        blockLines = [];
      }
      continue;
    }

    // Single-line // comments outside any block
    if (trimmed.startsWith('//')) {
      blocks.push({
        text: trimmed.replace(/^\/\/\s?/, ''),
        startLine: i,
        context: 'comment',
      });
    }
  }

  return blocks;
}
|
|
346
|
-
|
|
347
|
-
/**
|
|
348
|
-
* Strip block comment markers (/** * * /) from text.
|
|
349
|
-
*/
|
|
350
|
-
function stripBlockCommentMarkers(text: string): string {
|
|
351
|
-
return text
|
|
352
|
-
.replace(/^\/\*\*?\s?/, '')
|
|
353
|
-
.replace(/\*\/\s*$/, '')
|
|
354
|
-
.split('\n')
|
|
355
|
-
.map((line) => line.replace(/^\s*\*\s?/, ''))
|
|
356
|
-
.join('\n')
|
|
357
|
-
.trim();
|
|
358
|
-
}
|
|
359
|
-
|
|
360
|
-
/**
 * Extract Python docstrings (triple-quoted strings) and # comments.
 *
 * Docstrings get context 'pydoc'; # comment lines get context 'comment'.
 * Lines inside a multi-line docstring are trimmed individually, so
 * internal indentation is lost.
 *
 * NOTE(review): any triple-quoted string at the start of a line is
 * captured, not only true docstrings — a triple-quoted assignment value
 * would match too; confirm this is acceptable.
 * NOTE(review): an empty docstring written as exactly '""""""' fails the
 * length > delim.length * 2 check and opens a multi-line block instead —
 * confirm intended.
 */
function extractPythonDocstrings(content: string): CommentBlock[] {
  const blocks: CommentBlock[] = [];
  const lines = content.split('\n');

  // State machine: inside/outside a triple-quoted string, tracking which
  // delimiter (""" or ''') opened it.
  let inDocstring = false;
  let delimiter = '';
  let blockLines: string[] = [];
  let blockStart = 0;

  for (let i = 0; i < lines.length; i++) {
    const trimmed = lines[i].trim();

    if (!inDocstring) {
      // Check for docstring start with either delimiter style
      for (const delim of ['"""', "'''"]) {
        if (trimmed.startsWith(delim)) {
          // Single-line docstring: opener and closer on the same line
          if (trimmed.endsWith(delim) && trimmed.length > delim.length * 2) {
            blocks.push({
              text: trimmed.slice(delim.length, -delim.length).trim(),
              startLine: i,
              context: 'pydoc',
            });
            break;
          }
          // Multi-line docstring start; keep any text after the opener
          inDocstring = true;
          delimiter = delim;
          blockStart = i;
          blockLines = [trimmed.slice(delim.length)];
          break;
        }
      }
      // Also capture # comments (only when this line didn't open a docstring)
      if (!inDocstring && trimmed.startsWith('#')) {
        blocks.push({
          text: trimmed.replace(/^#\s?/, ''),
          startLine: i,
          context: 'comment',
        });
      }
    } else {
      // Inside a docstring: a line ending with the matching delimiter closes it
      if (trimmed.endsWith(delimiter)) {
        blockLines.push(trimmed.slice(0, -delimiter.length));
        blocks.push({
          text: blockLines.join('\n').trim(),
          startLine: blockStart,
          context: 'pydoc',
        });
        inDocstring = false;
        blockLines = [];
      } else {
        blockLines.push(trimmed);
      }
    }
  }

  return blocks;
}
|
|
422
|
-
|
|
423
|
-
/**
|
|
424
|
-
* Extract Rust doc-comments (/// and //!).
|
|
425
|
-
*/
|
|
426
|
-
function extractRustDocComments(content: string): CommentBlock[] {
|
|
427
|
-
const blocks: CommentBlock[] = [];
|
|
428
|
-
const lines = content.split('\n');
|
|
429
|
-
|
|
430
|
-
for (let i = 0; i < lines.length; i++) {
|
|
431
|
-
const trimmed = lines[i].trim();
|
|
432
|
-
if (trimmed.startsWith('///') || trimmed.startsWith('//!')) {
|
|
433
|
-
blocks.push({
|
|
434
|
-
text: trimmed.replace(/^\/\/[\/!]\s?/, ''),
|
|
435
|
-
startLine: i,
|
|
436
|
-
context: 'rustdoc',
|
|
437
|
-
});
|
|
438
|
-
}
|
|
439
|
-
}
|
|
440
|
-
|
|
441
|
-
return blocks;
|
|
442
|
-
}
|
|
443
|
-
|
|
444
|
-
/**
|
|
445
|
-
* Extract Go doc-comments (// comments preceding declarations).
|
|
446
|
-
*/
|
|
447
|
-
function extractGoDocComments(content: string): CommentBlock[] {
|
|
448
|
-
const blocks: CommentBlock[] = [];
|
|
449
|
-
const lines = content.split('\n');
|
|
450
|
-
|
|
451
|
-
for (let i = 0; i < lines.length; i++) {
|
|
452
|
-
const trimmed = lines[i].trim();
|
|
453
|
-
if (trimmed.startsWith('//')) {
|
|
454
|
-
blocks.push({
|
|
455
|
-
text: trimmed.replace(/^\/\/\s?/, ''),
|
|
456
|
-
startLine: i,
|
|
457
|
-
context: 'godoc',
|
|
458
|
-
});
|
|
459
|
-
}
|
|
460
|
-
}
|
|
461
|
-
|
|
462
|
-
return blocks;
|
|
463
|
-
}
|
|
464
|
-
|
|
465
|
-
/**
|
|
466
|
-
* Extract Ruby doc-comments (# comments).
|
|
467
|
-
*/
|
|
468
|
-
function extractRubyDocComments(content: string): CommentBlock[] {
|
|
469
|
-
const blocks: CommentBlock[] = [];
|
|
470
|
-
const lines = content.split('\n');
|
|
471
|
-
|
|
472
|
-
for (let i = 0; i < lines.length; i++) {
|
|
473
|
-
const trimmed = lines[i].trim();
|
|
474
|
-
if (trimmed.startsWith('#')) {
|
|
475
|
-
blocks.push({
|
|
476
|
-
text: trimmed.replace(/^#\s?/, ''),
|
|
477
|
-
startLine: i,
|
|
478
|
-
context: 'comment',
|
|
479
|
-
});
|
|
480
|
-
}
|
|
481
|
-
}
|
|
482
|
-
|
|
483
|
-
return blocks;
|
|
484
|
-
}
|
package/src/links/ref-index.ts
DELETED
|
@@ -1,186 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Bidirectional Reference Index
|
|
3
|
-
*
|
|
4
|
-
* Builds and maintains a bidirectional index of [[...]] references:
|
|
5
|
-
* - outgoing: source file → EntityRefs it contains
|
|
6
|
-
* - incoming: target entity ID → RefSources that reference it
|
|
7
|
-
*
|
|
8
|
-
* Supports incremental updates when files change.
|
|
9
|
-
*
|
|
10
|
-
* @see TRL-13
|
|
11
|
-
*/
|
|
12
|
-
|
|
13
|
-
import type { EntityRef, RefIndex, RefSource } from './types.js';
|
|
14
|
-
import { parseFileRefs } from './parser.js';
|
|
15
|
-
import { resolveRef } from './resolver.js';
|
|
16
|
-
import type { ResolverContext } from './resolver.js';
|
|
17
|
-
|
|
18
|
-
// ---------------------------------------------------------------------------
|
|
19
|
-
// Public API
|
|
20
|
-
// ---------------------------------------------------------------------------
|
|
21
|
-
|
|
22
|
-
/**
|
|
23
|
-
* Build a complete RefIndex by scanning all provided file contents.
|
|
24
|
-
*/
|
|
25
|
-
export function buildRefIndex(
|
|
26
|
-
files: Array<{ path: string; content: string }>,
|
|
27
|
-
ctx: ResolverContext,
|
|
28
|
-
): RefIndex {
|
|
29
|
-
const index: RefIndex = {
|
|
30
|
-
outgoing: new Map(),
|
|
31
|
-
incoming: new Map(),
|
|
32
|
-
};
|
|
33
|
-
|
|
34
|
-
for (const file of files) {
|
|
35
|
-
const refs = parseFileRefs(file.content, file.path);
|
|
36
|
-
addFileToIndex(index, file.path, refs, ctx);
|
|
37
|
-
}
|
|
38
|
-
|
|
39
|
-
return index;
|
|
40
|
-
}
|
|
41
|
-
|
|
42
|
-
/**
|
|
43
|
-
* Update the index for a single file that was added or modified.
|
|
44
|
-
* Removes old entries for the file, then re-parses and re-indexes.
|
|
45
|
-
*/
|
|
46
|
-
export function updateFileInIndex(
|
|
47
|
-
index: RefIndex,
|
|
48
|
-
filePath: string,
|
|
49
|
-
content: string,
|
|
50
|
-
ctx: ResolverContext,
|
|
51
|
-
): void {
|
|
52
|
-
// Remove old entries for this file
|
|
53
|
-
removeFileFromIndex(index, filePath);
|
|
54
|
-
|
|
55
|
-
// Parse and add new entries
|
|
56
|
-
const refs = parseFileRefs(content, filePath);
|
|
57
|
-
addFileToIndex(index, filePath, refs, ctx);
|
|
58
|
-
}
|
|
59
|
-
|
|
60
|
-
/**
|
|
61
|
-
* Remove all refs originating from a file (e.g. when the file is deleted).
|
|
62
|
-
*/
|
|
63
|
-
export function removeFileFromIndex(index: RefIndex, filePath: string): void {
|
|
64
|
-
const oldRefs = index.outgoing.get(filePath);
|
|
65
|
-
if (!oldRefs) return;
|
|
66
|
-
|
|
67
|
-
// Remove incoming entries that came from this file
|
|
68
|
-
for (const ref of oldRefs) {
|
|
69
|
-
const resolved = resolveRefToEntityId(ref);
|
|
70
|
-
if (resolved) {
|
|
71
|
-
const sources = index.incoming.get(resolved);
|
|
72
|
-
if (sources) {
|
|
73
|
-
const filtered = sources.filter((s) => s.filePath !== filePath);
|
|
74
|
-
if (filtered.length > 0) {
|
|
75
|
-
index.incoming.set(resolved, filtered);
|
|
76
|
-
} else {
|
|
77
|
-
index.incoming.delete(resolved);
|
|
78
|
-
}
|
|
79
|
-
}
|
|
80
|
-
}
|
|
81
|
-
}
|
|
82
|
-
|
|
83
|
-
// Remove outgoing entry
|
|
84
|
-
index.outgoing.delete(filePath);
|
|
85
|
-
}
|
|
86
|
-
|
|
87
|
-
/**
|
|
88
|
-
* Get all outgoing refs from a file.
|
|
89
|
-
*/
|
|
90
|
-
export function getOutgoingRefs(index: RefIndex, filePath: string): EntityRef[] {
|
|
91
|
-
return index.outgoing.get(filePath) ?? [];
|
|
92
|
-
}
|
|
93
|
-
|
|
94
|
-
/**
|
|
95
|
-
* Get all incoming refs (backlinks) for an entity.
|
|
96
|
-
* Accepts either a raw entity ID (e.g. "issue:TRL-5") or a target string.
|
|
97
|
-
*/
|
|
98
|
-
export function getBacklinks(index: RefIndex, entityId: string): RefSource[] {
|
|
99
|
-
return index.incoming.get(entityId) ?? [];
|
|
100
|
-
}
|
|
101
|
-
|
|
102
|
-
/**
|
|
103
|
-
* Get all entity IDs that have at least one backlink.
|
|
104
|
-
*/
|
|
105
|
-
export function getReferencedEntities(index: RefIndex): string[] {
|
|
106
|
-
return [...index.incoming.keys()];
|
|
107
|
-
}
|
|
108
|
-
|
|
109
|
-
/**
|
|
110
|
-
* Get all files that contain at least one ref.
|
|
111
|
-
*/
|
|
112
|
-
export function getFilesWithRefs(index: RefIndex): string[] {
|
|
113
|
-
return [...index.outgoing.keys()];
|
|
114
|
-
}
|
|
115
|
-
|
|
116
|
-
/**
|
|
117
|
-
* Get total counts for the index.
|
|
118
|
-
*/
|
|
119
|
-
export function getIndexStats(index: RefIndex): {
|
|
120
|
-
totalFiles: number;
|
|
121
|
-
totalRefs: number;
|
|
122
|
-
totalEntities: number;
|
|
123
|
-
} {
|
|
124
|
-
let totalRefs = 0;
|
|
125
|
-
for (const refs of index.outgoing.values()) {
|
|
126
|
-
totalRefs += refs.length;
|
|
127
|
-
}
|
|
128
|
-
return {
|
|
129
|
-
totalFiles: index.outgoing.size,
|
|
130
|
-
totalRefs,
|
|
131
|
-
totalEntities: index.incoming.size,
|
|
132
|
-
};
|
|
133
|
-
}
|
|
134
|
-
|
|
135
|
-
// ---------------------------------------------------------------------------
|
|
136
|
-
// Internal helpers
|
|
137
|
-
// ---------------------------------------------------------------------------
|
|
138
|
-
|
|
139
|
-
/**
|
|
140
|
-
* Add parsed refs from a file into the index.
|
|
141
|
-
*/
|
|
142
|
-
function addFileToIndex(
|
|
143
|
-
index: RefIndex,
|
|
144
|
-
filePath: string,
|
|
145
|
-
refs: EntityRef[],
|
|
146
|
-
ctx: ResolverContext,
|
|
147
|
-
): void {
|
|
148
|
-
if (refs.length === 0) return;
|
|
149
|
-
|
|
150
|
-
// Store outgoing refs
|
|
151
|
-
index.outgoing.set(filePath, refs);
|
|
152
|
-
|
|
153
|
-
// Build incoming (backlink) entries
|
|
154
|
-
for (const ref of refs) {
|
|
155
|
-
// Resolve the ref to get an entity ID for the backlinks map
|
|
156
|
-
const resolved = resolveRef(ref, ctx);
|
|
157
|
-
const entityId = resolved.entityId ?? buildFallbackEntityId(ref);
|
|
158
|
-
|
|
159
|
-
const sources = index.incoming.get(entityId) ?? [];
|
|
160
|
-
sources.push(ref.source);
|
|
161
|
-
index.incoming.set(entityId, sources);
|
|
162
|
-
}
|
|
163
|
-
}
|
|
164
|
-
|
|
165
|
-
/**
|
|
166
|
-
* Build a synthetic entity ID from a ref for backlink indexing
|
|
167
|
-
* even when the ref doesn't resolve (broken/stale refs still
|
|
168
|
-
* appear in the backlinks map so they can be found later).
|
|
169
|
-
*/
|
|
170
|
-
function buildFallbackEntityId(ref: EntityRef): string {
|
|
171
|
-
if (ref.anchor) {
|
|
172
|
-
return `${ref.namespace}:${ref.target}#${ref.anchor}`;
|
|
173
|
-
}
|
|
174
|
-
return `${ref.namespace}:${ref.target}`;
|
|
175
|
-
}
|
|
176
|
-
|
|
177
|
-
/**
|
|
178
|
-
* Quick entity ID extraction from a ref without full resolution.
|
|
179
|
-
* Used for removing old entries.
|
|
180
|
-
*/
|
|
181
|
-
function resolveRefToEntityId(ref: EntityRef): string {
|
|
182
|
-
if (ref.anchor) {
|
|
183
|
-
return `${ref.namespace}:${ref.target}#${ref.anchor}`;
|
|
184
|
-
}
|
|
185
|
-
return `${ref.namespace}:${ref.target}`;
|
|
186
|
-
}
|