bluera-knowledge 0.10.1 → 0.11.1

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
@@ -0,0 +1,187 @@
+ import { describe, it, expect } from 'vitest';
+ import { ZilAdapter } from './zil-adapter.js';
+ import type { LanguageAdapter } from '../language-adapter.js';
+
+ describe('ZilAdapter', () => {
+   const adapter = new ZilAdapter();
+
+   describe('interface compliance', () => {
+     it('should implement LanguageAdapter interface', () => {
+       // Type check - this should compile without error
+       const _: LanguageAdapter = adapter;
+       expect(adapter.languageId).toBe('zil');
+       expect(adapter.extensions).toContain('.zil');
+       expect(adapter.displayName).toBeDefined();
+     });
+
+     it('should have correct extensions', () => {
+       expect(adapter.extensions).toEqual(['.zil', '.mud']);
+     });
+   });
+
+   describe('parse', () => {
+     it('should return CodeNode[] for routines', () => {
+       const code = '<ROUTINE V-LOOK () <TELL "You see nothing.">>';
+       const nodes = adapter.parse(code, 'test.zil');
+
+       expect(nodes).toHaveLength(1);
+       expect(nodes[0]).toMatchObject({
+         type: 'function',
+         name: 'V-LOOK',
+         exported: true,
+       });
+     });
+
+     it('should return CodeNode[] for objects', () => {
+       const code = '<OBJECT BRASS-LANTERN (DESC "brass lantern")>';
+       const nodes = adapter.parse(code, 'test.zil');
+
+       expect(nodes).toHaveLength(1);
+       expect(nodes[0]).toMatchObject({
+         type: 'const', // Objects map to const
+         name: 'BRASS-LANTERN',
+       });
+     });
+
+     it('should return CodeNode[] for globals', () => {
+       const code = '<GLOBAL SCORE 0>';
+       const nodes = adapter.parse(code, 'test.zil');
+
+       expect(nodes).toHaveLength(1);
+       expect(nodes[0]).toMatchObject({
+         type: 'const',
+         name: 'SCORE',
+       });
+     });
+
+     it('should include line numbers', () => {
+       const code = `
+ <ROUTINE V-LOOK ()
+ <TELL "text">>
+ `;
+       const nodes = adapter.parse(code, 'test.zil');
+
+       expect(nodes).toHaveLength(1);
+       expect(nodes[0]?.startLine).toBe(2);
+       expect(nodes[0]?.endLine).toBeGreaterThanOrEqual(3);
+     });
+
+     it('should include signature for routines', () => {
+       const code = '<ROUTINE V-TAKE (OBJ) <TELL "Taking">>';
+       const nodes = adapter.parse(code, 'test.zil');
+
+       expect(nodes).toHaveLength(1);
+       expect(nodes[0]?.signature).toContain('V-TAKE');
+       expect(nodes[0]?.signature).toContain('OBJ');
+     });
+
+     it('should parse multiple symbols', () => {
+       const code = `
+ <CONSTANT M-BEG 1>
+ <GLOBAL SCORE 0>
+ <ROUTINE V-LOOK ()>
+ <OBJECT LAMP>
+ `;
+       const nodes = adapter.parse(code, 'test.zil');
+
+       expect(nodes).toHaveLength(4);
+     });
+   });
+
+   describe('extractImports', () => {
+     it('should return ImportInfo[] for INSERT-FILE', () => {
+       const code = '<INSERT-FILE "GMACROS" T>';
+       const imports = adapter.extractImports(code, 'test.zil');
+
+       expect(imports).toHaveLength(1);
+       expect(imports[0]).toMatchObject({
+         source: 'GMACROS',
+         specifiers: [],
+         isType: false,
+       });
+     });
+
+     it('should extract multiple imports', () => {
+       const code = `
+ <INSERT-FILE "GMACROS" T>
+ <INSERT-FILE "PARSER" T>
+ `;
+       const imports = adapter.extractImports(code, 'test.zil');
+
+       expect(imports).toHaveLength(2);
+       expect(imports.map((i) => i.source)).toEqual(['GMACROS', 'PARSER']);
+     });
+
+     it('should return empty array when no imports', () => {
+       const code = '<ROUTINE V-LOOK ()>';
+       const imports = adapter.extractImports(code, 'test.zil');
+
+       expect(imports).toEqual([]);
+     });
+   });
+
+   describe('chunk', () => {
+     it('should chunk by top-level forms', () => {
+       const code = `<ROUTINE V-LOOK ()
+ <TELL "text">>
+
+ <OBJECT LAMP
+ (DESC "lamp")>`;
+
+       const chunks = adapter.chunk?.(code, 'test.zil');
+
+       expect(chunks).toBeDefined();
+       expect(chunks).toHaveLength(2);
+     });
+
+     it('should include symbol metadata in chunks', () => {
+       const code = '<ROUTINE V-LOOK () <TELL "text">>';
+       const chunks = adapter.chunk?.(code, 'test.zil');
+
+       expect(chunks).toHaveLength(1);
+       expect(chunks?.[0]).toMatchObject({
+         symbolName: 'V-LOOK',
+         symbolKind: 'routine',
+       });
+     });
+
+     it('should preserve original content in chunks', () => {
+       const code = '<ROUTINE V-LOOK () <TELL "text">>';
+       const chunks = adapter.chunk?.(code, 'test.zil');
+
+       expect(chunks?.[0]?.content).toContain('ROUTINE');
+       expect(chunks?.[0]?.content).toContain('V-LOOK');
+     });
+   });
+
+   describe('analyzeCallRelationships', () => {
+     it('should return GraphEdge[] for calls', () => {
+       const code = '<ROUTINE V-LOOK () <V-DESCRIBE>>';
+       const edges = adapter.analyzeCallRelationships?.(code, 'test.zil');
+
+       expect(edges).toBeDefined();
+       expect(edges?.length).toBeGreaterThan(0);
+
+       const callEdge = edges?.find((e) => e.type === 'calls');
+       expect(callEdge).toBeDefined();
+       expect(callEdge?.to).toContain('V-DESCRIBE');
+     });
+
+     it('should not include special forms as calls', () => {
+       const code = '<ROUTINE TEST () <COND (<EQUAL? 1 1> <RTRUE>)>>';
+       const edges = adapter.analyzeCallRelationships?.(code, 'test.zil');
+
+       const callees = edges?.map((e) => e.to) ?? [];
+       expect(callees).not.toContain('COND');
+       expect(callees).not.toContain('EQUAL?');
+       expect(callees).not.toContain('RTRUE');
+     });
+
+     it('should set caller as from field', () => {
+       const code = '<ROUTINE V-LOOK () <MY-HELPER>>';
+       const edges = adapter.analyzeCallRelationships?.(code, 'test.zil');
+
+       expect(edges?.[0]?.from).toContain('V-LOOK');
+     });
+   });
+ });
@@ -0,0 +1,121 @@
+ /**
+  * ZIL Language Adapter
+  *
+  * Implements LanguageAdapter for ZIL (Zork Implementation Language).
+  * Provides full graph support: parsing, imports, chunking, and call analysis.
+  */
+
+ import { ZilParser } from './zil-parser.js';
+ import type { CodeNode, ImportInfo } from '../ast-parser.js';
+ import type { GraphEdge } from '../code-graph.js';
+ import type { LanguageAdapter, ChunkResult } from '../language-adapter.js';
+
+ /**
+  * Language adapter for ZIL (Zork Implementation Language)
+  */
+ export class ZilAdapter implements LanguageAdapter {
+   readonly languageId = 'zil';
+   readonly extensions = ['.zil', '.mud'];
+   readonly displayName = 'ZIL (Zork Implementation Language)';
+
+   private readonly parser = new ZilParser();
+
+   /**
+    * Parse ZIL code and extract symbols as CodeNode[]
+    */
+   parse(content: string, _filePath: string): CodeNode[] {
+     const result = this.parser.parse(content);
+
+     return result.symbols.map((symbol) => {
+       const node: CodeNode = {
+         type: this.mapSymbolKindToNodeType(symbol.kind),
+         name: symbol.name,
+         exported: true, // ZIL doesn't have export concept, treat all as exported
+         startLine: symbol.startLine,
+         endLine: symbol.endLine,
+       };
+
+       if (symbol.signature !== undefined) {
+         node.signature = symbol.signature;
+       }
+
+       return node;
+     });
+   }
+
+   /**
+    * Extract imports from INSERT-FILE directives
+    */
+   extractImports(content: string, _filePath: string): ImportInfo[] {
+     const result = this.parser.parse(content);
+     return result.imports;
+   }
+
+   /**
+    * Chunk ZIL code by top-level forms
+    */
+   chunk(content: string, _filePath: string): ChunkResult[] {
+     const result = this.parser.parse(content);
+     const lines = content.split('\n');
+
+     return result.forms
+       .filter((form) => form.head !== '') // Skip empty forms
+       .map((form) => {
+         // Extract content from original source using line numbers
+         const chunkLines = lines.slice(form.startLine - 1, form.endLine);
+         const chunkContent = chunkLines.join('\n');
+
+         // Find symbol for this form if it's a definition
+         const symbol = result.symbols.find(
+           (s) => s.startLine === form.startLine && s.endLine === form.endLine
+         );
+
+         const chunk: ChunkResult = {
+           content: chunkContent,
+           startLine: form.startLine,
+           endLine: form.endLine,
+         };
+
+         if (symbol !== undefined) {
+           chunk.symbolName = symbol.name;
+           chunk.symbolKind = symbol.kind;
+         }
+
+         return chunk;
+       });
+   }
+
+   /**
+    * Analyze call relationships within ZIL code
+    */
+   analyzeCallRelationships(content: string, filePath: string): GraphEdge[] {
+     const result = this.parser.parse(content);
+
+     return result.calls.map((call) => ({
+       from: `${filePath}:${call.caller}`,
+       to: `${filePath}:${call.callee}`,
+       type: 'calls' as const,
+       confidence: 0.9, // High confidence for ZIL - calls are explicit
+     }));
+   }
+
+   /**
+    * Map ZIL symbol kinds to CodeNode types
+    */
+   private mapSymbolKindToNodeType(kind: string): CodeNode['type'] {
+     switch (kind) {
+       case 'routine':
+         return 'function';
+       case 'object':
+       case 'room':
+       case 'global':
+       case 'constant':
+         return 'const';
+       case 'syntax':
+       case 'verb':
+         return 'const';
+       default:
+         return 'const';
+     }
+   }
+ }
@@ -0,0 +1,222 @@
+ import { describe, it, expect } from 'vitest';
+ import { ZilLexer, TokenType, type Token } from './zil-lexer.js';
+
+ describe('ZilLexer', () => {
+   const lexer = new ZilLexer();
+
+   describe('basic tokens', () => {
+     it('should tokenize angle brackets', () => {
+       const tokens = lexer.tokenize('<>');
+       expect(tokens).toHaveLength(2);
+       expect(tokens[0]).toMatchObject({ type: TokenType.LANGLE, value: '<' });
+       expect(tokens[1]).toMatchObject({ type: TokenType.RANGLE, value: '>' });
+     });
+
+     it('should tokenize parentheses', () => {
+       const tokens = lexer.tokenize('()');
+       expect(tokens).toHaveLength(2);
+       expect(tokens[0]).toMatchObject({ type: TokenType.LPAREN, value: '(' });
+       expect(tokens[1]).toMatchObject({ type: TokenType.RPAREN, value: ')' });
+     });
+
+     it('should tokenize atoms (symbols)', () => {
+       const tokens = lexer.tokenize('ROUTINE');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: 'ROUTINE' });
+     });
+
+     it('should tokenize atoms with special characters', () => {
+       const tokens = lexer.tokenize('EQUAL? FSET? IN?');
+       expect(tokens).toHaveLength(3);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: 'EQUAL?' });
+       expect(tokens[1]).toMatchObject({ type: TokenType.ATOM, value: 'FSET?' });
+       expect(tokens[2]).toMatchObject({ type: TokenType.ATOM, value: 'IN?' });
+     });
+
+     it('should tokenize atoms with hyphens', () => {
+       const tokens = lexer.tokenize('V-LOOK BRASS-LANTERN');
+       expect(tokens).toHaveLength(2);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: 'V-LOOK' });
+       expect(tokens[1]).toMatchObject({ type: TokenType.ATOM, value: 'BRASS-LANTERN' });
+     });
+   });
+
+   describe('strings', () => {
+     it('should tokenize simple strings', () => {
+       const tokens = lexer.tokenize('"Hello, World!"');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.STRING, value: 'Hello, World!' });
+     });
+
+     it('should tokenize strings with escaped quotes', () => {
+       const tokens = lexer.tokenize('"He said \\"Hello\\""');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.STRING, value: 'He said "Hello"' });
+     });
+
+     it('should tokenize strings with escaped backslashes', () => {
+       const tokens = lexer.tokenize('"path\\\\to\\\\file"');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.STRING, value: 'path\\to\\file' });
+     });
+
+     it('should tokenize empty strings', () => {
+       const tokens = lexer.tokenize('""');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.STRING, value: '' });
+     });
+   });
+
+   describe('numbers', () => {
+     it('should tokenize positive integers', () => {
+       const tokens = lexer.tokenize('42');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.NUMBER, value: '42' });
+     });
+
+     it('should tokenize negative integers', () => {
+       const tokens = lexer.tokenize('-10');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.NUMBER, value: '-10' });
+     });
+
+     it('should tokenize zero', () => {
+       const tokens = lexer.tokenize('0');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.NUMBER, value: '0' });
+     });
+   });
+
+   describe('comments', () => {
+     it('should skip line comments starting with semicolon', () => {
+       const tokens = lexer.tokenize('; This is a comment\nATOM');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: 'ATOM' });
+     });
+
+     it('should handle inline comments', () => {
+       const tokens = lexer.tokenize('ATOM ; inline comment');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: 'ATOM' });
+     });
+
+     it('should handle multiple comment lines', () => {
+       const tokens = lexer.tokenize('; comment 1\n; comment 2\nATOM');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: 'ATOM' });
+     });
+   });
+
+   describe('whitespace', () => {
+     it('should skip whitespace between tokens', () => {
+       const tokens = lexer.tokenize(' ATOM1 ATOM2 ');
+       expect(tokens).toHaveLength(2);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: 'ATOM1' });
+       expect(tokens[1]).toMatchObject({ type: TokenType.ATOM, value: 'ATOM2' });
+     });
+
+     it('should handle tabs and newlines', () => {
+       const tokens = lexer.tokenize('ATOM1\t\nATOM2');
+       expect(tokens).toHaveLength(2);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: 'ATOM1' });
+       expect(tokens[1]).toMatchObject({ type: TokenType.ATOM, value: 'ATOM2' });
+     });
+   });
+
+   describe('complex expressions', () => {
+     it('should tokenize a simple routine', () => {
+       const code = '<ROUTINE V-LOOK ()>';
+       const tokens = lexer.tokenize(code);
+       expect(tokens).toHaveLength(6);
+       expect(tokens.map((t) => t.type)).toEqual([
+         TokenType.LANGLE,
+         TokenType.ATOM,
+         TokenType.ATOM,
+         TokenType.LPAREN,
+         TokenType.RPAREN,
+         TokenType.RANGLE,
+       ]);
+     });
+
+     it('should tokenize nested forms', () => {
+       const code = '<COND (<EQUAL? ,FOO 1> <TELL "One">)>';
+       const tokens = lexer.tokenize(code);
+
+       // Verify key tokens are present
+       const types = tokens.map((t) => t.type);
+       expect(types).toContain(TokenType.LANGLE);
+       expect(types).toContain(TokenType.ATOM);
+       expect(types).toContain(TokenType.STRING);
+       expect(types).toContain(TokenType.NUMBER);
+     });
+
+     it('should tokenize INSERT-FILE directive', () => {
+       const code = '<INSERT-FILE "GMACROS" T>';
+       const tokens = lexer.tokenize(code);
+       expect(tokens).toHaveLength(5);
+       expect(tokens[1]).toMatchObject({ type: TokenType.ATOM, value: 'INSERT-FILE' });
+       expect(tokens[2]).toMatchObject({ type: TokenType.STRING, value: 'GMACROS' });
+       expect(tokens[3]).toMatchObject({ type: TokenType.ATOM, value: 'T' });
+     });
+
+     it('should tokenize OBJECT definition', () => {
+       const code = '<OBJECT BRASS-LANTERN (DESC "brass lantern") (FLAGS LIGHTBIT)>';
+       const tokens = lexer.tokenize(code);
+
+       expect(tokens.find((t) => t.value === 'OBJECT')).toBeDefined();
+       expect(tokens.find((t) => t.value === 'BRASS-LANTERN')).toBeDefined();
+       expect(tokens.find((t) => t.value === 'brass lantern')).toBeDefined();
+     });
+   });
+
+   describe('line tracking', () => {
+     it('should track line numbers', () => {
+       const code = 'ATOM1\nATOM2\nATOM3';
+       const tokens = lexer.tokenize(code);
+
+       expect(tokens[0]?.line).toBe(1);
+       expect(tokens[1]?.line).toBe(2);
+       expect(tokens[2]?.line).toBe(3);
+     });
+
+     it('should track column numbers', () => {
+       const code = '  ATOM';
+       const tokens = lexer.tokenize(code);
+
+       expect(tokens[0]?.column).toBe(3); // 1-based, after 2 spaces
+     });
+   });
+
+   describe('edge cases', () => {
+     it('should handle empty input', () => {
+       const tokens = lexer.tokenize('');
+       expect(tokens).toEqual([]);
+     });
+
+     it('should handle only whitespace', () => {
+       const tokens = lexer.tokenize(' \n\t ');
+       expect(tokens).toEqual([]);
+     });
+
+     it('should handle only comments', () => {
+       const tokens = lexer.tokenize('; comment only');
+       expect(tokens).toEqual([]);
+     });
+
+     it('should throw on unterminated string', () => {
+       expect(() => lexer.tokenize('"unterminated')).toThrow('Unterminated string');
+     });
+
+     it('should handle comma prefix (global reference)', () => {
+       const tokens = lexer.tokenize(',FOO');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: ',FOO' });
+     });
+
+     it('should handle period prefix (local reference)', () => {
+       const tokens = lexer.tokenize('.BAR');
+       expect(tokens).toHaveLength(1);
+       expect(tokens[0]).toMatchObject({ type: TokenType.ATOM, value: '.BAR' });
+     });
+   });
+ });
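
The surface added in this release is the LanguageAdapter contract exercised by the tests above: parse and extractImports, plus the optional chunk and analyzeCallRelationships graph hooks. A minimal usage sketch, not part of the published package contents; the relative import path is the internal one used in the tests, and the file name verbs.zil is illustrative:

import { ZilAdapter } from './zil-adapter.js'; // internal path as used in the tests above

const adapter = new ZilAdapter();
const source = '<INSERT-FILE "GMACROS" T>\n<ROUTINE V-LOOK () <V-DESCRIBE>>';

// Symbols: V-LOOK is reported as an exported 'function' node with start/end lines.
const nodes = adapter.parse(source, 'verbs.zil');

// Imports: one ImportInfo entry with source 'GMACROS'.
const imports = adapter.extractImports(source, 'verbs.zil');

// Optional hooks: one chunk per non-empty top-level form, and 'calls' edges keyed
// as `${filePath}:${caller}` -> `${filePath}:${callee}` with confidence 0.9.
const chunks = adapter.chunk(source, 'verbs.zil');
const edges = adapter.analyzeCallRelationships(source, 'verbs.zil');

console.log(nodes.length, imports.length, chunks.length, edges.length);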