@a2ui-sdk/utils 0.0.0
This diff shows the content of package versions publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- package/dist/0.9/index.d.ts +1 -0
- package/dist/0.9/index.js +1 -0
- package/dist/0.9/interpolation/evaluator.d.ts +15 -0
- package/dist/0.9/interpolation/evaluator.js +95 -0
- package/dist/0.9/interpolation/evaluator.test.d.ts +4 -0
- package/dist/0.9/interpolation/evaluator.test.js +699 -0
- package/dist/0.9/interpolation/index.d.ts +70 -0
- package/dist/0.9/interpolation/index.js +84 -0
- package/dist/0.9/interpolation/lexer.d.ts +18 -0
- package/dist/0.9/interpolation/lexer.js +250 -0
- package/dist/0.9/interpolation/lexer.test.d.ts +4 -0
- package/dist/0.9/interpolation/lexer.test.js +360 -0
- package/dist/0.9/interpolation/parser.d.ts +14 -0
- package/dist/0.9/interpolation/parser.js +236 -0
- package/dist/0.9/interpolation/parser.test.d.ts +4 -0
- package/dist/0.9/interpolation/parser.test.js +314 -0
- package/dist/0.9/interpolation/types.d.ts +124 -0
- package/dist/0.9/interpolation/types.js +36 -0
- package/dist/0.9/interpolation.test.d.ts +5 -0
- package/dist/0.9/interpolation.test.js +154 -0
- package/dist/0.9/pathUtils.d.ts +115 -0
- package/dist/0.9/pathUtils.js +256 -0
- package/dist/0.9/pathUtils.test.d.ts +6 -0
- package/dist/0.9/pathUtils.test.js +330 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/dist/tsconfig.tsbuildinfo +1 -0
- package/package.json +40 -0
package/dist/0.9/interpolation/lexer.test.js
@@ -0,0 +1,360 @@
/**
 * Tests for the lexer (tokenizer).
 */
import { describe, it, expect } from 'vitest';
import { tokenize } from './lexer.js';
import { TokenType } from './types.js';
describe('Lexer', () => {
    describe('TEXT tokens', () => {
        it('should tokenize plain text', () => {
            const tokens = tokenize('Hello World');
            expect(tokens).toHaveLength(2); // TEXT + EOF
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: 'Hello World',
                start: 0,
                end: 11,
            });
        });
        it('should return only EOF for empty string', () => {
            const tokens = tokenize('');
            expect(tokens).toHaveLength(1);
            expect(tokens[0].type).toBe(TokenType.EOF);
        });
        it('should handle text with special characters', () => {
            const tokens = tokenize('Hello @world! #123');
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: 'Hello @world! #123',
            });
        });
        it('should handle multi-line text', () => {
            const tokens = tokenize('Line 1\nLine 2');
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: 'Line 1\nLine 2',
            });
        });
    });
    describe('EXPR_START and EXPR_END tokens', () => {
        it('should tokenize expression delimiters', () => {
            const tokens = tokenize('${/path}');
            expect(tokens[0]).toMatchObject({
                type: TokenType.EXPR_START,
                value: '${',
            });
            expect(tokens[1]).toMatchObject({ type: TokenType.PATH, value: '/path' });
            expect(tokens[2]).toMatchObject({ type: TokenType.EXPR_END, value: '}' });
        });
        it('should handle multiple expressions', () => {
            const tokens = tokenize('${/a} ${/b}');
            const types = tokens.map((t) => t.type);
            expect(types).toEqual([
                TokenType.EXPR_START,
                TokenType.PATH,
                TokenType.EXPR_END,
                TokenType.TEXT,
                TokenType.EXPR_START,
                TokenType.PATH,
                TokenType.EXPR_END,
                TokenType.EOF,
            ]);
        });
        it('should handle expression at start of string', () => {
            const tokens = tokenize('${/name} is here');
            expect(tokens[0].type).toBe(TokenType.EXPR_START);
        });
        it('should handle expression at end of string', () => {
            const tokens = tokenize('Name: ${/name}');
            expect(tokens[tokens.length - 2].type).toBe(TokenType.EXPR_END);
        });
    });
    describe('PATH tokens', () => {
        it('should tokenize absolute paths', () => {
            const tokens = tokenize('${/user/name}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/user/name',
            });
        });
        it('should tokenize root path', () => {
            const tokens = tokenize('${/}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/',
            });
        });
        it('should tokenize paths with array indices', () => {
            const tokens = tokenize('${/items/0}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/items/0',
            });
        });
        it('should tokenize relative paths', () => {
            const tokens = tokenize('${name}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: 'name',
            });
        });
        it('should tokenize nested relative paths', () => {
            const tokens = tokenize('${profile/name}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: 'profile/name',
            });
        });
        it('should tokenize paths with hyphens', () => {
            const tokens = tokenize('${/user-data/first-name}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/user-data/first-name',
            });
        });
        it('should tokenize paths with underscores', () => {
            const tokens = tokenize('${/user_data/first_name}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/user_data/first_name',
            });
        });
    });
    describe('IDENTIFIER, LPAREN, RPAREN, COMMA tokens', () => {
        it('should tokenize no-argument function call', () => {
            const tokens = tokenize('${now()}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.IDENTIFIER,
                value: 'now',
            });
            expect(tokens[2]).toMatchObject({ type: TokenType.LPAREN, value: '(' });
            expect(tokens[3]).toMatchObject({ type: TokenType.RPAREN, value: ')' });
        });
        it('should tokenize function with single argument', () => {
            const tokens = tokenize("${upper('hello')}");
            expect(tokens[1]).toMatchObject({
                type: TokenType.IDENTIFIER,
                value: 'upper',
            });
            expect(tokens[2]).toMatchObject({ type: TokenType.LPAREN, value: '(' });
            expect(tokens[3]).toMatchObject({
                type: TokenType.STRING,
                value: 'hello',
            });
            expect(tokens[4]).toMatchObject({ type: TokenType.RPAREN, value: ')' });
        });
        it('should tokenize function with multiple arguments', () => {
            const tokens = tokenize('${add(1, 2, 3)}');
            const types = tokens.map((t) => t.type);
            expect(types).toContain(TokenType.COMMA);
            // Count commas
            const commaCount = tokens.filter((t) => t.type === TokenType.COMMA).length;
            expect(commaCount).toBe(2);
        });
        it('should handle whitespace around function parts', () => {
            const tokens = tokenize('${ upper ( 1 , 2 ) }');
            expect(tokens[1]).toMatchObject({
                type: TokenType.IDENTIFIER,
                value: 'upper',
            });
            expect(tokens[2]).toMatchObject({ type: TokenType.LPAREN });
            expect(tokens[3]).toMatchObject({ type: TokenType.NUMBER, value: '1' });
            expect(tokens[4]).toMatchObject({ type: TokenType.COMMA });
            expect(tokens[5]).toMatchObject({ type: TokenType.NUMBER, value: '2' });
            expect(tokens[6]).toMatchObject({ type: TokenType.RPAREN });
        });
        it('should tokenize function names with underscores', () => {
            const tokens = tokenize('${format_date()}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.IDENTIFIER,
                value: 'format_date',
            });
        });
    });
    describe('STRING, NUMBER, BOOLEAN literal tokens', () => {
        it('should tokenize string literals', () => {
            const tokens = tokenize("${'hello'}");
            expect(tokens[1]).toMatchObject({
                type: TokenType.STRING,
                value: 'hello',
            });
        });
        it('should tokenize string with spaces', () => {
            const tokens = tokenize("${'hello world'}");
            expect(tokens[1]).toMatchObject({
                type: TokenType.STRING,
                value: 'hello world',
            });
        });
        it('should handle escaped quotes in strings', () => {
            const tokens = tokenize("${'it\\'s here'}");
            expect(tokens[1]).toMatchObject({
                type: TokenType.STRING,
                value: "it's here",
            });
        });
        it('should tokenize integer numbers', () => {
            const tokens = tokenize('${42}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.NUMBER,
                value: '42',
            });
        });
        it('should tokenize negative numbers', () => {
            const tokens = tokenize('${-5}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.NUMBER,
                value: '-5',
            });
        });
        it('should tokenize decimal numbers', () => {
            const tokens = tokenize('${3.14}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.NUMBER,
                value: '3.14',
            });
        });
        it('should tokenize negative decimal numbers', () => {
            const tokens = tokenize('${-3.14}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.NUMBER,
                value: '-3.14',
            });
        });
        it('should tokenize boolean true', () => {
            const tokens = tokenize('${true}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.BOOLEAN,
                value: 'true',
            });
        });
        it('should tokenize boolean false', () => {
            const tokens = tokenize('${false}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.BOOLEAN,
                value: 'false',
            });
        });
        it('should not tokenize partial boolean words', () => {
            const tokens = tokenize('${trueValue}');
            // Should be PATH, not BOOLEAN
            expect(tokens[1].type).toBe(TokenType.PATH);
            expect(tokens[1].value).toBe('trueValue');
        });
    });
    describe('escape sequence handling', () => {
        it('should convert \\${ to literal ${', () => {
            const tokens = tokenize('\\${escaped}');
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: '${escaped}',
            });
        });
        it('should handle multiple escapes', () => {
            const tokens = tokenize('\\${a} \\${b}');
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: '${a} ${b}',
            });
        });
        it('should handle mix of escaped and unescaped', () => {
            const tokens = tokenize('\\${escaped} ${/real}');
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: '${escaped} ',
            });
            expect(tokens[1]).toMatchObject({ type: TokenType.EXPR_START });
            expect(tokens[2]).toMatchObject({ type: TokenType.PATH, value: '/real' });
        });
        it('should handle escape at end of string', () => {
            const tokens = tokenize('text \\${end}');
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: 'text ${end}',
            });
        });
        it('should handle backslash not followed by ${', () => {
            const tokens = tokenize('back\\slash');
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: 'back\\slash',
            });
        });
    });
    describe('JSON Pointer escapes in PATH tokens', () => {
        it('should preserve ~1 escape (forward slash) in path', () => {
            const tokens = tokenize('${/a~1b}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/a~1b',
            });
        });
        it('should preserve ~0 escape (tilde) in path', () => {
            const tokens = tokenize('${/m~0n}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/m~0n',
            });
        });
        it('should preserve multiple escapes in path', () => {
            const tokens = tokenize('${/a~1b~0c}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/a~1b~0c',
            });
        });
        it('should handle tilde without following 0 or 1', () => {
            const tokens = tokenize('${/path~other}');
            expect(tokens[1]).toMatchObject({
                type: TokenType.PATH,
                value: '/path~other',
            });
        });
    });
    describe('nested expressions', () => {
        it('should tokenize nested expression in function arg', () => {
            const tokens = tokenize('${upper(${/name})}');
            const types = tokens.map((t) => t.type);
            expect(types).toEqual([
                TokenType.EXPR_START, // ${
                TokenType.IDENTIFIER, // upper
                TokenType.LPAREN, // (
                TokenType.EXPR_START, // ${
                TokenType.PATH, // /name
                TokenType.EXPR_END, // }
                TokenType.RPAREN, // )
                TokenType.EXPR_END, // }
                TokenType.EOF,
            ]);
        });
        it('should tokenize deeply nested expressions', () => {
            const tokens = tokenize('${a(${b(${/c})})}');
            // Count nested ${ tokens
            const exprStartCount = tokens.filter((t) => t.type === TokenType.EXPR_START).length;
            expect(exprStartCount).toBe(3);
        });
    });
    describe('complex expressions', () => {
        it('should tokenize text with expression in middle', () => {
            const tokens = tokenize('Hello, ${/user/name}!');
            expect(tokens[0]).toMatchObject({
                type: TokenType.TEXT,
                value: 'Hello, ',
            });
            expect(tokens[1]).toMatchObject({ type: TokenType.EXPR_START });
            expect(tokens[2]).toMatchObject({
                type: TokenType.PATH,
                value: '/user/name',
            });
            expect(tokens[3]).toMatchObject({ type: TokenType.EXPR_END });
            expect(tokens[4]).toMatchObject({ type: TokenType.TEXT, value: '!' });
        });
        it('should tokenize multiple expressions in text', () => {
            const tokens = tokenize('${/a} + ${/b} = ${/c}');
            const textTokens = tokens.filter((t) => t.type === TokenType.TEXT);
            const pathTokens = tokens.filter((t) => t.type === TokenType.PATH);
            expect(textTokens.length).toBe(2); // ' + ' and ' = '
            expect(pathTokens.length).toBe(3);
        });
    });
});
package/dist/0.9/interpolation/parser.d.ts
@@ -0,0 +1,14 @@
/**
 * Parser for string interpolation expressions.
 *
 * Converts a sequence of tokens into an Abstract Syntax Tree (AST).
 * Implements a recursive descent parser for the LL(1) grammar.
 */
import type { Token, InterpolatedStringNode } from './types.js';
/**
 * Parses a sequence of tokens into an AST.
 *
 * @param tokens - The tokens to parse
 * @returns The parsed AST (InterpolatedStringNode)
 */
export declare function parse(tokens: Token[]): InterpolatedStringNode;
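
Taken together, tokenize and parse form a two-stage pipeline: the lexer turns a raw template string into tokens, and the parser assembles those tokens into an InterpolatedStringNode. A minimal usage sketch — the relative import paths are assumed from the dist layout listed above, and the AST shape follows the node constructors in parser.js below:

import { tokenize } from './lexer.js';
import { parse } from './parser.js';

// Mixed text and an absolute-path expression, as in the lexer tests above.
const ast = parse(tokenize('Hello, ${/user/name}!'));
// ast => {
//   type: 'interpolatedString',
//   parts: [
//     { type: 'literal', value: 'Hello, ' },
//     { type: 'path', path: '/user/name', absolute: true },
//     { type: 'literal', value: '!' },
//   ],
// }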
package/dist/0.9/interpolation/parser.js
@@ -0,0 +1,236 @@
/**
 * Parser for string interpolation expressions.
 *
 * Converts a sequence of tokens into an Abstract Syntax Tree (AST).
 * Implements a recursive descent parser for the LL(1) grammar.
 */
import { TokenType } from './types.js';
/** Maximum nesting depth for expressions to prevent stack overflow */
const MAX_DEPTH = 10;
/**
 * Parses a sequence of tokens into an AST.
 *
 * @param tokens - The tokens to parse
 * @returns The parsed AST (InterpolatedStringNode)
 */
export function parse(tokens) {
    const state = { tokens, pos: 0, depth: 0 };
    return parseInterpolatedString(state);
}
/**
 * Returns the current token without advancing.
 */
function peek(state) {
    return (state.tokens[state.pos] || {
        type: TokenType.EOF,
        value: '',
        start: 0,
        end: 0,
    });
}
/**
 * Returns the current token and advances position.
 */
function advance(state) {
    return (state.tokens[state.pos++] || {
        type: TokenType.EOF,
        value: '',
        start: 0,
        end: 0,
    });
}
/**
 * Parses the root interpolated string node.
 */
function parseInterpolatedString(state) {
    const parts = [];
    while (peek(state).type !== TokenType.EOF) {
        const token = peek(state);
        if (token.type === TokenType.TEXT) {
            advance(state);
            parts.push({ type: 'literal', value: token.value });
        }
        else if (token.type === TokenType.EXPR_START) {
            advance(state); // consume ${
            state.depth++;
            if (state.depth > MAX_DEPTH) {
                // Max depth exceeded - skip to closing }
                console.warn('[A2UI] Parse error: Maximum nesting depth exceeded');
                skipToExprEnd(state);
                state.depth--;
                continue;
            }
            const expr = parseExpression(state);
            if (expr) {
                parts.push(expr);
            }
            // Expect closing }
            if (peek(state).type === TokenType.EXPR_END) {
                advance(state);
            }
            state.depth--;
        }
        else if (token.type === TokenType.EXPR_END) {
            // Unexpected } - might be from nested expression context
            break;
        }
        else {
            // Skip unknown tokens at top level
            advance(state);
        }
    }
    // If no parts, add empty literal
    if (parts.length === 0) {
        parts.push({ type: 'literal', value: '' });
    }
    return { type: 'interpolatedString', parts };
}
/**
 * Parses a single expression inside ${...}.
 */
function parseExpression(state) {
    const token = peek(state);
    // Path expression (absolute)
    if (token.type === TokenType.PATH) {
        return parsePath(state);
    }
    // Function call or relative path
    if (token.type === TokenType.IDENTIFIER) {
        // Look ahead to see if it's a function call
        const nextPos = state.pos + 1;
        const nextToken = state.tokens[nextPos];
        if (nextToken && nextToken.type === TokenType.LPAREN) {
            return parseFunctionCall(state);
        }
        // It's actually a relative path (shouldn't happen if lexer is correct)
        return parsePath(state);
    }
    // Nested expression
    if (token.type === TokenType.EXPR_START) {
        advance(state); // consume ${
        state.depth++;
        if (state.depth > MAX_DEPTH) {
            console.warn('[A2UI] Parse error: Maximum nesting depth exceeded');
            skipToExprEnd(state);
            state.depth--;
            return null;
        }
        const expr = parseExpression(state);
        if (peek(state).type === TokenType.EXPR_END) {
            advance(state);
        }
        state.depth--;
        return expr;
    }
    // Literal value (inside function args)
    if (token.type === TokenType.STRING ||
        token.type === TokenType.NUMBER ||
        token.type === TokenType.BOOLEAN) {
        return parseLiteral(state);
    }
    // Empty or invalid expression
    return null;
}
/**
 * Parses a path node.
 */
function parsePath(state) {
    const token = advance(state);
    const absolute = token.value.startsWith('/');
    return { type: 'path', path: token.value, absolute };
}
/**
 * Parses a function call node.
 */
function parseFunctionCall(state) {
    const nameToken = advance(state); // function name
    advance(state); // consume (
    const args = [];
    // Parse arguments
    while (peek(state).type !== TokenType.RPAREN &&
        peek(state).type !== TokenType.EOF) {
        const arg = parseArgument(state);
        if (arg) {
            args.push(arg);
        }
        // Skip comma between arguments
        if (peek(state).type === TokenType.COMMA) {
            advance(state);
        }
    }
    // Consume closing )
    if (peek(state).type === TokenType.RPAREN) {
        advance(state);
    }
    return { type: 'functionCall', name: nameToken.value, args };
}
/**
 * Parses a function argument.
 */
function parseArgument(state) {
    const token = peek(state);
    // Nested expression ${...}
    if (token.type === TokenType.EXPR_START) {
        advance(state); // consume ${
        state.depth++;
        if (state.depth > MAX_DEPTH) {
            console.warn('[A2UI] Parse error: Maximum nesting depth exceeded');
            skipToExprEnd(state);
            state.depth--;
            return null;
        }
        const expr = parseExpression(state);
        if (peek(state).type === TokenType.EXPR_END) {
            advance(state);
        }
        state.depth--;
        return expr;
    }
    // Literal string
    if (token.type === TokenType.STRING) {
        return parseLiteral(state);
    }
    // Literal number
    if (token.type === TokenType.NUMBER) {
        return parseLiteral(state);
    }
    // Literal boolean
    if (token.type === TokenType.BOOLEAN) {
        return parseLiteral(state);
    }
    // Path (absolute)
    if (token.type === TokenType.PATH) {
        return parsePath(state);
    }
    // Identifier could be function call or relative path
    if (token.type === TokenType.IDENTIFIER) {
        const nextPos = state.pos + 1;
        const nextToken = state.tokens[nextPos];
        if (nextToken && nextToken.type === TokenType.LPAREN) {
            return parseFunctionCall(state);
        }
    }
    return null;
}
/**
 * Parses a literal value node.
 */
function parseLiteral(state) {
    const token = advance(state);
    return { type: 'literal', value: token.value };
}
/**
 * Skips tokens until expression end is found.
 */
function skipToExprEnd(state) {
    let depth = 1;
    while (depth > 0 && peek(state).type !== TokenType.EOF) {
        const token = advance(state);
        if (token.type === TokenType.EXPR_START) {
            depth++;
        }
        else if (token.type === TokenType.EXPR_END) {
            depth--;
        }
    }
}
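
For a nested expression like the one exercised in the lexer tests, parseFunctionCall above wraps the inner path node as an argument. A sketch of the expected AST under the same assumed imports (illustrative, not a fixture shipped with the package):

import { tokenize } from './lexer.js';
import { parse } from './parser.js';

// '${upper(${/name})}' lexes to EXPR_START, IDENTIFIER, LPAREN,
// EXPR_START, PATH, EXPR_END, RPAREN, EXPR_END, EOF (see the nested
// expression tests), which the parser folds into a single functionCall part.
const ast = parse(tokenize('${upper(${/name})}'));
// ast => {
//   type: 'interpolatedString',
//   parts: [
//     {
//       type: 'functionCall',
//       name: 'upper',
//       args: [{ type: 'path', path: '/name', absolute: true }],
//     },
//   ],
// }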