@a2ui-sdk/utils 0.1.1 → 0.2.1
This diff compares publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
- package/package.json +5 -4
- package/dist/0.8/dataBinding.test.d.ts +0 -6
- package/dist/0.8/dataBinding.test.js +0 -271
- package/dist/0.8/pathUtils.test.d.ts +0 -6
- package/dist/0.8/pathUtils.test.js +0 -211
- package/dist/0.9/dataBinding.test.d.ts +0 -6
- package/dist/0.9/dataBinding.test.js +0 -180
- package/dist/0.9/interpolation/evaluator.test.d.ts +0 -4
- package/dist/0.9/interpolation/evaluator.test.js +0 -699
- package/dist/0.9/interpolation/lexer.test.d.ts +0 -4
- package/dist/0.9/interpolation/lexer.test.js +0 -360
- package/dist/0.9/interpolation/parser.test.d.ts +0 -4
- package/dist/0.9/interpolation/parser.test.js +0 -314
- package/dist/0.9/interpolation.test.d.ts +0 -5
- package/dist/0.9/interpolation.test.js +0 -154
- package/dist/0.9/pathUtils.test.d.ts +0 -6
- package/dist/0.9/pathUtils.test.js +0 -310
- package/dist/0.9/validation.test.d.ts +0 -4
- package/dist/0.9/validation.test.js +0 -307
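
Every hunk below deletes compiled *.test.js and *.test.d.ts artifacts from dist/; in other words, 0.2.1 stops shipping its test suite to the registry (the small package.json change is not expanded in this diff). For context only: a common way to keep compiled tests out of dist is to exclude test sources from the production build config. A hypothetical sketch, not necessarily how this package does it:

    // tsconfig.build.json (hypothetical file name and contents; the actual
    // build configuration is not part of this diff)
    {
      "extends": "./tsconfig.json",
      "exclude": ["**/*.test.ts", "node_modules"]
    }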
package/dist/0.9/interpolation/lexer.test.js
@@ -1,360 +0,0 @@
-/**
- * Tests for the lexer (tokenizer).
- */
-import { describe, it, expect } from 'vitest';
-import { tokenize } from './lexer.js';
-import { TokenType } from './types.js';
-describe('Lexer', () => {
-  describe('TEXT tokens', () => {
-    it('should tokenize plain text', () => {
-      const tokens = tokenize('Hello World');
-      expect(tokens).toHaveLength(2); // TEXT + EOF
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: 'Hello World',
-        start: 0,
-        end: 11,
-      });
-    });
-    it('should return only EOF for empty string', () => {
-      const tokens = tokenize('');
-      expect(tokens).toHaveLength(1);
-      expect(tokens[0].type).toBe(TokenType.EOF);
-    });
-    it('should handle text with special characters', () => {
-      const tokens = tokenize('Hello @world! #123');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: 'Hello @world! #123',
-      });
-    });
-    it('should handle multi-line text', () => {
-      const tokens = tokenize('Line 1\nLine 2');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: 'Line 1\nLine 2',
-      });
-    });
-  });
-  describe('EXPR_START and EXPR_END tokens', () => {
-    it('should tokenize expression delimiters', () => {
-      const tokens = tokenize('${/path}');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.EXPR_START,
-        value: '${',
-      });
-      expect(tokens[1]).toMatchObject({ type: TokenType.PATH, value: '/path' });
-      expect(tokens[2]).toMatchObject({ type: TokenType.EXPR_END, value: '}' });
-    });
-    it('should handle multiple expressions', () => {
-      const tokens = tokenize('${/a} ${/b}');
-      const types = tokens.map((t) => t.type);
-      expect(types).toEqual([
-        TokenType.EXPR_START,
-        TokenType.PATH,
-        TokenType.EXPR_END,
-        TokenType.TEXT,
-        TokenType.EXPR_START,
-        TokenType.PATH,
-        TokenType.EXPR_END,
-        TokenType.EOF,
-      ]);
-    });
-    it('should handle expression at start of string', () => {
-      const tokens = tokenize('${/name} is here');
-      expect(tokens[0].type).toBe(TokenType.EXPR_START);
-    });
-    it('should handle expression at end of string', () => {
-      const tokens = tokenize('Name: ${/name}');
-      expect(tokens[tokens.length - 2].type).toBe(TokenType.EXPR_END);
-    });
-  });
-  describe('PATH tokens', () => {
-    it('should tokenize absolute paths', () => {
-      const tokens = tokenize('${/user/name}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/user/name',
-      });
-    });
-    it('should tokenize root path', () => {
-      const tokens = tokenize('${/}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/',
-      });
-    });
-    it('should tokenize paths with array indices', () => {
-      const tokens = tokenize('${/items/0}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/items/0',
-      });
-    });
-    it('should tokenize relative paths', () => {
-      const tokens = tokenize('${name}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: 'name',
-      });
-    });
-    it('should tokenize nested relative paths', () => {
-      const tokens = tokenize('${profile/name}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: 'profile/name',
-      });
-    });
-    it('should tokenize paths with hyphens', () => {
-      const tokens = tokenize('${/user-data/first-name}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/user-data/first-name',
-      });
-    });
-    it('should tokenize paths with underscores', () => {
-      const tokens = tokenize('${/user_data/first_name}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/user_data/first_name',
-      });
-    });
-  });
-  describe('IDENTIFIER, LPAREN, RPAREN, COMMA tokens', () => {
-    it('should tokenize no-argument function call', () => {
-      const tokens = tokenize('${now()}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.IDENTIFIER,
-        value: 'now',
-      });
-      expect(tokens[2]).toMatchObject({ type: TokenType.LPAREN, value: '(' });
-      expect(tokens[3]).toMatchObject({ type: TokenType.RPAREN, value: ')' });
-    });
-    it('should tokenize function with single argument', () => {
-      const tokens = tokenize("${upper('hello')}");
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.IDENTIFIER,
-        value: 'upper',
-      });
-      expect(tokens[2]).toMatchObject({ type: TokenType.LPAREN, value: '(' });
-      expect(tokens[3]).toMatchObject({
-        type: TokenType.STRING,
-        value: 'hello',
-      });
-      expect(tokens[4]).toMatchObject({ type: TokenType.RPAREN, value: ')' });
-    });
-    it('should tokenize function with multiple arguments', () => {
-      const tokens = tokenize('${add(1, 2, 3)}');
-      const types = tokens.map((t) => t.type);
-      expect(types).toContain(TokenType.COMMA);
-      // Count commas
-      const commaCount = tokens.filter((t) => t.type === TokenType.COMMA).length;
-      expect(commaCount).toBe(2);
-    });
-    it('should handle whitespace around function parts', () => {
-      const tokens = tokenize('${ upper ( 1 , 2 ) }');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.IDENTIFIER,
-        value: 'upper',
-      });
-      expect(tokens[2]).toMatchObject({ type: TokenType.LPAREN });
-      expect(tokens[3]).toMatchObject({ type: TokenType.NUMBER, value: '1' });
-      expect(tokens[4]).toMatchObject({ type: TokenType.COMMA });
-      expect(tokens[5]).toMatchObject({ type: TokenType.NUMBER, value: '2' });
-      expect(tokens[6]).toMatchObject({ type: TokenType.RPAREN });
-    });
-    it('should tokenize function names with underscores', () => {
-      const tokens = tokenize('${format_date()}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.IDENTIFIER,
-        value: 'format_date',
-      });
-    });
-  });
-  describe('STRING, NUMBER, BOOLEAN literal tokens', () => {
-    it('should tokenize string literals', () => {
-      const tokens = tokenize("${'hello'}");
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.STRING,
-        value: 'hello',
-      });
-    });
-    it('should tokenize string with spaces', () => {
-      const tokens = tokenize("${'hello world'}");
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.STRING,
-        value: 'hello world',
-      });
-    });
-    it('should handle escaped quotes in strings', () => {
-      const tokens = tokenize("${'it\\'s here'}");
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.STRING,
-        value: "it's here",
-      });
-    });
-    it('should tokenize integer numbers', () => {
-      const tokens = tokenize('${42}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.NUMBER,
-        value: '42',
-      });
-    });
-    it('should tokenize negative numbers', () => {
-      const tokens = tokenize('${-5}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.NUMBER,
-        value: '-5',
-      });
-    });
-    it('should tokenize decimal numbers', () => {
-      const tokens = tokenize('${3.14}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.NUMBER,
-        value: '3.14',
-      });
-    });
-    it('should tokenize negative decimal numbers', () => {
-      const tokens = tokenize('${-3.14}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.NUMBER,
-        value: '-3.14',
-      });
-    });
-    it('should tokenize boolean true', () => {
-      const tokens = tokenize('${true}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.BOOLEAN,
-        value: 'true',
-      });
-    });
-    it('should tokenize boolean false', () => {
-      const tokens = tokenize('${false}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.BOOLEAN,
-        value: 'false',
-      });
-    });
-    it('should not tokenize partial boolean words', () => {
-      const tokens = tokenize('${trueValue}');
-      // Should be PATH, not BOOLEAN
-      expect(tokens[1].type).toBe(TokenType.PATH);
-      expect(tokens[1].value).toBe('trueValue');
-    });
-  });
-  describe('escape sequence handling', () => {
-    it('should convert \\${ to literal ${', () => {
-      const tokens = tokenize('\\${escaped}');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: '${escaped}',
-      });
-    });
-    it('should handle multiple escapes', () => {
-      const tokens = tokenize('\\${a} \\${b}');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: '${a} ${b}',
-      });
-    });
-    it('should handle mix of escaped and unescaped', () => {
-      const tokens = tokenize('\\${escaped} ${/real}');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: '${escaped} ',
-      });
-      expect(tokens[1]).toMatchObject({ type: TokenType.EXPR_START });
-      expect(tokens[2]).toMatchObject({ type: TokenType.PATH, value: '/real' });
-    });
-    it('should handle escape at end of string', () => {
-      const tokens = tokenize('text \\${end}');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: 'text ${end}',
-      });
-    });
-    it('should handle backslash not followed by ${', () => {
-      const tokens = tokenize('back\\slash');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: 'back\\slash',
-      });
-    });
-  });
-  describe('JSON Pointer escapes in PATH tokens', () => {
-    it('should preserve ~1 escape (forward slash) in path', () => {
-      const tokens = tokenize('${/a~1b}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/a~1b',
-      });
-    });
-    it('should preserve ~0 escape (tilde) in path', () => {
-      const tokens = tokenize('${/m~0n}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/m~0n',
-      });
-    });
-    it('should preserve multiple escapes in path', () => {
-      const tokens = tokenize('${/a~1b~0c}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/a~1b~0c',
-      });
-    });
-    it('should handle tilde without following 0 or 1', () => {
-      const tokens = tokenize('${/path~other}');
-      expect(tokens[1]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/path~other',
-      });
-    });
-  });
-  describe('nested expressions', () => {
-    it('should tokenize nested expression in function arg', () => {
-      const tokens = tokenize('${upper(${/name})}');
-      const types = tokens.map((t) => t.type);
-      expect(types).toEqual([
-        TokenType.EXPR_START, // ${
-        TokenType.IDENTIFIER, // upper
-        TokenType.LPAREN, // (
-        TokenType.EXPR_START, // ${
-        TokenType.PATH, // /name
-        TokenType.EXPR_END, // }
-        TokenType.RPAREN, // )
-        TokenType.EXPR_END, // }
-        TokenType.EOF,
-      ]);
-    });
-    it('should tokenize deeply nested expressions', () => {
-      const tokens = tokenize('${a(${b(${/c})})}');
-      // Count nested ${ tokens
-      const exprStartCount = tokens.filter((t) => t.type === TokenType.EXPR_START).length;
-      expect(exprStartCount).toBe(3);
-    });
-  });
-  describe('complex expressions', () => {
-    it('should tokenize text with expression in middle', () => {
-      const tokens = tokenize('Hello, ${/user/name}!');
-      expect(tokens[0]).toMatchObject({
-        type: TokenType.TEXT,
-        value: 'Hello, ',
-      });
-      expect(tokens[1]).toMatchObject({ type: TokenType.EXPR_START });
-      expect(tokens[2]).toMatchObject({
-        type: TokenType.PATH,
-        value: '/user/name',
-      });
-      expect(tokens[3]).toMatchObject({ type: TokenType.EXPR_END });
-      expect(tokens[4]).toMatchObject({ type: TokenType.TEXT, value: '!' });
-    });
-    it('should tokenize multiple expressions in text', () => {
-      const tokens = tokenize('${/a} + ${/b} = ${/c}');
-      const textTokens = tokens.filter((t) => t.type === TokenType.TEXT);
-      const pathTokens = tokens.filter((t) => t.type === TokenType.PATH);
-      expect(textTokens.length).toBe(2); // ' + ' and ' = '
-      expect(pathTokens.length).toBe(3);
-    });
-  });
-});
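
Although removed from the published package, these lexer tests still document the tokenizer's contract: tokenize() flattens an interpolation string into TEXT, EXPR_START, PATH, IDENTIFIER, LPAREN, COMMA, RPAREN, STRING, NUMBER, BOOLEAN, EXPR_END, and EOF tokens, each carrying a type and value (the first test also asserts start/end offsets on a TEXT token). A minimal usage sketch, assuming only the exports the tests themselves import:

    import { tokenize } from './lexer.js';
    import { TokenType } from './types.js';

    // Per the assertions above this yields TEXT('Hello, '), EXPR_START('${'),
    // PATH('/user/name'), EXPR_END('}'), TEXT('!'), then a trailing EOF token.
    const tokens = tokenize('Hello, ${/user/name}!');
    for (const token of tokens) {
      if (token.type !== TokenType.EOF) {
        console.log(token.type, JSON.stringify(token.value));
      }
    }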
package/dist/0.9/interpolation/parser.test.js
@@ -1,314 +0,0 @@
-/**
- * Tests for the parser.
- */
-import { describe, it, expect } from 'vitest';
-import { tokenize } from './lexer.js';
-import { parse } from './parser.js';
-describe('Parser', () => {
-  describe('US1: Simple path expressions', () => {
-    it('should parse simple absolute path', () => {
-      const tokens = tokenize('${/user/name}');
-      const ast = parse(tokens);
-      expect(ast.type).toBe('interpolatedString');
-      expect(ast.parts).toHaveLength(1);
-      const pathNode = ast.parts[0];
-      expect(pathNode.type).toBe('path');
-      expect(pathNode.path).toBe('/user/name');
-      expect(pathNode.absolute).toBe(true);
-    });
-    it('should parse root path', () => {
-      const tokens = tokenize('${/}');
-      const ast = parse(tokens);
-      const pathNode = ast.parts[0];
-      expect(pathNode.path).toBe('/');
-      expect(pathNode.absolute).toBe(true);
-    });
-    it('should parse path with array index', () => {
-      const tokens = tokenize('${/items/0}');
-      const ast = parse(tokens);
-      const pathNode = ast.parts[0];
-      expect(pathNode.path).toBe('/items/0');
-    });
-    it('should parse mixed literal and path content', () => {
-      const tokens = tokenize('Hello, ${/user/name}!');
-      const ast = parse(tokens);
-      expect(ast.parts).toHaveLength(3);
-      expect(ast.parts[0].type).toBe('literal');
-      expect(ast.parts[0].value).toBe('Hello, ');
-      expect(ast.parts[1].type).toBe('path');
-      expect(ast.parts[1].path).toBe('/user/name');
-      expect(ast.parts[2].type).toBe('literal');
-      expect(ast.parts[2].value).toBe('!');
-    });
-    it('should parse multiple path expressions', () => {
-      const tokens = tokenize('${/user/name} is ${/user/age} years old');
-      const ast = parse(tokens);
-      expect(ast.parts).toHaveLength(4);
-      expect(ast.parts[0].path).toBe('/user/name');
-      expect(ast.parts[1].value).toBe(' is ');
-      expect(ast.parts[2].path).toBe('/user/age');
-      expect(ast.parts[3].value).toBe(' years old');
-    });
-    it('should parse adjacent expressions', () => {
-      const tokens = tokenize('${/a}${/b}${/c}');
-      const ast = parse(tokens);
-      expect(ast.parts).toHaveLength(3);
-      expect(ast.parts[0].path).toBe('/a');
-      expect(ast.parts[1].path).toBe('/b');
-      expect(ast.parts[2].path).toBe('/c');
-    });
-    it('should parse path with JSON Pointer escapes', () => {
-      const tokens = tokenize('${/a~1b}');
-      const ast = parse(tokens);
-      const pathNode = ast.parts[0];
-      expect(pathNode.path).toBe('/a~1b');
-    });
-  });
-  describe('US2: Function call expressions', () => {
-    it('should parse no-argument function call', () => {
-      const tokens = tokenize('${now()}');
-      const ast = parse(tokens);
-      expect(ast.parts).toHaveLength(1);
-      const funcNode = ast.parts[0];
-      expect(funcNode.type).toBe('functionCall');
-      expect(funcNode.name).toBe('now');
-      expect(funcNode.args).toHaveLength(0);
-    });
-    it('should parse function with string argument', () => {
-      const tokens = tokenize("${upper('hello')}");
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      expect(funcNode.name).toBe('upper');
-      expect(funcNode.args).toHaveLength(1);
-      const arg = funcNode.args[0];
-      expect(arg.type).toBe('literal');
-      expect(arg.value).toBe('hello');
-    });
-    it('should parse function with number argument', () => {
-      const tokens = tokenize('${abs(-5)}');
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      expect(funcNode.name).toBe('abs');
-      const arg = funcNode.args[0];
-      expect(arg.value).toBe('-5');
-    });
-    it('should parse function with boolean argument', () => {
-      const tokens = tokenize('${if(true)}');
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      const arg = funcNode.args[0];
-      expect(arg.value).toBe('true');
-    });
-    it('should parse function with multiple arguments', () => {
-      const tokens = tokenize('${add(1, 2, 3)}');
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      expect(funcNode.name).toBe('add');
-      expect(funcNode.args).toHaveLength(3);
-      expect(funcNode.args[0].value).toBe('1');
-      expect(funcNode.args[1].value).toBe('2');
-      expect(funcNode.args[2].value).toBe('3');
-    });
-    it('should parse function with path argument', () => {
-      const tokens = tokenize('${upper(${/name})}');
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      expect(funcNode.name).toBe('upper');
-      expect(funcNode.args).toHaveLength(1);
-      const arg = funcNode.args[0];
-      expect(arg.type).toBe('path');
-      expect(arg.path).toBe('/name');
-    });
-    it('should parse function with mixed argument types', () => {
-      const tokens = tokenize("${format(${/value}, 'prefix', 10)}");
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      expect(funcNode.args).toHaveLength(3);
-      expect(funcNode.args[0].type).toBe('path');
-      expect(funcNode.args[1].type).toBe('literal');
-      expect(funcNode.args[2].type).toBe('literal');
-    });
-  });
-  describe('US3: Nested expressions', () => {
-    it('should parse nested path in function argument', () => {
-      const tokens = tokenize('${upper(${/name})}');
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      const arg = funcNode.args[0];
-      expect(arg.path).toBe('/name');
-    });
-    it('should parse nested function call in argument', () => {
-      const tokens = tokenize('${upper(${lower(${/name})})}');
-      const ast = parse(tokens);
-      const outerFunc = ast.parts[0];
-      expect(outerFunc.name).toBe('upper');
-      const innerFunc = outerFunc.args[0];
-      expect(innerFunc.name).toBe('lower');
-      const pathArg = innerFunc.args[0];
-      expect(pathArg.path).toBe('/name');
-    });
-    it('should parse deeply nested expressions (3+ levels)', () => {
-      const tokens = tokenize('${a(${b(${c(${/x})})})}');
-      const ast = parse(tokens);
-      const level1 = ast.parts[0];
-      expect(level1.name).toBe('a');
-      const level2 = level1.args[0];
-      expect(level2.name).toBe('b');
-      const level3 = level2.args[0];
-      expect(level3.name).toBe('c');
-      const path = level3.args[0];
-      expect(path.path).toBe('/x');
-    });
-    it('should handle max nesting depth gracefully', () => {
-      // Create expression with 11 nesting levels (exceeds MAX_DEPTH of 10)
-      let expr = '${/x}';
-      for (let i = 0; i < 11; i++) {
-        expr = `\${wrap(${expr})}`;
-      }
-      const tokens = tokenize(expr);
-      const ast = parse(tokens);
-      // Should still return valid AST (with warning logged)
-      expect(ast.type).toBe('interpolatedString');
-    });
-  });
-  describe('US4: Escaped expressions', () => {
-    it('should parse escaped expression as literal text', () => {
-      const tokens = tokenize('\\${escaped}');
-      const ast = parse(tokens);
-      expect(ast.parts).toHaveLength(1);
-      expect(ast.parts[0].type).toBe('literal');
-      expect(ast.parts[0].value).toBe('${escaped}');
-    });
-    it('should parse mixed escaped and unescaped', () => {
-      const tokens = tokenize('\\${escaped} ${/real}');
-      const ast = parse(tokens);
-      expect(ast.parts).toHaveLength(2);
-      expect(ast.parts[0].value).toBe('${escaped} ');
-      expect(ast.parts[1].path).toBe('/real');
-    });
-  });
-  describe('US5: Relative paths', () => {
-    it('should parse relative path', () => {
-      const tokens = tokenize('${name}');
-      const ast = parse(tokens);
-      const pathNode = ast.parts[0];
-      expect(pathNode.type).toBe('path');
-      expect(pathNode.path).toBe('name');
-      expect(pathNode.absolute).toBe(false);
-    });
-    it('should parse nested relative path', () => {
-      const tokens = tokenize('${profile/name}');
-      const ast = parse(tokens);
-      const pathNode = ast.parts[0];
-      expect(pathNode.path).toBe('profile/name');
-      expect(pathNode.absolute).toBe(false);
-    });
-    it('should parse mixed absolute and relative paths', () => {
-      const tokens = tokenize('${name} and ${/absolute}');
-      const ast = parse(tokens);
-      const relativePath = ast.parts[0];
-      expect(relativePath.absolute).toBe(false);
-      const absolutePath = ast.parts[2];
-      expect(absolutePath.absolute).toBe(true);
-    });
-  });
-  describe('Additional edge cases', () => {
-    it('should parse function with direct path argument (not nested)', () => {
-      const tokens = tokenize('${upper(/name)}');
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      expect(funcNode.type).toBe('functionCall');
-      expect(funcNode.name).toBe('upper');
-      // Direct path in function args (without ${})
-      expect(funcNode.args).toHaveLength(1);
-    });
-    it('should parse function call inside function argument', () => {
-      // Test nested function call recognized via IDENTIFIER token
-      const tokens = tokenize('${outer(inner())}');
-      const ast = parse(tokens);
-      const outerFunc = ast.parts[0];
-      expect(outerFunc.name).toBe('outer');
-      expect(outerFunc.args).toHaveLength(1);
-      const innerFunc = outerFunc.args[0];
-      expect(innerFunc.type).toBe('functionCall');
-      expect(innerFunc.name).toBe('inner');
-    });
-    it('should handle function argument with EOF', () => {
-      const tokens = tokenize('${func(');
-      const ast = parse(tokens);
-      const funcNode = ast.parts[0];
-      expect(funcNode.type).toBe('functionCall');
-      expect(funcNode.name).toBe('func');
-    });
-    it('should handle nested EXPR_START at depth limit in parseArgument', () => {
-      // Create expression with exactly MAX_DEPTH nestings in function arg context
-      let expr = '${/x}';
-      for (let i = 0; i < 9; i++) {
-        expr = `\${f(${expr})}`;
-      }
-      // One more nesting should trigger the max depth in parseArgument
-      expr = `\${f(${expr})}`;
-      expr = `\${f(${expr})}`;
-      const tokens = tokenize(expr);
-      const ast = parse(tokens);
-      // Should still return valid AST
-      expect(ast.type).toBe('interpolatedString');
-    });
-    it('should handle literal in parseExpression', () => {
-      // When a literal token appears in an expression context
-      const tokens = tokenize("${'string'}");
-      const ast = parse(tokens);
-      const literalNode = ast.parts[0];
-      expect(literalNode.type).toBe('literal');
-      expect(literalNode.value).toBe('string');
-    });
-    it('should handle number in parseExpression', () => {
-      const tokens = tokenize('${42}');
-      const ast = parse(tokens);
-      const literalNode = ast.parts[0];
-      expect(literalNode.type).toBe('literal');
-      expect(literalNode.value).toBe('42');
-    });
-    it('should handle boolean in parseExpression', () => {
-      const tokens = tokenize('${true}');
-      const ast = parse(tokens);
-      const literalNode = ast.parts[0];
-      expect(literalNode.type).toBe('literal');
-      expect(literalNode.value).toBe('true');
-    });
-    it('should handle unexpected EXPR_END at top level', () => {
-      // This simulates finding } outside of expression context
-      const tokens = tokenize('text}more');
-      const ast = parse(tokens);
-      expect(ast.type).toBe('interpolatedString');
-      expect(ast.parts).toHaveLength(1);
-      expect(ast.parts[0].value).toBe('text}more');
-    });
-  });
-  describe('Error handling', () => {
-    it('should return empty literal for empty expression', () => {
-      const tokens = tokenize('${}');
-      const ast = parse(tokens);
-      // Should not crash, may have empty parts
-      expect(ast.type).toBe('interpolatedString');
-    });
-    it('should handle unclosed expression', () => {
-      const tokens = tokenize('${/path');
-      const ast = parse(tokens);
-      // Should return partial AST
-      expect(ast.type).toBe('interpolatedString');
-    });
-    it('should handle plain text without expressions', () => {
-      const tokens = tokenize('Hello World');
-      const ast = parse(tokens);
-      expect(ast.parts).toHaveLength(1);
-      expect(ast.parts[0].value).toBe('Hello World');
-    });
-    it('should handle empty string', () => {
-      const tokens = tokenize('');
-      const ast = parse(tokens);
-      expect(ast.type).toBe('interpolatedString');
-      expect(ast.parts.length).toBeGreaterThanOrEqual(1);
-    });
-  });
-});
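
Together, the two suites pin down the parser's output: parse(tokenize(input)) returns a node of type 'interpolatedString' whose parts are 'literal' nodes ({ value }), 'path' nodes ({ path, absolute }), or 'functionCall' nodes ({ name, args }), with args recursively holding the same kinds. A small walker over that shape, written only against fields the tests assert (the describeNode helper and the sample input are illustrative):

    import { tokenize } from './lexer.js';
    import { parse } from './parser.js';

    // Recursively prints each AST node kind covered by the removed tests.
    function describeNode(node, depth = 0) {
      const pad = '  '.repeat(depth);
      if (node.type === 'literal') {
        console.log(`${pad}literal: ${JSON.stringify(node.value)}`);
      } else if (node.type === 'path') {
        console.log(`${pad}path: ${node.path} (absolute: ${node.absolute})`);
      } else if (node.type === 'functionCall') {
        console.log(`${pad}call: ${node.name} with ${node.args.length} arg(s)`);
        node.args.forEach((arg) => describeNode(arg, depth + 1));
      }
    }

    const ast = parse(tokenize('Hello, ${upper(${/name})}!'));
    ast.parts.forEach((part) => describeNode(part));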