@mmapp/player-core 0.1.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +1436 -0
- package/dist/index.d.ts +1436 -0
- package/dist/index.js +4828 -0
- package/dist/index.mjs +4762 -0
- package/package.json +35 -0
- package/package.json.backup +35 -0
- package/src/__tests__/actions.test.ts +187 -0
- package/src/__tests__/blueprint-e2e.test.ts +706 -0
- package/src/__tests__/blueprint-test-runner.test.ts +680 -0
- package/src/__tests__/core-functions.test.ts +78 -0
- package/src/__tests__/dsl-compiler.test.ts +1382 -0
- package/src/__tests__/dsl-grammar.test.ts +1682 -0
- package/src/__tests__/events.test.ts +200 -0
- package/src/__tests__/expression.test.ts +296 -0
- package/src/__tests__/failure-policies.test.ts +110 -0
- package/src/__tests__/frontend-context.test.ts +182 -0
- package/src/__tests__/integration.test.ts +256 -0
- package/src/__tests__/security.test.ts +190 -0
- package/src/__tests__/state-machine.test.ts +450 -0
- package/src/__tests__/testing-engine.test.ts +671 -0
- package/src/actions/dispatcher.ts +80 -0
- package/src/actions/index.ts +7 -0
- package/src/actions/types.ts +25 -0
- package/src/dsl/compiler/component-mapper.ts +289 -0
- package/src/dsl/compiler/field-mapper.ts +187 -0
- package/src/dsl/compiler/index.ts +82 -0
- package/src/dsl/compiler/manifest-compiler.ts +76 -0
- package/src/dsl/compiler/symbol-table.ts +214 -0
- package/src/dsl/compiler/utils.ts +48 -0
- package/src/dsl/compiler/view-compiler.ts +286 -0
- package/src/dsl/compiler/workflow-compiler.ts +600 -0
- package/src/dsl/index.ts +66 -0
- package/src/dsl/ir-migration.ts +221 -0
- package/src/dsl/ir-types.ts +416 -0
- package/src/dsl/lexer.ts +579 -0
- package/src/dsl/parser.ts +115 -0
- package/src/dsl/types.ts +256 -0
- package/src/events/event-bus.ts +68 -0
- package/src/events/index.ts +9 -0
- package/src/events/pattern-matcher.ts +61 -0
- package/src/events/types.ts +27 -0
- package/src/expression/evaluator.ts +676 -0
- package/src/expression/functions.ts +214 -0
- package/src/expression/index.ts +13 -0
- package/src/expression/types.ts +64 -0
- package/src/index.ts +61 -0
- package/src/state-machine/index.ts +16 -0
- package/src/state-machine/interpreter.ts +319 -0
- package/src/state-machine/types.ts +89 -0
- package/src/testing/action-trace.ts +209 -0
- package/src/testing/blueprint-test-runner.ts +214 -0
- package/src/testing/graph-walker.ts +249 -0
- package/src/testing/index.ts +69 -0
- package/src/testing/nrt-comparator.ts +199 -0
- package/src/testing/nrt-types.ts +230 -0
- package/src/testing/test-actions.ts +645 -0
- package/src/testing/test-compiler.ts +278 -0
- package/src/testing/test-runner.ts +444 -0
- package/src/testing/types.ts +231 -0
- package/src/validation/definition-validator.ts +812 -0
- package/src/validation/index.ts +13 -0
- package/tsconfig.json +26 -0
- package/vitest.config.ts +8 -0
package/src/dsl/lexer.ts
ADDED
|
@@ -0,0 +1,579 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* DSL Lexer — line-level tokenizer for the noun-first DSL.
|
|
3
|
+
*
|
|
4
|
+
* Each line is classified independently based on pattern matching.
|
|
5
|
+
* Indentation is measured as the number of leading spaces (2-space indent).
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import type {
|
|
9
|
+
LineToken,
|
|
10
|
+
LineData,
|
|
11
|
+
Emphasis,
|
|
12
|
+
Constraint,
|
|
13
|
+
} from './types';
|
|
14
|
+
|
|
15
|
+
// =============================================================================
|
|
16
|
+
// Section keywords (bare nouns that start structural blocks)
|
|
17
|
+
// =============================================================================
|
|
18
|
+
|
|
19
|
+
const SECTION_KEYWORDS = new Set([
|
|
20
|
+
'things', 'paths', 'levels', 'numbers', 'tabs',
|
|
21
|
+
'controls', 'overlay', 'actions',
|
|
22
|
+
]);
|
|
23
|
+
|
|
24
|
+
// =============================================================================
|
|
25
|
+
// Qualifier patterns
|
|
26
|
+
// =============================================================================
|
|
27
|
+
|
|
28
|
+
const QUALIFIER_PATTERNS: Array<{
|
|
29
|
+
pattern: RegExp;
|
|
30
|
+
kind: 'order' | 'searchable' | 'filterable' | 'pagination';
|
|
31
|
+
}> = [
|
|
32
|
+
{ pattern: /^(\w[\w\s]*)\s+first$/, kind: 'order' },
|
|
33
|
+
{ pattern: /^searchable\s+by\s+(.+)$/, kind: 'searchable' },
|
|
34
|
+
{ pattern: /^filterable\s+by\s+(.+)$/, kind: 'filterable' },
|
|
35
|
+
{ pattern: /^(\d+)\s+at\s+a\s+time$/, kind: 'pagination' },
|
|
36
|
+
];
|
|
37
|
+
|
|
38
|
+
// =============================================================================
|
|
39
|
+
// Known pre-type adjectives
|
|
40
|
+
// =============================================================================
|
|
41
|
+
|
|
42
|
+
const TYPE_ADJECTIVES = new Set([
|
|
43
|
+
'required', 'unique', 'computed', 'non-negative', 'positive',
|
|
44
|
+
'negative', 'lowercase', 'uppercase', 'optional', 'readonly',
|
|
45
|
+
]);
|
|
46
|
+
|
|
47
|
+
// =============================================================================
|
|
48
|
+
// Known base types (for field definitions)
|
|
49
|
+
// =============================================================================
|
|
50
|
+
|
|
51
|
+
const FIELD_TYPES = new Set([
|
|
52
|
+
'text', 'number', 'integer', 'time', 'rich text', 'choice',
|
|
53
|
+
]);
|
|
54
|
+
|
|
55
|
+
// =============================================================================
|
|
56
|
+
// Content roles (for display, NOT field definitions)
|
|
57
|
+
// =============================================================================
|
|
58
|
+
|
|
59
|
+
const CONTENT_ROLES = new Set([
|
|
60
|
+
'tag', 'card', 'progress', 'meter', 'slider', 'timeline', 'image',
|
|
61
|
+
]);
|
|
62
|
+
|
|
63
|
+
// =============================================================================
|
|
64
|
+
// Pronouns that start content lines
|
|
65
|
+
// =============================================================================
|
|
66
|
+
|
|
67
|
+
const PRONOUNS = new Set(['its', 'my', 'the', 'this', 'these']);
|
|
68
|
+
|
|
69
|
+
// =============================================================================
|
|
70
|
+
// Public API
|
|
71
|
+
// =============================================================================
|
|
72
|
+
|
|
73
|
+
/**
|
|
74
|
+
* Tokenize a full DSL source string into line tokens.
|
|
75
|
+
*/
|
|
76
|
+
export function tokenize(source: string): LineToken[] {
|
|
77
|
+
const rawLines = source.split('\n');
|
|
78
|
+
return rawLines.map((raw, i) => tokenizeLine(raw, i + 1));
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/**
|
|
82
|
+
* Tokenize a single line of DSL source.
|
|
83
|
+
*/
|
|
84
|
+
export function tokenizeLine(raw: string, lineNumber: number = 1): LineToken {
|
|
85
|
+
const indent = measureIndent(raw);
|
|
86
|
+
const trimmed = raw.trim();
|
|
87
|
+
const data = classifyLine(trimmed);
|
|
88
|
+
return { indent, lineNumber, raw, data };
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
// =============================================================================
|
|
92
|
+
// Indentation
|
|
93
|
+
// =============================================================================
|
|
94
|
+
|
|
95
|
+
function measureIndent(line: string): number {
|
|
96
|
+
let count = 0;
|
|
97
|
+
for (const ch of line) {
|
|
98
|
+
if (ch === ' ') count++;
|
|
99
|
+
else if (ch === '\t') count += 2;
|
|
100
|
+
else break;
|
|
101
|
+
}
|
|
102
|
+
return count;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
// =============================================================================
|
|
106
|
+
// Line Classification
|
|
107
|
+
// =============================================================================
|
|
108
|
+
|
|
109
|
+
/**
 * Classify one trimmed line of DSL source into a LineData variant.
 *
 * Patterns are tried in a fixed order from most to least specific; the order
 * is load-bearing for ambiguous lines (see the inline notes, e.g. data
 * sources must be tried before content/field parsing). Anything that matches
 * no pattern falls through to { type: 'unknown' }.
 */
function classifyLine(trimmed: string): LineData {
  // Blank
  if (trimmed === '') return { type: 'blank' };

  // Comment: '#' prefix; the text after the hash is kept (trimmed)
  if (trimmed.startsWith('#')) {
    return { type: 'comment', text: trimmed.slice(1).trim() };
  }

  // Pages (exact keyword)
  if (trimmed === 'pages') return { type: 'pages' };

  // Section keywords (things, paths, levels, ...)
  if (SECTION_KEYWORDS.has(trimmed)) {
    return { type: 'section', name: trimmed };
  }

  // Tagged: `tagged: a, b, c` — a comma-separated tag list
  const taggedMatch = trimmed.match(/^tagged:\s*(.+)$/);
  if (taggedMatch) {
    return {
      type: 'tagged',
      tags: taggedMatch[1].split(',').map(t => t.trim()),
    };
  }

  // Path mapping: /path → view (context); the parenthesized context is optional
  const pathMappingMatch = trimmed.match(
    /^(\/[\w\/:.-]+)\s*→\s*([\w\s]+?)(?:\s*\((\w+)\))?$/
  );
  if (pathMappingMatch) {
    return {
      type: 'path_mapping',
      path: pathMappingMatch[1],
      view: pathMappingMatch[2].trim(),
      context: pathMappingMatch[3],
    };
  }

  // Level definition: N: "name", from N xp
  const levelMatch = trimmed.match(
    /^(\d+):\s*"([^"]+)",\s*from\s+(\d+)\s+xp$/
  );
  if (levelMatch) {
    return {
      type: 'level_def',
      level: parseInt(levelMatch[1], 10),
      title: levelMatch[2],
      fromXp: parseInt(levelMatch[3], 10),
    };
  }

  // Starts at: initial state of a state machine
  const startsAtMatch = trimmed.match(/^starts\s+at\s+(.+)$/);
  if (startsAtMatch) {
    return { type: 'starts_at', state: startsAtMatch[1].trim() };
  }

  // Transition: can verb → state [, guard]
  // (a guard containing commas is captured whole in group 3)
  const transitionMatch = trimmed.match(
    /^can\s+(.+?)\s*→\s*(.+?)(?:,\s*(.+))?$/
  );
  if (transitionMatch) {
    return {
      type: 'transition',
      verb: transitionMatch[1].trim(),
      target: transitionMatch[2].trim(),
      guard: transitionMatch[3]?.trim(),
    };
  }

  // When clause — note a bare `when` yields an empty condition string
  if (trimmed.startsWith('when ') || trimmed === 'when') {
    return { type: 'when', condition: trimmed.slice(5).trim() };
  }

  // Set action: set field = expr
  const setMatch = trimmed.match(/^set\s+(.+?)\s*=\s*(.+)$/);
  if (setMatch) {
    return {
      type: 'set_action',
      field: setMatch[1].trim(),
      expression: setMatch[2].trim(),
    };
  }

  // Do action: do verb
  if (trimmed.startsWith('do ')) {
    return { type: 'do_action', action: trimmed.slice(3).trim() };
  }

  // Go action: go to path
  const goMatch = trimmed.match(/^go\s+to\s+(.+)$/);
  if (goMatch) {
    return { type: 'go_action', path: goMatch[1].trim() };
  }

  // Tell action: tell target "message"
  const tellMatch = trimmed.match(/^tell\s+(.+?)\s+"([^"]+)"$/);
  if (tellMatch) {
    return {
      type: 'tell_action',
      target: tellMatch[1].trim(),
      message: tellMatch[2],
    };
  }

  // Show action: show content [briefly]
  const showMatch = trimmed.match(/^show\s+(.+?)(?:\s+(briefly))?$/);
  if (showMatch) {
    return {
      type: 'show_action',
      content: showMatch[1].trim(),
      modifier: showMatch[2],
    };
  }

  // Search: search target
  if (trimmed.startsWith('search ')) {
    return { type: 'search', target: trimmed.slice(7).trim() };
  }

  // Each/iteration: each subject [as role] [, emphasis]
  const eachMatch = trimmed.match(
    /^each\s+(.+?)(?:\s+as\s+([\w\s]+?))?(?:,\s*(big|small))?$/
  );
  if (eachMatch) {
    return {
      type: 'iteration',
      subject: eachMatch[1].trim(),
      role: eachMatch[2]?.trim(),
      emphasis: eachMatch[3] as Emphasis | undefined,
    };
  }

  // Qualifier patterns (newest first, searchable by, etc.)
  for (const { pattern, kind } of QUALIFIER_PATTERNS) {
    const qMatch = trimmed.match(pattern);
    if (qMatch) {
      return { type: 'qualifier', kind, value: qMatch[1] ?? trimmed };
    }
  }

  // Space/thing declaration: name @version
  // `a name @x.y.z` declares a thing; a bare `name @x.y.z` declares a space
  const versionedDeclMatch = trimmed.match(
    /^(?:a\s+)?(.+?)\s+@(\d+\.\d+\.\d+)$/
  );
  if (versionedDeclMatch) {
    const hasPrefix = trimmed.startsWith('a ');
    const name = versionedDeclMatch[1].replace(/^a\s+/, '').trim();
    if (hasPrefix) {
      return { type: 'thing_decl', name, version: versionedDeclMatch[2] };
    }
    return { type: 'space_decl', name, version: versionedDeclMatch[2] };
  }

  // Fragment definition: a name: (ends with colon)
  const fragmentMatch = trimmed.match(/^a\s+(.+):$/);
  if (fragmentMatch) {
    return { type: 'fragment_def', name: fragmentMatch[1].trim() };
  }

  // Thing reference in 'things' section: [a] name (kind)
  const thingRefMatch = trimmed.match(/^(?:a\s+)?(.+?)\s*\((\w+)\)$/);
  if (thingRefMatch) {
    return {
      type: 'thing_ref',
      name: thingRefMatch[1].trim(),
      kind: thingRefMatch[2],
    };
  }

  // String literal: "text" [, emphasis]
  const stringLitMatch = trimmed.match(
    /^"([^"]+)"(?:,\s*(big|small))?$/
  );
  if (stringLitMatch) {
    return {
      type: 'string_literal',
      text: stringLitMatch[1],
      emphasis: stringLitMatch[2] as Emphasis | undefined,
    };
  }

  // Navigation: trigger → path (tap, click, label)
  // Catch-all for arrows not matched by path_mapping/transition above
  const navMatch = trimmed.match(/^(.+?)\s*→\s*(.+)$/);
  if (navMatch) {
    return {
      type: 'navigation',
      trigger: navMatch[1].trim(),
      target: navMatch[2].trim(),
    };
  }

  // Data source: name from source [, qualifier] [for scope]
  // Must come before content/field_def since both can start with pronouns
  const dataSourceResult = tryParseDataSource(trimmed);
  if (dataSourceResult) return dataSourceResult;

  // Content display: pronoun field [, emphasis] [as role] [with "label"]
  // Must come before field_def since both use "as"
  const contentResult = tryParseContent(trimmed);
  if (contentResult) return contentResult;

  // Field definition: name as [adj]* type [, constraint]*
  // Only matches bare nouns (not pronouns) with known field types
  const fieldResult = tryParseFieldDef(trimmed);
  if (fieldResult) return fieldResult;

  // Grouping: collection by key
  const groupMatch = trimmed.match(/^(.+?)\s+by\s+(.+)$/);
  if (groupMatch) {
    return {
      type: 'grouping',
      collection: groupMatch[1].trim(),
      key: groupMatch[2].trim(),
    };
  }

  // State declaration: name [, final] — last resort before 'unknown'
  const stateResult = tryParseStateDef(trimmed);
  if (stateResult) return stateResult;

  return { type: 'unknown', text: trimmed };
}
|
|
334
|
+
|
|
335
|
+
// =============================================================================
|
|
336
|
+
// Utilities
|
|
337
|
+
// =============================================================================
|
|
338
|
+
|
|
339
|
+
/**
|
|
340
|
+
* Split a string on commas, but not inside [...] brackets.
|
|
341
|
+
*/
|
|
342
|
+
function splitOutsideBrackets(str: string): string[] {
|
|
343
|
+
const parts: string[] = [];
|
|
344
|
+
let current = '';
|
|
345
|
+
let depth = 0;
|
|
346
|
+
for (const ch of str) {
|
|
347
|
+
if (ch === '[') depth++;
|
|
348
|
+
else if (ch === ']') depth--;
|
|
349
|
+
|
|
350
|
+
if (ch === ',' && depth === 0) {
|
|
351
|
+
parts.push(current.trim());
|
|
352
|
+
current = '';
|
|
353
|
+
} else {
|
|
354
|
+
current += ch;
|
|
355
|
+
}
|
|
356
|
+
}
|
|
357
|
+
if (current.trim()) parts.push(current.trim());
|
|
358
|
+
return parts;
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
// =============================================================================
|
|
362
|
+
// Complex parsers
|
|
363
|
+
// =============================================================================
|
|
364
|
+
|
|
365
|
+
function tryParseFieldDef(trimmed: string): (FieldDefData & { type: 'field_def' }) | null {
|
|
366
|
+
// Pattern: name as [adj]* type [, constraint]*
|
|
367
|
+
const asMatch = trimmed.match(/^(.+?)\s+as\s+(.+)$/);
|
|
368
|
+
if (!asMatch) return null;
|
|
369
|
+
|
|
370
|
+
const name = asMatch[1].trim();
|
|
371
|
+
const rest = asMatch[2].trim();
|
|
372
|
+
|
|
373
|
+
// Reject if name starts with a pronoun — those are content, not field defs
|
|
374
|
+
const firstWord = name.split(/\s+/)[0];
|
|
375
|
+
if (PRONOUNS.has(firstWord)) return null;
|
|
376
|
+
|
|
377
|
+
// Reject if "as" is followed by a quoted string — that's a label (content)
|
|
378
|
+
if (rest.startsWith('"')) return null;
|
|
379
|
+
|
|
380
|
+
// Split on commas, but not inside brackets
|
|
381
|
+
const parts = splitOutsideBrackets(rest);
|
|
382
|
+
const typeSpec = parts[0];
|
|
383
|
+
const constraintParts = parts.slice(1);
|
|
384
|
+
|
|
385
|
+
// Parse type spec: [adj]* type
|
|
386
|
+
const typeWords = typeSpec.split(/\s+/);
|
|
387
|
+
const adjectives: string[] = [];
|
|
388
|
+
let baseType = '';
|
|
389
|
+
|
|
390
|
+
// Check for "choice of [...]"
|
|
391
|
+
const choiceMatch = typeSpec.match(/^(.+?\s+)?choice\s+of\s+\[(.+)\]$/);
|
|
392
|
+
if (choiceMatch) {
|
|
393
|
+
const prefix = choiceMatch[1]?.trim() ?? '';
|
|
394
|
+
if (prefix) {
|
|
395
|
+
for (const w of prefix.split(/\s+/)) {
|
|
396
|
+
if (TYPE_ADJECTIVES.has(w)) adjectives.push(w);
|
|
397
|
+
}
|
|
398
|
+
}
|
|
399
|
+
baseType = `choice of [${choiceMatch[2]}]`;
|
|
400
|
+
} else {
|
|
401
|
+
// Walk words: adjectives before the type, type is the rest
|
|
402
|
+
let i = 0;
|
|
403
|
+
while (i < typeWords.length && TYPE_ADJECTIVES.has(typeWords[i])) {
|
|
404
|
+
adjectives.push(typeWords[i]);
|
|
405
|
+
i++;
|
|
406
|
+
}
|
|
407
|
+
// Handle multi-word types like "rich text"
|
|
408
|
+
baseType = typeWords.slice(i).join(' ');
|
|
409
|
+
}
|
|
410
|
+
|
|
411
|
+
if (!baseType) return null;
|
|
412
|
+
|
|
413
|
+
// Only classify as field_def if the base type is a known FIELD type (not a content role)
|
|
414
|
+
// Content roles (tag, card, meter, etc.) are handled by tryParseContent
|
|
415
|
+
const knownFieldType = FIELD_TYPES.has(baseType) ||
|
|
416
|
+
baseType.startsWith('choice of') ||
|
|
417
|
+
adjectives.length > 0;
|
|
418
|
+
|
|
419
|
+
if (!knownFieldType) return null;
|
|
420
|
+
|
|
421
|
+
// If base type is a content role and no adjectives, this is content not a field
|
|
422
|
+
if (CONTENT_ROLES.has(baseType) && adjectives.length === 0) return null;
|
|
423
|
+
|
|
424
|
+
// Parse constraints
|
|
425
|
+
const constraints: Constraint[] = [];
|
|
426
|
+
for (const cp of constraintParts) {
|
|
427
|
+
const constraint = parseConstraint(cp);
|
|
428
|
+
if (constraint) constraints.push(constraint);
|
|
429
|
+
}
|
|
430
|
+
|
|
431
|
+
return {
|
|
432
|
+
type: 'field_def',
|
|
433
|
+
name,
|
|
434
|
+
adjectives,
|
|
435
|
+
baseType,
|
|
436
|
+
constraints,
|
|
437
|
+
};
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
function parseConstraint(text: string): Constraint | null {
|
|
441
|
+
const trimmed = text.trim();
|
|
442
|
+
|
|
443
|
+
// max N
|
|
444
|
+
const maxMatch = trimmed.match(/^max\s+(\d+)$/);
|
|
445
|
+
if (maxMatch) return { kind: 'max', value: parseInt(maxMatch[1], 10) };
|
|
446
|
+
|
|
447
|
+
// min N
|
|
448
|
+
const minMatch = trimmed.match(/^min\s+(\d+)$/);
|
|
449
|
+
if (minMatch) return { kind: 'min', value: parseInt(minMatch[1], 10) };
|
|
450
|
+
|
|
451
|
+
// default "value" or default value
|
|
452
|
+
const defaultMatch = trimmed.match(/^default\s+(.+)$/);
|
|
453
|
+
if (defaultMatch) {
|
|
454
|
+
const val = defaultMatch[1].trim();
|
|
455
|
+
const num = Number(val);
|
|
456
|
+
return { kind: 'default', value: isNaN(num) ? val.replace(/^"(.*)"$/, '$1') : num };
|
|
457
|
+
}
|
|
458
|
+
|
|
459
|
+
// between X and Y
|
|
460
|
+
const betweenMatch = trimmed.match(/^between\s+(\d+)\s+and\s+(\d+)$/);
|
|
461
|
+
if (betweenMatch) {
|
|
462
|
+
return {
|
|
463
|
+
kind: 'between',
|
|
464
|
+
value: parseInt(betweenMatch[1], 10),
|
|
465
|
+
value2: parseInt(betweenMatch[2], 10),
|
|
466
|
+
};
|
|
467
|
+
}
|
|
468
|
+
|
|
469
|
+
// unique
|
|
470
|
+
if (trimmed === 'unique') return { kind: 'unique', value: true };
|
|
471
|
+
|
|
472
|
+
return null;
|
|
473
|
+
}
|
|
474
|
+
|
|
475
|
+
function tryParseContent(trimmed: string): (ContentData & { type: 'content' }) | null {
|
|
476
|
+
// Patterns:
|
|
477
|
+
// its field [, emphasis] [as role] [with "label"]
|
|
478
|
+
// my field [, emphasis] [as role] [with "label"]
|
|
479
|
+
// the field [, emphasis] [as role] [with "label"]
|
|
480
|
+
// field.name [, emphasis]
|
|
481
|
+
|
|
482
|
+
const pronounMatch = trimmed.match(
|
|
483
|
+
/^(its|my|the|this)\s+(.+?)(?:,\s*(big|small))?(?:\s+as\s+(.+?))?(?:\s+with\s+"([^"]+)")?$/
|
|
484
|
+
);
|
|
485
|
+
if (pronounMatch) {
|
|
486
|
+
return {
|
|
487
|
+
type: 'content',
|
|
488
|
+
pronoun: pronounMatch[1],
|
|
489
|
+
field: pronounMatch[2].trim(),
|
|
490
|
+
emphasis: pronounMatch[3] as Emphasis | undefined,
|
|
491
|
+
role: pronounMatch[4]?.trim(),
|
|
492
|
+
label: pronounMatch[5],
|
|
493
|
+
};
|
|
494
|
+
}
|
|
495
|
+
|
|
496
|
+
// Also handle: field as "Label" (without pronoun, in numbers sections)
|
|
497
|
+
const labelMatch = trimmed.match(
|
|
498
|
+
/^([\w\s]+?)\s+as\s+"([^"]+)"(?:\s+with\s+"([^"]+)")?$/
|
|
499
|
+
);
|
|
500
|
+
if (labelMatch) {
|
|
501
|
+
return {
|
|
502
|
+
type: 'content',
|
|
503
|
+
field: labelMatch[1].trim(),
|
|
504
|
+
label: labelMatch[2],
|
|
505
|
+
};
|
|
506
|
+
}
|
|
507
|
+
|
|
508
|
+
return null;
|
|
509
|
+
}
|
|
510
|
+
|
|
511
|
+
function tryParseDataSource(trimmed: string): (LineData & { type: 'data_source' }) | null {
|
|
512
|
+
// Patterns:
|
|
513
|
+
// my projects from project
|
|
514
|
+
// my projects from project, live
|
|
515
|
+
// my recent from project, 5 newest
|
|
516
|
+
// this project from project
|
|
517
|
+
// its tasks from task for this project
|
|
518
|
+
// my media from media-library for this project
|
|
519
|
+
|
|
520
|
+
const fromMatch = trimmed.match(
|
|
521
|
+
/^(.+?)\s+from\s+([\w-]+)(?:,\s*(.+?))?(?:\s+for\s+(.+))?$/
|
|
522
|
+
);
|
|
523
|
+
if (!fromMatch) return null;
|
|
524
|
+
|
|
525
|
+
const alias = fromMatch[1].trim();
|
|
526
|
+
const source = fromMatch[2].trim();
|
|
527
|
+
const qualifierOrLive = fromMatch[3]?.trim();
|
|
528
|
+
const scope = fromMatch[4]?.trim();
|
|
529
|
+
|
|
530
|
+
return {
|
|
531
|
+
type: 'data_source',
|
|
532
|
+
alias,
|
|
533
|
+
source,
|
|
534
|
+
isLive: qualifierOrLive === 'live',
|
|
535
|
+
qualifier: qualifierOrLive !== 'live' ? qualifierOrLive : undefined,
|
|
536
|
+
scope,
|
|
537
|
+
};
|
|
538
|
+
}
|
|
539
|
+
|
|
540
|
+
function tryParseStateDef(trimmed: string): (LineData & { type: 'state_decl' }) | null {
|
|
541
|
+
// State names are bare identifiers, possibly multi-word, optionally with ", final"
|
|
542
|
+
// Must NOT start with structural keywords
|
|
543
|
+
const nonStateStarters = [
|
|
544
|
+
'a ', 'an ', 'the ', 'my ', 'its ', 'this ',
|
|
545
|
+
'each ', 'when ', 'can ', 'set ', 'do ', 'go ',
|
|
546
|
+
'tell ', 'show ', 'search ', 'from ', 'emit ',
|
|
547
|
+
'tagged', 'starts ', 'follow ', 'facing ', 'playing ',
|
|
548
|
+
'volume ', 'playback ',
|
|
549
|
+
];
|
|
550
|
+
|
|
551
|
+
for (const prefix of nonStateStarters) {
|
|
552
|
+
if (trimmed.startsWith(prefix)) return null;
|
|
553
|
+
}
|
|
554
|
+
|
|
555
|
+
// Reject if it contains "→" (navigation)
|
|
556
|
+
if (trimmed.includes('→')) return null;
|
|
557
|
+
|
|
558
|
+
// Reject if it contains " from " (data source)
|
|
559
|
+
if (trimmed.includes(' from ')) return null;
|
|
560
|
+
|
|
561
|
+
// Reject if it contains " by " (grouping)
|
|
562
|
+
if (trimmed.includes(' by ')) return null;
|
|
563
|
+
|
|
564
|
+
// Reject if it contains " as " (content/field)
|
|
565
|
+
if (trimmed.includes(' as ')) return null;
|
|
566
|
+
|
|
567
|
+
// Check for ", final"
|
|
568
|
+
const finalMatch = trimmed.match(/^(.+?),\s*final$/);
|
|
569
|
+
if (finalMatch) {
|
|
570
|
+
return { type: 'state_decl', name: finalMatch[1].trim(), isFinal: true };
|
|
571
|
+
}
|
|
572
|
+
|
|
573
|
+
// Plain state: must be simple identifier(s)
|
|
574
|
+
if (/^[a-z][\w\s]*$/i.test(trimmed) && !SECTION_KEYWORDS.has(trimmed) && trimmed !== 'pages') {
|
|
575
|
+
return { type: 'state_decl', name: trimmed, isFinal: false };
|
|
576
|
+
}
|
|
577
|
+
|
|
578
|
+
return null;
|
|
579
|
+
}
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* DSL Parser — builds a tree from tokenized lines.
|
|
3
|
+
*
|
|
4
|
+
* Takes the flat list of LineTokens from the lexer and produces
|
|
5
|
+
* a nested ASTNode tree based on indentation levels.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import type { LineToken, ASTNode, ParseResult, ParseError } from './types';
|
|
9
|
+
|
|
10
|
+
/**
|
|
11
|
+
* Parse tokenized lines into an AST tree.
|
|
12
|
+
* Indentation defines parent-child nesting.
|
|
13
|
+
*/
|
|
14
|
+
export function parse(tokens: LineToken[]): ParseResult {
|
|
15
|
+
const errors: ParseError[] = [];
|
|
16
|
+
const roots: ASTNode[] = [];
|
|
17
|
+
|
|
18
|
+
// Filter out blanks and comments for tree building
|
|
19
|
+
const meaningful = tokens.filter(
|
|
20
|
+
t => t.data.type !== 'blank' && t.data.type !== 'comment'
|
|
21
|
+
);
|
|
22
|
+
|
|
23
|
+
if (meaningful.length === 0) {
|
|
24
|
+
return { nodes: [], errors };
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
// Stack-based tree building
|
|
28
|
+
// Each entry: { node, indent }
|
|
29
|
+
const stack: Array<{ node: ASTNode; indent: number }> = [];
|
|
30
|
+
|
|
31
|
+
for (const token of meaningful) {
|
|
32
|
+
const node: ASTNode = { token, children: [] };
|
|
33
|
+
|
|
34
|
+
// Pop stack until we find a parent with lower indent
|
|
35
|
+
while (stack.length > 0 && stack[stack.length - 1].indent >= token.indent) {
|
|
36
|
+
stack.pop();
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
if (stack.length === 0) {
|
|
40
|
+
// Root level
|
|
41
|
+
roots.push(node);
|
|
42
|
+
} else {
|
|
43
|
+
// Child of the top of stack
|
|
44
|
+
stack[stack.length - 1].node.children.push(node);
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
stack.push({ node, indent: token.indent });
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
// Validate structural rules
|
|
51
|
+
validateStructure(roots, errors);
|
|
52
|
+
|
|
53
|
+
return { nodes: roots, errors };
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* Validate structural rules of the parsed tree.
|
|
58
|
+
*/
|
|
59
|
+
function validateStructure(nodes: ASTNode[], errors: ParseError[]): void {
|
|
60
|
+
for (const node of nodes) {
|
|
61
|
+
const { data } = node.token;
|
|
62
|
+
|
|
63
|
+
// Transitions must be inside states
|
|
64
|
+
if (data.type === 'transition' && !isInsideState(node, nodes)) {
|
|
65
|
+
// This is a soft warning — transitions at root level are unusual
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
// Set actions must be inside when clauses
|
|
69
|
+
if (data.type === 'set_action' && !hasAncestorType(node, 'when', nodes)) {
|
|
70
|
+
// Soft warning
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
// Recurse
|
|
74
|
+
if (node.children.length > 0) {
|
|
75
|
+
validateStructure(node.children, errors);
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
function isInsideState(_node: ASTNode, _roots: ASTNode[]): boolean {
|
|
81
|
+
// Simplified check — full implementation would trace parent chain
|
|
82
|
+
return true;
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
function hasAncestorType(_node: ASTNode, _type: string, _roots: ASTNode[]): boolean {
|
|
86
|
+
// Simplified check
|
|
87
|
+
return true;
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
/**
|
|
91
|
+
* Walk all nodes in the tree, depth-first.
|
|
92
|
+
*/
|
|
93
|
+
export function walkTree(
|
|
94
|
+
nodes: ASTNode[],
|
|
95
|
+
visitor: (node: ASTNode, depth: number) => void,
|
|
96
|
+
depth: number = 0,
|
|
97
|
+
): void {
|
|
98
|
+
for (const node of nodes) {
|
|
99
|
+
visitor(node, depth);
|
|
100
|
+
walkTree(node.children, visitor, depth + 1);
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Find all nodes of a given type in the tree.
|
|
106
|
+
*/
|
|
107
|
+
export function findByType(nodes: ASTNode[], type: string): ASTNode[] {
|
|
108
|
+
const results: ASTNode[] = [];
|
|
109
|
+
walkTree(nodes, (node) => {
|
|
110
|
+
if (node.token.data.type === type) {
|
|
111
|
+
results.push(node);
|
|
112
|
+
}
|
|
113
|
+
});
|
|
114
|
+
return results;
|
|
115
|
+
}
|