@bablr/boot 0.2.2 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/languages/cstml.js +47 -52
- package/lib/languages/instruction.js +9 -10
- package/lib/languages/regex.js +42 -35
- package/lib/languages/spamex.js +6 -11
- package/package.json +1 -1
package/lib/languages/cstml.js
CHANGED

@@ -78,14 +78,15 @@ const covers = buildCovers({
     'String',
     'Content',
     'UnsignedInteger',
+    'Flags',
   ],
   [sym.fragment]: ['Attributes', 'Fragment'],
   Attribute: ['MappingAttribute', 'BooleanAttribute'],
   AttributeValue: ['String', 'Number'],
   TagType: ['Identifier', 'GlobalIdentifier'],
-  Terminal: ['Literal', 'Trivia'
+  Terminal: ['Literal', 'Trivia'],
   PropertyValue: ['Gap', 'Node', 'Null'],
-  EmbeddedTerminal: ['Literal'
+  EmbeddedTerminal: ['Literal'],
   Number: ['Integer', 'Infinity'],
 });

@@ -110,10 +111,10 @@ const grammar = class CSTMLMiniparserGrammar {

   // @Node
   DoctypeTag(p) {
-    p.eat('<!', PN, { path: '
+    p.eat('<!', PN, { path: 'openToken' });
     p.eatProduction('UnsignedInteger', { path: 'version' });
-    p.eat(':', PN, { path: '
-    p.eat('cstml', KW, { path: '
+    p.eat(':', PN, { path: 'versionSeparatorToken' });
+    p.eat('cstml', KW, { path: 'doctypeToken' });

     let sp = p.eatMatchTrivia(_);

@@ -122,17 +123,17 @@ const grammar = class CSTMLMiniparserGrammar {
       sp = p.eatMatchTrivia(_);
     }

-    p.eat('>', PN, { path: '
+    p.eat('>', PN, { path: 'closeToken' });
   }

   // @Node
   Null(p) {
-    p.eat('null', KW, { path: '
+    p.eat('null', KW, { path: 'sigilToken' });
   }

   // @Node
   Gap(p) {
-    p.eat('<//>', PN, { path: '
+    p.eat('<//>', PN, { path: 'sigilToken' });
   }

   // @Node
@@ -160,7 +161,11 @@ const grammar = class CSTMLMiniparserGrammar {
     const { token } = props || {};

     if (token) {
-      p.
+      if (p.match(/<\*?@/y)) {
+        p.eatProduction('Node');
+      } else {
+        p.eatProduction('Literal');
+      }
     } else {
       if (p.match(/<\*?#/y)) {
         p.eatProduction('Node');
@@ -176,7 +181,7 @@ const grammar = class CSTMLMiniparserGrammar {
   Property(p) {
     p.eatProduction('Reference', { path: 'reference' });
     p.eatMatchTrivia(_);
-    p.eat(':', PN, { path: '
+    p.eat(':', PN, { path: 'mapToken' });
     p.eatMatchTrivia(_);
     p.eatProduction('PropertyValue', { path: 'value' });
   }
@@ -193,27 +198,36 @@ const grammar = class CSTMLMiniparserGrammar {

   // @Node
   OpenFragmentTag(p) {
-    p.eat('<', PN, { path: '
-    p.eat('>', PN, { path: '
+    p.eat('<', PN, { path: 'openToken', startSpan: 'Tag', balanced: '>' });
+    p.eat('>', PN, { path: 'closeToken', endSpan: 'Tag', balancer: true });
   }

   // @Node
-
-    p.
-
-
-    let
-    let
-    let exp = p.eatMatch('+', PN, { path: 'expressionFlag' });
+  Flags(p) {
+    let tr = p.eatMatch('#', PN, { path: 'triviaToken' });
+    p.eatMatch('~', PN, { path: 'intrinsicToken' });
+    p.eatMatch('*', PN, { path: 'tokenToken' });
+    let esc = p.eatMatch('@', PN, { path: 'escapeToken' });
+    let exp = p.eatMatch('+', PN, { path: 'expressionToken' });

     if ((tr && esc) || (exp && (tr || esc))) throw new Error();
+  }
+
+  // @Node
+  OpenNodeTag(p) {
+    p.eat('<', PN, { path: 'openToken', startSpan: 'Tag', balanced: '>' });
+
+    let flags = null;
+    if (!p.atExpression) {
+      flags = p.eatProduction('Flags', { path: 'flags' });
+    }

     p.eatProduction('TagType', { path: 'type' });

     let sp = p.eatMatchTrivia(_);

     let iv;
-    if (
+    if (flags.properties.intrinsic && sp && (p.match(/['"/]/y) || p.atExpression)) {
       iv = p.eatProduction('String', { path: 'intrinsicValue' });

       sp = p.eatMatchTrivia(_);
@@ -226,21 +240,21 @@ const grammar = class CSTMLMiniparserGrammar {

     p.eatMatchTrivia(_);
     if (iv) {
-      p.eat('/', PN, { path: '
+      p.eat('/', PN, { path: 'selfClosingTagToken' });
     }
-    p.eat('>', PN, { path: '
+    p.eat('>', PN, { path: 'closeToken', endSpan: 'Tag', balancer: true });
   }

   // @Node
   CloseNodeTag(p) {
-    p.eat('</', PN, { path: '
-    p.eat('>', PN, { path: '
+    p.eat('</', PN, { path: 'openToken', startSpan: 'Tag', balanced: '>' });
+    p.eat('>', PN, { path: 'closeToken', endSpan: 'Tag', balancer: true });
   }

   // @Node
   CloseFragmentTag(p) {
-    p.eat('</', PN, { path: '
-    p.eat('>', PN, { path: '
+    p.eat('</', PN, { path: 'openToken', startSpan: 'Tag', balanced: '>' });
+    p.eat('>', PN, { path: 'closeToken', endSpan: 'Tag', balancer: true });
   }

   // @Fragment
@@ -271,7 +285,7 @@ const grammar = class CSTMLMiniparserGrammar {

   // @Node
   BooleanAttribute(p) {
-    p.eat('!', KW, { path: '
+    p.eat('!', KW, { path: 'negateToken' });
     p.eat(/\w+/y, ID, { path: 'key' });
   }

@@ -279,7 +293,7 @@ const grammar = class CSTMLMiniparserGrammar {
   MappingAttribute(p) {
     p.eat(/\w+/y, LIT, { path: 'key' });
     p.eatMatchTrivia(_);
-    p.eat('=', PN, { path: '
+    p.eat('=', PN, { path: 'mapToken' });
     p.eatMatchTrivia(_);
     p.eatProduction('AttributeValue', { path: 'value' });
   }
@@ -296,7 +310,7 @@ const grammar = class CSTMLMiniparserGrammar {
   TagType(p) {
     if (p.match(/[\w.]+:/y)) {
       p.eatProduction('LanguageReference', { path: 'language' });
-      p.eat(':', PN, { path: '
+      p.eat(':', PN, { path: 'namespaceSeparatorToken' });
       p.eatProduction('Identifier', { path: 'type' });
     } else {
       p.eatProduction('Identifier', { path: 'type' });
@@ -326,11 +340,7 @@ const grammar = class CSTMLMiniparserGrammar {

   // @Cover
   Terminal(p) {
-    if (p.match(
-      p.eatProduction('Escape');
-    } else if (p.match(/#['"]/y)) {
-      p.eatProduction('Trivia');
-    } else if (p.match(/['"]/y)) {
+    if (p.match(/['"]/y)) {
       p.eatProduction('Literal');
     } else {
       throw new Error();
@@ -341,7 +351,7 @@ const grammar = class CSTMLMiniparserGrammar {
   Reference(p) {
     p.eatProduction('Identifier', { path: 'name' });
     p.eatMatchTrivia(_);
-    p.eatMatch('[]', PN, { path: '
+    p.eatMatch('[]', PN, { path: 'arrayToken' });
   }

   // @Cover
@@ -355,21 +365,6 @@ const grammar = class CSTMLMiniparserGrammar {
     }
   }

-  // @Node
-  Escape(p) {
-    p.eat('!', PN, { path: 'escapeOperator' });
-    p.eatProduction('String', { path: 'rawValue' });
-    p.eatMatchTrivia(_);
-    p.eat(':', PN, { path: 'rawOperator' });
-    p.eatProduction('String', { path: 'value' });
-  }
-
-  // @Node
-  Trivia(p) {
-    p.eat('#', PN, { path: 'trivializeOperator' });
-    p.eatProduction('String', { path: 'value' });
-  }
-
   // @Node
   Literal(p) {
     p.eatProduction('String', { path: 'value' });
@@ -417,11 +412,11 @@ const grammar = class CSTMLMiniparserGrammar {

     const span = q === '"' ? 'Double' : 'Single';

-    p.eat(q, PN, { path: '
+    p.eat(q, PN, { path: 'openToken', startSpan: span, balanced: q });
     while (p.match(/./sy) || p.atExpression) {
       p.eatProduction('Content', { path: 'content' });
     }
-    p.eat(q, PN, { path: '
+    p.eat(q, PN, { path: 'closeToken', endSpan: span, balancer: true });
   }

   // @Node
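Note: the cstml.js changes rename most eaten paths to a consistent *Token scheme, drop the Escape and Trivia productions, and extract flag parsing into a standalone Flags production. For reference, the flag sigils and the combination rule enforced by the new Flags production can be summarized as follows (a sketch inferred from the diff above, not taken from @bablr/boot documentation; the helper name isValidFlagCombination is illustrative only):

// Sigil -> the path each flag is eaten under in Flags(p)
const flagSigils = {
  '#': 'triviaToken',
  '~': 'intrinsicToken',
  '*': 'tokenToken',
  '@': 'escapeToken',
  '+': 'expressionToken',
};

// Mirrors the guard in Flags(p): trivia and escape are mutually exclusive,
// and the expression flag cannot be combined with either of them.
const isValidFlagCombination = (sigils) => {
  const tr = sigils.includes('#');
  const esc = sigils.includes('@');
  const exp = sigils.includes('+');
  return !((tr && esc) || (exp && (tr || esc)));
};

console.log(isValidFlagCombination('~*')); // true
console.log(isValidFlagCombination('#@')); // false
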

package/lib/languages/instruction.js
CHANGED

@@ -37,7 +37,6 @@ const grammar = class InstructionMiniparserGrammar {
   Call(p) {
     p.eat(/\w+/y, ID, { path: 'verb' });
     p.eatMatchTrivia(_);
-    p.eatMatch(/[!#]/y, PN, { path: 'verbSuffix' });
     p.eatProduction('Tuple', { path: 'arguments' });
   }

@@ -68,7 +67,7 @@ const grammar = class InstructionMiniparserGrammar {

   // @Node
   Object(p) {
-    p.eat('{', PN, { path: '
+    p.eat('{', PN, { path: 'openToken', balanced: '}' });

     p.eatMatchTrivia(_);

@@ -82,21 +81,21 @@ const grammar = class InstructionMiniparserGrammar {

     p.eatMatchTrivia(_);

-    p.eat('}', PN, { path: '
+    p.eat('}', PN, { path: 'closeToken', balancer: true });
   }

   // @Node
   Property(p) {
     p.eat(/\w+/y, LIT, { path: 'key' });
     p.eatMatchTrivia(_);
-    p.eat(':', PN, { path: '
+    p.eat(':', PN, { path: 'mapToken' });
     p.eatMatchTrivia(_);
     p.eatProduction('Expression', { path: 'value' });
   }

   // @Node
   Array(p) {
-    p.eat('[', PN, { path: '
+    p.eat('[', PN, { path: 'openToken', balanced: ']' });

     p.eatMatchTrivia(_);

@@ -108,12 +107,12 @@ const grammar = class InstructionMiniparserGrammar {
       first = false;
     }

-    p.eat(']', PN, { path: '
+    p.eat(']', PN, { path: 'closeToken', balancer: true });
   }

   // @Node
   Tuple(p) {
-    p.eat('(', PN, { path: '
+    p.eat('(', PN, { path: 'openToken', balanced: ')' });

     let sep = p.eatMatchTrivia(_);

@@ -124,17 +123,17 @@ const grammar = class InstructionMiniparserGrammar {
       i++;
     }

-    p.eat(')', PN, { path: '
+    p.eat(')', PN, { path: 'closeToken', balancer: true });
   }

   // @Node
   Boolean(p) {
-    p.eat(/true|false/y, KW, { path: '
+    p.eat(/true|false/y, KW, { path: 'sigilToken' });
   }

   // @Node
   Null(p) {
-    p.eat('null', KW, { path: '
+    p.eat('null', KW, { path: 'sigilToken' });
   }
 };

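Note: instruction.js adopts the same delimiter convention as cstml.js: the opening punctuator is eaten at path openToken and declares its expected closer via the balanced option, and the closing punctuator is eaten at closeToken with balancer: true. A minimal sketch of the pattern (the production name Block is illustrative, and the option semantics are inferred from the diff, not from @bablr/boot documentation):

const PN = 'Punctuator';

const grammar = class ExampleMiniparserGrammar {
  // @Node
  Block(p) {
    // the opener names the token expected to balance it
    p.eat('{', PN, { path: 'openToken', balanced: '}' });

    // ...interior productions would be eaten here...

    // the closer marks itself as the balancing token
    p.eat('}', PN, { path: 'closeToken', balancer: true });
  }
};
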
package/lib/languages/regex.js
CHANGED

@@ -12,7 +12,7 @@ const dependencies = {};
 const covers = buildCovers({
   [node]: [
     'RegExpLiteral',
-    '
+    'Flags',
     'Pattern',
     'Alternative',
     'Group',
@@ -55,7 +55,6 @@ const flags = {
   unicode: 'u',
   sticky: 'y',
 };
-const flagsReverse = Object.fromEntries(Object.entries(flags).map(([key, value]) => [value, key]));

 const PN = 'Punctuator';
 const KW = 'Keyword';
@@ -115,26 +114,32 @@ const cookEscape = (escape, span) => {
 const grammar = class RegexMiniparserGrammar {
   // @Node
   Pattern(p) {
-    p.eat('/', PN, { path: '
+    p.eat('/', PN, { path: 'openToken', balanced: '/' });
     p.eatProduction('Alternatives', { path: 'alternatives[]' });
-    p.eat('/', PN, { path: '
-
+    p.eat('/', PN, { path: 'closeToken', balancer: true });
+
+    if (p.match(/[gimsuy]/y || p.atExpression)) {
+      p.eatProduction('Flags', { path: 'flags' });
+    }
   }

+  // @Node
   Flags(p) {
-    const
+    const flagsStr = p.match(/[gimsuy]+/y) || '';

-    if (!unique(
+    if (flagsStr && !unique(flagsStr)) throw new Error('flags must be unique');

-
-
+    const attrs = {};
+
+    for (const { 0: name, 1: chr } of Object.entries(flags)) {
+      attrs[name] = flagsStr.includes(chr);
     }
-    }

-
-
-
-
+    for (const flag of flagsStr) {
+      p.eat(flag, KW, { path: 'tokens[]' });
+    }
+
+    return { attrs };
   }

   Alternatives(p) {
@@ -181,16 +186,16 @@ const grammar = class RegexMiniparserGrammar {

   // @Node
   Group(p) {
-    p.eat('(?:', PN, { path: '
+    p.eat('(?:', PN, { path: 'openToken', balanced: ')' });
     p.eatProduction('Alternatives', { path: 'alternatives[]' });
-    p.eat(')', PN, { path: '
+    p.eat(')', PN, { path: 'closeToken', balancer: true });
   }

   // @Node
   CapturingGroup(p) {
-    p.eat('(', PN, { path: '
+    p.eat('(', PN, { path: 'openToken', balanced: ')' });
     p.eatProduction('Alternatives', { path: 'alternatives[]' });
-    p.eat(')', PN, { path: '
+    p.eat(')', PN, { path: 'closeToken', balancer: true });
   }

   Assertion(p) {
@@ -206,20 +211,20 @@ const grammar = class RegexMiniparserGrammar {
   // @CoveredBy('Assertion')
   // @Node
   StartOfInputAssertion(p) {
-    p.eat('^', KW, { path: '
+    p.eat('^', KW, { path: 'sigilToken' });
   }

   // @CoveredBy('Assertion')
   // @Node
   EndOfInputAssertion(p) {
-    p.eat('$', KW, { path: '
+    p.eat('$', KW, { path: 'sigilToken' });
   }

   // @CoveredBy('Assertion')
   // @Node
   WordBoundaryAssertion(p) {
     let attrs;
-    if (p.eatMatch('\\', ESC, { path: '
+    if (p.eatMatch('\\', ESC, { path: 'escapeToken' })) {
       const m = p.eat(/b/iy, KW, { path: 'value' });
       attrs = { negate: m === 'B' };
     } else {
@@ -256,9 +261,9 @@ const grammar = class RegexMiniparserGrammar {

   // @Node
   CharacterClass(p) {
-    p.eat('[', PN, { path: '
+    p.eat('[', PN, { path: 'openToken', balanced: ']', startSpan: 'CharacterClass' });

-    const negate = !!p.eatMatch('^', KW, { path: '
+    const negate = !!p.eatMatch('^', KW, { path: 'negateToken', boolean: true });

     let first = !negate;
     while (p.match(/./sy)) {
@@ -266,7 +271,9 @@ const grammar = class RegexMiniparserGrammar {
       first = false;
     }

-    p.eat(']', PN, { path: '
+    p.eat(']', PN, { path: 'closeToken', balancer: true, endSpan: 'CharacterClass' });
+
+    return { attrs: { negate } };
   }

   // @Cover
@@ -284,7 +291,7 @@ const grammar = class RegexMiniparserGrammar {

   // @Node
   Gap(p) {
-    p.eat('\\', PN, { path: '
+    p.eat('\\', PN, { path: 'escapeToken' });
     p.eat('g', KW, { path: 'value' });
   }

@@ -294,7 +301,7 @@ const grammar = class RegexMiniparserGrammar {
       path: 'min',
       ...when(first, { span: 'CharacterClass:First' }),
     });
-    p.eat('-', PN, { path: '
+    p.eat('-', PN, { path: 'rangeToken' });
     p.eatProduction('Character', { path: 'max' });
   }

@@ -321,13 +328,13 @@ const grammar = class RegexMiniparserGrammar {
   // @CoveredBy('CharacterSet')
   // @Node
   AnyCharacterSet(p) {
-    p.eat('.', KW, { path: '
+    p.eat('.', KW, { path: 'sigilToken' });
   }

   // @CoveredBy('CharacterSet')
   // @Node
   WordCharacterSet(p) {
-    p.eat('\\', PN, { path: '
+    p.eat('\\', PN, { path: 'escapeToken' });

     let attrs;

@@ -343,7 +350,7 @@ const grammar = class RegexMiniparserGrammar {
   // @CoveredBy('CharacterSet')
   // @Node
   SpaceCharacterSet(p) {
-    p.eat('\\', PN, { path: '
+    p.eat('\\', PN, { path: 'escapeToken' });

     let attrs;

@@ -359,7 +366,7 @@ const grammar = class RegexMiniparserGrammar {
   // @CoveredBy('CharacterSet')
   // @Node
   DigitCharacterSet(p) {
-    p.eat('\\', PN, { path: '
+    p.eat('\\', PN, { path: 'escapeToken' });

     let attrs;

@@ -379,15 +386,15 @@ const grammar = class RegexMiniparserGrammar {
     let attrs;

     if (p.eatMatch('*', KW, { path: 'value' })) {
-      const greedy = !p.eatMatch('?', KW, { path: '
+      const greedy = !p.eatMatch('?', KW, { path: 'greedyToken' });
       attrs = { min: 0, max: Infinity, greedy };
     } else if (p.eatMatch('+', KW, { path: 'value' })) {
-      const greedy = !p.eatMatch('?', KW, { path: '
+      const greedy = !p.eatMatch('?', KW, { path: 'greedyToken' });
       attrs = { min: 1, max: Infinity, greedy };
     } else if (p.eatMatch('?', KW, { path: 'value' })) {
       attrs = { min: 0, max: 1, greedy: true };
     } else if (p.match('{')) {
-      p.eat('{', PN, { path: '
+      p.eat('{', PN, { path: 'openToken', balanced: '}' });

       let max;
       let min = p.eat(/\d+/y, 'Number', { path: 'min' });
@@ -396,9 +403,9 @@ const grammar = class RegexMiniparserGrammar {
        max = p.eatMatch(/\d+/y, 'Number', { path: 'max' });
      }

-      p.eat('}', PN, { path: '
+      p.eat('}', PN, { path: 'closeToken', balancer: true });

-      const greedy = !p.eatMatch('?', KW, { path: '
+      const greedy = !p.eatMatch('?', KW, { path: 'greedyToken' });

       attrs = { min: min && parseInt(min, 10), max: max && parseInt(max, 10), greedy };
     }
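Note: regex.js replaces the old inline flag handling (and the removed flagsReverse map) with a Flags production that eats one Keyword token per flag character and returns boolean attributes derived from the existing flags map. The attribute computation can be reproduced standalone as follows (the g/i/m/s entries of the map are not visible in this diff and are assumed to use the standard RegExp property names; only the unicode and sticky entries appear above):

const flags = {
  global: 'g',     // assumed entry, not shown in the diff
  ignoreCase: 'i', // assumed entry, not shown in the diff
  multiline: 'm',  // assumed entry, not shown in the diff
  dotAll: 's',     // assumed entry, not shown in the diff
  unicode: 'u',
  sticky: 'y',
};

const flagAttributes = (flagsStr) => {
  const attrs = {};
  // same loop shape as the new Flags production
  for (const { 0: name, 1: chr } of Object.entries(flags)) {
    attrs[name] = flagsStr.includes(chr);
  }
  return attrs;
};

console.log(flagAttributes('gi'));
// { global: true, ignoreCase: true, multiline: false, dotAll: false, unicode: false, sticky: false }
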
package/lib/languages/spamex.js
CHANGED

@@ -25,7 +25,7 @@ const covers = buildCovers({
 const grammar = class SpamexMiniparserGrammar {
   // @Cover
   Matcher(p) {
-    if (p.match(/<(?:[
+    if (p.match(/<(?:[*#@+~]*)?(?:\w|$)/y)) {
       p.eatProduction('NodeMatcher');
     } else if (p.match(/['"]/y)) {
       p.eatProduction('CSTML:String');
@@ -38,18 +38,13 @@ const grammar = class SpamexMiniparserGrammar {

   // @Node
   NodeMatcher(p) {
-    p.eat('<', PN, { path: '
+    p.eat('<', PN, { path: 'openToken', startSpan: 'Tag', balanced: '>' });

-
-    p.eatMatch('*', PN, { path: 'tokenFlag' });
-    let esc = p.eatMatch('@', PN, { path: 'escapeFlag' });
-    let exp = p.eatMatch('+', PN, { path: 'expressionFlag' });
-
-    if ((tr && esc) || (exp && (tr || esc))) throw new Error();
+    p.eatProduction('CSTML:Flags', { path: 'flags' });

     if (p.match(/\w+:/y)) {
       p.eat(/\w+/y, ID, { path: 'language' });
-      p.eat(':', PN, { path: '
+      p.eat(':', PN, { path: 'namespaceSeparatorToken' });
       p.eat(/\w+/y, ID, { path: 'type' });
     } else {
       p.eat(/\w+/y, ID, { path: 'type' });
@@ -69,7 +64,7 @@ const grammar = class SpamexMiniparserGrammar {
     }

     p.eatMatchTrivia(_);
-    p.eat('>', PN, { path: '
+    p.eat('>', PN, { path: 'closeToken', endSpan: 'Tag', balancer: true });
   }

   Attributes(p) {
@@ -100,7 +95,7 @@ const grammar = class SpamexMiniparserGrammar {
   MappingAttribute(p) {
     p.eat(/\w+/y, LIT, { path: 'key' });
     p.eatMatchTrivia(_);
-    p.eat('=', PN, { path: '
+    p.eat('=', PN, { path: 'mapToken' });
     p.eatMatchTrivia(_);
     p.eatProduction('AttributeValue', { path: 'value' });
   }
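Note: spamex.js drops its own inline flag parsing in favor of the shared CSTML:Flags production, and the Matcher guard's character class now includes the intrinsic sigil (~). A quick check of the new guard pattern against sample inputs (the sample strings are illustrative, not taken from the package's tests):

const isNodeMatcherStart = (str) => /<(?:[*#@+~]*)?(?:\w|$)/y.test(str);

console.log(isNodeMatcherStart('<Identifier>')); // true
console.log(isNodeMatcherStart('<~*Keyword>'));  // true (flagged matcher)
console.log(isNodeMatcherStart("'literal'"));    // false (handled as CSTML:String)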