@bablr/boot 0.9.0 → 0.11.0
This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- package/lib/index.js +45 -15
- package/lib/languages/cstml.js +196 -117
- package/lib/languages/instruction.js +32 -22
- package/lib/languages/json.js +22 -30
- package/lib/languages/regex.js +6 -32
- package/lib/languages/spamex.js +89 -92
- package/lib/match.js +19 -18
- package/lib/miniparser.js +143 -114
- package/lib/path.js +5 -36
- package/package.json +8 -5
package/lib/languages/instruction.js
CHANGED
@@ -1,11 +1,10 @@
-import
-import
-import
-import
-import * as BaseJSON from './json.js';
+import Spamex from './spamex.js';
+import CSTML from './cstml.js';
+import Regex from './regex.js';
+import BaseJSON from './json.js';

 const _ = /\s+/y;
-const PN =
+const PN = null;
 const ID = 'Identifier';
 const KW = 'Keyword';

@@ -31,7 +30,7 @@ const JSON = {
 export const dependencies = { Spamex, CSTML, Regex, JSON };

 export const covers = new Map([
-  [
+  [Symbol.for('@bablr/node'), new Set(['Call'])],
   [
     'Expression',
     new Set([
@@ -42,7 +41,8 @@ export const covers = new Map([
       'CSTML:GapTag',
       'RegexString',
       'SpamexString',
-      '
+      'TagString',
+      'NodeString',
       'Boolean',
       'Null',
     ]),
@@ -50,12 +50,11 @@ export const covers = new Map([
 ]);

 export const grammar = class InstructionMiniparserGrammar {
-  // @Node
   Call(p) {
     p.eat(/[a-zA-Z]+/y, ID, { path: 'verb' });
     p.eatMatchTrivia(_);

-    p.eat('(', PN, { path: 'openToken'
+    p.eat('(', PN, { path: 'openToken' });

     let sep = p.eatMatchTrivia(_);

@@ -66,40 +65,45 @@ export const grammar = class InstructionMiniparserGrammar {
       i++;
     }

-    p.eat(')', PN, { path: 'closeToken'
+    p.eat(')', PN, { path: 'closeToken' });
   }

-  // @Node
   SpamexString(p) {
     p.eat('m', KW, { path: 'sigilToken' });
     let quot = p.match(/['"`]/);
-    p.eat(quot, PN, { path: 'openToken'
+    p.eat(quot, PN, { path: 'openToken' });
     p.eatProduction('Spamex:Matcher', { path: 'content' });

-    p.eat(quot, PN, { path: 'closeToken'
+    p.eat(quot, PN, { path: 'closeToken' });
   }

-
-  CSTMLString(p) {
+  TagString(p) {
     p.eat('t', KW, { path: 'sigilToken' });
     let quot = p.match(/['"`]/);
-    p.eat(quot, PN, { path: 'openToken'
+    p.eat(quot, PN, { path: 'openToken' });
     p.eatProduction('CSTML:Tag', { path: 'content' });

-    p.eat(quot, PN, { path: 'closeToken'
+    p.eat(quot, PN, { path: 'closeToken' });
+  }
+
+  NodeString(p) {
+    p.eat('n', KW, { path: 'sigilToken' });
+    let quot = p.match(/['"`]/);
+    p.eat(quot, PN, { path: 'openToken' });
+    p.eatProduction('CSTML:Node', { path: 'content' });
+
+    p.eat(quot, PN, { path: 'closeToken' });
   }

-  // @Node
   RegexString(p) {
     p.eat('re', KW, { path: 'sigilToken' });
     let quot = p.match(/['"`]/);
-    p.eat(quot, PN, { path: 'openToken'
+    p.eat(quot, PN, { path: 'openToken' });
     p.eatProduction('Regex:Pattern', { path: 'content' });

-    p.eat(quot, PN, { path: 'closeToken'
+    p.eat(quot, PN, { path: 'closeToken' });
   }

-  // @Cover
   Expression(p) {
     if (p.match('[')) {
       p.eatProduction('JSON:Array');
@@ -111,6 +115,10 @@ export const grammar = class InstructionMiniparserGrammar {
       p.eatProduction('RegexString');
     } else if (p.match(/m['"`]/y)) {
       p.eatProduction('SpamexString');
+    } else if (p.match(/t['"`]/y)) {
+      p.eatProduction('TagString');
+    } else if (p.match(/n['"`]/y)) {
+      p.eatProduction('NodeString');
     } else if (p.match(/true|false/y)) {
       p.eatProduction('JSON:Boolean');
     } else if (p.match('null')) {
@@ -118,3 +126,5 @@ export const grammar = class InstructionMiniparserGrammar {
     }
   }
 };
+
+export default { name, canonicalURL, dependencies, covers, grammar };
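The headline change in instruction.js is the pair of new string literal productions, `TagString` (sigil `t`) and `NodeString` (sigil `n`), plus the matching branches in `Expression`. A standalone sketch of that sigil dispatch; the `classify` helper is invented here for illustration and is not part of the package:

```js
// Hypothetical sketch of the sigil dispatch added in 0.11.0; it mirrors the
// regex guards used by Expression(p) in the diff above.
const classify = (src) => {
  if (/^re['"`]/.test(src)) return 'RegexString';
  if (/^m['"`]/.test(src)) return 'SpamexString';
  if (/^t['"`]/.test(src)) return 'TagString'; // new in 0.11.0
  if (/^n['"`]/.test(src)) return 'NodeString'; // new in 0.11.0
  return 'other Expression';
};

console.log(classify("t'<Foo />'")); // => 'TagString'
console.log(classify("n'<Foo />'")); // => 'NodeString'
```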
package/lib/languages/json.js
CHANGED
@@ -1,25 +1,20 @@
 import objectEntries from 'iter-tools-es/methods/object-entries';
-import * as sym from '@bablr/agast-vm-helpers/symbols';
-import * as Spamex from './spamex.js';
-import * as CSTML from './cstml.js';
-import * as Regex from './regex.js';

 const _ = /\s+/y;
-const PN =
+const PN = null;
 const KW = 'Keyword';
-const
+const LIT = 'Identifier';

 export const name = 'JSON';

 export const canonicalURL = 'https://bablr.org/languages/core/en/cstml-json';

-export const dependencies = {
+export const dependencies = {};

 export const covers = new Map([
   [
-
+    Symbol.for('@bablr/node'),
     new Set([
-      'Punctuator',
       'Property',
       'Object',
       'Array',
@@ -34,6 +29,7 @@ export const covers = new Map([
       'Integer',
       'String',
       'StringContent',
+      'Identifier',
     ]),
   ],
   ['Expression', new Set(['Object', 'Array', 'Boolean', 'Null', 'Number', 'String'])],
@@ -83,7 +79,6 @@ export const cookEscape = (escape, span) => {
 };

 export const grammar = class JSONMiniparserGrammar {
-  // @Cover
   Expression(p) {
     if (p.match('[')) {
       p.eatProduction('Array');
@@ -102,9 +97,8 @@ export const grammar = class JSONMiniparserGrammar {
     }
   }

-  // @Node
   Object(p) {
-    p.eat('{', PN, { path: 'openToken'
+    p.eat('{', PN, { path: 'openToken' });

     p.eatMatchTrivia(_);

@@ -120,21 +114,23 @@ export const grammar = class JSONMiniparserGrammar {

     p.eatMatchTrivia(_);

-    p.eat('}', PN, { path: 'closeToken'
+    p.eat('}', PN, { path: 'closeToken' });
   }

-  // @Node
   Property(p) {
-    p.
+    if (p.match(/['"]/y)) {
+      p.eatProduction('String', { path: 'key' });
+    } else {
+      p.eatProduction('Identifier', { path: 'key' });
+    }
     p.eatMatchTrivia(_);
     p.eat(':', PN, { path: 'mapToken' });
     p.eatMatchTrivia(_);
     p.eatProduction('Expression', { path: 'value' });
   }

-  // @Node
   Array(p) {
-    p.eat('[', PN, { path: 'openToken'
+    p.eat('[', PN, { path: 'openToken' });

     p.eatMatchTrivia(_);

@@ -149,15 +145,13 @@ export const grammar = class JSONMiniparserGrammar {
       first = false;
     }

-    p.eat(']', PN, { path: 'closeToken'
+    p.eat(']', PN, { path: 'closeToken' });
   }

-  // @Node
   Boolean(p) {
     p.eat(/true|false/y, KW, { path: 'sigilToken' });
   }

-  // @Node
   Null(p) {
     p.eat('null', KW, { path: 'sigilToken' });
   }
@@ -178,29 +172,24 @@ export const grammar = class JSONMiniparserGrammar {
     }
   }

-  // @Node
   Integer(p) {
-    p.eatMatch('-',
+    p.eatMatch('-', PN, { path: 'negative' });
     p.eatProduction('Digits', { path: 'digits[]' });
   }

-  // @Node
   UnsignedInteger(p) {
     p.eatProduction('Digits', { path: 'digits[]' });
   }

-  // @Node
   Infinity(p) {
-    p.eatMatch(/[+-]
+    p.eatMatch(/[+-]/y, PN, { path: 'sign' });
     p.eat('Infinity', 'Keyword', { path: 'sigilToken' });
   }

-  // @Node
   NotANumber(p) {
     p.eat('NaN', 'Keyword', { path: 'sigilToken' });
   }

-  // @Node
   Undefined(p) {
     p.eat('undefined', 'Keyword', { path: 'sigilToken' });
   }
@@ -211,12 +200,14 @@ export const grammar = class JSONMiniparserGrammar {
     }
   }

-  // @Node
   Digit(p) {
     p.eatLiteral(/\d/y);
   }

-
+  Identifier(p) {
+    p.eat(/[a-zA-Z]+/y, LIT, { path: 'content' });
+  }
+
   String(p) {
     const q = p.match(/['"]/y) || '"';

@@ -229,7 +220,6 @@ export const grammar = class JSONMiniparserGrammar {
     p.eat(q, PN, { path: 'closeToken', endSpan: span, balancer: true });
   }

-  // @Node
   StringContent(p) {
     let esc, lit;
     let i = 0;
@@ -249,3 +239,5 @@ export const grammar = class JSONMiniparserGrammar {
     }
   }
 };
+
+export default { name, canonicalURL, dependencies, covers, grammar, cookEscape };
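The behavioral change in json.js is that `Property` now accepts either a quoted `String` key or a bare `Identifier` key, backed by the new `Identifier` production. A minimal sketch of that branch, written without the miniparser API purely for illustration (`keyKind` is hypothetical):

```js
// Hypothetical helper mirroring the branch added to Property(p):
// quoted keys parse as String nodes, bare keys as Identifier nodes.
const keyKind = (entry) => (/^['"]/.test(entry) ? 'String' : 'Identifier');

console.log(keyKind('"answer": 42')); // => 'String'
console.log(keyKind('answer: 42')); // => 'Identifier'
```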
package/lib/languages/regex.js
CHANGED
@@ -1,4 +1,3 @@
-import * as sym from '@bablr/agast-vm-helpers/symbols';
 import when from 'iter-tools-es/methods/when';
 import { escapables } from './json.js';

@@ -10,7 +9,7 @@ export const dependencies = {};

 export const covers = new Map([
   [
-
+    Symbol.for('@bablr/node'),
     new Set([
       'RegExpLiteral',
       'Flags',
@@ -29,7 +28,6 @@ export const covers = new Map([
       'SpaceCharacterSet',
       'DigitCharacterSet',
       'Quantifier',
-      'Punctuator',
       'Keyword',
       'Escape',
       'Number',
@@ -82,7 +80,7 @@ const flags = {
   sticky: 'y',
 };

-const PN =
+const PN = null;
 const KW = 'Keyword';
 const ESC = 'Escape';

@@ -138,7 +136,6 @@ export const cookEscape = (escape, span) => {
 };

 export const grammar = class RegexMiniparserGrammar {
-  // @Node
   Pattern(p) {
     p.eat('/', PN, { path: 'openToken', balanced: '/' });
     p.eatProduction('Alternatives', { path: 'alternatives[]' });
@@ -149,7 +146,6 @@ export const grammar = class RegexMiniparserGrammar {
     }
   }

-  // @Node
   Flags(p) {
     const flagsStr = p.match(/[gimsuy]+/y) || '';

@@ -171,10 +167,9 @@ export const grammar = class RegexMiniparserGrammar {
   Alternatives(p) {
     do {
       p.eatProduction('Alternative');
-    } while (p.eatMatch('|', PN, { path: 'separatorTokens
+    } while (p.eatMatch('|', PN, { path: '#separatorTokens' }));
   }

-  // @Node
   Alternative(p) {
     p.eatProduction('Elements', { path: 'elements[]+' });
   }
@@ -185,7 +180,6 @@ export const grammar = class RegexMiniparserGrammar {
     }
   }

-  // @Cover
   Element(p) {
     if (p.match('[')) {
       p.eatProduction('CharacterClass');
@@ -210,14 +204,12 @@ export const grammar = class RegexMiniparserGrammar {
     }
   }

-  // @Node
   Group(p) {
     p.eat('(?:', PN, { path: 'openToken', balanced: ')' });
     p.eatProduction('Alternatives', { path: 'alternatives[]' });
     p.eat(')', PN, { path: 'closeToken', balancer: true });
   }

-  // @Node
   CapturingGroup(p) {
     p.eat('(', PN, { path: 'openToken', balanced: ')' });
     p.eatProduction('Alternatives', { path: 'alternatives[]' });
@@ -234,20 +226,14 @@ export const grammar = class RegexMiniparserGrammar {
     }
   }

-  // @CoveredBy('Assertion')
-  // @Node
   StartOfInputAssertion(p) {
     p.eat('^', KW, { path: 'sigilToken' });
   }

-  // @CoveredBy('Assertion')
-  // @Node
   EndOfInputAssertion(p) {
     p.eat('$', KW, { path: 'sigilToken' });
   }

-  // @CoveredBy('Assertion')
-  // @Node
   WordBoundaryAssertion(p) {
     let attrs;
     if (p.eatMatch('\\', ESC, { path: 'escapeToken' })) {
@@ -259,7 +245,6 @@ export const grammar = class RegexMiniparserGrammar {
     return { attrs };
   }

-  // @Node
   Character(p) {
     const specialPattern = getSpecialPattern(p.span);

@@ -287,14 +272,13 @@ export const grammar = class RegexMiniparserGrammar {
     }
   }

-  // @Node
   CharacterClass(p) {
     p.eat('[', PN, { path: 'openToken', balanced: ']', startSpan: 'CharacterClass' });

     const negate = !!p.eatMatch('^', KW, { path: 'negateToken', boolean: true });

     let first = !negate;
-    while (p.match(/./sy)) {
+    while (p.match(/./sy) || p.atExpression) {
       p.eatProduction('CharacterClassElement', { path: 'elements[]' }, { first });
       first = false;
     }
@@ -304,7 +288,6 @@ export const grammar = class RegexMiniparserGrammar {
     return { attrs: { negate } };
   }

-  // @Cover
   CharacterClassElement(p, { first }) {
     if (p.match(/(.|\\(u(\{[0-9a-fA-F]{1,6}\}|[0-9a-fA-F]{4})|x[0-9a-fA-F]{2}|\w))-[^\]\n]/y)) {
       p.eatProduction('CharacterClassRange', undefined, { first });
@@ -317,13 +300,11 @@ export const grammar = class RegexMiniparserGrammar {
     }
   }

-  // @Node
   Gap(p) {
     p.eat('\\', PN, { path: 'escapeToken' });
     p.eat('g', KW, { path: 'value' });
   }

-  // @Node
   CharacterClassRange(p, { first }) {
     p.eatProduction('Character', {
       path: 'min',
@@ -353,14 +334,10 @@ export const grammar = class RegexMiniparserGrammar {
     return { attrs };
   }

-  // @CoveredBy('CharacterSet')
-  // @Node
   AnyCharacterSet(p) {
     p.eat('.', KW, { path: 'sigilToken' });
   }

-  // @CoveredBy('CharacterSet')
-  // @Node
   WordCharacterSet(p) {
     p.eat('\\', PN, { path: 'escapeToken' });

@@ -375,8 +352,6 @@ export const grammar = class RegexMiniparserGrammar {
     return { attrs };
   }

-  // @CoveredBy('CharacterSet')
-  // @Node
   SpaceCharacterSet(p) {
     p.eat('\\', PN, { path: 'escapeToken' });

@@ -391,8 +366,6 @@ export const grammar = class RegexMiniparserGrammar {
     return { attrs };
   }

-  // @CoveredBy('CharacterSet')
-  // @Node
   DigitCharacterSet(p) {
     p.eat('\\', PN, { path: 'escapeToken' });

@@ -407,7 +380,6 @@ export const grammar = class RegexMiniparserGrammar {
     return { attrs };
   }

-  // @Node
   Quantifier(p) {
     p.eatHeldProduction('Element', { path: 'element+' });

@@ -441,3 +413,5 @@ export const grammar = class RegexMiniparserGrammar {
     return { attrs };
   }
 };
+
+export default { name, canonicalURL, dependencies, covers, grammar, cookEscape };
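Like the other language modules, regex.js now ends with a default export bundling the whole language definition, which fits the switch in instruction.js to default imports (`import Regex from './regex.js'`). A hedged consumer-side sketch; the deep import path is illustrative and may not be a supported entry point of @bablr/boot:

```js
// Hypothetical usage of the default export added in 0.11.0.
import Regex from '@bablr/boot/lib/languages/regex.js';

const { name, canonicalURL, covers, grammar } = Regex;

console.log(name, canonicalURL);
console.log([...covers.keys()]); // includes Symbol.for('@bablr/node')
```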