meriyah 5.0.0 → 6.0.0
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +79 -0
- package/README.md +38 -14
- package/dist/meriyah.cjs +2356 -2070
- package/dist/meriyah.min.mjs +1 -0
- package/dist/{meriyah.esm.js → meriyah.mjs} +2356 -2070
- package/dist/meriyah.umd.js +2356 -2070
- package/dist/meriyah.umd.min.js +1 -1
- package/dist/src/common.d.ts +71 -47
- package/dist/src/common.d.ts.map +1 -1
- package/dist/src/errors.d.ts +186 -177
- package/dist/src/errors.d.ts.map +1 -1
- package/dist/src/estree.d.ts +15 -6
- package/dist/src/estree.d.ts.map +1 -1
- package/dist/src/lexer/charClassifier.d.ts +2 -2
- package/dist/src/lexer/charClassifier.d.ts.map +1 -1
- package/dist/src/lexer/common.d.ts +1 -2
- package/dist/src/lexer/common.d.ts.map +1 -1
- package/dist/src/lexer/identifier.d.ts.map +1 -1
- package/dist/src/lexer/index.d.ts +1 -1
- package/dist/src/lexer/index.d.ts.map +1 -1
- package/dist/src/lexer/jsx.d.ts +2 -2
- package/dist/src/lexer/jsx.d.ts.map +1 -1
- package/dist/src/lexer/numeric.d.ts.map +1 -1
- package/dist/src/lexer/regexp.d.ts.map +1 -1
- package/dist/src/lexer/scan.d.ts.map +1 -1
- package/dist/src/lexer/string.d.ts +1 -1
- package/dist/src/lexer/string.d.ts.map +1 -1
- package/dist/src/lexer/template.d.ts.map +1 -1
- package/dist/src/parser.d.ts +72 -72
- package/dist/src/parser.d.ts.map +1 -1
- package/dist/src/token.d.ts +115 -115
- package/dist/src/token.d.ts.map +1 -1
- package/package.json +25 -34
- package/dist/meriyah.amd.js +0 -8964
- package/dist/meriyah.amd.min.js +0 -1
- package/dist/meriyah.cjs.js +0 -8962
- package/dist/meriyah.cjs.min.js +0 -1
- package/dist/meriyah.esm.min.js +0 -1
- package/dist/meriyah.esm.min.mjs +0 -1
- package/dist/meriyah.esm.mjs +0 -8956
- package/dist/meriyah.iife.js +0 -8967
- package/dist/meriyah.iife.min.js +0 -1
- package/dist/meriyah.min.cjs +0 -1
- package/dist/meriyah.system.js +0 -8970
- package/dist/meriyah.system.min.js +0 -1
- package/dist/meriyah.umd.cjs +0 -8968
- package/dist/meriyah.umd.es5.js +0 -9022
- package/dist/meriyah.umd.es5.min.js +0 -1
- package/dist/meriyah.umd.min.cjs +0 -1
- package/src/chars.ts +0 -155
- package/src/common.ts +0 -834
- package/src/errors.ts +0 -421
- package/src/estree.ts +0 -827
- package/src/lexer/charClassifier.ts +0 -449
- package/src/lexer/comments.ts +0 -178
- package/src/lexer/common.ts +0 -140
- package/src/lexer/decodeHTML.ts +0 -2184
- package/src/lexer/identifier.ts +0 -196
- package/src/lexer/index.ts +0 -32
- package/src/lexer/jsx.ts +0 -127
- package/src/lexer/numeric.ts +0 -259
- package/src/lexer/regexp.ts +0 -156
- package/src/lexer/scan.ts +0 -657
- package/src/lexer/string.ts +0 -242
- package/src/lexer/template.ts +0 -108
- package/src/meriyah.ts +0 -28
- package/src/parser.ts +0 -9358
- package/src/token.ts +0 -307
- package/src/unicode.ts +0 -36
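
The file list above shows that 6.0.0 reshapes the published dist/ layout: the ESM build moves from dist/meriyah.esm.js to dist/meriyah.mjs (with a new minified dist/meriyah.min.mjs), a dist/meriyah.cjs build is added, and the AMD, IIFE, SystemJS, and ES5 UMD bundles as well as the bundled src/ tree are removed. A minimal usage sketch follows, assuming the updated package/package.json points Node and bundlers at the new .mjs/.cjs entry files (the exact "exports"/"main" layout lives in package/package.json, which is not expanded here); parseModule and the loc option are part of meriyah's documented API:

import { parseModule } from 'meriyah';

// Parse an ES module source into an ESTree-compatible AST,
// asking the parser to attach line/column location info.
const ast = parseModule('export const answer = 42;', { loc: true });
console.log(ast.type); // 'Program'

Code that deep-imported one of the removed bundle names (for example dist/meriyah.esm.js or dist/meriyah.iife.js) would need to switch to the renamed files or to the package's main entry point.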
package/src/lexer/scan.ts
DELETED
@@ -1,657 +0,0 @@
import { Chars } from '../chars';
import { Token } from '../token';
import { ParserState, Context, Flags } from '../common';
import { report, Errors } from '../errors';
import { unicodeLookup } from '../unicode';
import {
  advanceChar,
  LexerState,
  isExoticECMAScriptWhitespace,
  NumberKind,
  fromCodePoint,
  consumeLineFeed,
  scanNewLine,
  convertTokenType
} from './common';
import { skipSingleLineComment, skipMultiLineComment, skipSingleHTMLComment, CommentType } from './comments';
import { scanRegularExpression } from './regexp';
import { scanTemplate } from './template';
import { scanNumber } from './numeric';
import { scanString } from './string';
import { scanIdentifier, scanUnicodeIdentifier, scanIdentifierSlowCase, scanPrivateIdentifier } from './identifier';

/*
 * OneChar: 40, 41, 44, 58, 59, 63, 91, 93, 123, 125, 126:
 * '(', ')', ',', ':', ';', '?', '[', ']', '{', '}', '~'
 * PrivateField: 35: '#',
 * Identifier: 36, 65..90, 92, 95, 97..122: '$', 'A'..'Z', '_', '\'', 'a'..'z'
 * Period: 46: '.'
 * StringLiteral: 34, 39: '"', `'`
 * NumericLiteral: 48, 49..57: '0'..'9'
 * WhiteSpace: 9, 11, 12, 32: '\t', '\v', '\f', ' '
 * LineFeed: 10: '\n'
 * CarriageReturn: 13: '\r'
 * Template: 96: '`'
 */

export const TokenLookup = [
  /* 0 - Null */ Token.Illegal,
  /* 1 - Start of Heading */ Token.Illegal,
  /* 2 - Start of Text */ Token.Illegal,
  /* 3 - End of Text */ Token.Illegal,
  /* 4 - End of Transm. */ Token.Illegal,
  /* 5 - Enquiry */ Token.Illegal,
  /* 6 - Acknowledgment */ Token.Illegal,
  /* 7 - Bell */ Token.Illegal,
  /* 8 - Backspace */ Token.Illegal,
  /* 9 - Horizontal Tab */ Token.WhiteSpace,
  /* 10 - Line Feed */ Token.LineFeed,
  /* 11 - Vertical Tab */ Token.WhiteSpace,
  /* 12 - Form Feed */ Token.WhiteSpace,
  /* 13 - Carriage Return */ Token.CarriageReturn,
  /* 14 - Shift Out */ Token.Illegal,
  /* 15 - Shift In */ Token.Illegal,
  /* 16 - Data Line Escape */ Token.Illegal,
  /* 17 - Device Control 1 */ Token.Illegal,
  /* 18 - Device Control 2 */ Token.Illegal,
  /* 19 - Device Control 3 */ Token.Illegal,
  /* 20 - Device Control 4 */ Token.Illegal,
  /* 21 - Negative Ack. */ Token.Illegal,
  /* 22 - Synchronous Idle */ Token.Illegal,
  /* 23 - End of Transmit */ Token.Illegal,
  /* 24 - Cancel */ Token.Illegal,
  /* 25 - End of Medium */ Token.Illegal,
  /* 26 - Substitute */ Token.Illegal,
  /* 27 - Escape */ Token.Illegal,
  /* 28 - File Separator */ Token.Illegal,
  /* 29 - Group Separator */ Token.Illegal,
  /* 30 - Record Separator */ Token.Illegal,
  /* 31 - Unit Separator */ Token.Illegal,
  /* 32 - Space */ Token.WhiteSpace,
  /* 33 - ! */ Token.Negate,
  /* 34 - " */ Token.StringLiteral,
  /* 35 - # */ Token.PrivateField,
  /* 36 - $ */ Token.Identifier,
  /* 37 - % */ Token.Modulo,
  /* 38 - & */ Token.BitwiseAnd,
  /* 39 - ' */ Token.StringLiteral,
  /* 40 - ( */ Token.LeftParen,
  /* 41 - ) */ Token.RightParen,
  /* 42 - * */ Token.Multiply,
  /* 43 - + */ Token.Add,
  /* 44 - , */ Token.Comma,
  /* 45 - - */ Token.Subtract,
  /* 46 - . */ Token.Period,
  /* 47 - / */ Token.Divide,
  /* 48 - 0 */ Token.NumericLiteral,
  /* 49 - 1 */ Token.NumericLiteral,
  /* 50 - 2 */ Token.NumericLiteral,
  /* 51 - 3 */ Token.NumericLiteral,
  /* 52 - 4 */ Token.NumericLiteral,
  /* 53 - 5 */ Token.NumericLiteral,
  /* 54 - 6 */ Token.NumericLiteral,
  /* 55 - 7 */ Token.NumericLiteral,
  /* 56 - 8 */ Token.NumericLiteral,
  /* 57 - 9 */ Token.NumericLiteral,
  /* 58 - : */ Token.Colon,
  /* 59 - ; */ Token.Semicolon,
  /* 60 - < */ Token.LessThan,
  /* 61 - = */ Token.Assign,
  /* 62 - > */ Token.GreaterThan,
  /* 63 - ? */ Token.QuestionMark,
  /* 64 - @ */ Token.Decorator,
  /* 65 - A */ Token.Identifier,
  /* 66 - B */ Token.Identifier,
  /* 67 - C */ Token.Identifier,
  /* 68 - D */ Token.Identifier,
  /* 69 - E */ Token.Identifier,
  /* 70 - F */ Token.Identifier,
  /* 71 - G */ Token.Identifier,
  /* 72 - H */ Token.Identifier,
  /* 73 - I */ Token.Identifier,
  /* 74 - J */ Token.Identifier,
  /* 75 - K */ Token.Identifier,
  /* 76 - L */ Token.Identifier,
  /* 77 - M */ Token.Identifier,
  /* 78 - N */ Token.Identifier,
  /* 79 - O */ Token.Identifier,
  /* 80 - P */ Token.Identifier,
  /* 81 - Q */ Token.Identifier,
  /* 82 - R */ Token.Identifier,
  /* 83 - S */ Token.Identifier,
  /* 84 - T */ Token.Identifier,
  /* 85 - U */ Token.Identifier,
  /* 86 - V */ Token.Identifier,
  /* 87 - W */ Token.Identifier,
  /* 88 - X */ Token.Identifier,
  /* 89 - Y */ Token.Identifier,
  /* 90 - Z */ Token.Identifier,
  /* 91 - [ */ Token.LeftBracket,
  /* 92 - \ */ Token.EscapedIdentifier,
  /* 93 - ] */ Token.RightBracket,
  /* 94 - ^ */ Token.BitwiseXor,
  /* 95 - _ */ Token.Identifier,
  /* 96 - ` */ Token.Template,
  /* 97 - a */ Token.Keyword,
  /* 98 - b */ Token.Keyword,
  /* 99 - c */ Token.Keyword,
  /* 100 - d */ Token.Keyword,
  /* 101 - e */ Token.Keyword,
  /* 102 - f */ Token.Keyword,
  /* 103 - g */ Token.Keyword,
  /* 104 - h */ Token.Identifier,
  /* 105 - i */ Token.Keyword,
  /* 106 - j */ Token.Identifier,
  /* 107 - k */ Token.Identifier,
  /* 108 - l */ Token.Keyword,
  /* 109 - m */ Token.Identifier,
  /* 110 - n */ Token.Keyword,
  /* 111 - o */ Token.Identifier,
  /* 112 - p */ Token.Keyword,
  /* 113 - q */ Token.Identifier,
  /* 114 - r */ Token.Keyword,
  /* 115 - s */ Token.Keyword,
  /* 116 - t */ Token.Keyword,
  /* 117 - u */ Token.Identifier,
  /* 118 - v */ Token.Keyword,
  /* 119 - w */ Token.Keyword,
  /* 120 - x */ Token.Identifier,
  /* 121 - y */ Token.Keyword,
  /* 122 - z */ Token.Keyword,
  /* 123 - { */ Token.LeftBrace,
  /* 124 - | */ Token.BitwiseOr,
  /* 125 - } */ Token.RightBrace,
  /* 126 - ~ */ Token.Complement,
  /* 127 - Delete */ Token.Illegal
];

/**
 * Scans next token in the stream
 *
 * @param parser Parser object
 * @param context Context masks
 */
export function nextToken(parser: ParserState, context: Context): void {
  parser.flags = (parser.flags | Flags.NewLine) ^ Flags.NewLine;
  parser.startPos = parser.index;
  parser.startColumn = parser.column;
  parser.startLine = parser.line;
  parser.setToken(scanSingleToken(parser, context, LexerState.None));
  if (parser.onToken && parser.getToken() !== Token.EOF) {
    const loc = {
      start: {
        line: parser.linePos,
        column: parser.colPos
      },
      end: {
        line: parser.line,
        column: parser.column
      }
    };
    parser.onToken(convertTokenType(parser.getToken()), parser.tokenPos, parser.index, loc);
  }
}

export function scanSingleToken(parser: ParserState, context: Context, state: LexerState): Token {
  const isStartOfLine = parser.index === 0;

  const { source } = parser;

  // These three are only for HTMLClose comment
  let startPos = parser.index;
  let startLine = parser.line;
  let startColumn = parser.column;

  while (parser.index < parser.end) {
    parser.tokenPos = parser.index;
    parser.colPos = parser.column;
    parser.linePos = parser.line;

    let char = parser.currentChar;

    if (char <= 0x7e) {
      const token = TokenLookup[char];

      switch (token) {
        case Token.LeftParen:
        case Token.RightParen:
        case Token.LeftBrace:
        case Token.RightBrace:
        case Token.LeftBracket:
        case Token.RightBracket:
        case Token.Colon:
        case Token.Semicolon:
        case Token.Comma:
        case Token.Complement:
        case Token.Decorator:
        case Token.Illegal:
          advanceChar(parser);
          return token;

        // Look for an identifier
        case Token.Identifier:
          return scanIdentifier(parser, context, /* isValidAsKeyword */ 0);

        // Look for identifier or keyword
        case Token.Keyword:
          return scanIdentifier(parser, context, /* isValidAsKeyword */ 1);

        // Look for a decimal number.
        case Token.NumericLiteral:
          return scanNumber(parser, context, NumberKind.Decimal | NumberKind.ValidBigIntKind);

        // Look for a string literal
        case Token.StringLiteral:
          return scanString(parser, context, char);

        // Look for a template string
        case Token.Template:
          return scanTemplate(parser, context);

        // Look for a escaped identifier
        case Token.EscapedIdentifier:
          return scanUnicodeIdentifier(parser, context);

        // `#` (private name)
        case Token.PrivateField:
          return scanPrivateIdentifier(parser);

        case Token.WhiteSpace:
          advanceChar(parser);
          break;

        case Token.CarriageReturn:
          state |= LexerState.NewLine | LexerState.LastIsCR;
          scanNewLine(parser);
          break;

        case Token.LineFeed:
          consumeLineFeed(parser, state);
          state = (state & ~LexerState.LastIsCR) | LexerState.NewLine;
          break;

        // `<`, `<=`, `<<`, `<<=`, `</`, `<!--`
        case Token.LessThan: {
          let ch = advanceChar(parser);
          if (parser.index < parser.end) {
            if (ch === Chars.LessThan) {
              if (parser.index < parser.end && advanceChar(parser) === Chars.EqualSign) {
                advanceChar(parser);
                return Token.ShiftLeftAssign;
              }
              return Token.ShiftLeft;
            } else if (ch === Chars.EqualSign) {
              advanceChar(parser);
              return Token.LessThanOrEqual;
            }
            if (ch === Chars.Exclamation) {
              // Treat HTML begin-comment as comment-till-end-of-line.
              const index = parser.index + 1;
              if (
                index + 1 < parser.end &&
                source.charCodeAt(index) === Chars.Hyphen &&
                source.charCodeAt(index + 1) == Chars.Hyphen
              ) {
                parser.column += 3;
                parser.currentChar = source.charCodeAt((parser.index += 3));
                state = skipSingleHTMLComment(
                  parser,
                  source,
                  state,
                  context,
                  CommentType.HTMLOpen,
                  parser.tokenPos,
                  parser.linePos,
                  parser.colPos
                );
                startPos = parser.tokenPos;
                startLine = parser.linePos;
                startColumn = parser.colPos;
                continue;
              }
              return Token.LessThan;
            }
            if (ch === Chars.Slash) {
              if ((context & Context.OptionsJSX) === 0) return Token.LessThan;
              const index = parser.index + 1;

              // Check that it's not a comment start.
              if (index < parser.end) {
                ch = source.charCodeAt(index);
                if (ch === Chars.Asterisk || ch === Chars.Slash) break;
              }
              advanceChar(parser);
              return Token.JSXClose;
            }
          }
          return Token.LessThan;
        }

        // `=`, `==`, `===`, `=>`
        case Token.Assign: {
          advanceChar(parser);

          const ch = parser.currentChar;

          if (ch === Chars.EqualSign) {
            if (advanceChar(parser) === Chars.EqualSign) {
              advanceChar(parser);
              return Token.StrictEqual;
            }
            return Token.LooseEqual;
          }
          if (ch === Chars.GreaterThan) {
            advanceChar(parser);
            return Token.Arrow;
          }

          return Token.Assign;
        }

        // `!`, `!=`, `!==`
        case Token.Negate:
          if (advanceChar(parser) !== Chars.EqualSign) {
            return Token.Negate;
          }
          if (advanceChar(parser) !== Chars.EqualSign) {
            return Token.LooseNotEqual;
          }
          advanceChar(parser);
          return Token.StrictNotEqual;

        // `%`, `%=`
        case Token.Modulo:
          if (advanceChar(parser) !== Chars.EqualSign) return Token.Modulo;
          advanceChar(parser);
          return Token.ModuloAssign;

        // `*`, `**`, `*=`, `**=`
        case Token.Multiply: {
          advanceChar(parser);

          if (parser.index >= parser.end) return Token.Multiply;

          const ch = parser.currentChar;

          if (ch === Chars.EqualSign) {
            advanceChar(parser);
            return Token.MultiplyAssign;
          }

          if (ch !== Chars.Asterisk) return Token.Multiply;

          if (advanceChar(parser) !== Chars.EqualSign) return Token.Exponentiate;

          advanceChar(parser);

          return Token.ExponentiateAssign;
        }

        // `^`, `^=`
        case Token.BitwiseXor:
          if (advanceChar(parser) !== Chars.EqualSign) return Token.BitwiseXor;
          advanceChar(parser);
          return Token.BitwiseXorAssign;

        // `+`, `++`, `+=`
        case Token.Add: {
          advanceChar(parser);

          const ch = parser.currentChar;

          if (ch === Chars.Plus) {
            advanceChar(parser);
            return Token.Increment;
          }

          if (ch === Chars.EqualSign) {
            advanceChar(parser);
            return Token.AddAssign;
          }

          return Token.Add;
        }

        // `-`, `--`, `-=`, `-->`
        case Token.Subtract: {
          advanceChar(parser);

          const ch = parser.currentChar;

          if (ch === Chars.Hyphen) {
            advanceChar(parser);
            if ((state & LexerState.NewLine || isStartOfLine) && parser.currentChar === Chars.GreaterThan) {
              if ((context & Context.OptionsWebCompat) === 0) report(parser, Errors.HtmlCommentInWebCompat);
              advanceChar(parser);
              state = skipSingleHTMLComment(
                parser,
                source,
                state,
                context,
                CommentType.HTMLClose,
                startPos,
                startLine,
                startColumn
              );
              startPos = parser.tokenPos;
              startLine = parser.linePos;
              startColumn = parser.colPos;
              continue;
            }

            return Token.Decrement;
          }

          if (ch === Chars.EqualSign) {
            advanceChar(parser);
            return Token.SubtractAssign;
          }

          return Token.Subtract;
        }

        // `/`, `/=`, `/>`, '/*..*/'
        case Token.Divide: {
          advanceChar(parser);
          if (parser.index < parser.end) {
            const ch = parser.currentChar;
            if (ch === Chars.Slash) {
              advanceChar(parser);
              state = skipSingleLineComment(
                parser,
                source,
                state,
                CommentType.Single,
                parser.tokenPos,
                parser.linePos,
                parser.colPos
              );
              startPos = parser.tokenPos;
              startLine = parser.linePos;
              startColumn = parser.colPos;
              continue;
            }
            if (ch === Chars.Asterisk) {
              advanceChar(parser);
              state = skipMultiLineComment(parser, source, state) as LexerState;
              startPos = parser.tokenPos;
              startLine = parser.linePos;
              startColumn = parser.colPos;
              continue;
            }
            if (context & Context.AllowRegExp) {
              return scanRegularExpression(parser, context);
            }
            if (ch === Chars.EqualSign) {
              advanceChar(parser);
              return Token.DivideAssign;
            }
          }

          return Token.Divide;
        }

        // `.`, `...`, `.123` (numeric literal)
        case Token.Period: {
          const next = advanceChar(parser);
          if (next >= Chars.Zero && next <= Chars.Nine)
            return scanNumber(parser, context, NumberKind.Float | NumberKind.Decimal);
          if (next === Chars.Period) {
            const index = parser.index + 1;
            if (index < parser.end && source.charCodeAt(index) === Chars.Period) {
              parser.column += 2;
              parser.currentChar = source.charCodeAt((parser.index += 2));
              return Token.Ellipsis;
            }
          }
          return Token.Period;
        }

        // `|`, `||`, `|=`, `||=`
        case Token.BitwiseOr: {
          advanceChar(parser);

          const ch = parser.currentChar;

          if (ch === Chars.VerticalBar) {
            advanceChar(parser);

            if (parser.currentChar === Chars.EqualSign) {
              advanceChar(parser);
              return Token.LogicalOrAssign;
            }

            return Token.LogicalOr;
          }
          if (ch === Chars.EqualSign) {
            advanceChar(parser);
            return Token.BitwiseOrAssign;
          }

          return Token.BitwiseOr;
        }

        // `>`, `>=`, `>>`, `>>>`, `>>=`, `>>>=`
        case Token.GreaterThan: {
          advanceChar(parser);

          const ch = parser.currentChar;

          if (ch === Chars.EqualSign) {
            advanceChar(parser);
            return Token.GreaterThanOrEqual;
          }

          if (ch !== Chars.GreaterThan) return Token.GreaterThan;

          advanceChar(parser);

          if (parser.index < parser.end) {
            const ch = parser.currentChar;

            if (ch === Chars.GreaterThan) {
              if (advanceChar(parser) === Chars.EqualSign) {
                advanceChar(parser);
                return Token.LogicalShiftRightAssign;
              }
              return Token.LogicalShiftRight;
            }
            if (ch === Chars.EqualSign) {
              advanceChar(parser);
              return Token.ShiftRightAssign;
            }
          }

          return Token.ShiftRight;
        }

        // `&`, `&&`, `&=`, `&&=`
        case Token.BitwiseAnd: {
          advanceChar(parser);

          const ch = parser.currentChar;

          if (ch === Chars.Ampersand) {
            advanceChar(parser);

            if (parser.currentChar === Chars.EqualSign) {
              advanceChar(parser);
              return Token.LogicalAndAssign;
            }

            return Token.LogicalAnd;
          }

          if (ch === Chars.EqualSign) {
            advanceChar(parser);
            return Token.BitwiseAndAssign;
          }

          return Token.BitwiseAnd;
        }

        // `?`, `??`, `?.`, `??=`
        case Token.QuestionMark: {
          let ch = advanceChar(parser);
          if (ch === Chars.QuestionMark) {
            advanceChar(parser);

            if (parser.currentChar === Chars.EqualSign) {
              advanceChar(parser);
              return Token.CoalesceAssign;
            }

            return Token.Coalesce;
          }

          if (ch === Chars.Period) {
            const index = parser.index + 1;
            // Check that it's not followed by any numbers
            if (index < parser.end) {
              ch = source.charCodeAt(index);
              if (!(ch >= Chars.Zero && ch <= Chars.Nine)) {
                advanceChar(parser);
                return Token.QuestionMarkPeriod;
              }
            }
          }

          return Token.QuestionMark;
        }

        default:
        // unreachable
      }
    } else {
      if ((char ^ Chars.LineSeparator) <= 1) {
        state = (state & ~LexerState.LastIsCR) | LexerState.NewLine;
        scanNewLine(parser);
        continue;
      }

      if ((char & 0xfc00) === 0xd800 || ((unicodeLookup[(char >>> 5) + 34816] >>> char) & 31 & 1) !== 0) {
        if ((char & 0xfc00) === 0xdc00) {
          char = ((char & 0x3ff) << 10) | (char & 0x3ff) | 0x10000;
          if (((unicodeLookup[(char >>> 5) + 0] >>> char) & 31 & 1) === 0) {
            report(parser, Errors.IllegalCharacter, fromCodePoint(char));
          }
          parser.index++;
          parser.currentChar = char;
        }

        parser.column++;
        parser.tokenValue = '';
        return scanIdentifierSlowCase(parser, context, /* hasEscape */ 0, /* canBeKeyword */ 0);
      }

      if (isExoticECMAScriptWhitespace(char)) {
        advanceChar(parser);
        continue;
      }

      // Invalid ASCII code point/unit
      report(parser, Errors.IllegalCharacter, fromCodePoint(char));
    }
  }
  return Token.EOF;
}