brighterscript 1.0.0-alpha.13 → 1.0.0-alpha.16
This diff compares the published contents of these two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +87 -2
- package/dist/Cache.d.ts +3 -8
- package/dist/Cache.js +9 -14
- package/dist/Cache.js.map +1 -1
- package/dist/DependencyGraph.js +5 -4
- package/dist/DependencyGraph.js.map +1 -1
- package/dist/DiagnosticMessages.d.ts +21 -1
- package/dist/DiagnosticMessages.js +21 -1
- package/dist/DiagnosticMessages.js.map +1 -1
- package/dist/LanguageServer.d.ts +1 -6
- package/dist/LanguageServer.js +0 -9
- package/dist/LanguageServer.js.map +1 -1
- package/dist/PluginInterface.d.ts +3 -3
- package/dist/PluginInterface.js +3 -0
- package/dist/PluginInterface.js.map +1 -1
- package/dist/Program.d.ts +30 -16
- package/dist/Program.js +108 -43
- package/dist/Program.js.map +1 -1
- package/dist/ProgramBuilder.js +3 -3
- package/dist/ProgramBuilder.js.map +1 -1
- package/dist/Scope.d.ts +29 -15
- package/dist/Scope.js +68 -28
- package/dist/Scope.js.map +1 -1
- package/dist/SymbolTable.d.ts +1 -1
- package/dist/XmlScope.d.ts +3 -3
- package/dist/astUtils/AstEditor.d.ts +6 -0
- package/dist/astUtils/AstEditor.js +10 -0
- package/dist/astUtils/AstEditor.js.map +1 -1
- package/dist/astUtils/AstEditor.spec.js +37 -0
- package/dist/astUtils/AstEditor.spec.js.map +1 -1
- package/dist/astUtils/creators.d.ts +8 -4
- package/dist/astUtils/creators.js +87 -6
- package/dist/astUtils/creators.js.map +1 -1
- package/dist/astUtils/reflection.d.ts +5 -1
- package/dist/astUtils/reflection.js +15 -3
- package/dist/astUtils/reflection.js.map +1 -1
- package/dist/astUtils/reflection.spec.js +11 -10
- package/dist/astUtils/reflection.spec.js.map +1 -1
- package/dist/astUtils/visitors.d.ts +3 -1
- package/dist/astUtils/visitors.js.map +1 -1
- package/dist/astUtils/visitors.spec.js +8 -8
- package/dist/astUtils/visitors.spec.js.map +1 -1
- package/dist/bscPlugin/BscPlugin.d.ts +4 -1
- package/dist/bscPlugin/BscPlugin.js +21 -2
- package/dist/bscPlugin/BscPlugin.js.map +1 -1
- package/dist/bscPlugin/codeActions/CodeActionsProcessor.js +3 -3
- package/dist/bscPlugin/codeActions/CodeActionsProcessor.js.map +1 -1
- package/dist/bscPlugin/codeActions/CodeActionsProcessor.spec.js.map +1 -1
- package/dist/bscPlugin/semanticTokens/BrsFileSemanticTokensProcessor.d.ts +9 -0
- package/dist/bscPlugin/semanticTokens/BrsFileSemanticTokensProcessor.js +97 -0
- package/dist/bscPlugin/semanticTokens/BrsFileSemanticTokensProcessor.js.map +1 -0
- package/dist/bscPlugin/semanticTokens/{SemanticTokensProcessor.spec.d.ts → BrsFileSemanticTokensProcessor.spec.d.ts} +0 -0
- package/dist/bscPlugin/semanticTokens/{SemanticTokensProcessor.spec.js → BrsFileSemanticTokensProcessor.spec.js} +30 -2
- package/dist/bscPlugin/semanticTokens/BrsFileSemanticTokensProcessor.spec.js.map +1 -0
- package/dist/bscPlugin/transpile/BrsFilePreTranspileProcessor.d.ts +8 -0
- package/dist/bscPlugin/transpile/BrsFilePreTranspileProcessor.js +36 -0
- package/dist/bscPlugin/transpile/BrsFilePreTranspileProcessor.js.map +1 -0
- package/dist/bscPlugin/validation/BrsFileValidator.d.ts +9 -0
- package/dist/bscPlugin/validation/BrsFileValidator.js +66 -0
- package/dist/bscPlugin/validation/BrsFileValidator.js.map +1 -0
- package/dist/bscPlugin/validation/ScopeValidator.d.ts +11 -0
- package/dist/bscPlugin/validation/ScopeValidator.js +94 -0
- package/dist/bscPlugin/validation/ScopeValidator.js.map +1 -0
- package/dist/diagnosticUtils.js +3 -3
- package/dist/diagnosticUtils.js.map +1 -1
- package/dist/files/BrsFile.Class.spec.js +382 -232
- package/dist/files/BrsFile.Class.spec.js.map +1 -1
- package/dist/files/BrsFile.d.ts +26 -12
- package/dist/files/BrsFile.js +268 -119
- package/dist/files/BrsFile.js.map +1 -1
- package/dist/files/BrsFile.spec.js +570 -168
- package/dist/files/BrsFile.spec.js.map +1 -1
- package/dist/files/XmlFile.d.ts +11 -10
- package/dist/files/XmlFile.js +16 -11
- package/dist/files/XmlFile.js.map +1 -1
- package/dist/files/XmlFile.spec.js +60 -58
- package/dist/files/XmlFile.spec.js.map +1 -1
- package/dist/files/tests/imports.spec.js +8 -6
- package/dist/files/tests/imports.spec.js.map +1 -1
- package/dist/index.d.ts +12 -3
- package/dist/index.js +21 -4
- package/dist/index.js.map +1 -1
- package/dist/interfaces.d.ts +63 -35
- package/dist/lexer/Lexer.js +1 -2
- package/dist/lexer/Lexer.js.map +1 -1
- package/dist/lexer/Lexer.spec.js +470 -462
- package/dist/lexer/Lexer.spec.js.map +1 -1
- package/dist/lexer/TokenKind.d.ts +2 -0
- package/dist/lexer/TokenKind.js +5 -0
- package/dist/lexer/TokenKind.js.map +1 -1
- package/dist/parser/Expression.d.ts +18 -16
- package/dist/parser/Expression.js +57 -48
- package/dist/parser/Expression.js.map +1 -1
- package/dist/parser/Parser.Class.spec.js +33 -32
- package/dist/parser/Parser.Class.spec.js.map +1 -1
- package/dist/parser/Parser.d.ts +28 -7
- package/dist/parser/Parser.js +508 -296
- package/dist/parser/Parser.js.map +1 -1
- package/dist/parser/Parser.spec.js +157 -35
- package/dist/parser/Parser.spec.js.map +1 -1
- package/dist/parser/SGTypes.spec.js +9 -9
- package/dist/parser/SGTypes.spec.js.map +1 -1
- package/dist/parser/Statement.d.ts +80 -20
- package/dist/parser/Statement.js +257 -92
- package/dist/parser/Statement.js.map +1 -1
- package/dist/parser/tests/Parser.spec.d.ts +3 -3
- package/dist/parser/tests/Parser.spec.js +4 -4
- package/dist/parser/tests/Parser.spec.js.map +1 -1
- package/dist/parser/tests/controlFlow/For.spec.js +40 -40
- package/dist/parser/tests/controlFlow/For.spec.js.map +1 -1
- package/dist/parser/tests/controlFlow/ForEach.spec.js +22 -21
- package/dist/parser/tests/controlFlow/ForEach.spec.js.map +1 -1
- package/dist/parser/tests/controlFlow/If.spec.js +100 -99
- package/dist/parser/tests/controlFlow/If.spec.js.map +1 -1
- package/dist/parser/tests/controlFlow/While.spec.js +25 -25
- package/dist/parser/tests/controlFlow/While.spec.js.map +1 -1
- package/dist/parser/tests/expression/Additive.spec.js +21 -21
- package/dist/parser/tests/expression/Additive.spec.js.map +1 -1
- package/dist/parser/tests/expression/ArrayLiterals.spec.js +91 -91
- package/dist/parser/tests/expression/ArrayLiterals.spec.js.map +1 -1
- package/dist/parser/tests/expression/AssociativeArrayLiterals.spec.js +102 -102
- package/dist/parser/tests/expression/AssociativeArrayLiterals.spec.js.map +1 -1
- package/dist/parser/tests/expression/Boolean.spec.js +15 -15
- package/dist/parser/tests/expression/Boolean.spec.js.map +1 -1
- package/dist/parser/tests/expression/Call.spec.js +22 -21
- package/dist/parser/tests/expression/Call.spec.js.map +1 -1
- package/dist/parser/tests/expression/Exponential.spec.js +11 -11
- package/dist/parser/tests/expression/Exponential.spec.js.map +1 -1
- package/dist/parser/tests/expression/Function.spec.js +171 -171
- package/dist/parser/tests/expression/Function.spec.js.map +1 -1
- package/dist/parser/tests/expression/Indexing.spec.js +50 -50
- package/dist/parser/tests/expression/Indexing.spec.js.map +1 -1
- package/dist/parser/tests/expression/Multiplicative.spec.js +25 -25
- package/dist/parser/tests/expression/Multiplicative.spec.js.map +1 -1
- package/dist/parser/tests/expression/NullCoalescenceExpression.spec.js +16 -16
- package/dist/parser/tests/expression/NullCoalescenceExpression.spec.js.map +1 -1
- package/dist/parser/tests/expression/PrefixUnary.spec.js +26 -26
- package/dist/parser/tests/expression/PrefixUnary.spec.js.map +1 -1
- package/dist/parser/tests/expression/Primary.spec.js +27 -27
- package/dist/parser/tests/expression/Primary.spec.js.map +1 -1
- package/dist/parser/tests/expression/RegexLiteralExpression.spec.js +3 -2
- package/dist/parser/tests/expression/RegexLiteralExpression.spec.js.map +1 -1
- package/dist/parser/tests/expression/Relational.spec.js +25 -25
- package/dist/parser/tests/expression/Relational.spec.js.map +1 -1
- package/dist/parser/tests/expression/TemplateStringExpression.spec.js +7 -7
- package/dist/parser/tests/expression/TemplateStringExpression.spec.js.map +1 -1
- package/dist/parser/tests/expression/TernaryExpression.spec.js +6 -6
- package/dist/parser/tests/expression/TernaryExpression.spec.js.map +1 -1
- package/dist/parser/tests/statement/AssignmentOperators.spec.js +15 -15
- package/dist/parser/tests/statement/AssignmentOperators.spec.js.map +1 -1
- package/dist/parser/tests/statement/Declaration.spec.js +20 -20
- package/dist/parser/tests/statement/Declaration.spec.js.map +1 -1
- package/dist/parser/tests/statement/Enum.spec.d.ts +1 -0
- package/dist/parser/tests/statement/Enum.spec.js +774 -0
- package/dist/parser/tests/statement/Enum.spec.js.map +1 -0
- package/dist/parser/tests/statement/For.spec.d.ts +1 -0
- package/dist/parser/tests/statement/For.spec.js +46 -0
- package/dist/parser/tests/statement/For.spec.js.map +1 -0
- package/dist/parser/tests/statement/ForEach.spec.d.ts +1 -0
- package/dist/parser/tests/statement/ForEach.spec.js +37 -0
- package/dist/parser/tests/statement/ForEach.spec.js.map +1 -0
- package/dist/parser/tests/statement/Function.spec.js +121 -120
- package/dist/parser/tests/statement/Function.spec.js.map +1 -1
- package/dist/parser/tests/statement/Goto.spec.js +9 -8
- package/dist/parser/tests/statement/Goto.spec.js.map +1 -1
- package/dist/parser/tests/statement/Increment.spec.js +22 -22
- package/dist/parser/tests/statement/Increment.spec.js.map +1 -1
- package/dist/parser/tests/statement/InterfaceStatement.spec.js +12 -0
- package/dist/parser/tests/statement/InterfaceStatement.spec.js.map +1 -1
- package/dist/parser/tests/statement/LibraryStatement.spec.js +7 -7
- package/dist/parser/tests/statement/LibraryStatement.spec.js.map +1 -1
- package/dist/parser/tests/statement/Misc.spec.js +71 -70
- package/dist/parser/tests/statement/Misc.spec.js.map +1 -1
- package/dist/parser/tests/statement/PrintStatement.spec.js +17 -17
- package/dist/parser/tests/statement/PrintStatement.spec.js.map +1 -1
- package/dist/parser/tests/statement/ReturnStatement.spec.js +33 -33
- package/dist/parser/tests/statement/ReturnStatement.spec.js.map +1 -1
- package/dist/parser/tests/statement/Set.spec.js +53 -53
- package/dist/parser/tests/statement/Set.spec.js.map +1 -1
- package/dist/parser/tests/statement/Stop.spec.js +7 -6
- package/dist/parser/tests/statement/Stop.spec.js.map +1 -1
- package/dist/preprocessor/Chunk.d.ts +1 -1
- package/dist/preprocessor/Preprocessor.d.ts +1 -1
- package/dist/preprocessor/Preprocessor.js +7 -7
- package/dist/preprocessor/Preprocessor.js.map +1 -1
- package/dist/types/ArrayType.d.ts +8 -5
- package/dist/types/ArrayType.js +45 -9
- package/dist/types/ArrayType.js.map +1 -1
- package/dist/types/ArrayType.spec.js +62 -3
- package/dist/types/ArrayType.spec.js.map +1 -1
- package/dist/types/BscType.d.ts +1 -1
- package/dist/types/CustomType.d.ts +1 -1
- package/dist/types/CustomType.js +4 -2
- package/dist/types/CustomType.js.map +1 -1
- package/dist/types/FunctionType.d.ts +7 -6
- package/dist/types/FunctionType.js +21 -18
- package/dist/types/FunctionType.js.map +1 -1
- package/dist/types/FunctionType.spec.js +6 -0
- package/dist/types/FunctionType.spec.js.map +1 -1
- package/dist/types/LazyType.d.ts +1 -2
- package/dist/types/LazyType.js +1 -5
- package/dist/types/LazyType.js.map +1 -1
- package/dist/types/UniversalFunctionType.d.ts +9 -0
- package/dist/types/UniversalFunctionType.js +25 -0
- package/dist/types/UniversalFunctionType.js.map +1 -0
- package/dist/types/helpers.js +1 -1
- package/dist/types/helpers.js.map +1 -1
- package/dist/util.d.ts +26 -10
- package/dist/util.js +145 -61
- package/dist/util.js.map +1 -1
- package/dist/validators/ClassValidator.js +17 -24
- package/dist/validators/ClassValidator.js.map +1 -1
- package/package.json +3 -3
- package/dist/astUtils/index.d.ts +0 -7
- package/dist/astUtils/index.js +0 -26
- package/dist/astUtils/index.js.map +0 -1
- package/dist/bscPlugin/semanticTokens/SemanticTokensProcessor.d.ts +0 -7
- package/dist/bscPlugin/semanticTokens/SemanticTokensProcessor.js +0 -63
- package/dist/bscPlugin/semanticTokens/SemanticTokensProcessor.js.map +0 -1
- package/dist/bscPlugin/semanticTokens/SemanticTokensProcessor.spec.js.map +0 -1
- package/dist/lexer/index.d.ts +0 -3
- package/dist/lexer/index.js +0 -18
- package/dist/lexer/index.js.map +0 -1
- package/dist/parser/index.d.ts +0 -3
- package/dist/parser/index.js +0 -16
- package/dist/parser/index.js.map +0 -1
- package/dist/preprocessor/index.d.ts +0 -3
- package/dist/preprocessor/index.js +0 -16
- package/dist/preprocessor/index.js.map +0 -1
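
Most of the churn in the spec-file diffs (including the `Lexer.spec.js` diff below) is one mechanical change: token kinds are now referenced as `TokenKind_1.TokenKind.<Member>` via a direct import of `./TokenKind`, which lines up with the removal of the `lexer/index.js`, `parser/index.js`, and `preprocessor/index.js` barrel modules listed above. A minimal sketch of why the compiled output takes that shape, assuming standard TypeScript CommonJS emit; the package publishes only compiled `dist/` files, so the source shown here is illustrative, not the project's actual code:

```typescript
// TokenKind.ts: illustrative stand-in for the package's real module,
// which exports the TokenKind enum used throughout the lexer.
export enum TokenKind {
    Eof = 'Eof',
    Newline = 'Newline'
}

// Lexer.spec.ts would then import directly from the defining module:
//
//     import { TokenKind } from './TokenKind';
//     expect(tokens[0].kind).to.equal(TokenKind.Eof);
//
// `tsc --module commonjs` rewrites the named import as a namespace-style
// require, so the compiled test reads:
//
//     const TokenKind_1 = require("./TokenKind");
//     (0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.Eof);
//
// That is the pattern on every `+` line in the diff below.
const exampleKind: TokenKind = TokenKind.Eof;
```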
package/dist/lexer/Lexer.spec.js
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
3
|
/* eslint no-template-curly-in-string: 0 */
|
|
4
4
|
const chai_1 = require("chai");
|
|
5
|
-
const
|
|
5
|
+
const TokenKind_1 = require("./TokenKind");
|
|
6
6
|
const Lexer_1 = require("./Lexer");
|
|
7
7
|
const Token_1 = require("./Token");
|
|
8
8
|
const Parser_spec_1 = require("../parser/Parser.spec");
|
|
@@ -12,36 +12,36 @@ describe('lexer', () => {
|
|
|
12
12
|
it('recognizes namespace keywords', () => {
|
|
13
13
|
let { tokens } = Lexer_1.Lexer.scan('namespace end namespace endnamespace end namespace');
|
|
14
14
|
(0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
15
|
+
TokenKind_1.TokenKind.Namespace,
|
|
16
|
+
TokenKind_1.TokenKind.EndNamespace,
|
|
17
|
+
TokenKind_1.TokenKind.EndNamespace,
|
|
18
|
+
TokenKind_1.TokenKind.EndNamespace,
|
|
19
|
+
TokenKind_1.TokenKind.Eof
|
|
20
20
|
]);
|
|
21
21
|
});
|
|
22
22
|
it('recognizes the callfunc operator', () => {
|
|
23
23
|
let { tokens } = Lexer_1.Lexer.scan('@.');
|
|
24
|
-
(0, chai_1.expect)(tokens[0].kind).to.equal(
|
|
24
|
+
(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.Callfunc);
|
|
25
25
|
});
|
|
26
26
|
it('recognizes the import token', () => {
|
|
27
27
|
let { tokens } = Lexer_1.Lexer.scan('import');
|
|
28
|
-
(0, chai_1.expect)(tokens[0].kind).to.eql(
|
|
28
|
+
(0, chai_1.expect)(tokens[0].kind).to.eql(TokenKind_1.TokenKind.Import);
|
|
29
29
|
});
|
|
30
30
|
it('recognizes library token', () => {
|
|
31
31
|
let { tokens } = Lexer_1.Lexer.scan('library');
|
|
32
|
-
(0, chai_1.expect)(tokens[0].kind).to.eql(
|
|
32
|
+
(0, chai_1.expect)(tokens[0].kind).to.eql(TokenKind_1.TokenKind.Library);
|
|
33
33
|
});
|
|
34
34
|
it('recognizes the question mark operator', () => {
|
|
35
35
|
let { tokens } = Lexer_1.Lexer.scan('?');
|
|
36
|
-
(0, chai_1.expect)(tokens[0].kind).to.equal(
|
|
36
|
+
(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.Question);
|
|
37
37
|
});
|
|
38
38
|
it('produces an at symbol token', () => {
|
|
39
39
|
let { tokens } = Lexer_1.Lexer.scan('@');
|
|
40
|
-
(0, chai_1.expect)(tokens[0].kind).to.equal(
|
|
40
|
+
(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.At);
|
|
41
41
|
});
|
|
42
42
|
it('produces a semicolon token', () => {
|
|
43
43
|
let { tokens } = Lexer_1.Lexer.scan(';');
|
|
44
|
-
(0, chai_1.expect)(tokens[0].kind).to.equal(
|
|
44
|
+
(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.Semicolon);
|
|
45
45
|
});
|
|
46
46
|
it('emits error on unknown character type', () => {
|
|
47
47
|
let { diagnostics } = Lexer_1.Lexer.scan('\0');
|
|
@@ -49,26 +49,26 @@ describe('lexer', () => {
|
|
|
49
49
|
});
|
|
50
50
|
it('includes an end-of-file marker', () => {
|
|
51
51
|
let { tokens } = Lexer_1.Lexer.scan('');
|
|
52
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
52
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Eof]);
|
|
53
53
|
});
|
|
54
54
|
it('ignores tabs and spaces', () => {
|
|
55
55
|
let { tokens } = Lexer_1.Lexer.scan('\t\t \t \t');
|
|
56
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
56
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Eof]);
|
|
57
57
|
});
|
|
58
58
|
it('retains every single newline', () => {
|
|
59
59
|
let { tokens } = Lexer_1.Lexer.scan('\n\n\'foo\n\n\nprint 2\n\n');
|
|
60
60
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
61
|
+
TokenKind_1.TokenKind.Newline,
|
|
62
|
+
TokenKind_1.TokenKind.Newline,
|
|
63
|
+
TokenKind_1.TokenKind.Comment,
|
|
64
|
+
TokenKind_1.TokenKind.Newline,
|
|
65
|
+
TokenKind_1.TokenKind.Newline,
|
|
66
|
+
TokenKind_1.TokenKind.Newline,
|
|
67
|
+
TokenKind_1.TokenKind.Print,
|
|
68
|
+
TokenKind_1.TokenKind.IntegerLiteral,
|
|
69
|
+
TokenKind_1.TokenKind.Newline,
|
|
70
|
+
TokenKind_1.TokenKind.Newline,
|
|
71
|
+
TokenKind_1.TokenKind.Eof
|
|
72
72
|
]);
|
|
73
73
|
});
|
|
74
74
|
it('does not insert double newlines with the windows \\r\\n newline', () => {
|
|
@@ -80,14 +80,14 @@ describe('lexer', () => {
|
|
|
80
80
|
' end if\r\n' +
|
|
81
81
|
'end function\r\n').tokens.map(x => x.kind);
|
|
82
82
|
(0, chai_1.expect)(kinds).to.eql([
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
83
|
+
TokenKind_1.TokenKind.Function, TokenKind_1.TokenKind.Identifier, TokenKind_1.TokenKind.LeftParen, TokenKind_1.TokenKind.RightParen, TokenKind_1.TokenKind.As, TokenKind_1.TokenKind.String, TokenKind_1.TokenKind.Newline,
|
|
84
|
+
TokenKind_1.TokenKind.If, TokenKind_1.TokenKind.True, TokenKind_1.TokenKind.Then, TokenKind_1.TokenKind.Newline,
|
|
85
|
+
TokenKind_1.TokenKind.Print, TokenKind_1.TokenKind.IntegerLiteral, TokenKind_1.TokenKind.Newline,
|
|
86
|
+
TokenKind_1.TokenKind.Else, TokenKind_1.TokenKind.Newline,
|
|
87
|
+
TokenKind_1.TokenKind.Print, TokenKind_1.TokenKind.IntegerLiteral, TokenKind_1.TokenKind.Newline,
|
|
88
|
+
TokenKind_1.TokenKind.EndIf, TokenKind_1.TokenKind.Newline,
|
|
89
|
+
TokenKind_1.TokenKind.EndFunction, TokenKind_1.TokenKind.Newline,
|
|
90
|
+
TokenKind_1.TokenKind.Eof
|
|
91
91
|
]);
|
|
92
92
|
});
|
|
93
93
|
it('computes range properly both with and without whitespace', () => {
|
|
@@ -95,7 +95,7 @@ describe('lexer', () => {
|
|
|
95
95
|
.map(x => (0, Parser_spec_1.rangeToArray)(x.range));
|
|
96
96
|
let withWhitespace = Lexer_1.Lexer.scan(`sub Main()\n bob = true\nend sub`).tokens
|
|
97
97
|
//filter out the whitespace...we only care that it was computed during the scan
|
|
98
|
-
.filter(x => x.kind !==
|
|
98
|
+
.filter(x => x.kind !== TokenKind_1.TokenKind.Whitespace)
|
|
99
99
|
.map(x => (0, Parser_spec_1.rangeToArray)(x.range));
|
|
100
100
|
/*eslint-disable */
|
|
101
101
|
let expectedLocations = [
|
|
@@ -126,26 +126,26 @@ describe('lexer', () => {
|
|
|
126
126
|
it('correctly splits the elseif token', () => {
|
|
127
127
|
let { tokens } = Lexer_1.Lexer.scan('else if elseif else if');
|
|
128
128
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
129
|
+
TokenKind_1.TokenKind.Else,
|
|
130
|
+
TokenKind_1.TokenKind.If,
|
|
131
|
+
TokenKind_1.TokenKind.Else,
|
|
132
|
+
TokenKind_1.TokenKind.If,
|
|
133
|
+
TokenKind_1.TokenKind.Else,
|
|
134
|
+
TokenKind_1.TokenKind.If,
|
|
135
|
+
TokenKind_1.TokenKind.Eof
|
|
136
136
|
]);
|
|
137
137
|
});
|
|
138
138
|
it('gives the `as` keyword its own TokenKind', () => {
|
|
139
139
|
let { tokens } = Lexer_1.Lexer.scan('as');
|
|
140
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
140
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.As, TokenKind_1.TokenKind.Eof]);
|
|
141
141
|
});
|
|
142
142
|
it('gives the `stop` keyword its own TokenKind', () => {
|
|
143
143
|
let { tokens } = Lexer_1.Lexer.scan('stop');
|
|
144
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
144
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Stop, TokenKind_1.TokenKind.Eof]);
|
|
145
145
|
});
|
|
146
146
|
it('does not alias \'?\' to \'print\' - the parser will do that', () => {
|
|
147
147
|
let { tokens } = Lexer_1.Lexer.scan('?2');
|
|
148
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
148
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Question, TokenKind_1.TokenKind.IntegerLiteral, TokenKind_1.TokenKind.Eof]);
|
|
149
149
|
});
|
|
150
150
|
describe('comments', () => {
|
|
151
151
|
it('does not include carriage return character', () => {
|
|
@@ -157,7 +157,7 @@ describe('lexer', () => {
|
|
|
157
157
|
'comment
|
|
158
158
|
REM some comment
|
|
159
159
|
`).tokens
|
|
160
|
-
.filter(x => ![
|
|
160
|
+
.filter(x => ![TokenKind_1.TokenKind.Newline, TokenKind_1.TokenKind.Eof].includes(x.kind))
|
|
161
161
|
.map(x => x.text);
|
|
162
162
|
(0, chai_1.expect)(text).to.eql([
|
|
163
163
|
`'comment`,
|
|
@@ -207,14 +207,14 @@ describe('lexer', () => {
|
|
|
207
207
|
let tokens = Lexer_1.Lexer.scan(`
|
|
208
208
|
'comment
|
|
209
209
|
REM some comment
|
|
210
|
-
`).tokens.filter(x => ![
|
|
210
|
+
`).tokens.filter(x => ![TokenKind_1.TokenKind.Newline, TokenKind_1.TokenKind.Eof].includes(x.kind));
|
|
211
211
|
(0, chai_1.expect)(tokens[0].range).to.eql(vscode_languageserver_1.Range.create(1, 16, 1, 24));
|
|
212
212
|
(0, chai_1.expect)(tokens[1].range).to.eql(vscode_languageserver_1.Range.create(2, 16, 2, 32));
|
|
213
213
|
});
|
|
214
214
|
it('finds correct location for newlines', () => {
|
|
215
215
|
let tokens = Lexer_1.Lexer.scan('sub\nsub\r\nsub\n\n').tokens
|
|
216
216
|
//ignore the Eof token
|
|
217
|
-
.filter(x => x.kind !==
|
|
217
|
+
.filter(x => x.kind !== TokenKind_1.TokenKind.Eof);
|
|
218
218
|
(0, chai_1.expect)(tokens.map(x => x.range)).to.eql([
|
|
219
219
|
vscode_languageserver_1.Range.create(0, 0, 0, 3),
|
|
220
220
|
vscode_languageserver_1.Range.create(0, 3, 0, 4),
|
|
@@ -237,91 +237,91 @@ describe('lexer', () => {
|
|
|
237
237
|
end if 'comment
|
|
238
238
|
end sub
|
|
239
239
|
`);
|
|
240
|
-
let comments = tokens.filter(x => x.kind ===
|
|
240
|
+
let comments = tokens.filter(x => x.kind === TokenKind_1.TokenKind.Comment);
|
|
241
241
|
(0, chai_1.expect)(comments).to.be.lengthOf(1);
|
|
242
242
|
(0, chai_1.expect)(comments[0].range).to.eql(vscode_languageserver_1.Range.create(8, 27, 8, 35));
|
|
243
243
|
});
|
|
244
244
|
it('ignores everything after `\'`', () => {
|
|
245
245
|
let { tokens } = Lexer_1.Lexer.scan('= \' (');
|
|
246
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
246
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Equal, TokenKind_1.TokenKind.Comment, TokenKind_1.TokenKind.Eof]);
|
|
247
247
|
});
|
|
248
248
|
it('ignores everything after `REM`', () => {
|
|
249
249
|
let { tokens } = Lexer_1.Lexer.scan('= REM (');
|
|
250
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
250
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Equal, TokenKind_1.TokenKind.Comment, TokenKind_1.TokenKind.Eof]);
|
|
251
251
|
});
|
|
252
252
|
it('ignores everything after `rem`', () => {
|
|
253
253
|
let { tokens } = Lexer_1.Lexer.scan('= rem (');
|
|
254
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
254
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Equal, TokenKind_1.TokenKind.Comment, TokenKind_1.TokenKind.Eof]);
|
|
255
255
|
});
|
|
256
256
|
}); // comments
|
|
257
257
|
describe('non-literals', () => {
|
|
258
258
|
it('reads parens & braces', () => {
|
|
259
259
|
let { tokens } = Lexer_1.Lexer.scan('(){}');
|
|
260
260
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
261
|
+
TokenKind_1.TokenKind.LeftParen,
|
|
262
|
+
TokenKind_1.TokenKind.RightParen,
|
|
263
|
+
TokenKind_1.TokenKind.LeftCurlyBrace,
|
|
264
|
+
TokenKind_1.TokenKind.RightCurlyBrace,
|
|
265
|
+
TokenKind_1.TokenKind.Eof
|
|
266
266
|
]);
|
|
267
267
|
});
|
|
268
268
|
it('reads operators', () => {
|
|
269
269
|
let { tokens } = Lexer_1.Lexer.scan('^ - + * MOD / \\ -- ++');
|
|
270
270
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
271
|
+
TokenKind_1.TokenKind.Caret,
|
|
272
|
+
TokenKind_1.TokenKind.Minus,
|
|
273
|
+
TokenKind_1.TokenKind.Plus,
|
|
274
|
+
TokenKind_1.TokenKind.Star,
|
|
275
|
+
TokenKind_1.TokenKind.Mod,
|
|
276
|
+
TokenKind_1.TokenKind.Forwardslash,
|
|
277
|
+
TokenKind_1.TokenKind.Backslash,
|
|
278
|
+
TokenKind_1.TokenKind.MinusMinus,
|
|
279
|
+
TokenKind_1.TokenKind.PlusPlus,
|
|
280
|
+
TokenKind_1.TokenKind.Eof
|
|
281
281
|
]);
|
|
282
282
|
});
|
|
283
283
|
it('reads bitshift operators', () => {
|
|
284
284
|
let { tokens } = Lexer_1.Lexer.scan('<< >> <<');
|
|
285
285
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
286
|
+
TokenKind_1.TokenKind.LeftShift,
|
|
287
|
+
TokenKind_1.TokenKind.RightShift,
|
|
288
|
+
TokenKind_1.TokenKind.LeftShift,
|
|
289
|
+
TokenKind_1.TokenKind.Eof
|
|
290
290
|
]);
|
|
291
291
|
});
|
|
292
292
|
it('reads bitshift assignment operators', () => {
|
|
293
293
|
let { tokens } = Lexer_1.Lexer.scan('<<= >>=');
|
|
294
294
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
295
|
+
TokenKind_1.TokenKind.LeftShiftEqual,
|
|
296
|
+
TokenKind_1.TokenKind.RightShiftEqual,
|
|
297
|
+
TokenKind_1.TokenKind.Eof
|
|
298
298
|
]);
|
|
299
299
|
});
|
|
300
300
|
it('reads comparators', () => {
|
|
301
301
|
let { tokens } = Lexer_1.Lexer.scan('< <= > >= = <>');
|
|
302
302
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
303
|
+
TokenKind_1.TokenKind.Less,
|
|
304
|
+
TokenKind_1.TokenKind.LessEqual,
|
|
305
|
+
TokenKind_1.TokenKind.Greater,
|
|
306
|
+
TokenKind_1.TokenKind.GreaterEqual,
|
|
307
|
+
TokenKind_1.TokenKind.Equal,
|
|
308
|
+
TokenKind_1.TokenKind.LessGreater,
|
|
309
|
+
TokenKind_1.TokenKind.Eof
|
|
310
310
|
]);
|
|
311
311
|
});
|
|
312
312
|
}); // non-literals
|
|
313
313
|
describe('string literals', () => {
|
|
314
314
|
it('produces string literal tokens', () => {
|
|
315
315
|
let { tokens } = Lexer_1.Lexer.scan(`"hello world"`);
|
|
316
|
-
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
316
|
+
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.StringLiteral, TokenKind_1.TokenKind.Eof]);
|
|
317
317
|
});
|
|
318
318
|
it(`safely escapes " literals`, () => {
|
|
319
319
|
let { tokens } = Lexer_1.Lexer.scan(`"the cat says ""meow"""`);
|
|
320
|
-
(0, chai_1.expect)(tokens[0].kind).to.equal(
|
|
320
|
+
(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.StringLiteral);
|
|
321
321
|
});
|
|
322
322
|
it('captures text to end of line for unterminated strings with LF', () => {
|
|
323
323
|
let { tokens } = Lexer_1.Lexer.scan(`"unterminated!\n`);
|
|
324
|
-
(0, chai_1.expect)(tokens[0].kind).to.eql(
|
|
324
|
+
(0, chai_1.expect)(tokens[0].kind).to.eql(TokenKind_1.TokenKind.StringLiteral);
|
|
325
325
|
});
|
|
326
326
|
it('captures text to end of line for unterminated strings with CRLF', () => {
|
|
327
327
|
let { tokens } = Lexer_1.Lexer.scan(`"unterminated!\r\n`);
|
|
@@ -339,18 +339,18 @@ describe('lexer', () => {
|
|
|
339
339
|
it('supports escaped chars', () => {
|
|
340
340
|
let { tokens } = Lexer_1.Lexer.scan('`\\n\\`\\r\\n`');
|
|
341
341
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
342
|
+
TokenKind_1.TokenKind.BackTick,
|
|
343
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
344
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
345
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
346
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
347
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
348
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
349
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
350
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
351
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
352
|
+
TokenKind_1.TokenKind.BackTick,
|
|
353
|
+
TokenKind_1.TokenKind.Eof
|
|
354
354
|
]);
|
|
355
355
|
(0, chai_1.expect)(tokens.map(x => x.charCode).filter(x => !!x)).to.eql([
|
|
356
356
|
10,
|
|
@@ -362,27 +362,27 @@ describe('lexer', () => {
|
|
|
362
362
|
it('prevents expressions when escaping the dollar sign', () => {
|
|
363
363
|
let { tokens } = Lexer_1.Lexer.scan('`\\${just text}`');
|
|
364
364
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
365
|
+
TokenKind_1.TokenKind.BackTick,
|
|
366
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
367
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
368
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
369
|
+
TokenKind_1.TokenKind.BackTick,
|
|
370
|
+
TokenKind_1.TokenKind.Eof
|
|
371
371
|
]);
|
|
372
372
|
});
|
|
373
373
|
it('supports escaping unicode char codes', () => {
|
|
374
374
|
let { tokens } = Lexer_1.Lexer.scan('`\\c1\\c12\\c123`');
|
|
375
375
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
376
|
+
TokenKind_1.TokenKind.BackTick,
|
|
377
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
378
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
379
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
380
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
381
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
382
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
383
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
384
|
+
TokenKind_1.TokenKind.BackTick,
|
|
385
|
+
TokenKind_1.TokenKind.Eof
|
|
386
386
|
]);
|
|
387
387
|
(0, chai_1.expect)(tokens.map(x => x.charCode).filter(x => !!x)).to.eql([
|
|
388
388
|
1,
|
|
@@ -393,26 +393,26 @@ describe('lexer', () => {
|
|
|
393
393
|
it('converts doublequote to EscapedCharCodeLiteral', () => {
|
|
394
394
|
let { tokens } = Lexer_1.Lexer.scan('`"`');
|
|
395
395
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
396
|
+
TokenKind_1.TokenKind.BackTick,
|
|
397
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
398
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
399
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
400
|
+
TokenKind_1.TokenKind.BackTick,
|
|
401
|
+
TokenKind_1.TokenKind.Eof
|
|
402
402
|
]);
|
|
403
403
|
(0, chai_1.expect)(tokens[2].charCode).to.equal(34);
|
|
404
404
|
});
|
|
405
405
|
it(`safely escapes \` literals`, () => {
|
|
406
406
|
let { tokens } = Lexer_1.Lexer.scan('`the cat says \\`meow\\` a lot`');
|
|
407
407
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
408
|
+
TokenKind_1.TokenKind.BackTick,
|
|
409
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
410
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
411
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
412
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
413
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
414
|
+
TokenKind_1.TokenKind.BackTick,
|
|
415
|
+
TokenKind_1.TokenKind.Eof
|
|
416
416
|
]);
|
|
417
417
|
(0, chai_1.expect)(tokens.map(x => x.text)).to.eql([
|
|
418
418
|
'`',
|
|
@@ -428,24 +428,24 @@ describe('lexer', () => {
|
|
|
428
428
|
it('produces template string literal tokens', () => {
|
|
429
429
|
let { tokens } = Lexer_1.Lexer.scan('`hello world`');
|
|
430
430
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
431
|
+
TokenKind_1.TokenKind.BackTick,
|
|
432
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
433
|
+
TokenKind_1.TokenKind.BackTick,
|
|
434
|
+
TokenKind_1.TokenKind.Eof
|
|
435
435
|
]);
|
|
436
436
|
(0, chai_1.expect)(tokens[1].text).to.deep.equal('hello world');
|
|
437
437
|
});
|
|
438
438
|
it('collects quasis outside and expressions inside of template strings', () => {
|
|
439
439
|
let { tokens } = Lexer_1.Lexer.scan('`hello ${"world"}!`');
|
|
440
440
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
441
|
+
TokenKind_1.TokenKind.BackTick,
|
|
442
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
443
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
444
|
+
TokenKind_1.TokenKind.StringLiteral,
|
|
445
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
446
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
447
|
+
TokenKind_1.TokenKind.BackTick,
|
|
448
|
+
TokenKind_1.TokenKind.Eof
|
|
449
449
|
]);
|
|
450
450
|
(0, chai_1.expect)(tokens[1].text).to.deep.equal(`hello `);
|
|
451
451
|
});
|
|
@@ -467,130 +467,130 @@ describe('lexer', () => {
|
|
|
467
467
|
end function
|
|
468
468
|
`);
|
|
469
469
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
470
|
+
TokenKind_1.TokenKind.Newline,
|
|
471
|
+
TokenKind_1.TokenKind.Function,
|
|
472
|
+
TokenKind_1.TokenKind.Identifier,
|
|
473
|
+
TokenKind_1.TokenKind.LeftParen,
|
|
474
|
+
TokenKind_1.TokenKind.Identifier,
|
|
475
|
+
TokenKind_1.TokenKind.RightParen,
|
|
476
|
+
TokenKind_1.TokenKind.Newline,
|
|
477
|
+
TokenKind_1.TokenKind.Return,
|
|
478
|
+
TokenKind_1.TokenKind.BackTick,
|
|
479
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
480
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
481
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
482
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
483
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
484
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
485
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
486
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
487
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
488
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
489
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
490
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
491
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
492
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
493
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
494
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
495
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
496
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
497
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
498
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
499
|
+
TokenKind_1.TokenKind.Identifier,
|
|
500
|
+
TokenKind_1.TokenKind.Dot,
|
|
501
|
+
TokenKind_1.TokenKind.Identifier,
|
|
502
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
503
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
504
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
505
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
506
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
507
|
+
TokenKind_1.TokenKind.Identifier,
|
|
508
|
+
TokenKind_1.TokenKind.Dot,
|
|
509
|
+
TokenKind_1.TokenKind.Identifier,
|
|
510
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
511
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
512
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
513
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
514
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
515
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
516
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
517
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
518
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
519
|
+
TokenKind_1.TokenKind.Identifier,
|
|
520
|
+
TokenKind_1.TokenKind.Dot,
|
|
521
|
+
TokenKind_1.TokenKind.Identifier,
|
|
522
|
+
TokenKind_1.TokenKind.Dot,
|
|
523
|
+
TokenKind_1.TokenKind.Identifier,
|
|
524
|
+
TokenKind_1.TokenKind.Dot,
|
|
525
|
+
TokenKind_1.TokenKind.Identifier,
|
|
526
|
+
TokenKind_1.TokenKind.Dot,
|
|
527
|
+
TokenKind_1.TokenKind.Identifier,
|
|
528
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
529
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
530
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
531
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
532
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
533
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
534
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
535
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
536
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
537
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
538
|
+
TokenKind_1.TokenKind.BackTick,
|
|
539
|
+
TokenKind_1.TokenKind.Newline,
|
|
540
|
+
TokenKind_1.TokenKind.EndFunction,
|
|
541
|
+
TokenKind_1.TokenKind.Newline,
|
|
542
|
+
TokenKind_1.TokenKind.Eof
|
|
543
543
|
]);
|
|
544
544
|
});
|
|
545
545
|
it('complicated example', () => {
|
|
546
546
|
let { tokens } = Lexer_1.Lexer.scan('`hello ${"world"}!I am a ${"template" + "string"} and I am very ${["pleased"][0]} to meet you ${m.top.getChildCount()}.The end`');
|
|
547
547
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.eql([
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
548
|
+
TokenKind_1.TokenKind.BackTick,
|
|
549
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
550
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
551
|
+
TokenKind_1.TokenKind.StringLiteral,
|
|
552
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
553
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
554
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
555
|
+
TokenKind_1.TokenKind.StringLiteral,
|
|
556
|
+
TokenKind_1.TokenKind.Plus,
|
|
557
|
+
TokenKind_1.TokenKind.StringLiteral,
|
|
558
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
559
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
560
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
561
|
+
TokenKind_1.TokenKind.LeftSquareBracket,
|
|
562
|
+
TokenKind_1.TokenKind.StringLiteral,
|
|
563
|
+
TokenKind_1.TokenKind.RightSquareBracket,
|
|
564
|
+
TokenKind_1.TokenKind.LeftSquareBracket,
|
|
565
|
+
TokenKind_1.TokenKind.IntegerLiteral,
|
|
566
|
+
TokenKind_1.TokenKind.RightSquareBracket,
|
|
567
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
568
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
569
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
570
|
+
TokenKind_1.TokenKind.Identifier,
|
|
571
|
+
TokenKind_1.TokenKind.Dot,
|
|
572
|
+
TokenKind_1.TokenKind.Identifier,
|
|
573
|
+
TokenKind_1.TokenKind.Dot,
|
|
574
|
+
TokenKind_1.TokenKind.Identifier,
|
|
575
|
+
TokenKind_1.TokenKind.LeftParen,
|
|
576
|
+
TokenKind_1.TokenKind.RightParen,
|
|
577
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
578
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
579
|
+
TokenKind_1.TokenKind.BackTick,
|
|
580
|
+
TokenKind_1.TokenKind.Eof
|
|
581
581
|
]);
|
|
582
582
|
});
|
|
583
583
|
it('allows multiline strings', () => {
|
|
584
584
|
let { tokens } = Lexer_1.Lexer.scan('`multi-line\n\n`');
|
|
585
585
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
586
|
+
TokenKind_1.TokenKind.BackTick,
|
|
587
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
588
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
589
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
590
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
591
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
592
|
+
TokenKind_1.TokenKind.BackTick,
|
|
593
|
+
TokenKind_1.TokenKind.Eof
|
|
594
594
|
]);
|
|
595
595
|
(0, chai_1.expect)(tokens.map(x => x.text)).to.eql([
|
|
596
596
|
'`',
|
|
@@ -611,161 +611,161 @@ describe('lexer', () => {
|
|
|
611
611
|
kind: x.kind
|
|
612
612
|
};
|
|
613
613
|
})).to.eql([
|
|
614
|
-
{ range: vscode_languageserver_1.Range.create(0, 0, 0, 3), kind:
|
|
615
|
-
{ range: vscode_languageserver_1.Range.create(0, 4, 0, 5), kind:
|
|
616
|
-
{ range: vscode_languageserver_1.Range.create(0, 5, 0, 10), kind:
|
|
617
|
-
{ range: vscode_languageserver_1.Range.create(0, 10, 0, 11), kind:
|
|
618
|
-
{ range: vscode_languageserver_1.Range.create(1, 0, 1, 4), kind:
|
|
619
|
-
{ range: vscode_languageserver_1.Range.create(1, 4, 1, 5), kind:
|
|
620
|
-
{ range: vscode_languageserver_1.Range.create(1, 5, 1, 6), kind:
|
|
621
|
-
{ range: vscode_languageserver_1.Range.create(2, 0, 2, 7), kind:
|
|
622
|
-
{ range: vscode_languageserver_1.Range.create(2, 7, 2, 8), kind:
|
|
623
|
-
{ range: vscode_languageserver_1.Range.create(2, 9, 2, 13), kind:
|
|
624
|
-
{ range: vscode_languageserver_1.Range.create(2, 13, 2, 14), kind:
|
|
625
|
-
{ range: vscode_languageserver_1.Range.create(3, 0, 3, 5), kind:
|
|
626
|
-
{ range: vscode_languageserver_1.Range.create(3, 5, 3, 6), kind:
|
|
614
|
+
{ range: vscode_languageserver_1.Range.create(0, 0, 0, 3), kind: TokenKind_1.TokenKind.IntegerLiteral },
|
|
615
|
+
{ range: vscode_languageserver_1.Range.create(0, 4, 0, 5), kind: TokenKind_1.TokenKind.BackTick },
|
|
616
|
+
{ range: vscode_languageserver_1.Range.create(0, 5, 0, 10), kind: TokenKind_1.TokenKind.TemplateStringQuasi },
|
|
617
|
+
{ range: vscode_languageserver_1.Range.create(0, 10, 0, 11), kind: TokenKind_1.TokenKind.EscapedCharCodeLiteral },
|
|
618
|
+
{ range: vscode_languageserver_1.Range.create(1, 0, 1, 4), kind: TokenKind_1.TokenKind.TemplateStringQuasi },
|
|
619
|
+
{ range: vscode_languageserver_1.Range.create(1, 4, 1, 5), kind: TokenKind_1.TokenKind.EscapedCharCodeLiteral },
|
|
620
|
+
{ range: vscode_languageserver_1.Range.create(1, 5, 1, 6), kind: TokenKind_1.TokenKind.EscapedCharCodeLiteral },
|
|
621
|
+
{ range: vscode_languageserver_1.Range.create(2, 0, 2, 7), kind: TokenKind_1.TokenKind.TemplateStringQuasi },
|
|
622
|
+
{ range: vscode_languageserver_1.Range.create(2, 7, 2, 8), kind: TokenKind_1.TokenKind.BackTick },
|
|
623
|
+
{ range: vscode_languageserver_1.Range.create(2, 9, 2, 13), kind: TokenKind_1.TokenKind.True },
|
|
624
|
+
{ range: vscode_languageserver_1.Range.create(2, 13, 2, 14), kind: TokenKind_1.TokenKind.Newline },
|
|
625
|
+
{ range: vscode_languageserver_1.Range.create(3, 0, 3, 5), kind: TokenKind_1.TokenKind.False },
|
|
626
|
+
{ range: vscode_languageserver_1.Range.create(3, 5, 3, 6), kind: TokenKind_1.TokenKind.Eof }
|
|
627
627
|
]);
|
|
628
628
|
});
|
|
629
629
|
it('Example that tripped up the expression tests', () => {
|
|
630
630
|
let { tokens } = Lexer_1.Lexer.scan('`I am a complex example\n${a.isRunning(["a","b","c"])}\nmore ${m.finish(true)}`');
|
|
631
631
|
(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
632
|
+
TokenKind_1.TokenKind.BackTick,
|
|
633
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
634
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
635
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
636
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
637
|
+
TokenKind_1.TokenKind.Identifier,
|
|
638
|
+
TokenKind_1.TokenKind.Dot,
|
|
639
|
+
TokenKind_1.TokenKind.Identifier,
|
|
640
|
+
TokenKind_1.TokenKind.LeftParen,
|
|
641
|
+
TokenKind_1.TokenKind.LeftSquareBracket,
|
|
642
|
+
TokenKind_1.TokenKind.StringLiteral,
|
|
643
|
+
TokenKind_1.TokenKind.Comma,
|
|
644
|
+
TokenKind_1.TokenKind.StringLiteral,
|
|
645
|
+
TokenKind_1.TokenKind.Comma,
|
|
646
|
+
TokenKind_1.TokenKind.StringLiteral,
|
|
647
|
+
TokenKind_1.TokenKind.RightSquareBracket,
|
|
648
|
+
TokenKind_1.TokenKind.RightParen,
|
|
649
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
650
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
651
|
+
TokenKind_1.TokenKind.EscapedCharCodeLiteral,
|
|
652
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
653
|
+
TokenKind_1.TokenKind.TemplateStringExpressionBegin,
|
|
654
|
+
TokenKind_1.TokenKind.Identifier,
|
|
655
|
+
TokenKind_1.TokenKind.Dot,
|
|
656
|
+
TokenKind_1.TokenKind.Identifier,
|
|
657
|
+
TokenKind_1.TokenKind.LeftParen,
|
|
658
|
+
TokenKind_1.TokenKind.True,
|
|
659
|
+
TokenKind_1.TokenKind.RightParen,
|
|
660
|
+
TokenKind_1.TokenKind.TemplateStringExpressionEnd,
|
|
661
|
+
TokenKind_1.TokenKind.TemplateStringQuasi,
|
|
662
|
+
TokenKind_1.TokenKind.BackTick,
|
|
663
|
+
TokenKind_1.TokenKind.Eof
|
|
664
664
|
]);
|
|
665
665
|
});
|
|
666
666
|
}); // string literals
|
|
667
667
|
describe('double literals', () => {
|
|
668
668
|
it('respects \'#\' suffix', () => {
|
|
669
669
|
let d = Lexer_1.Lexer.scan('123#').tokens[0];
|
|
670
|
-
(0, chai_1.expect)(d.kind).to.equal(
|
|
670
|
+
(0, chai_1.expect)(d.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
|
|
671
671
|
(0, chai_1.expect)(d.text).to.eql('123#');
|
|
672
672
|
});
|
|
673
673
|
it('forces literals >= 10 digits into doubles', () => {
|
|
674
674
|
let d = Lexer_1.Lexer.scan('0000000005').tokens[0];
|
|
675
|
-
(0, chai_1.expect)(d.kind).to.equal(
|
|
675
|
+
(0, chai_1.expect)(d.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
|
|
676
676
|
(0, chai_1.expect)(d.text).to.eql('0000000005');
|
|
677
677
|
});
|
|
678
678
|
it('forces literals with \'D\' in exponent into doubles', () => {
|
|
679
679
|
let d = Lexer_1.Lexer.scan('2.5d3').tokens[0];
|
|
680
|
-
(0, chai_1.expect)(d.kind).to.equal(
|
|
680
|
+
(0, chai_1.expect)(d.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
|
|
681
681
|
(0, chai_1.expect)(d.text).to.eql('2.5d3');
|
|
682
682
|
});
|
|
683
683
|
it('allows digits before `.` to be elided', () => {
|
|
684
684
|
let f = Lexer_1.Lexer.scan('.123#').tokens[0];
|
|
685
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
685
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
|
|
686
686
|
(0, chai_1.expect)(f.text).to.eql('.123#');
|
|
687
687
|
});
|
|
688
688
|
it('allows digits after `.` to be elided', () => {
|
|
689
689
|
let f = Lexer_1.Lexer.scan('12.#').tokens[0];
|
|
690
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
690
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
|
|
691
691
|
(0, chai_1.expect)(f.text).to.eql('12.#');
|
|
692
692
|
});
|
|
693
693
|
});
|
|
694
694
|
describe('float literals', () => {
|
|
695
695
|
it('respects \'!\' suffix', () => {
|
|
696
696
|
let f = Lexer_1.Lexer.scan('0.00000008!').tokens[0];
|
|
697
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
697
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
|
|
698
698
|
// Floating precision will make this *not* equal
|
|
699
699
|
(0, chai_1.expect)(f.text).not.to.equal(8e-8);
|
|
700
700
|
(0, chai_1.expect)(f.text).to.eql('0.00000008!');
|
|
701
701
|
});
|
|
702
702
|
it('forces literals with a decimal into floats', () => {
|
|
703
703
|
let f = Lexer_1.Lexer.scan('1.0').tokens[0];
|
|
704
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
704
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
|
|
705
705
|
(0, chai_1.expect)(f.text).to.equal('1.0');
|
|
706
706
|
});
|
|
707
707
|
it('forces literals with \'E\' in exponent into floats', () => {
|
|
708
708
|
let f = Lexer_1.Lexer.scan('2.5e3').tokens[0];
|
|
709
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
709
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
|
|
710
710
|
(0, chai_1.expect)(f.text).to.eql('2.5e3');
|
|
711
711
|
});
|
|
712
712
|
it('supports larger-than-supported-precision floats to be defined with exponents', () => {
|
|
713
713
|
let f = Lexer_1.Lexer.scan('2.3659475627512424e-38').tokens[0];
|
|
714
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
714
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
|
|
715
715
|
(0, chai_1.expect)(f.text).to.eql('2.3659475627512424e-38');
|
|
716
716
|
});
|
|
717
717
|
it('allows digits before `.` to be elided', () => {
|
|
718
718
|
let f = Lexer_1.Lexer.scan('.123').tokens[0];
|
|
719
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
719
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
|
|
720
720
|
(0, chai_1.expect)(f.text).to.equal('.123');
|
|
721
721
|
});
|
|
722
722
|
it('allows digits after `.` to be elided', () => {
|
|
723
723
|
let f = Lexer_1.Lexer.scan('12.').tokens[0];
|
|
724
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
724
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
|
|
725
725
|
(0, chai_1.expect)(f.text).to.equal('12.');
|
|
726
726
|
});
|
|
727
727
|
});
|
|
728
728
|
describe('long integer literals', () => {
|
|
729
729
|
it('respects \'&\' suffix', () => {
|
|
730
730
|
let f = Lexer_1.Lexer.scan('1&').tokens[0];
|
|
731
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
731
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.LongIntegerLiteral);
|
|
732
732
|
(0, chai_1.expect)(f.text).to.eql('1&');
|
|
733
733
|
});
|
|
734
734
|
it('supports hexadecimal literals', () => {
|
|
735
735
|
let i = Lexer_1.Lexer.scan('&hf00d&').tokens[0];
|
|
736
|
-
(0, chai_1.expect)(i.kind).to.equal(
|
|
736
|
+
(0, chai_1.expect)(i.kind).to.equal(TokenKind_1.TokenKind.LongIntegerLiteral);
|
|
737
737
|
(0, chai_1.expect)(i.text).to.equal('&hf00d&');
|
|
738
738
|
});
|
|
739
739
|
it('allows very long Int64 literals', () => {
|
|
740
740
|
let li = Lexer_1.Lexer.scan('9876543210&').tokens[0];
|
|
741
|
-
(0, chai_1.expect)(li.kind).to.equal(
|
|
741
|
+
(0, chai_1.expect)(li.kind).to.equal(TokenKind_1.TokenKind.LongIntegerLiteral);
|
|
742
742
|
(0, chai_1.expect)(li.text).to.equal('9876543210&');
|
|
743
743
|
});
|
|
744
744
|
it('forces literals with \'&\' suffix into Int64s', () => {
|
|
745
745
|
let li = Lexer_1.Lexer.scan('123&').tokens[0];
|
|
746
|
-
(0, chai_1.expect)(li.kind).to.equal(
|
|
746
|
+
(0, chai_1.expect)(li.kind).to.equal(TokenKind_1.TokenKind.LongIntegerLiteral);
|
|
747
747
|
(0, chai_1.expect)(li.text).to.deep.equal('123&');
|
|
748
748
|
});
|
|
749
749
|
});
|
|
750
750
|
describe('integer literals', () => {
|
|
751
751
|
it('respects \'%\' suffix', () => {
|
|
752
752
|
let f = Lexer_1.Lexer.scan('1%').tokens[0];
|
|
753
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
753
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.IntegerLiteral);
|
|
754
754
|
(0, chai_1.expect)(f.text).to.eql('1%');
|
|
755
755
|
});
|
|
756
756
|
it('does not allow decimal numbers to end with %', () => {
|
|
757
757
|
let f = Lexer_1.Lexer.scan('1.2%').tokens[0];
|
|
758
|
-
(0, chai_1.expect)(f.kind).to.equal(
|
|
758
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
|
|
759
759
|
(0, chai_1.expect)(f.text).to.eql('1.2');
|
|
760
760
|
});
|
|
761
761
|
it('supports hexadecimal literals', () => {
|
|
762
762
|
let i = Lexer_1.Lexer.scan('&hFf').tokens[0];
|
|
763
|
-
(0, chai_1.expect)(i.kind).to.equal(
|
|
763
|
+
(0, chai_1.expect)(i.kind).to.equal(TokenKind_1.TokenKind.IntegerLiteral);
|
|
764
764
|
(0, chai_1.expect)(i.text).to.deep.equal('&hFf');
|
|
765
765
|
});
|
|
766
766
|
it('falls back to a regular integer', () => {
|
|
767
767
|
let i = Lexer_1.Lexer.scan('123').tokens[0];
|
|
768
|
-
(0, chai_1.expect)(i.kind).to.equal(
|
|
768
|
+
(0, chai_1.expect)(i.kind).to.equal(TokenKind_1.TokenKind.IntegerLiteral);
|
|
769
769
|
(0, chai_1.expect)(i.text).to.deep.equal('123');
|
|
770
770
|
});
|
|
771
771
|
});
|
|
@@ -774,18 +774,18 @@ describe('lexer', () => {
|
|
|
774
774
|
(0, chai_1.expect)(Lexer_1.Lexer.scan(`
|
|
775
775
|
void boolean integer longinteger float double string object interface invalid dynamic
|
|
776
776
|
`.trim()).tokens.map(x => x.kind)).to.eql([
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
777
|
+
TokenKind_1.TokenKind.Void,
|
|
778
|
+
TokenKind_1.TokenKind.Boolean,
|
|
779
|
+
TokenKind_1.TokenKind.Integer,
|
|
780
|
+
TokenKind_1.TokenKind.LongInteger,
|
|
781
|
+
TokenKind_1.TokenKind.Float,
|
|
782
|
+
TokenKind_1.TokenKind.Double,
|
|
783
|
+
TokenKind_1.TokenKind.String,
|
|
784
|
+
TokenKind_1.TokenKind.Object,
|
|
785
|
+
TokenKind_1.TokenKind.Interface,
|
|
786
|
+
TokenKind_1.TokenKind.Invalid,
|
|
787
|
+
TokenKind_1.TokenKind.Dynamic,
|
|
788
|
+
TokenKind_1.TokenKind.Eof
|
|
789
789
|
]);
|
|
790
790
|
});
|
|
791
791
|
});
|
|
```diff
@@ -795,57 +795,57 @@ describe('lexer', () => {
 // if we find any that we've missed
 let { tokens } = Lexer_1.Lexer.scan('and then or if else endif return true false line_num');
 (0, chai_1.expect)(tokens.map(w => w.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
-
-
-
+TokenKind_1.TokenKind.And,
+TokenKind_1.TokenKind.Then,
+TokenKind_1.TokenKind.Or,
+TokenKind_1.TokenKind.If,
+TokenKind_1.TokenKind.Else,
+TokenKind_1.TokenKind.EndIf,
+TokenKind_1.TokenKind.Return,
+TokenKind_1.TokenKind.True,
+TokenKind_1.TokenKind.False,
+TokenKind_1.TokenKind.LineNumLiteral,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('matches multi-word keywords', () => {
 let { tokens } = Lexer_1.Lexer.scan('end if end while End Sub end Function Exit wHILe');
 (0, chai_1.expect)(tokens.map(w => w.kind)).to.deep.equal([
-
-
-
-
-
-
+TokenKind_1.TokenKind.EndIf,
+TokenKind_1.TokenKind.EndWhile,
+TokenKind_1.TokenKind.EndSub,
+TokenKind_1.TokenKind.EndFunction,
+TokenKind_1.TokenKind.ExitWhile,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('accepts \'exit for\' but not \'exitfor\'', () => {
 let { tokens } = Lexer_1.Lexer.scan('exit for exitfor');
 (0, chai_1.expect)(tokens.map(w => w.kind)).to.deep.equal([
-
-
-
+TokenKind_1.TokenKind.ExitFor,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('matches keywords with silly capitalization', () => {
 let { tokens } = Lexer_1.Lexer.scan('iF ELSE eNDIf FUncTioN');
 (0, chai_1.expect)(tokens.map(w => w.kind)).to.deep.equal([
-
-
-
-
-
+TokenKind_1.TokenKind.If,
+TokenKind_1.TokenKind.Else,
+TokenKind_1.TokenKind.EndIf,
+TokenKind_1.TokenKind.Function,
+TokenKind_1.TokenKind.Eof
 ]);
 });
```
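Two lexer behaviors are pinned down here: multi-word keywords such as `end sub` collapse into a single token regardless of internal spacing or capitalization, while run-together forms like `exitfor` stay ordinary identifiers. A sketch under the same export assumption:

```ts
import { Lexer, TokenKind } from 'brighterscript'; // assumed root exports

// 'End Sub' (any capitalization) is one EndSub token; 'exitfor' (no space) is just an identifier.
const { tokens } = Lexer.scan('End Sub exitfor');
console.log(tokens.map(t => t.kind));
// → [TokenKind.EndSub, TokenKind.Identifier, TokenKind.Eof]
```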
```diff
 it('allows alpha-numeric (plus \'_\') identifiers', () => {
 let identifier = Lexer_1.Lexer.scan('_abc_123_').tokens[0];
-(0, chai_1.expect)(identifier.kind).to.equal(
+(0, chai_1.expect)(identifier.kind).to.equal(TokenKind_1.TokenKind.Identifier);
 (0, chai_1.expect)(identifier.text).to.equal('_abc_123_');
 });
 it('allows identifiers with trailing type designators', () => {
 let { tokens } = Lexer_1.Lexer.scan('lorem$ ipsum% dolor! sit# amet&');
-let identifiers = tokens.filter(t => t.kind !==
-(0, chai_1.expect)(identifiers.every(t => t.kind ===
+let identifiers = tokens.filter(t => t.kind !== TokenKind_1.TokenKind.Eof);
+(0, chai_1.expect)(identifiers.every(t => t.kind === TokenKind_1.TokenKind.Identifier));
 (0, chai_1.expect)(identifiers.map(t => t.text)).to.deep.equal([
 'lorem$',
 'ipsum%',
```
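Identifiers may end with a BrightScript type designator (`$`, `%`, `!`, `#`, `&`), and the designator stays inside the token's text rather than being split off. A sketch mirroring the test:

```ts
import { Lexer, TokenKind } from 'brighterscript'; // assumed root exports

// Trailing type designators stay attached to the Identifier token.
const { tokens } = Lexer.scan('lorem$ ipsum%');
const identifiers = tokens.filter(t => t.kind !== TokenKind.Eof);
console.log(identifiers.every(t => t.kind === TokenKind.Identifier)); // true
console.log(identifiers.map(t => t.text)); // → ['lorem$', 'ipsum%']
```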
```diff
@@ -859,19 +859,19 @@ describe('lexer', () => {
 it('reads constant declarations', () => {
 let { tokens } = Lexer_1.Lexer.scan('#const foo true');
 (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
+TokenKind_1.TokenKind.HashConst,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.True,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('reads constant aliases', () => {
 let { tokens } = Lexer_1.Lexer.scan('#const bar foo');
 (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
+TokenKind_1.TokenKind.HashConst,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('reads conditional directives', () => {
```
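Both `#const` tests above follow the same shape: the directive lexes as a dedicated `HashConst` token, then the name and value come through as ordinary tokens. Sketch:

```ts
import { Lexer, TokenKind } from 'brighterscript'; // assumed root exports

// '#const foo true' → directive, name, value, end-of-file.
console.log(Lexer.scan('#const foo true').tokens.map(t => t.kind));
// → [TokenKind.HashConst, TokenKind.Identifier, TokenKind.True, TokenKind.Eof]
```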
```diff
@@ -885,19 +885,19 @@ describe('lexer', () => {
 `, {
 includeWhitespace: false
 });
-(0, chai_1.expect)(tokens.map(t => t.kind).filter(x => x !==
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind).filter(x => x !== TokenKind_1.TokenKind.Newline)).to.deep.equal([
+TokenKind_1.TokenKind.HashIf,
+TokenKind_1.TokenKind.HashElseIf,
+TokenKind_1.TokenKind.HashElseIf,
+TokenKind_1.TokenKind.HashElse,
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('treats text "constructor" as an identifier', () => {
 let lexer = Lexer_1.Lexer.scan(`function constructor()\nend function`);
-(0, chai_1.expect)(lexer.tokens[1].kind).to.equal(
+(0, chai_1.expect)(lexer.tokens[1].kind).to.equal(TokenKind_1.TokenKind.Identifier);
 });
 it('reads upper case conditional directives', () => {
 let { tokens } = Lexer_1.Lexer.scan(`
@@ -910,25 +910,25 @@ describe('lexer', () => {
 `, {
 includeWhitespace: false
 });
-(0, chai_1.expect)(tokens.map(t => t.kind).filter(x => x !==
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind).filter(x => x !== TokenKind_1.TokenKind.Newline)).to.deep.equal([
+TokenKind_1.TokenKind.HashIf,
+TokenKind_1.TokenKind.HashElseIf,
+TokenKind_1.TokenKind.HashElseIf,
+TokenKind_1.TokenKind.HashElse,
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('supports various spacings between #endif', () => {
 let { tokens } = Lexer_1.Lexer.scan('#endif #end if #end\tif #end if #end\t\t if');
 (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.HashEndIf,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('reads forced compilation diagnostics with messages', () => {
```
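The spacing test just above shows the lexer is deliberately tolerant of whitespace inside multi-word directives. Sketch:

```ts
import { Lexer, TokenKind } from 'brighterscript'; // assumed root exports

// '#endif', '#end if', and '#end\tif' each collapse to a single HashEndIf token.
console.log(Lexer.scan('#endif #end if #end\tif').tokens.map(t => t.kind));
// → [TokenKind.HashEndIf, TokenKind.HashEndIf, TokenKind.HashEndIf, TokenKind.Eof]
```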
```diff
@@ -936,11 +936,11 @@ describe('lexer', () => {
 includeWhitespace: true
 });
 (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
+TokenKind_1.TokenKind.HashError,
+TokenKind_1.TokenKind.Whitespace,
+TokenKind_1.TokenKind.HashErrorMessage,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Eof
 ]);
 (0, chai_1.expect)(tokens[2].text).to.equal('a message goes here');
 });
```
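For `#error`, the directive lexes as `HashError` and the remainder of the line is captured as one `HashErrorMessage` token whose text is the message itself. The test's input line sits outside this hunk, so the source string below is a plausible reconstruction consistent with the assertions above, not a quote:

```ts
import { Lexer, TokenKind } from 'brighterscript'; // assumed root exports

// Hypothetical input matching the assertions in the hunk above.
const { tokens } = Lexer.scan('#error a message goes here\n', { includeWhitespace: true });
console.log(tokens.map(t => t.kind));
// → [HashError, Whitespace, HashErrorMessage, Newline, Eof]
console.log(tokens[2].text); // → 'a message goes here'
```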
```diff
@@ -984,26 +984,26 @@ describe('lexer', () => {
 it('supports various spacing between for each', () => {
 let { tokens } = Lexer_1.Lexer.scan('for each for each for each for\teach for\t each for \teach for \t each');
 (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
+TokenKind_1.TokenKind.ForEach,
+TokenKind_1.TokenKind.ForEach,
+TokenKind_1.TokenKind.ForEach,
+TokenKind_1.TokenKind.ForEach,
+TokenKind_1.TokenKind.ForEach,
+TokenKind_1.TokenKind.ForEach,
+TokenKind_1.TokenKind.ForEach,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 });
 it('detects rem when used as keyword', () => {
 let { tokens } = Lexer_1.Lexer.scan('person.rem=true');
 (0, chai_1.expect)(tokens.map(t => t.kind)).to.eql([
-
-
-
-
-
-
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Equal,
+TokenKind_1.TokenKind.True,
+TokenKind_1.TokenKind.Eof
 ]);
 //verify the location of `rem`
 (0, chai_1.expect)(tokens.map(t => [t.range.start.character, t.range.end.character])).to.eql([
```
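Every token also carries a zero-based `range`; the `rem` test goes on to assert exact character offsets, though the expected array falls outside this hunk. The offsets in the comment below are derived by hand from `'person.rem=true'`, assuming LSP-style end-exclusive positions:

```ts
import { Lexer } from 'brighterscript'; // assumed root export

// person → [0, 6], . → [6, 7], rem → [7, 10], = → [10, 11], true → [11, 15]
const { tokens } = Lexer.scan('person.rem=true');
console.log(tokens.map(t => [t.range.start.character, t.range.end.character]));
```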
```diff
@@ -1018,61 +1018,69 @@ describe('lexer', () => {
 describe('isToken', () => {
 it('works', () => {
 let range = vscode_languageserver_1.Range.create(0, 0, 0, 2);
-(0, chai_1.expect)((0, Token_1.isToken)({ kind:
+(0, chai_1.expect)((0, Token_1.isToken)({ kind: TokenKind_1.TokenKind.And, text: 'and', range: range })).is.true;
 (0, chai_1.expect)((0, Token_1.isToken)({ text: 'and', range: range })).is.false;
 });
 });
+it('recognizes enum-related keywords', () => {
+(0, chai_1.expect)(Lexer_1.Lexer.scan('enum end enum endenum').tokens.map(x => x.kind)).to.eql([
+TokenKind_1.TokenKind.Enum,
+TokenKind_1.TokenKind.EndEnum,
+TokenKind_1.TokenKind.EndEnum,
+TokenKind_1.TokenKind.Eof
+]);
+});
 it('recognizes class-related keywords', () => {
 (0, chai_1.expect)(Lexer_1.Lexer.scan('class public protected private end class endclass new override').tokens.map(x => x.kind)).to.eql([
-
-
-
-
-
-
-
-
-
+TokenKind_1.TokenKind.Class,
+TokenKind_1.TokenKind.Public,
+TokenKind_1.TokenKind.Protected,
+TokenKind_1.TokenKind.Private,
+TokenKind_1.TokenKind.EndClass,
+TokenKind_1.TokenKind.EndClass,
+TokenKind_1.TokenKind.New,
+TokenKind_1.TokenKind.Override,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 describe('whitespace', () => {
 it('preserves the exact number of whitespace characters', () => {
 let { tokens } = Lexer_1.Lexer.scan(' ', { includeWhitespace: true });
 (0, chai_1.expect)(tokens[0]).to.include({
-kind:
+kind: TokenKind_1.TokenKind.Whitespace,
 text: ' '
 });
 });
 it('tokenizes whitespace between things', () => {
 let { tokens } = Lexer_1.Lexer.scan('sub main ( ) \n end sub', { includeWhitespace: true });
 (0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
-
-
-
-
-
-
-
-
-
-
-
-
+TokenKind_1.TokenKind.Sub,
+TokenKind_1.TokenKind.Whitespace,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Whitespace,
+TokenKind_1.TokenKind.LeftParen,
+TokenKind_1.TokenKind.Whitespace,
+TokenKind_1.TokenKind.RightParen,
+TokenKind_1.TokenKind.Whitespace,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Whitespace,
+TokenKind_1.TokenKind.EndSub,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 });
 it('identifies brighterscript source literals', () => {
 let { tokens } = Lexer_1.Lexer.scan('LINE_NUM SOURCE_FILE_PATH SOURCE_LINE_NUM FUNCTION_NAME SOURCE_FUNCTION_NAME SOURCE_LOCATION PKG_PATH PKG_LOCATION');
 (0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
-
-
-
-
-
-
-
-
-
+TokenKind_1.TokenKind.LineNumLiteral,
+TokenKind_1.TokenKind.SourceFilePathLiteral,
+TokenKind_1.TokenKind.SourceLineNumLiteral,
+TokenKind_1.TokenKind.FunctionNameLiteral,
+TokenKind_1.TokenKind.SourceFunctionNameLiteral,
+TokenKind_1.TokenKind.SourceLocationLiteral,
+TokenKind_1.TokenKind.PkgPathLiteral,
+TokenKind_1.TokenKind.PkgLocationLiteral,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('properly tracks leadingWhitespace', () => {
```
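The `+` lines adding the enum test are the substantive change in this hunk: the lexer now has dedicated enum token kinds, with both `end enum` and the run-together `endenum` mapping to `EndEnum`. Sketch:

```ts
import { Lexer, TokenKind } from 'brighterscript'; // assumed root exports

// New in this release range: enum-related keywords get their own kinds.
console.log(Lexer.scan('enum end enum endenum').tokens.map(t => t.kind));
// → [TokenKind.Enum, TokenKind.EndEnum, TokenKind.EndEnum, TokenKind.Eof]
```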
```diff
@@ -1089,12 +1097,12 @@ describe('lexer', () => {
 it('properly detects try/catch tokens', () => {
 const { tokens } = Lexer_1.Lexer.scan(`try catch endtry end try throw`, { includeWhitespace: false });
 (0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
-
-
-
-
-
-
+TokenKind_1.TokenKind.Try,
+TokenKind_1.TokenKind.Catch,
+TokenKind_1.TokenKind.EndTry,
+TokenKind_1.TokenKind.EndTry,
+TokenKind_1.TokenKind.Throw,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 describe('regular expression literals', () => {
@@ -1119,29 +1127,29 @@ describe('lexer', () => {
 includeWhitespace: false
 });
 (0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Equal,
+TokenKind_1.TokenKind.IntegerLiteral,
+TokenKind_1.TokenKind.Forwardslash,
+TokenKind_1.TokenKind.IntegerLiteral,
+TokenKind_1.TokenKind.Plus,
+TokenKind_1.TokenKind.IntegerLiteral,
+TokenKind_1.TokenKind.Forwardslash,
+TokenKind_1.TokenKind.IntegerLiteral,
+TokenKind_1.TokenKind.Plus,
+TokenKind_1.TokenKind.IntegerLiteral,
+TokenKind_1.TokenKind.Forwardslash,
+TokenKind_1.TokenKind.IntegerLiteral,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('only captures alphanumeric flags', () => {
 (0, chai_1.expect)(Lexer_1.Lexer.scan('speak(/a/)').tokens.map(x => x.kind)).to.eql([
-
-
-
-
-
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.LeftParen,
+TokenKind_1.TokenKind.RegexLiteral,
+TokenKind_1.TokenKind.RightParen,
+TokenKind_1.TokenKind.Eof
 ]);
 });
```
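The two regex tests illustrate how the lexer resolves the classic slash ambiguity: after an operand, `/` is the division operator (`Forwardslash`), while in a position where an expression is expected, such as an argument list, it opens a `RegexLiteral`. The division test's input line sits outside the hunk, so the first string below is an analogous example rather than a quote:

```ts
import { Lexer, TokenKind } from 'brighterscript'; // assumed root exports

// After an operand, '/' lexes as division...
console.log(Lexer.scan('a = 1/2').tokens.map(t => t.kind));
// → [Identifier, Equal, IntegerLiteral, Forwardslash, IntegerLiteral, Eof]

// ...but in argument position it begins a regex literal.
console.log(Lexer.scan('speak(/a/)').tokens.map(t => t.kind));
// → [Identifier, LeftParen, RegexLiteral, RightParen, Eof]
```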
```diff
 it('handles escape characters properly', () => {
```