brighterscript 1.0.0-alpha.10 → 1.0.0-alpha.14
This diff compares the contents of publicly available package versions as published to a supported public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +247 -267
- package/README.md +2 -2
- package/dist/Cache.d.ts +3 -3
- package/dist/Cache.js +10 -6
- package/dist/Cache.js.map +1 -1
- package/dist/CodeActionUtil.js.map +1 -1
- package/dist/CommentFlagProcessor.d.ts +4 -4
- package/dist/CommentFlagProcessor.js +5 -3
- package/dist/CommentFlagProcessor.js.map +1 -1
- package/dist/DependencyGraph.js.map +1 -1
- package/dist/DiagnosticFilterer.js +1 -1
- package/dist/DiagnosticFilterer.js.map +1 -1
- package/dist/LanguageServer.d.ts +1 -6
- package/dist/LanguageServer.js +5 -14
- package/dist/LanguageServer.js.map +1 -1
- package/dist/Logger.js.map +1 -1
- package/dist/PluginInterface.d.ts +3 -3
- package/dist/PluginInterface.js +3 -0
- package/dist/PluginInterface.js.map +1 -1
- package/dist/Program.d.ts +35 -20
- package/dist/Program.js +132 -67
- package/dist/Program.js.map +1 -1
- package/dist/ProgramBuilder.js +4 -4
- package/dist/ProgramBuilder.js.map +1 -1
- package/dist/Scope.d.ts +22 -13
- package/dist/Scope.js +85 -70
- package/dist/Scope.js.map +1 -1
- package/dist/SymbolTable.d.ts +1 -1
- package/dist/SymbolTable.js +2 -2
- package/dist/SymbolTable.js.map +1 -1
- package/dist/XmlScope.d.ts +2 -2
- package/dist/XmlScope.js +5 -5
- package/dist/XmlScope.js.map +1 -1
- package/dist/astUtils/AstEditor.d.ts +27 -0
- package/dist/astUtils/AstEditor.js +97 -0
- package/dist/astUtils/AstEditor.js.map +1 -0
- package/dist/{bscPlugin/semanticTokens/SemanticTokensProcessor.spec.d.ts → astUtils/AstEditor.spec.d.ts} +0 -0
- package/dist/astUtils/AstEditor.spec.js +133 -0
- package/dist/astUtils/AstEditor.spec.js.map +1 -0
- package/dist/astUtils/creators.spec.js +4 -4
- package/dist/astUtils/creators.spec.js.map +1 -1
- package/dist/astUtils/reflection.d.ts +4 -2
- package/dist/astUtils/reflection.js +10 -1
- package/dist/astUtils/reflection.js.map +1 -1
- package/dist/astUtils/reflection.spec.js +116 -116
- package/dist/astUtils/reflection.spec.js.map +1 -1
- package/dist/astUtils/stackedVisitor.js.map +1 -1
- package/dist/astUtils/stackedVisitor.spec.js +13 -13
- package/dist/astUtils/stackedVisitor.spec.js.map +1 -1
- package/dist/astUtils/visitors.js +1 -1
- package/dist/astUtils/visitors.js.map +1 -1
- package/dist/astUtils/visitors.spec.js +26 -26
- package/dist/astUtils/visitors.spec.js.map +1 -1
- package/dist/astUtils/xml.d.ts +1 -0
- package/dist/astUtils/xml.js +6 -1
- package/dist/astUtils/xml.js.map +1 -1
- package/dist/bscPlugin/BscPlugin.js +5 -2
- package/dist/bscPlugin/BscPlugin.js.map +1 -1
- package/dist/bscPlugin/codeActions/CodeActionsProcessor.js +3 -3
- package/dist/bscPlugin/codeActions/CodeActionsProcessor.js.map +1 -1
- package/dist/bscPlugin/codeActions/CodeActionsProcessor.spec.js +17 -17
- package/dist/bscPlugin/codeActions/CodeActionsProcessor.spec.js.map +1 -1
- package/dist/bscPlugin/semanticTokens/BrsFileSemanticTokensProcessor.d.ts +8 -0
- package/dist/bscPlugin/semanticTokens/{SemanticTokensProcessor.js → BrsFileSemanticTokensProcessor.js} +13 -15
- package/dist/bscPlugin/semanticTokens/BrsFileSemanticTokensProcessor.js.map +1 -0
- package/dist/bscPlugin/semanticTokens/BrsFileSemanticTokensProcessor.spec.d.ts +1 -0
- package/dist/bscPlugin/semanticTokens/{SemanticTokensProcessor.spec.js → BrsFileSemanticTokensProcessor.spec.js} +5 -5
- package/dist/bscPlugin/semanticTokens/BrsFileSemanticTokensProcessor.spec.js.map +1 -0
- package/dist/diagnosticUtils.d.ts +1 -0
- package/dist/diagnosticUtils.js +14 -7
- package/dist/diagnosticUtils.js.map +1 -1
- package/dist/examples/plugins/removePrint.js +2 -2
- package/dist/examples/plugins/removePrint.js.map +1 -1
- package/dist/files/BrsFile.Class.spec.js +444 -269
- package/dist/files/BrsFile.Class.spec.js.map +1 -1
- package/dist/files/BrsFile.d.ts +17 -11
- package/dist/files/BrsFile.js +220 -153
- package/dist/files/BrsFile.js.map +1 -1
- package/dist/files/BrsFile.spec.js +620 -272
- package/dist/files/BrsFile.spec.js.map +1 -1
- package/dist/files/XmlFile.d.ts +6 -5
- package/dist/files/XmlFile.js +14 -9
- package/dist/files/XmlFile.js.map +1 -1
- package/dist/files/XmlFile.spec.js +238 -191
- package/dist/files/XmlFile.spec.js.map +1 -1
- package/dist/files/tests/imports.spec.js +29 -27
- package/dist/files/tests/imports.spec.js.map +1 -1
- package/dist/globalCallables.d.ts +3 -1
- package/dist/globalCallables.js +198 -99
- package/dist/globalCallables.js.map +1 -1
- package/dist/index.d.ts +12 -3
- package/dist/index.js +22 -4
- package/dist/index.js.map +1 -1
- package/dist/interfaces.d.ts +71 -10
- package/dist/lexer/Lexer.d.ts +5 -1
- package/dist/lexer/Lexer.js +52 -35
- package/dist/lexer/Lexer.js.map +1 -1
- package/dist/lexer/Lexer.spec.js +564 -534
- package/dist/lexer/Lexer.spec.js.map +1 -1
- package/dist/lexer/TokenKind.d.ts +4 -0
- package/dist/lexer/TokenKind.js +44 -1
- package/dist/lexer/TokenKind.js.map +1 -1
- package/dist/parser/Expression.d.ts +1 -1
- package/dist/parser/Expression.js +66 -66
- package/dist/parser/Expression.js.map +1 -1
- package/dist/parser/Parser.Class.spec.js +107 -106
- package/dist/parser/Parser.Class.spec.js.map +1 -1
- package/dist/parser/Parser.d.ts +17 -5
- package/dist/parser/Parser.js +431 -315
- package/dist/parser/Parser.js.map +1 -1
- package/dist/parser/Parser.spec.js +418 -296
- package/dist/parser/Parser.spec.js.map +1 -1
- package/dist/parser/SGParser.js +2 -2
- package/dist/parser/SGParser.js.map +1 -1
- package/dist/parser/SGParser.spec.js +22 -22
- package/dist/parser/SGParser.spec.js.map +1 -1
- package/dist/parser/SGTypes.d.ts +3 -0
- package/dist/parser/SGTypes.js +12 -7
- package/dist/parser/SGTypes.js.map +1 -1
- package/dist/parser/SGTypes.spec.js +84 -84
- package/dist/parser/SGTypes.spec.js.map +1 -1
- package/dist/parser/Statement.d.ts +3 -3
- package/dist/parser/Statement.js +61 -61
- package/dist/parser/Statement.js.map +1 -1
- package/dist/parser/Statement.spec.js +10 -10
- package/dist/parser/Statement.spec.js.map +1 -1
- package/dist/parser/tests/Parser.spec.d.ts +3 -3
- package/dist/parser/tests/Parser.spec.js +4 -4
- package/dist/parser/tests/Parser.spec.js.map +1 -1
- package/dist/parser/tests/controlFlow/For.spec.js +58 -58
- package/dist/parser/tests/controlFlow/For.spec.js.map +1 -1
- package/dist/parser/tests/controlFlow/ForEach.spec.js +40 -39
- package/dist/parser/tests/controlFlow/ForEach.spec.js.map +1 -1
- package/dist/parser/tests/controlFlow/If.spec.js +201 -200
- package/dist/parser/tests/controlFlow/If.spec.js.map +1 -1
- package/dist/parser/tests/controlFlow/While.spec.js +37 -37
- package/dist/parser/tests/controlFlow/While.spec.js.map +1 -1
- package/dist/parser/tests/expression/Additive.spec.js +30 -30
- package/dist/parser/tests/expression/Additive.spec.js.map +1 -1
- package/dist/parser/tests/expression/ArrayLiterals.spec.js +119 -119
- package/dist/parser/tests/expression/ArrayLiterals.spec.js.map +1 -1
- package/dist/parser/tests/expression/AssociativeArrayLiterals.spec.js +141 -141
- package/dist/parser/tests/expression/AssociativeArrayLiterals.spec.js.map +1 -1
- package/dist/parser/tests/expression/Boolean.spec.js +24 -24
- package/dist/parser/tests/expression/Boolean.spec.js.map +1 -1
- package/dist/parser/tests/expression/Call.spec.js +41 -40
- package/dist/parser/tests/expression/Call.spec.js.map +1 -1
- package/dist/parser/tests/expression/Exponential.spec.js +17 -17
- package/dist/parser/tests/expression/Exponential.spec.js.map +1 -1
- package/dist/parser/tests/expression/Function.spec.js +256 -256
- package/dist/parser/tests/expression/Function.spec.js.map +1 -1
- package/dist/parser/tests/expression/Indexing.spec.js +87 -87
- package/dist/parser/tests/expression/Indexing.spec.js.map +1 -1
- package/dist/parser/tests/expression/Multiplicative.spec.js +37 -37
- package/dist/parser/tests/expression/Multiplicative.spec.js.map +1 -1
- package/dist/parser/tests/expression/NullCoalescenceExpression.spec.js +74 -62
- package/dist/parser/tests/expression/NullCoalescenceExpression.spec.js.map +1 -1
- package/dist/parser/tests/expression/PrefixUnary.spec.js +41 -41
- package/dist/parser/tests/expression/PrefixUnary.spec.js.map +1 -1
- package/dist/parser/tests/expression/Primary.spec.js +41 -41
- package/dist/parser/tests/expression/Primary.spec.js.map +1 -1
- package/dist/parser/tests/expression/RegexLiteralExpression.spec.js +110 -2
- package/dist/parser/tests/expression/RegexLiteralExpression.spec.js.map +1 -1
- package/dist/parser/tests/expression/Relational.spec.js +43 -43
- package/dist/parser/tests/expression/Relational.spec.js.map +1 -1
- package/dist/parser/tests/expression/SourceLiteralExpression.spec.js +6 -6
- package/dist/parser/tests/expression/SourceLiteralExpression.spec.js.map +1 -1
- package/dist/parser/tests/expression/TemplateStringExpression.spec.js +18 -18
- package/dist/parser/tests/expression/TemplateStringExpression.spec.js.map +1 -1
- package/dist/parser/tests/expression/TernaryExpression.spec.js +100 -100
- package/dist/parser/tests/expression/TernaryExpression.spec.js.map +1 -1
- package/dist/parser/tests/statement/AssignmentOperators.spec.js +36 -36
- package/dist/parser/tests/statement/AssignmentOperators.spec.js.map +1 -1
- package/dist/parser/tests/statement/Declaration.spec.js +44 -44
- package/dist/parser/tests/statement/Declaration.spec.js.map +1 -1
- package/dist/parser/tests/statement/Dim.spec.js +21 -21
- package/dist/parser/tests/statement/Dim.spec.js.map +1 -1
- package/dist/parser/tests/statement/Function.spec.js +198 -197
- package/dist/parser/tests/statement/Function.spec.js.map +1 -1
- package/dist/parser/tests/statement/Goto.spec.js +15 -14
- package/dist/parser/tests/statement/Goto.spec.js.map +1 -1
- package/dist/parser/tests/statement/Increment.spec.js +50 -50
- package/dist/parser/tests/statement/Increment.spec.js.map +1 -1
- package/dist/parser/tests/statement/InterfaceStatement.spec.js +14 -2
- package/dist/parser/tests/statement/InterfaceStatement.spec.js.map +1 -1
- package/dist/parser/tests/statement/LibraryStatement.spec.js +17 -17
- package/dist/parser/tests/statement/LibraryStatement.spec.js.map +1 -1
- package/dist/parser/tests/statement/Misc.spec.js +91 -90
- package/dist/parser/tests/statement/Misc.spec.js.map +1 -1
- package/dist/parser/tests/statement/PrintStatement.spec.js +34 -34
- package/dist/parser/tests/statement/PrintStatement.spec.js.map +1 -1
- package/dist/parser/tests/statement/ReturnStatement.spec.js +46 -46
- package/dist/parser/tests/statement/ReturnStatement.spec.js.map +1 -1
- package/dist/parser/tests/statement/Set.spec.js +83 -83
- package/dist/parser/tests/statement/Set.spec.js.map +1 -1
- package/dist/parser/tests/statement/Stop.spec.js +12 -11
- package/dist/parser/tests/statement/Stop.spec.js.map +1 -1
- package/dist/parser/tests/statement/Throw.spec.js +5 -5
- package/dist/parser/tests/statement/Throw.spec.js.map +1 -1
- package/dist/parser/tests/statement/TryCatch.spec.js +13 -13
- package/dist/parser/tests/statement/TryCatch.spec.js.map +1 -1
- package/dist/preprocessor/Chunk.d.ts +1 -1
- package/dist/preprocessor/Chunk.js.map +1 -1
- package/dist/preprocessor/Manifest.d.ts +1 -1
- package/dist/preprocessor/Preprocessor.d.ts +1 -1
- package/dist/preprocessor/Preprocessor.js +8 -8
- package/dist/preprocessor/Preprocessor.js.map +1 -1
- package/dist/preprocessor/Preprocessor.spec.js +49 -49
- package/dist/preprocessor/Preprocessor.spec.js.map +1 -1
- package/dist/preprocessor/PreprocessorParser.spec.js +72 -72
- package/dist/preprocessor/PreprocessorParser.spec.js.map +1 -1
- package/dist/types/ArrayType.d.ts +8 -5
- package/dist/types/ArrayType.js +48 -12
- package/dist/types/ArrayType.js.map +1 -1
- package/dist/types/ArrayType.spec.js +69 -10
- package/dist/types/ArrayType.spec.js.map +1 -1
- package/dist/types/BooleanType.js +3 -3
- package/dist/types/BooleanType.js.map +1 -1
- package/dist/types/BooleanType.spec.js +2 -2
- package/dist/types/BooleanType.spec.js.map +1 -1
- package/dist/types/BscType.d.ts +1 -1
- package/dist/types/BscType.js +1 -1
- package/dist/types/BscType.js.map +1 -1
- package/dist/types/CustomType.d.ts +1 -1
- package/dist/types/CustomType.js +6 -4
- package/dist/types/CustomType.js.map +1 -1
- package/dist/types/DoubleType.js +7 -7
- package/dist/types/DoubleType.js.map +1 -1
- package/dist/types/DoubleType.spec.js +2 -2
- package/dist/types/DoubleType.spec.js.map +1 -1
- package/dist/types/DynamicType.js +1 -1
- package/dist/types/DynamicType.js.map +1 -1
- package/dist/types/DynamicType.spec.js +2 -2
- package/dist/types/DynamicType.spec.js.map +1 -1
- package/dist/types/FloatType.js +7 -7
- package/dist/types/FloatType.js.map +1 -1
- package/dist/types/FloatType.spec.js +2 -2
- package/dist/types/FloatType.spec.js.map +1 -1
- package/dist/types/FunctionType.d.ts +5 -5
- package/dist/types/FunctionType.js +13 -13
- package/dist/types/FunctionType.js.map +1 -1
- package/dist/types/FunctionType.spec.js +7 -7
- package/dist/types/FunctionType.spec.js.map +1 -1
- package/dist/types/IntegerType.js +7 -7
- package/dist/types/IntegerType.js.map +1 -1
- package/dist/types/IntegerType.spec.js +2 -2
- package/dist/types/IntegerType.spec.js.map +1 -1
- package/dist/types/InterfaceType.js +3 -3
- package/dist/types/InterfaceType.js.map +1 -1
- package/dist/types/InterfaceType.spec.js +7 -7
- package/dist/types/InterfaceType.spec.js.map +1 -1
- package/dist/types/InvalidType.js +4 -4
- package/dist/types/InvalidType.js.map +1 -1
- package/dist/types/InvalidType.spec.js +2 -2
- package/dist/types/InvalidType.spec.js.map +1 -1
- package/dist/types/LazyType.d.ts +1 -2
- package/dist/types/LazyType.js +1 -5
- package/dist/types/LazyType.js.map +1 -1
- package/dist/types/LongIntegerType.js +8 -8
- package/dist/types/LongIntegerType.js.map +1 -1
- package/dist/types/LongIntegerType.spec.js +2 -2
- package/dist/types/LongIntegerType.spec.js.map +1 -1
- package/dist/types/ObjectType.js +3 -3
- package/dist/types/ObjectType.js.map +1 -1
- package/dist/types/ObjectType.spec.js +2 -2
- package/dist/types/ObjectType.spec.js.map +1 -1
- package/dist/types/StringType.js +3 -3
- package/dist/types/StringType.js.map +1 -1
- package/dist/types/StringType.spec.js +2 -2
- package/dist/types/StringType.spec.js.map +1 -1
- package/dist/types/UninitializedType.js +3 -3
- package/dist/types/UninitializedType.js.map +1 -1
- package/dist/types/VoidType.js +3 -3
- package/dist/types/VoidType.js.map +1 -1
- package/dist/types/VoidType.spec.js +2 -2
- package/dist/types/VoidType.spec.js.map +1 -1
- package/dist/types/helpers.js +6 -6
- package/dist/types/helpers.js.map +1 -1
- package/dist/util.d.ts +15 -9
- package/dist/util.js +104 -57
- package/dist/util.js.map +1 -1
- package/dist/validators/ClassValidator.js +40 -40
- package/dist/validators/ClassValidator.js.map +1 -1
- package/package.json +10 -9
- package/dist/astUtils/index.d.ts +0 -7
- package/dist/astUtils/index.js +0 -26
- package/dist/astUtils/index.js.map +0 -1
- package/dist/bscPlugin/semanticTokens/SemanticTokensProcessor.d.ts +0 -7
- package/dist/bscPlugin/semanticTokens/SemanticTokensProcessor.js.map +0 -1
- package/dist/bscPlugin/semanticTokens/SemanticTokensProcessor.spec.js.map +0 -1
- package/dist/lexer/index.d.ts +0 -3
- package/dist/lexer/index.js +0 -17
- package/dist/lexer/index.js.map +0 -1
- package/dist/parser/index.d.ts +0 -3
- package/dist/parser/index.js +0 -16
- package/dist/parser/index.js.map +0 -1
- package/dist/preprocessor/index.d.ts +0 -3
- package/dist/preprocessor/index.js +0 -16
- package/dist/preprocessor/index.js.map +0 -1
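
Note: nearly all of the churn in the compiled `.spec.js` files below is mechanical compiler-output change rather than hand edits: calls made through an imported binding, such as `chai_1.expect(...)`, are now emitted in the indirect form `(0, chai_1.expect)(...)`, the style newer TypeScript versions produce for calls to imported functions. A minimal sketch of what that emit difference means (the values here are illustrative, not taken from the package):

const chai_1 = require("chai");
// old emit: a method-style call, so `this` inside expect() is the `chai_1` module object
chai_1.expect(1 + 1).to.equal(2);
// new emit: the comma operator evaluates to the bare function first, so the call
// carries no receiver and `this` inside expect() is undefined
(0, chai_1.expect)(1 + 1).to.equal(2);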
package/dist/lexer/Lexer.spec.js CHANGED
@@ -2,7 +2,7 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 /* eslint no-template-curly-in-string: 0 */
 const chai_1 = require("chai");
-const
+const TokenKind_1 = require("./TokenKind");
 const Lexer_1 = require("./Lexer");
 const Token_1 = require("./Token");
 const Parser_spec_1 = require("../parser/Parser.spec");
@@ -11,64 +11,64 @@ const util_1 = require("../util");
 describe('lexer', () => {
 it('recognizes namespace keywords', () => {
 let { tokens } = Lexer_1.Lexer.scan('namespace end namespace endnamespace end namespace');
-chai_1.expect(tokens.map(x => x.kind)).to.eql([
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
+TokenKind_1.TokenKind.Namespace,
+TokenKind_1.TokenKind.EndNamespace,
+TokenKind_1.TokenKind.EndNamespace,
+TokenKind_1.TokenKind.EndNamespace,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('recognizes the callfunc operator', () => {
 let { tokens } = Lexer_1.Lexer.scan('@.');
-chai_1.expect(tokens[0].kind).to.equal(
+(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.Callfunc);
 });
 it('recognizes the import token', () => {
 let { tokens } = Lexer_1.Lexer.scan('import');
-chai_1.expect(tokens[0].kind).to.eql(
+(0, chai_1.expect)(tokens[0].kind).to.eql(TokenKind_1.TokenKind.Import);
 });
 it('recognizes library token', () => {
 let { tokens } = Lexer_1.Lexer.scan('library');
-chai_1.expect(tokens[0].kind).to.eql(
+(0, chai_1.expect)(tokens[0].kind).to.eql(TokenKind_1.TokenKind.Library);
 });
 it('recognizes the question mark operator', () => {
 let { tokens } = Lexer_1.Lexer.scan('?');
-chai_1.expect(tokens[0].kind).to.equal(
+(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.Question);
 });
 it('produces an at symbol token', () => {
 let { tokens } = Lexer_1.Lexer.scan('@');
-chai_1.expect(tokens[0].kind).to.equal(
+(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.At);
 });
 it('produces a semicolon token', () => {
 let { tokens } = Lexer_1.Lexer.scan(';');
-chai_1.expect(tokens[0].kind).to.equal(
+(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.Semicolon);
 });
 it('emits error on unknown character type', () => {
 let { diagnostics } = Lexer_1.Lexer.scan('\0');
-chai_1.expect(diagnostics).to.be.lengthOf(1);
+(0, chai_1.expect)(diagnostics).to.be.lengthOf(1);
 });
 it('includes an end-of-file marker', () => {
 let { tokens } = Lexer_1.Lexer.scan('');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Eof]);
 });
 it('ignores tabs and spaces', () => {
 let { tokens } = Lexer_1.Lexer.scan('\t\t \t \t');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Eof]);
 });
 it('retains every single newline', () => {
 let { tokens } = Lexer_1.Lexer.scan('\n\n\'foo\n\n\nprint 2\n\n');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Comment,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Print,
+TokenKind_1.TokenKind.IntegerLiteral,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('does not insert double newlines with the windows \\r\\n newline', () => {
@@ -79,24 +79,24 @@
 ' print 0\r\n' +
 ' end if\r\n' +
 'end function\r\n').tokens.map(x => x.kind);
-chai_1.expect(kinds).to.eql([
-
-
-
-
-
-
-
+(0, chai_1.expect)(kinds).to.eql([
+TokenKind_1.TokenKind.Function, TokenKind_1.TokenKind.Identifier, TokenKind_1.TokenKind.LeftParen, TokenKind_1.TokenKind.RightParen, TokenKind_1.TokenKind.As, TokenKind_1.TokenKind.String, TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.If, TokenKind_1.TokenKind.True, TokenKind_1.TokenKind.Then, TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Print, TokenKind_1.TokenKind.IntegerLiteral, TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Else, TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Print, TokenKind_1.TokenKind.IntegerLiteral, TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.EndIf, TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.EndFunction, TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('computes range properly both with and without whitespace', () => {
 let withoutWhitespace = Lexer_1.Lexer.scan(`sub Main()\n bob = true\nend sub`).tokens
-.map(x => Parser_spec_1.rangeToArray(x.range));
+.map(x => (0, Parser_spec_1.rangeToArray)(x.range));
 let withWhitespace = Lexer_1.Lexer.scan(`sub Main()\n bob = true\nend sub`).tokens
 //filter out the whitespace...we only care that it was computed during the scan
-.filter(x => x.kind !==
-.map(x => Parser_spec_1.rangeToArray(x.range));
+.filter(x => x.kind !== TokenKind_1.TokenKind.Whitespace)
+.map(x => (0, Parser_spec_1.rangeToArray)(x.range));
 /*eslint-disable */
 let expectedLocations = [
 [0, 0, 0, 3],
@@ -112,54 +112,54 @@
 [2, 7, 2, 8] //Eof
 ];
 /*eslint-enable*/
-chai_1.expect(withoutWhitespace, 'Without whitespace').to.eql(expectedLocations);
-chai_1.expect(withWhitespace, 'With whitespace').to.eql(expectedLocations);
+(0, chai_1.expect)(withoutWhitespace, 'Without whitespace').to.eql(expectedLocations);
+(0, chai_1.expect)(withWhitespace, 'With whitespace').to.eql(expectedLocations);
 });
 it('retains original line endings', () => {
 let { tokens } = Lexer_1.Lexer.scan('print "hello"\r\nprint "world"\n');
-chai_1.expect([
+(0, chai_1.expect)([
 tokens[2].text.charCodeAt(0),
 tokens[2].text.charCodeAt(1)
 ], 'should contain \\r\\n').to.eql([13, 10]);
-chai_1.expect(tokens[5].text.charCodeAt(0), 'should contain \\r\\n').to.eql(10);
+(0, chai_1.expect)(tokens[5].text.charCodeAt(0), 'should contain \\r\\n').to.eql(10);
 });
 it('correctly splits the elseif token', () => {
 let { tokens } = Lexer_1.Lexer.scan('else if elseif else if');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.Else,
+TokenKind_1.TokenKind.If,
+TokenKind_1.TokenKind.Else,
+TokenKind_1.TokenKind.If,
+TokenKind_1.TokenKind.Else,
+TokenKind_1.TokenKind.If,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('gives the `as` keyword its own TokenKind', () => {
 let { tokens } = Lexer_1.Lexer.scan('as');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.As, TokenKind_1.TokenKind.Eof]);
 });
 it('gives the `stop` keyword its own TokenKind', () => {
 let { tokens } = Lexer_1.Lexer.scan('stop');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Stop, TokenKind_1.TokenKind.Eof]);
 });
 it('does not alias \'?\' to \'print\' - the parser will do that', () => {
 let { tokens } = Lexer_1.Lexer.scan('?2');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Question, TokenKind_1.TokenKind.IntegerLiteral, TokenKind_1.TokenKind.Eof]);
 });
 describe('comments', () => {
 it('does not include carriage return character', () => {
 let tokens = Lexer_1.Lexer.scan(`'someComment\r\nprint "hello"`).tokens;
-chai_1.expect(tokens[0].text).to.equal(`'someComment`);
+(0, chai_1.expect)(tokens[0].text).to.equal(`'someComment`);
 });
 it('includes the comment characters in the text', () => {
 let text = Lexer_1.Lexer.scan(`
 'comment
 REM some comment
 `).tokens
-.filter(x => ![
+.filter(x => ![TokenKind_1.TokenKind.Newline, TokenKind_1.TokenKind.Eof].includes(x.kind))
 .map(x => x.text);
-chai_1.expect(text).to.eql([
+(0, chai_1.expect)(text).to.eql([
 `'comment`,
 'REM some comment'
 ]);
@@ -172,8 +172,8 @@
 end sub
 `, {
 includeWhitespace: true
-}).tokens.map(x => [...Parser_spec_1.rangeToArray(x.range), x.text]);
-chai_1.expect(tokens).to.eql([
+}).tokens.map(x => [...(0, Parser_spec_1.rangeToArray)(x.range), x.text]);
+(0, chai_1.expect)(tokens).to.eql([
 [0, 0, 0, 1, '\n'],
 [1, 0, 1, 16, ' '],
 [1, 16, 1, 19, 'sub'],
@@ -207,15 +207,15 @@
 let tokens = Lexer_1.Lexer.scan(`
 'comment
 REM some comment
-`).tokens.filter(x => ![
-chai_1.expect(tokens[0].range).to.eql(vscode_languageserver_1.Range.create(1, 16, 1, 24));
-chai_1.expect(tokens[1].range).to.eql(vscode_languageserver_1.Range.create(2, 16, 2, 32));
+`).tokens.filter(x => ![TokenKind_1.TokenKind.Newline, TokenKind_1.TokenKind.Eof].includes(x.kind));
+(0, chai_1.expect)(tokens[0].range).to.eql(vscode_languageserver_1.Range.create(1, 16, 1, 24));
+(0, chai_1.expect)(tokens[1].range).to.eql(vscode_languageserver_1.Range.create(2, 16, 2, 32));
 });
 it('finds correct location for newlines', () => {
 let tokens = Lexer_1.Lexer.scan('sub\nsub\r\nsub\n\n').tokens
 //ignore the Eof token
-.filter(x => x.kind !==
-chai_1.expect(tokens.map(x => x.range)).to.eql([
+.filter(x => x.kind !== TokenKind_1.TokenKind.Eof);
+(0, chai_1.expect)(tokens.map(x => x.range)).to.eql([
 vscode_languageserver_1.Range.create(0, 0, 0, 3),
 vscode_languageserver_1.Range.create(0, 3, 0, 4),
 vscode_languageserver_1.Range.create(1, 0, 1, 3),
@@ -237,99 +237,99 @@
 end if 'comment
 end sub
 `);
-let comments = tokens.filter(x => x.kind ===
-chai_1.expect(comments).to.be.lengthOf(1);
-chai_1.expect(comments[0].range).to.eql(vscode_languageserver_1.Range.create(8, 27, 8, 35));
+let comments = tokens.filter(x => x.kind === TokenKind_1.TokenKind.Comment);
+(0, chai_1.expect)(comments).to.be.lengthOf(1);
+(0, chai_1.expect)(comments[0].range).to.eql(vscode_languageserver_1.Range.create(8, 27, 8, 35));
 });
 it('ignores everything after `\'`', () => {
 let { tokens } = Lexer_1.Lexer.scan('= \' (');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Equal, TokenKind_1.TokenKind.Comment, TokenKind_1.TokenKind.Eof]);
 });
 it('ignores everything after `REM`', () => {
 let { tokens } = Lexer_1.Lexer.scan('= REM (');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Equal, TokenKind_1.TokenKind.Comment, TokenKind_1.TokenKind.Eof]);
 });
 it('ignores everything after `rem`', () => {
 let { tokens } = Lexer_1.Lexer.scan('= rem (');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.Equal, TokenKind_1.TokenKind.Comment, TokenKind_1.TokenKind.Eof]);
 });
 }); // comments
 describe('non-literals', () => {
 it('reads parens & braces', () => {
 let { tokens } = Lexer_1.Lexer.scan('(){}');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.LeftParen,
+TokenKind_1.TokenKind.RightParen,
+TokenKind_1.TokenKind.LeftCurlyBrace,
+TokenKind_1.TokenKind.RightCurlyBrace,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('reads operators', () => {
 let { tokens } = Lexer_1.Lexer.scan('^ - + * MOD / \\ -- ++');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.Caret,
+TokenKind_1.TokenKind.Minus,
+TokenKind_1.TokenKind.Plus,
+TokenKind_1.TokenKind.Star,
+TokenKind_1.TokenKind.Mod,
+TokenKind_1.TokenKind.Forwardslash,
+TokenKind_1.TokenKind.Backslash,
+TokenKind_1.TokenKind.MinusMinus,
+TokenKind_1.TokenKind.PlusPlus,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('reads bitshift operators', () => {
 let { tokens } = Lexer_1.Lexer.scan('<< >> <<');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.LeftShift,
+TokenKind_1.TokenKind.RightShift,
+TokenKind_1.TokenKind.LeftShift,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('reads bitshift assignment operators', () => {
 let { tokens } = Lexer_1.Lexer.scan('<<= >>=');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.LeftShiftEqual,
+TokenKind_1.TokenKind.RightShiftEqual,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('reads comparators', () => {
 let { tokens } = Lexer_1.Lexer.scan('< <= > >= = <>');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.Less,
+TokenKind_1.TokenKind.LessEqual,
+TokenKind_1.TokenKind.Greater,
+TokenKind_1.TokenKind.GreaterEqual,
+TokenKind_1.TokenKind.Equal,
+TokenKind_1.TokenKind.LessGreater,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 }); // non-literals
 describe('string literals', () => {
 it('produces string literal tokens', () => {
 let { tokens } = Lexer_1.Lexer.scan(`"hello world"`);
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([TokenKind_1.TokenKind.StringLiteral, TokenKind_1.TokenKind.Eof]);
 });
 it(`safely escapes " literals`, () => {
 let { tokens } = Lexer_1.Lexer.scan(`"the cat says ""meow"""`);
-chai_1.expect(tokens[0].kind).to.equal(
+(0, chai_1.expect)(tokens[0].kind).to.equal(TokenKind_1.TokenKind.StringLiteral);
 });
 it('captures text to end of line for unterminated strings with LF', () => {
 let { tokens } = Lexer_1.Lexer.scan(`"unterminated!\n`);
-chai_1.expect(tokens[0].kind).to.eql(
+(0, chai_1.expect)(tokens[0].kind).to.eql(TokenKind_1.TokenKind.StringLiteral);
 });
 it('captures text to end of line for unterminated strings with CRLF', () => {
 let { tokens } = Lexer_1.Lexer.scan(`"unterminated!\r\n`);
-chai_1.expect(tokens[0].text).to.equal('"unterminated!');
+(0, chai_1.expect)(tokens[0].text).to.equal('"unterminated!');
 });
 it('disallows multiline strings', () => {
 let { diagnostics } = Lexer_1.Lexer.scan(`"multi-line\n\n`);
-chai_1.expect(diagnostics.map(err => err.message)).to.deep.equal([
+(0, chai_1.expect)(diagnostics.map(err => err.message)).to.deep.equal([
 'Unterminated string at end of line'
 ]);
 });
@@ -338,21 +338,21 @@
 describe('template string literals', () => {
 it('supports escaped chars', () => {
 let { tokens } = Lexer_1.Lexer.scan('`\\n\\`\\r\\n`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
-chai_1.expect(tokens.map(x => x.charCode).filter(x => !!x)).to.eql([
+(0, chai_1.expect)(tokens.map(x => x.charCode).filter(x => !!x)).to.eql([
 10,
 96,
 13,
@@ -361,30 +361,30 @@
 });
 it('prevents expressions when escaping the dollar sign', () => {
 let { tokens } = Lexer_1.Lexer.scan('`\\${just text}`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('supports escaping unicode char codes', () => {
 let { tokens } = Lexer_1.Lexer.scan('`\\c1\\c12\\c123`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
-chai_1.expect(tokens.map(x => x.charCode).filter(x => !!x)).to.eql([
+(0, chai_1.expect)(tokens.map(x => x.charCode).filter(x => !!x)).to.eql([
 1,
 12,
 123
@@ -392,29 +392,29 @@
 });
 it('converts doublequote to EscapedCharCodeLiteral', () => {
 let { tokens } = Lexer_1.Lexer.scan('`"`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
-chai_1.expect(tokens[2].charCode).to.equal(34);
+(0, chai_1.expect)(tokens[2].charCode).to.equal(34);
 });
 it(`safely escapes \` literals`, () => {
 let { tokens } = Lexer_1.Lexer.scan('`the cat says \\`meow\\` a lot`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
-chai_1.expect(tokens.map(x => x.text)).to.eql([
+(0, chai_1.expect)(tokens.map(x => x.text)).to.eql([
 '`',
 'the cat says ',
 '\\`',
@@ -427,27 +427,27 @@
 });
 it('produces template string literal tokens', () => {
 let { tokens } = Lexer_1.Lexer.scan('`hello world`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
-chai_1.expect(tokens[1].text).to.deep.equal('hello world');
+(0, chai_1.expect)(tokens[1].text).to.deep.equal('hello world');
 });
 it('collects quasis outside and expressions inside of template strings', () => {
 let { tokens } = Lexer_1.Lexer.scan('`hello ${"world"}!`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.StringLiteral,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
-chai_1.expect(tokens[1].text).to.deep.equal(`hello `);
+(0, chai_1.expect)(tokens[1].text).to.deep.equal(`hello `);
 });
 it('real example, which is causing issues in the formatter', () => {
 let { tokens } = Lexer_1.Lexer.scan(`
@@ -466,133 +466,133 @@
 \`
 end function
 `);
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Function,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.LeftParen,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.RightParen,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Return,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.EndFunction,
+TokenKind_1.TokenKind.Newline,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('complicated example', () => {
 let { tokens } = Lexer_1.Lexer.scan('`hello ${"world"}!I am a ${"template" + "string"} and I am very ${["pleased"][0]} to meet you ${m.top.getChildCount()}.The end`');
-chai_1.expect(tokens.map(t => t.kind)).to.eql([
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.eql([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.StringLiteral,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.StringLiteral,
+TokenKind_1.TokenKind.Plus,
+TokenKind_1.TokenKind.StringLiteral,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.LeftSquareBracket,
+TokenKind_1.TokenKind.StringLiteral,
+TokenKind_1.TokenKind.RightSquareBracket,
+TokenKind_1.TokenKind.LeftSquareBracket,
+TokenKind_1.TokenKind.IntegerLiteral,
+TokenKind_1.TokenKind.RightSquareBracket,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.LeftParen,
+TokenKind_1.TokenKind.RightParen,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 it('allows multiline strings', () => {
 let { tokens } = Lexer_1.Lexer.scan('`multi-line\n\n`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
-chai_1.expect(tokens.map(x => x.text)).to.eql([
+(0, chai_1.expect)(tokens.map(x => x.text)).to.eql([
 '`',
 'multi-line',
 '\n',
@@ -605,187 +605,187 @@
 });
 it('maintains proper line/column locations for multiline strings', () => {
 let { tokens } = Lexer_1.Lexer.scan('123 `multi\nline\r\nstrings` true\nfalse');
-chai_1.expect(tokens.map(x => {
+(0, chai_1.expect)(tokens.map(x => {
 return {
 range: x.range,
 kind: x.kind
 };
 })).to.eql([
-{ range: vscode_languageserver_1.Range.create(0, 0, 0, 3), kind:
-{ range: vscode_languageserver_1.Range.create(0, 4, 0, 5), kind:
-{ range: vscode_languageserver_1.Range.create(0, 5, 0, 10), kind:
-{ range: vscode_languageserver_1.Range.create(0, 10, 0, 11), kind:
-{ range: vscode_languageserver_1.Range.create(1, 0, 1, 4), kind:
-{ range: vscode_languageserver_1.Range.create(1, 4, 1, 5), kind:
-{ range: vscode_languageserver_1.Range.create(1, 5, 1, 6), kind:
-{ range: vscode_languageserver_1.Range.create(2, 0, 2, 7), kind:
-{ range: vscode_languageserver_1.Range.create(2, 7, 2, 8), kind:
-{ range: vscode_languageserver_1.Range.create(2, 9, 2, 13), kind:
-{ range: vscode_languageserver_1.Range.create(2, 13, 2, 14), kind:
-{ range: vscode_languageserver_1.Range.create(3, 0, 3, 5), kind:
-{ range: vscode_languageserver_1.Range.create(3, 5, 3, 6), kind:
+{ range: vscode_languageserver_1.Range.create(0, 0, 0, 3), kind: TokenKind_1.TokenKind.IntegerLiteral },
+{ range: vscode_languageserver_1.Range.create(0, 4, 0, 5), kind: TokenKind_1.TokenKind.BackTick },
+{ range: vscode_languageserver_1.Range.create(0, 5, 0, 10), kind: TokenKind_1.TokenKind.TemplateStringQuasi },
+{ range: vscode_languageserver_1.Range.create(0, 10, 0, 11), kind: TokenKind_1.TokenKind.EscapedCharCodeLiteral },
+{ range: vscode_languageserver_1.Range.create(1, 0, 1, 4), kind: TokenKind_1.TokenKind.TemplateStringQuasi },
+{ range: vscode_languageserver_1.Range.create(1, 4, 1, 5), kind: TokenKind_1.TokenKind.EscapedCharCodeLiteral },
+{ range: vscode_languageserver_1.Range.create(1, 5, 1, 6), kind: TokenKind_1.TokenKind.EscapedCharCodeLiteral },
+{ range: vscode_languageserver_1.Range.create(2, 0, 2, 7), kind: TokenKind_1.TokenKind.TemplateStringQuasi },
+{ range: vscode_languageserver_1.Range.create(2, 7, 2, 8), kind: TokenKind_1.TokenKind.BackTick },
+{ range: vscode_languageserver_1.Range.create(2, 9, 2, 13), kind: TokenKind_1.TokenKind.True },
+{ range: vscode_languageserver_1.Range.create(2, 13, 2, 14), kind: TokenKind_1.TokenKind.Newline },
+{ range: vscode_languageserver_1.Range.create(3, 0, 3, 5), kind: TokenKind_1.TokenKind.False },
+{ range: vscode_languageserver_1.Range.create(3, 5, 3, 6), kind: TokenKind_1.TokenKind.Eof }
 ]);
 });
 it('Example that tripped up the expression tests', () => {
 let { tokens } = Lexer_1.Lexer.scan('`I am a complex example\n${a.isRunning(["a","b","c"])}\nmore ${m.finish(true)}`');
-chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+(0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.LeftParen,
+TokenKind_1.TokenKind.LeftSquareBracket,
+TokenKind_1.TokenKind.StringLiteral,
+TokenKind_1.TokenKind.Comma,
+TokenKind_1.TokenKind.StringLiteral,
+TokenKind_1.TokenKind.Comma,
+TokenKind_1.TokenKind.StringLiteral,
+TokenKind_1.TokenKind.RightSquareBracket,
+TokenKind_1.TokenKind.RightParen,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.EscapedCharCodeLiteral,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.TemplateStringExpressionBegin,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.Dot,
+TokenKind_1.TokenKind.Identifier,
+TokenKind_1.TokenKind.LeftParen,
+TokenKind_1.TokenKind.True,
+TokenKind_1.TokenKind.RightParen,
+TokenKind_1.TokenKind.TemplateStringExpressionEnd,
+TokenKind_1.TokenKind.TemplateStringQuasi,
+TokenKind_1.TokenKind.BackTick,
+TokenKind_1.TokenKind.Eof
 ]);
 });
 }); // string literals
 describe('double literals', () => {
 it('respects \'#\' suffix', () => {
 let d = Lexer_1.Lexer.scan('123#').tokens[0];
-chai_1.expect(d.kind).to.equal(
-chai_1.expect(d.text).to.eql('123#');
+(0, chai_1.expect)(d.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
+(0, chai_1.expect)(d.text).to.eql('123#');
 });
 it('forces literals >= 10 digits into doubles', () => {
 let d = Lexer_1.Lexer.scan('0000000005').tokens[0];
-chai_1.expect(d.kind).to.equal(
-chai_1.expect(d.text).to.eql('0000000005');
+(0, chai_1.expect)(d.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
+(0, chai_1.expect)(d.text).to.eql('0000000005');
 });
 it('forces literals with \'D\' in exponent into doubles', () => {
 let d = Lexer_1.Lexer.scan('2.5d3').tokens[0];
-chai_1.expect(d.kind).to.equal(
-chai_1.expect(d.text).to.eql('2.5d3');
+(0, chai_1.expect)(d.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
+(0, chai_1.expect)(d.text).to.eql('2.5d3');
 });
 it('allows digits before `.` to be elided', () => {
 let f = Lexer_1.Lexer.scan('.123#').tokens[0];
-chai_1.expect(f.kind).to.equal(
-chai_1.expect(f.text).to.eql('.123#');
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
+(0, chai_1.expect)(f.text).to.eql('.123#');
 });
 it('allows digits after `.` to be elided', () => {
 let f = Lexer_1.Lexer.scan('12.#').tokens[0];
-chai_1.expect(f.kind).to.equal(
-chai_1.expect(f.text).to.eql('12.#');
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.DoubleLiteral);
+(0, chai_1.expect)(f.text).to.eql('12.#');
 });
 });
 describe('float literals', () => {
 it('respects \'!\' suffix', () => {
 let f = Lexer_1.Lexer.scan('0.00000008!').tokens[0];
-chai_1.expect(f.kind).to.equal(
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
 // Floating precision will make this *not* equal
-chai_1.expect(f.text).not.to.equal(8e-8);
-chai_1.expect(f.text).to.eql('0.00000008!');
+(0, chai_1.expect)(f.text).not.to.equal(8e-8);
+(0, chai_1.expect)(f.text).to.eql('0.00000008!');
 });
 it('forces literals with a decimal into floats', () => {
 let f = Lexer_1.Lexer.scan('1.0').tokens[0];
-chai_1.expect(f.kind).to.equal(
-chai_1.expect(f.text).to.equal('1.0');
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
+(0, chai_1.expect)(f.text).to.equal('1.0');
 });
 it('forces literals with \'E\' in exponent into floats', () => {
 let f = Lexer_1.Lexer.scan('2.5e3').tokens[0];
-chai_1.expect(f.kind).to.equal(
-chai_1.expect(f.text).to.eql('2.5e3');
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
+(0, chai_1.expect)(f.text).to.eql('2.5e3');
 });
 it('supports larger-than-supported-precision floats to be defined with exponents', () => {
 let f = Lexer_1.Lexer.scan('2.3659475627512424e-38').tokens[0];
-chai_1.expect(f.kind).to.equal(
-chai_1.expect(f.text).to.eql('2.3659475627512424e-38');
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
+(0, chai_1.expect)(f.text).to.eql('2.3659475627512424e-38');
 });
 it('allows digits before `.` to be elided', () => {
 let f = Lexer_1.Lexer.scan('.123').tokens[0];
-chai_1.expect(f.kind).to.equal(
-chai_1.expect(f.text).to.equal('.123');
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
+(0, chai_1.expect)(f.text).to.equal('.123');
 });
 it('allows digits after `.` to be elided', () => {
 let f = Lexer_1.Lexer.scan('12.').tokens[0];
-chai_1.expect(f.kind).to.equal(
-chai_1.expect(f.text).to.equal('12.');
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
+(0, chai_1.expect)(f.text).to.equal('12.');
 });
 });
 describe('long integer literals', () => {
 it('respects \'&\' suffix', () => {
 let f = Lexer_1.Lexer.scan('1&').tokens[0];
-chai_1.expect(f.kind).to.equal(
-chai_1.expect(f.text).to.eql('1&');
+(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.LongIntegerLiteral);
|
|
732
|
+
(0, chai_1.expect)(f.text).to.eql('1&');
|
|
733
733
|
});
|
|
734
734
|
it('supports hexadecimal literals', () => {
|
|
735
735
|
let i = Lexer_1.Lexer.scan('&hf00d&').tokens[0];
|
|
736
|
-
chai_1.expect(i.kind).to.equal(
|
|
737
|
-
chai_1.expect(i.text).to.equal('&hf00d&');
|
|
736
|
+
(0, chai_1.expect)(i.kind).to.equal(TokenKind_1.TokenKind.LongIntegerLiteral);
|
|
737
|
+
(0, chai_1.expect)(i.text).to.equal('&hf00d&');
|
|
738
738
|
});
|
|
739
739
|
it('allows very long Int64 literals', () => {
|
|
740
740
|
let li = Lexer_1.Lexer.scan('9876543210&').tokens[0];
|
|
741
|
-
chai_1.expect(li.kind).to.equal(
|
|
742
|
-
chai_1.expect(li.text).to.equal('9876543210&');
|
|
741
|
+
(0, chai_1.expect)(li.kind).to.equal(TokenKind_1.TokenKind.LongIntegerLiteral);
|
|
742
|
+
(0, chai_1.expect)(li.text).to.equal('9876543210&');
|
|
743
743
|
});
|
|
744
744
|
it('forces literals with \'&\' suffix into Int64s', () => {
|
|
745
745
|
let li = Lexer_1.Lexer.scan('123&').tokens[0];
|
|
746
|
-
chai_1.expect(li.kind).to.equal(
|
|
747
|
-
chai_1.expect(li.text).to.deep.equal('123&');
|
|
746
|
+
(0, chai_1.expect)(li.kind).to.equal(TokenKind_1.TokenKind.LongIntegerLiteral);
|
|
747
|
+
(0, chai_1.expect)(li.text).to.deep.equal('123&');
|
|
748
748
|
});
|
|
749
749
|
});
|
|
750
750
|
describe('integer literals', () => {
|
|
751
751
|
it('respects \'%\' suffix', () => {
|
|
752
752
|
let f = Lexer_1.Lexer.scan('1%').tokens[0];
|
|
753
|
-
chai_1.expect(f.kind).to.equal(
|
|
754
|
-
chai_1.expect(f.text).to.eql('1%');
|
|
753
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.IntegerLiteral);
|
|
754
|
+
(0, chai_1.expect)(f.text).to.eql('1%');
|
|
755
755
|
});
|
|
756
756
|
it('does not allow decimal numbers to end with %', () => {
|
|
757
757
|
let f = Lexer_1.Lexer.scan('1.2%').tokens[0];
|
|
758
|
-
chai_1.expect(f.kind).to.equal(
|
|
759
|
-
chai_1.expect(f.text).to.eql('1.2');
|
|
758
|
+
(0, chai_1.expect)(f.kind).to.equal(TokenKind_1.TokenKind.FloatLiteral);
|
|
759
|
+
(0, chai_1.expect)(f.text).to.eql('1.2');
|
|
760
760
|
});
|
|
761
761
|
it('supports hexadecimal literals', () => {
|
|
762
762
|
let i = Lexer_1.Lexer.scan('&hFf').tokens[0];
|
|
763
|
-
chai_1.expect(i.kind).to.equal(
|
|
764
|
-
chai_1.expect(i.text).to.deep.equal('&hFf');
|
|
763
|
+
(0, chai_1.expect)(i.kind).to.equal(TokenKind_1.TokenKind.IntegerLiteral);
|
|
764
|
+
(0, chai_1.expect)(i.text).to.deep.equal('&hFf');
|
|
765
765
|
});
|
|
766
766
|
it('falls back to a regular integer', () => {
|
|
767
767
|
let i = Lexer_1.Lexer.scan('123').tokens[0];
|
|
768
|
-
chai_1.expect(i.kind).to.equal(
|
|
769
|
-
chai_1.expect(i.text).to.deep.equal('123');
|
|
768
|
+
(0, chai_1.expect)(i.kind).to.equal(TokenKind_1.TokenKind.IntegerLiteral);
|
|
769
|
+
(0, chai_1.expect)(i.text).to.deep.equal('123');
|
|
770
770
|
});
|
|
771
771
|
});
|
|
772
772
|
describe('types', () => {
|
|
773
773
|
it('captures type tokens', () => {
|
|
774
|
-
chai_1.expect(Lexer_1.Lexer.scan(`
|
|
774
|
+
(0, chai_1.expect)(Lexer_1.Lexer.scan(`
|
|
775
775
|
void boolean integer longinteger float double string object interface invalid dynamic
|
|
776
776
|
`.trim()).tokens.map(x => x.kind)).to.eql([
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
777
|
+
TokenKind_1.TokenKind.Void,
|
|
778
|
+
TokenKind_1.TokenKind.Boolean,
|
|
779
|
+
TokenKind_1.TokenKind.Integer,
|
|
780
|
+
TokenKind_1.TokenKind.LongInteger,
|
|
781
|
+
TokenKind_1.TokenKind.Float,
|
|
782
|
+
TokenKind_1.TokenKind.Double,
|
|
783
|
+
TokenKind_1.TokenKind.String,
|
|
784
|
+
TokenKind_1.TokenKind.Object,
|
|
785
|
+
TokenKind_1.TokenKind.Interface,
|
|
786
|
+
TokenKind_1.TokenKind.Invalid,
|
|
787
|
+
TokenKind_1.TokenKind.Dynamic,
|
|
788
|
+
TokenKind_1.TokenKind.Eof
|
|
789
789
|
]);
|
|
790
790
|
});
|
|
791
791
|
});
|
|
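Throughout this file, the existing assertions change only in their compiled call shape: `chai_1.expect(...)` becomes `(0, chai_1.expect)(...)` (and `Token_1.isToken(...)` becomes `(0, Token_1.isToken)(...)` further down). This is the indirect-call pattern that newer TypeScript compilers emit for calls to imported functions, so the hunks here most likely reflect a compiler upgrade rather than any hand-written source change. A minimal sketch (illustrative only, not code from this package) of what the `(0, fn)` indirection does:

    // Calling through a namespace object passes that object as `this`;
    // the comma operator yields the bare function, severing that binding.
    'use strict';
    const ns = {
        whoAmI() { return this; }
    };
    console.log(ns.whoAmI() === ns);      // true: `ns` is the receiver
    console.log((0, ns.whoAmI)() === ns); // false: `this` is undefined here (strict mode)

The behavior of the tests is unaffected; only the emitted JavaScript differs.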
@@ -794,59 +794,59 @@ describe('lexer', () => {
         // test just a sample of single-word reserved words for now.
         // if we find any that we've missed
         let { tokens } = Lexer_1.Lexer.scan('and then or if else endif return true false line_num');
-        chai_1.expect(tokens.map(w => w.kind)).to.deep.equal([
+        (0, chai_1.expect)(tokens.map(w => w.kind)).to.deep.equal([
             TokenKind_1.TokenKind.And,
             TokenKind_1.TokenKind.Then,
             TokenKind_1.TokenKind.Or,
             TokenKind_1.TokenKind.If,
             TokenKind_1.TokenKind.Else,
             TokenKind_1.TokenKind.EndIf,
             TokenKind_1.TokenKind.Return,
             TokenKind_1.TokenKind.True,
             TokenKind_1.TokenKind.False,
             TokenKind_1.TokenKind.LineNumLiteral,
             TokenKind_1.TokenKind.Eof
         ]);
     });
     it('matches multi-word keywords', () => {
         let { tokens } = Lexer_1.Lexer.scan('end if end while End Sub end Function Exit wHILe');
-        chai_1.expect(tokens.map(w => w.kind)).to.deep.equal([
+        (0, chai_1.expect)(tokens.map(w => w.kind)).to.deep.equal([
             TokenKind_1.TokenKind.EndIf,
             TokenKind_1.TokenKind.EndWhile,
             TokenKind_1.TokenKind.EndSub,
             TokenKind_1.TokenKind.EndFunction,
             TokenKind_1.TokenKind.ExitWhile,
             TokenKind_1.TokenKind.Eof
         ]);
     });
     it('accepts \'exit for\' but not \'exitfor\'', () => {
         let { tokens } = Lexer_1.Lexer.scan('exit for exitfor');
-        chai_1.expect(tokens.map(w => w.kind)).to.deep.equal([
+        (0, chai_1.expect)(tokens.map(w => w.kind)).to.deep.equal([
             TokenKind_1.TokenKind.ExitFor,
             TokenKind_1.TokenKind.Identifier,
             TokenKind_1.TokenKind.Eof
         ]);
     });
     it('matches keywords with silly capitalization', () => {
         let { tokens } = Lexer_1.Lexer.scan('iF ELSE eNDIf FUncTioN');
-        chai_1.expect(tokens.map(w => w.kind)).to.deep.equal([
+        (0, chai_1.expect)(tokens.map(w => w.kind)).to.deep.equal([
             TokenKind_1.TokenKind.If,
             TokenKind_1.TokenKind.Else,
             TokenKind_1.TokenKind.EndIf,
             TokenKind_1.TokenKind.Function,
             TokenKind_1.TokenKind.Eof
         ]);
     });
     it('allows alpha-numeric (plus \'_\') identifiers', () => {
         let identifier = Lexer_1.Lexer.scan('_abc_123_').tokens[0];
-        chai_1.expect(identifier.kind).to.equal(TokenKind_1.TokenKind.Identifier);
-        chai_1.expect(identifier.text).to.equal('_abc_123_');
+        (0, chai_1.expect)(identifier.kind).to.equal(TokenKind_1.TokenKind.Identifier);
+        (0, chai_1.expect)(identifier.text).to.equal('_abc_123_');
     });
     it('allows identifiers with trailing type designators', () => {
         let { tokens } = Lexer_1.Lexer.scan('lorem$ ipsum% dolor! sit# amet&');
         let identifiers = tokens.filter(t => t.kind !== TokenKind_1.TokenKind.Eof);
-        chai_1.expect(identifiers.every(t => t.kind === TokenKind_1.TokenKind.Identifier));
-        chai_1.expect(identifiers.map(t => t.text)).to.deep.equal([
+        (0, chai_1.expect)(identifiers.every(t => t.kind === TokenKind_1.TokenKind.Identifier));
+        (0, chai_1.expect)(identifiers.map(t => t.text)).to.deep.equal([
             'lorem$',
             'ipsum%',
             'dolor!',
@@ -858,20 +858,20 @@ describe('lexer', () => {
     describe('conditional compilation', () => {
         it('reads constant declarations', () => {
             let { tokens } = Lexer_1.Lexer.scan('#const foo true');
-            chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+            (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
                 TokenKind_1.TokenKind.HashConst,
                 TokenKind_1.TokenKind.Identifier,
                 TokenKind_1.TokenKind.True,
                 TokenKind_1.TokenKind.Eof
             ]);
         });
         it('reads constant aliases', () => {
             let { tokens } = Lexer_1.Lexer.scan('#const bar foo');
-            chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+            (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
                 TokenKind_1.TokenKind.HashConst,
                 TokenKind_1.TokenKind.Identifier,
                 TokenKind_1.TokenKind.Identifier,
                 TokenKind_1.TokenKind.Eof
             ]);
         });
         it('reads conditional directives', () => {
@@ -885,19 +885,19 @@ describe('lexer', () => {
             `, {
                 includeWhitespace: false
             });
-            chai_1.expect(tokens.map(t => t.kind).filter(x => x !== TokenKind_1.TokenKind.Newline)).to.deep.equal([
+            (0, chai_1.expect)(tokens.map(t => t.kind).filter(x => x !== TokenKind_1.TokenKind.Newline)).to.deep.equal([
                 TokenKind_1.TokenKind.HashIf,
                 TokenKind_1.TokenKind.HashElseIf,
                 TokenKind_1.TokenKind.HashElseIf,
                 TokenKind_1.TokenKind.HashElse,
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.Eof
             ]);
         });
         it('treats text "constructor" as an identifier', () => {
             let lexer = Lexer_1.Lexer.scan(`function constructor()\nend function`);
-            chai_1.expect(lexer.tokens[1].kind).to.equal(TokenKind_1.TokenKind.Identifier);
+            (0, chai_1.expect)(lexer.tokens[1].kind).to.equal(TokenKind_1.TokenKind.Identifier);
         });
         it('reads upper case conditional directives', () => {
             let { tokens } = Lexer_1.Lexer.scan(`
@@ -910,45 +910,45 @@ describe('lexer', () => {
             `, {
                 includeWhitespace: false
             });
-            chai_1.expect(tokens.map(t => t.kind).filter(x => x !== TokenKind_1.TokenKind.Newline)).to.deep.equal([
+            (0, chai_1.expect)(tokens.map(t => t.kind).filter(x => x !== TokenKind_1.TokenKind.Newline)).to.deep.equal([
                 TokenKind_1.TokenKind.HashIf,
                 TokenKind_1.TokenKind.HashElseIf,
                 TokenKind_1.TokenKind.HashElseIf,
                 TokenKind_1.TokenKind.HashElse,
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.Eof
             ]);
         });
         it('supports various spacings between #endif', () => {
             let { tokens } = Lexer_1.Lexer.scan('#endif #end if #end\tif #end if #end\t\t if');
-            chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+            (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.HashEndIf,
                 TokenKind_1.TokenKind.Eof
             ]);
         });
         it('reads forced compilation diagnostics with messages', () => {
             let { tokens } = Lexer_1.Lexer.scan('#error a message goes here\n', {
                 includeWhitespace: true
             });
-            chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+            (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
                 TokenKind_1.TokenKind.HashError,
                 TokenKind_1.TokenKind.Whitespace,
                 TokenKind_1.TokenKind.HashErrorMessage,
                 TokenKind_1.TokenKind.Newline,
                 TokenKind_1.TokenKind.Eof
             ]);
-            chai_1.expect(tokens[2].text).to.equal('a message goes here');
+            (0, chai_1.expect)(tokens[2].text).to.equal('a message goes here');
         });
     });
     describe('location tracking', () => {
         it('tracks starting and ending locations including whitespace', () => {
             let { tokens } = Lexer_1.Lexer.scan(`sub foo()\n print "bar"\r\nend sub`, { includeWhitespace: true });
-            chai_1.expect(tokens.map(t => t.range)).to.eql([
+            (0, chai_1.expect)(tokens.map(t => t.range)).to.eql([
                 vscode_languageserver_1.Range.create(0, 0, 0, 3),
                 vscode_languageserver_1.Range.create(0, 3, 0, 4),
                 vscode_languageserver_1.Range.create(0, 4, 0, 7),
@@ -966,7 +966,7 @@ describe('lexer', () => {
         });
         it('tracks starting and ending locations excluding whitespace', () => {
             let { tokens } = Lexer_1.Lexer.scan(`sub foo()\n print "bar"\r\nend sub`, { includeWhitespace: false });
-            chai_1.expect(tokens.map(t => t.range)).to.eql([
+            (0, chai_1.expect)(tokens.map(t => t.range)).to.eql([
                 vscode_languageserver_1.Range.create(0, 0, 0, 3),
                 vscode_languageserver_1.Range.create(0, 4, 0, 7),
                 vscode_languageserver_1.Range.create(0, 7, 0, 8),
@@ -983,30 +983,30 @@ describe('lexer', () => {
     describe('two word keywords', () => {
         it('supports various spacing between for each', () => {
             let { tokens } = Lexer_1.Lexer.scan('for each for each for each for\teach for\t each for \teach for \t each');
-            chai_1.expect(tokens.map(t => t.kind)).to.deep.equal([
+            (0, chai_1.expect)(tokens.map(t => t.kind)).to.deep.equal([
                 TokenKind_1.TokenKind.ForEach,
                 TokenKind_1.TokenKind.ForEach,
                 TokenKind_1.TokenKind.ForEach,
                 TokenKind_1.TokenKind.ForEach,
                 TokenKind_1.TokenKind.ForEach,
                 TokenKind_1.TokenKind.ForEach,
                 TokenKind_1.TokenKind.ForEach,
                 TokenKind_1.TokenKind.Eof
             ]);
         });
     });
     it('detects rem when used as keyword', () => {
         let { tokens } = Lexer_1.Lexer.scan('person.rem=true');
-        chai_1.expect(tokens.map(t => t.kind)).to.eql([
+        (0, chai_1.expect)(tokens.map(t => t.kind)).to.eql([
             TokenKind_1.TokenKind.Identifier,
             TokenKind_1.TokenKind.Dot,
             TokenKind_1.TokenKind.Identifier,
             TokenKind_1.TokenKind.Equal,
             TokenKind_1.TokenKind.True,
             TokenKind_1.TokenKind.Eof
         ]);
         //verify the location of `rem`
-        chai_1.expect(tokens.map(t => [t.range.start.character, t.range.end.character])).to.eql([
+        (0, chai_1.expect)(tokens.map(t => [t.range.start.character, t.range.end.character])).to.eql([
             [0, 6],
             [6, 7],
             [7, 10],
@@ -1018,61 +1018,61 @@ describe('lexer', () => {
     describe('isToken', () => {
         it('works', () => {
             let range = vscode_languageserver_1.Range.create(0, 0, 0, 2);
-            chai_1.expect(Token_1.isToken({ kind: TokenKind_1.TokenKind.And, text: 'and', range: range })).is.true;
-            chai_1.expect(Token_1.isToken({ text: 'and', range: range })).is.false;
+            (0, chai_1.expect)((0, Token_1.isToken)({ kind: TokenKind_1.TokenKind.And, text: 'and', range: range })).is.true;
+            (0, chai_1.expect)((0, Token_1.isToken)({ text: 'and', range: range })).is.false;
         });
     });
     it('recognizes class-related keywords', () => {
-        chai_1.expect(Lexer_1.Lexer.scan('class public protected private end class endclass new override').tokens.map(x => x.kind)).to.eql([
+        (0, chai_1.expect)(Lexer_1.Lexer.scan('class public protected private end class endclass new override').tokens.map(x => x.kind)).to.eql([
            TokenKind_1.TokenKind.Class,
            TokenKind_1.TokenKind.Public,
            TokenKind_1.TokenKind.Protected,
            TokenKind_1.TokenKind.Private,
            TokenKind_1.TokenKind.EndClass,
            TokenKind_1.TokenKind.EndClass,
            TokenKind_1.TokenKind.New,
            TokenKind_1.TokenKind.Override,
            TokenKind_1.TokenKind.Eof
        ]);
     });
     describe('whitespace', () => {
         it('preserves the exact number of whitespace characters', () => {
             let { tokens } = Lexer_1.Lexer.scan(' ', { includeWhitespace: true });
-            chai_1.expect(tokens[0]).to.include({
+            (0, chai_1.expect)(tokens[0]).to.include({
                 kind: TokenKind_1.TokenKind.Whitespace,
                 text: ' '
             });
         });
         it('tokenizes whitespace between things', () => {
             let { tokens } = Lexer_1.Lexer.scan('sub main ( ) \n end sub', { includeWhitespace: true });
-            chai_1.expect(tokens.map(x => x.kind)).to.eql([
+            (0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
                 TokenKind_1.TokenKind.Sub,
                 TokenKind_1.TokenKind.Whitespace,
                 TokenKind_1.TokenKind.Identifier,
                 TokenKind_1.TokenKind.Whitespace,
                 TokenKind_1.TokenKind.LeftParen,
                 TokenKind_1.TokenKind.Whitespace,
                 TokenKind_1.TokenKind.RightParen,
                 TokenKind_1.TokenKind.Whitespace,
                 TokenKind_1.TokenKind.Newline,
                 TokenKind_1.TokenKind.Whitespace,
                 TokenKind_1.TokenKind.EndSub,
                 TokenKind_1.TokenKind.Eof
             ]);
         });
     });
     it('identifies brighterscript source literals', () => {
         let { tokens } = Lexer_1.Lexer.scan('LINE_NUM SOURCE_FILE_PATH SOURCE_LINE_NUM FUNCTION_NAME SOURCE_FUNCTION_NAME SOURCE_LOCATION PKG_PATH PKG_LOCATION');
-        chai_1.expect(tokens.map(x => x.kind)).to.eql([
+        (0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
            TokenKind_1.TokenKind.LineNumLiteral,
            TokenKind_1.TokenKind.SourceFilePathLiteral,
            TokenKind_1.TokenKind.SourceLineNumLiteral,
            TokenKind_1.TokenKind.FunctionNameLiteral,
            TokenKind_1.TokenKind.SourceFunctionNameLiteral,
            TokenKind_1.TokenKind.SourceLocationLiteral,
            TokenKind_1.TokenKind.PkgPathLiteral,
            TokenKind_1.TokenKind.PkgLocationLiteral,
            TokenKind_1.TokenKind.Eof
        ]);
     });
     it('properly tracks leadingWhitespace', () => {
@@ -1084,17 +1084,17 @@ describe('lexer', () => {
             end sub
         `;
         const { tokens } = Lexer_1.Lexer.scan(text, { includeWhitespace: false });
-        chai_1.expect(util_1.default.tokensToString(tokens)).to.equal(text);
+        (0, chai_1.expect)(util_1.default.tokensToString(tokens)).to.equal(text);
     });
     it('properly detects try/catch tokens', () => {
         const { tokens } = Lexer_1.Lexer.scan(`try catch endtry end try throw`, { includeWhitespace: false });
-        chai_1.expect(tokens.map(x => x.kind)).to.eql([
+        (0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
            TokenKind_1.TokenKind.Try,
            TokenKind_1.TokenKind.Catch,
            TokenKind_1.TokenKind.EndTry,
            TokenKind_1.TokenKind.EndTry,
            TokenKind_1.TokenKind.Throw,
            TokenKind_1.TokenKind.Eof
        ]);
     });
     describe('regular expression literals', () => {
@@ -1105,7 +1105,7 @@ describe('lexer', () => {
                 const { tokens } = Lexer_1.Lexer.scan(regexp);
                 results.push(tokens[0].text);
             }
-            chai_1.expect(results).to.eql(regexps);
+            (0, chai_1.expect)(results).to.eql(regexps);
         }
         it('recognizes regex literals', () => {
             testRegex(/simple/, /SimpleWithValidFlags/g, /UnknownFlags/gi, /with spaces/s, /with(parens)and[squarebraces]/,
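The hunk below adds two regression tests around the `/` character, which is ambiguous in any grammar that has both division and regex literals: `one = 1/2 + 1/4 + 1/4` must lex as divisions, while `speak(/a/)` must lex as a regex literal. A common way to disambiguate (shown here only as an illustrative sketch with made-up kind names, not brighterscript's actual implementation) is to inspect the previous meaningful token: if it can end an expression, the slash is division; otherwise it starts a regex.

    // Illustrative heuristic only; token kind names here are hypothetical.
    const CAN_END_EXPRESSION = new Set([
        'Identifier', 'IntegerLiteral', 'FloatLiteral',
        'StringLiteral', 'RightParen', 'RightSquareBracket'
    ]);
    function slashStartsRegex(previousTokenKind) {
        // `one = 1/2`: previous token is IntegerLiteral -> division
        // `speak(/a/)`: previous token is LeftParen -> regex literal
        return previousTokenKind === undefined || !CAN_END_EXPRESSION.has(previousTokenKind);
    }
    console.log(slashStartsRegex('IntegerLiteral')); // false (division)
    console.log(slashStartsRegex('LeftParen'));      // true (regex)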
@@ -1114,6 +1114,36 @@ describe('lexer', () => {
                 //captures quote char
                 /"/);
         });
+        it('does not capture multiple divisions on one line as regex', () => {
+            const { tokens } = Lexer_1.Lexer.scan(`one = 1/2 + 1/4 + 1/4`, {
+                includeWhitespace: false
+            });
+            (0, chai_1.expect)(tokens.map(x => x.kind)).to.eql([
+                TokenKind_1.TokenKind.Identifier,
+                TokenKind_1.TokenKind.Equal,
+                TokenKind_1.TokenKind.IntegerLiteral,
+                TokenKind_1.TokenKind.Forwardslash,
+                TokenKind_1.TokenKind.IntegerLiteral,
+                TokenKind_1.TokenKind.Plus,
+                TokenKind_1.TokenKind.IntegerLiteral,
+                TokenKind_1.TokenKind.Forwardslash,
+                TokenKind_1.TokenKind.IntegerLiteral,
+                TokenKind_1.TokenKind.Plus,
+                TokenKind_1.TokenKind.IntegerLiteral,
+                TokenKind_1.TokenKind.Forwardslash,
+                TokenKind_1.TokenKind.IntegerLiteral,
+                TokenKind_1.TokenKind.Eof
+            ]);
+        });
+        it('only captures alphanumeric flags', () => {
+            (0, chai_1.expect)(Lexer_1.Lexer.scan('speak(/a/)').tokens.map(x => x.kind)).to.eql([
+                TokenKind_1.TokenKind.Identifier,
+                TokenKind_1.TokenKind.LeftParen,
+                TokenKind_1.TokenKind.RegexLiteral,
+                TokenKind_1.TokenKind.RightParen,
+                TokenKind_1.TokenKind.Eof
+            ]);
+        });
         it('handles escape characters properly', () => {
             testRegex(
                 //an escaped forward slash right next to the end-regexp forwardslash