@zzzen/pyright-internal 1.2.0-dev.20230430 → 1.2.0-dev.20230514

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (231)
  1. package/dist/analyzer/backgroundAnalysisProgram.d.ts +4 -1
  2. package/dist/analyzer/backgroundAnalysisProgram.js +12 -0
  3. package/dist/analyzer/backgroundAnalysisProgram.js.map +1 -1
  4. package/dist/analyzer/checker.d.ts +1 -0
  5. package/dist/analyzer/checker.js +89 -6
  6. package/dist/analyzer/checker.js.map +1 -1
  7. package/dist/analyzer/constraintSolver.js +14 -15
  8. package/dist/analyzer/constraintSolver.js.map +1 -1
  9. package/dist/analyzer/constructors.d.ts +6 -0
  10. package/dist/analyzer/constructors.js +513 -0
  11. package/dist/analyzer/constructors.js.map +1 -0
  12. package/dist/analyzer/dataClasses.js +86 -2
  13. package/dist/analyzer/dataClasses.js.map +1 -1
  14. package/dist/analyzer/docStringConversion.js +1 -1
  15. package/dist/analyzer/docStringConversion.js.map +1 -1
  16. package/dist/analyzer/enums.js +62 -8
  17. package/dist/analyzer/enums.js.map +1 -1
  18. package/dist/analyzer/importResolver.js +47 -29
  19. package/dist/analyzer/importResolver.js.map +1 -1
  20. package/dist/analyzer/importStatementUtils.d.ts +2 -2
  21. package/dist/analyzer/importStatementUtils.js.map +1 -1
  22. package/dist/analyzer/namedTuples.js +3 -6
  23. package/dist/analyzer/namedTuples.js.map +1 -1
  24. package/dist/analyzer/operations.d.ts +16 -0
  25. package/dist/analyzer/operations.js +749 -0
  26. package/dist/analyzer/operations.js.map +1 -0
  27. package/dist/analyzer/parseTreeUtils.d.ts +4 -2
  28. package/dist/analyzer/parseTreeUtils.js +32 -1
  29. package/dist/analyzer/parseTreeUtils.js.map +1 -1
  30. package/dist/analyzer/patternMatching.js +16 -0
  31. package/dist/analyzer/patternMatching.js.map +1 -1
  32. package/dist/analyzer/program.d.ts +11 -33
  33. package/dist/analyzer/program.js +73 -735
  34. package/dist/analyzer/program.js.map +1 -1
  35. package/dist/analyzer/protocols.js +1 -1
  36. package/dist/analyzer/protocols.js.map +1 -1
  37. package/dist/analyzer/service.d.ts +5 -21
  38. package/dist/analyzer/service.js +26 -33
  39. package/dist/analyzer/service.js.map +1 -1
  40. package/dist/analyzer/sourceFile.d.ts +9 -41
  41. package/dist/analyzer/sourceFile.js +219 -238
  42. package/dist/analyzer/sourceFile.js.map +1 -1
  43. package/dist/analyzer/sourceFileInfoUtils.d.ts +3 -9
  44. package/dist/analyzer/sourceFileInfoUtils.js.map +1 -1
  45. package/dist/analyzer/symbol.d.ts +3 -1
  46. package/dist/analyzer/symbol.js +5 -0
  47. package/dist/analyzer/symbol.js.map +1 -1
  48. package/dist/analyzer/typeEvaluator.js +460 -1425
  49. package/dist/analyzer/typeEvaluator.js.map +1 -1
  50. package/dist/analyzer/typeEvaluatorTypes.d.ts +42 -7
  51. package/dist/analyzer/typeEvaluatorTypes.js +33 -1
  52. package/dist/analyzer/typeEvaluatorTypes.js.map +1 -1
  53. package/dist/analyzer/typeGuards.js +2 -8
  54. package/dist/analyzer/typeGuards.js.map +1 -1
  55. package/dist/analyzer/typePrinter.d.ts +3 -3
  56. package/dist/analyzer/typePrinter.js +247 -100
  57. package/dist/analyzer/typePrinter.js.map +1 -1
  58. package/dist/analyzer/typeUtils.d.ts +14 -7
  59. package/dist/analyzer/typeUtils.js +204 -49
  60. package/dist/analyzer/typeUtils.js.map +1 -1
  61. package/dist/analyzer/typeVarContext.d.ts +6 -7
  62. package/dist/analyzer/typeVarContext.js +21 -32
  63. package/dist/analyzer/typeVarContext.js.map +1 -1
  64. package/dist/analyzer/typedDicts.js +2 -2
  65. package/dist/analyzer/typedDicts.js.map +1 -1
  66. package/dist/analyzer/types.d.ts +7 -4
  67. package/dist/analyzer/types.js +20 -10
  68. package/dist/analyzer/types.js.map +1 -1
  69. package/dist/backgroundAnalysisBase.d.ts +1 -1
  70. package/dist/backgroundAnalysisBase.js +16 -0
  71. package/dist/backgroundAnalysisBase.js.map +1 -1
  72. package/dist/commands/dumpFileDebugInfoCommand.js +0 -1
  73. package/dist/commands/dumpFileDebugInfoCommand.js.map +1 -1
  74. package/dist/common/extensibility.d.ts +28 -4
  75. package/dist/common/extensibility.js.map +1 -1
  76. package/dist/common/logTracker.d.ts +2 -0
  77. package/dist/common/logTracker.js +8 -1
  78. package/dist/common/logTracker.js.map +1 -1
  79. package/dist/common/lspUtils.d.ts +4 -1
  80. package/dist/common/lspUtils.js +38 -1
  81. package/dist/common/lspUtils.js.map +1 -1
  82. package/dist/common/pathUtils.d.ts +11 -11
  83. package/dist/common/pathUtils.js.map +1 -1
  84. package/dist/common/pythonVersion.d.ts +2 -1
  85. package/dist/common/pythonVersion.js +1 -0
  86. package/dist/common/pythonVersion.js.map +1 -1
  87. package/dist/common/workspaceEditUtils.d.ts +8 -8
  88. package/dist/common/workspaceEditUtils.js +10 -10
  89. package/dist/common/workspaceEditUtils.js.map +1 -1
  90. package/dist/languageServerBase.d.ts +3 -7
  91. package/dist/languageServerBase.js +41 -73
  92. package/dist/languageServerBase.js.map +1 -1
  93. package/dist/languageService/autoImporter.d.ts +50 -51
  94. package/dist/languageService/autoImporter.js +125 -210
  95. package/dist/languageService/autoImporter.js.map +1 -1
  96. package/dist/languageService/callHierarchyProvider.d.ts +1 -1
  97. package/dist/languageService/callHierarchyProvider.js +11 -37
  98. package/dist/languageService/callHierarchyProvider.js.map +1 -1
  99. package/dist/languageService/completionProvider.d.ts +39 -81
  100. package/dist/languageService/completionProvider.js +572 -801
  101. package/dist/languageService/completionProvider.js.map +1 -1
  102. package/dist/languageService/documentHighlightProvider.js +1 -1
  103. package/dist/languageService/documentHighlightProvider.js.map +1 -1
  104. package/dist/languageService/documentSymbolCollector.d.ts +6 -7
  105. package/dist/languageService/documentSymbolCollector.js +47 -28
  106. package/dist/languageService/documentSymbolCollector.js.map +1 -1
  107. package/dist/languageService/documentSymbolProvider.d.ts +13 -35
  108. package/dist/languageService/documentSymbolProvider.js +52 -264
  109. package/dist/languageService/documentSymbolProvider.js.map +1 -1
  110. package/dist/languageService/hoverProvider.d.ts +5 -6
  111. package/dist/languageService/hoverProvider.js +40 -132
  112. package/dist/languageService/hoverProvider.js.map +1 -1
  113. package/dist/languageService/referencesProvider.d.ts +6 -11
  114. package/dist/languageService/referencesProvider.js +23 -17
  115. package/dist/languageService/referencesProvider.js.map +1 -1
  116. package/dist/languageService/renameProvider.d.ts +16 -0
  117. package/dist/languageService/renameProvider.js +139 -0
  118. package/dist/languageService/renameProvider.js.map +1 -0
  119. package/dist/languageService/symbolIndexer.d.ts +31 -0
  120. package/dist/languageService/symbolIndexer.js +105 -0
  121. package/dist/languageService/symbolIndexer.js.map +1 -0
  122. package/dist/languageService/tooltipUtils.d.ts +8 -1
  123. package/dist/languageService/tooltipUtils.js +102 -1
  124. package/dist/languageService/tooltipUtils.js.map +1 -1
  125. package/dist/languageService/workspaceSymbolProvider.d.ts +17 -0
  126. package/dist/languageService/workspaceSymbolProvider.js +133 -0
  127. package/dist/languageService/workspaceSymbolProvider.js.map +1 -0
  128. package/dist/localization/localize.d.ts +33 -15
  129. package/dist/localization/localize.js +13 -7
  130. package/dist/localization/localize.js.map +1 -1
  131. package/dist/localization/package.nls.en-us.json +14 -7
  132. package/dist/parser/parser.js +3 -0
  133. package/dist/parser/parser.js.map +1 -1
  134. package/dist/pyright.js +26 -4
  135. package/dist/pyright.js.map +1 -1
  136. package/dist/tests/chainedSourceFiles.test.js +15 -20
  137. package/dist/tests/chainedSourceFiles.test.js.map +1 -1
  138. package/dist/tests/checker.test.js +14 -0
  139. package/dist/tests/checker.test.js.map +1 -1
  140. package/dist/tests/completions.test.js +11 -236
  141. package/dist/tests/completions.test.js.map +1 -1
  142. package/dist/tests/docStringConversion.test.js +36 -2
  143. package/dist/tests/docStringConversion.test.js.map +1 -1
  144. package/dist/tests/documentSymbolCollector.test.js +3 -3
  145. package/dist/tests/documentSymbolCollector.test.js.map +1 -1
  146. package/dist/tests/fourslash/completions.override2.fourslash.js +1 -16
  147. package/dist/tests/fourslash/completions.override2.fourslash.js.map +1 -1
  148. package/dist/tests/fourslash/fourslash.d.ts +4 -4
  149. package/dist/tests/fourslash/missingTypeStub.codeAction.fourslash.js +1 -1
  150. package/dist/tests/fourslash/missingTypeStub.codeAction.fourslash.js.map +1 -1
  151. package/dist/tests/harness/fourslash/testState.d.ts +17 -11
  152. package/dist/tests/harness/fourslash/testState.js +39 -50
  153. package/dist/tests/harness/fourslash/testState.js.map +1 -1
  154. package/dist/tests/importResolver.test.js +81 -1
  155. package/dist/tests/importResolver.test.js.map +1 -1
  156. package/dist/tests/sourceFile.test.js +1 -1
  157. package/dist/tests/sourceFile.test.js.map +1 -1
  158. package/dist/tests/testStateUtils.d.ts +2 -2
  159. package/dist/tests/testStateUtils.js +38 -8
  160. package/dist/tests/testStateUtils.js.map +1 -1
  161. package/dist/tests/typeEvaluator2.test.js +13 -1
  162. package/dist/tests/typeEvaluator2.test.js.map +1 -1
  163. package/dist/tests/typeEvaluator3.test.js +5 -1
  164. package/dist/tests/typeEvaluator3.test.js.map +1 -1
  165. package/dist/tests/typeEvaluator4.test.js +9 -1
  166. package/dist/tests/typeEvaluator4.test.js.map +1 -1
  167. package/dist/tests/typeEvaluator5.test.js +25 -9
  168. package/dist/tests/typeEvaluator5.test.js.map +1 -1
  169. package/dist/tests/workspaceEditUtils.test.js +95 -6
  170. package/dist/tests/workspaceEditUtils.test.js.map +1 -1
  171. package/package.json +4 -4
  172. package/dist/languageService/importAdder.d.ts +0 -40
  173. package/dist/languageService/importAdder.js +0 -388
  174. package/dist/languageService/importAdder.js.map +0 -1
  175. package/dist/languageService/indentationUtils.d.ts +0 -16
  176. package/dist/languageService/indentationUtils.js +0 -727
  177. package/dist/languageService/indentationUtils.js.map +0 -1
  178. package/dist/languageService/insertionPointUtils.d.ts +0 -9
  179. package/dist/languageService/insertionPointUtils.js +0 -132
  180. package/dist/languageService/insertionPointUtils.js.map +0 -1
  181. package/dist/languageService/renameModuleProvider.d.ts +0 -65
  182. package/dist/languageService/renameModuleProvider.js +0 -939
  183. package/dist/languageService/renameModuleProvider.js.map +0 -1
  184. package/dist/tests/fourslash/completions.commitChars.fourslash.d.ts +0 -1
  185. package/dist/tests/fourslash/completions.commitChars.fourslash.js +0 -81
  186. package/dist/tests/fourslash/completions.commitChars.fourslash.js.map +0 -1
  187. package/dist/tests/importAdder.test.d.ts +0 -1
  188. package/dist/tests/importAdder.test.js +0 -1325
  189. package/dist/tests/importAdder.test.js.map +0 -1
  190. package/dist/tests/indentationUtils.ptvs.test.d.ts +0 -1
  191. package/dist/tests/indentationUtils.ptvs.test.js +0 -324
  192. package/dist/tests/indentationUtils.ptvs.test.js.map +0 -1
  193. package/dist/tests/indentationUtils.reindent.test.d.ts +0 -1
  194. package/dist/tests/indentationUtils.reindent.test.js +0 -372
  195. package/dist/tests/indentationUtils.reindent.test.js.map +0 -1
  196. package/dist/tests/indentationUtils.test.d.ts +0 -1
  197. package/dist/tests/indentationUtils.test.js +0 -502
  198. package/dist/tests/indentationUtils.test.js.map +0 -1
  199. package/dist/tests/insertionPointUtils.test.d.ts +0 -1
  200. package/dist/tests/insertionPointUtils.test.js +0 -154
  201. package/dist/tests/insertionPointUtils.test.js.map +0 -1
  202. package/dist/tests/moveSymbol.importAdder.test.d.ts +0 -1
  203. package/dist/tests/moveSymbol.importAdder.test.js +0 -298
  204. package/dist/tests/moveSymbol.importAdder.test.js.map +0 -1
  205. package/dist/tests/moveSymbol.insertion.test.d.ts +0 -1
  206. package/dist/tests/moveSymbol.insertion.test.js +0 -537
  207. package/dist/tests/moveSymbol.insertion.test.js.map +0 -1
  208. package/dist/tests/moveSymbol.misc.test.d.ts +0 -1
  209. package/dist/tests/moveSymbol.misc.test.js +0 -169
  210. package/dist/tests/moveSymbol.misc.test.js.map +0 -1
  211. package/dist/tests/moveSymbol.updateReference.test.d.ts +0 -1
  212. package/dist/tests/moveSymbol.updateReference.test.js +0 -1071
  213. package/dist/tests/moveSymbol.updateReference.test.js.map +0 -1
  214. package/dist/tests/renameModule.folder.test.d.ts +0 -1
  215. package/dist/tests/renameModule.folder.test.js +0 -229
  216. package/dist/tests/renameModule.folder.test.js.map +0 -1
  217. package/dist/tests/renameModule.fromImports.test.d.ts +0 -1
  218. package/dist/tests/renameModule.fromImports.test.js +0 -790
  219. package/dist/tests/renameModule.fromImports.test.js.map +0 -1
  220. package/dist/tests/renameModule.imports.test.d.ts +0 -1
  221. package/dist/tests/renameModule.imports.test.js +0 -380
  222. package/dist/tests/renameModule.imports.test.js.map +0 -1
  223. package/dist/tests/renameModule.misc.test.d.ts +0 -1
  224. package/dist/tests/renameModule.misc.test.js +0 -615
  225. package/dist/tests/renameModule.misc.test.js.map +0 -1
  226. package/dist/tests/renameModule.relativePath.test.d.ts +0 -1
  227. package/dist/tests/renameModule.relativePath.test.js +0 -231
  228. package/dist/tests/renameModule.relativePath.test.js.map +0 -1
  229. package/dist/tests/renameModuleTestUtils.d.ts +0 -4
  230. package/dist/tests/renameModuleTestUtils.js +0 -76
  231. package/dist/tests/renameModuleTestUtils.js.map +0 -1
package/dist/languageService/indentationUtils.js (removed)
@@ -1,727 +0,0 @@
- "use strict";
- /*
- * indentationUtils.ts
- * Copyright (c) Microsoft Corporation.
- * Licensed under the MIT license.
- *
- * Provides code to get indentation and re-indent code for the
- * given indentation.
- */
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.createIndentationString = exports.getIndentationFromText = exports.findNonWhitespaceTokenAtOrBeforeOffset = exports.getModuleStatementIndentation = exports.reindentSpan = exports.getNewlineIndentation = void 0;
- const parseTreeUtils_1 = require("../analyzer/parseTreeUtils");
- const collectionUtils_1 = require("../common/collectionUtils");
- const positionUtils_1 = require("../common/positionUtils");
- const textRange_1 = require("../common/textRange");
- const tokenizer_1 = require("../parser/tokenizer");
- const insertionPointUtils_1 = require("./insertionPointUtils");
- function getNewlineIndentation(parseResults, newlineOffset, preferDedent) {
- // ex)
- // a = """
- // | <= here
- const strIndent = _tryHandleStringLiterals(parseResults, newlineOffset);
- if (strIndent !== undefined) {
- return strIndent;
- }
- // ex)
- // a = 1 + \
- // | <= here
- // or
- // a = (1 +
- // | <= here
- const exprIndent = _tryHandleMultilineConstructs(parseResults, newlineOffset);
- if (exprIndent !== undefined) {
- return exprIndent;
- }
- preferDedent = preferDedent !== null && preferDedent !== void 0 ? preferDedent : _shouldDedentAfterKeyword(parseResults, newlineOffset);
- return Math.max(_getIndentation(parseResults, newlineOffset, preferDedent).indentation, 0);
- }
- exports.getNewlineIndentation = getNewlineIndentation;
- function reindentSpan(parseResults, span, indentation, indentFirstToken = true) {
- let indentDelta = 0;
- const texts = [];
- // Currently _convertTokenStreams converts text in the span as whitespace and non whitespace
- // and then this function puts those back to string with reindentation if needed.
- //
- // Another approach we can take is converting the text in 2 chunks that require reindentation and not
- // and process chunks that require reindentation line by line (like how it currently does for
- // multiline doc comments) and put chunks that don't require reindentation as it is.
- const tokenInfo = _convertTokenStreams(parseResults, span);
- let previousInfo = tokenInfo[0];
- indentDelta =
- indentation -
- getIndentationFromText(parseResults, previousInfo.range.start.line, previousInfo.range.start.character)
- .indentation;
- if (previousInfo.multilineDocComment) {
- (0, collectionUtils_1.appendArray)(texts, _reindentLinesFromText(parseResults, previousInfo, indentDelta));
- }
- else {
- if (indentFirstToken) {
- texts.push(createIndentationString(parseResults, indentation));
- }
- texts.push(previousInfo.text);
- }
- for (let i = 1; i < tokenInfo.length; i++) {
- const info = tokenInfo[i];
- if (info.firstTokenOnLine) {
- texts.push(parseResults.tokenizerOutput.predominantEndOfLineSequence.repeat(info.range.start.line - previousInfo.range.end.line));
- if (info.multilineDocComment) {
- (0, collectionUtils_1.appendArray)(texts, _reindentLinesFromText(parseResults, info, indentDelta));
- }
- else {
- // Put indentation for the first token on the line.
- texts.push(createIndentationString(parseResults, Math.max(0, getIndentationFromText(parseResults, info.range.start.line, info.range.start.character)
- .indentation + indentDelta)));
- texts.push(info.text);
- }
- }
- else {
- // Put whitespace between 2 tokens on same line
- // token1[space]token2
- texts.push(' '.repeat(info.range.start.character - previousInfo.range.end.character));
- texts.push(info.text);
- }
- previousInfo = info;
- }
- return {
- originalSpan: textRange_1.TextRange.combine(tokenInfo),
- text: texts.join(''),
- };
- }
- exports.reindentSpan = reindentSpan;
- function getModuleStatementIndentation(parseResults) {
- if (parseResults.parseTree.statements.length === 0) {
- return getNewlineIndentation(parseResults, parseResults.parseTree.length, /* preferDedent */ true);
- }
- return getNewlineIndentation(parseResults, parseResults.parseTree.statements[0].start, /* preferDedent */ true);
- }
- exports.getModuleStatementIndentation = getModuleStatementIndentation;
- function _getIndentation(parseResults, offset, preferDedent) {
- const tokens = parseResults.tokenizerOutput.tokens;
- const startingToken = findNonWhitespaceTokenAtOrBeforeOffset(tokens, offset);
- if (!startingToken) {
- return {
- indentation: 0,
- };
- }
- const node = (0, parseTreeUtils_1.findNodeByOffset)(parseResults.parseTree, textRange_1.TextRange.getEnd(startingToken));
- if (!node) {
- return {
- indentation: 0,
- };
- }
- // Special-case the match statement since it does not contain a suite. Case statements do,
- // but match does not.
- if (node.nodeType === 63 /* Match */) {
- const tabSize = _getTabSize(parseResults);
- const outerContainer = (0, insertionPointUtils_1.getContainer)(node, /* includeSelf */ false);
- const result = _getIndentationForNode(parseResults, offset, outerContainer !== null && outerContainer !== void 0 ? outerContainer : parseResults.parseTree, node);
- result.indentation += tabSize;
- return result;
- }
- const suite = (0, parseTreeUtils_1.getFirstAncestorOrSelfOfKind)(node, 50 /* Suite */);
- if (!suite) {
- return _getIndentationForNode(parseResults, offset, parseResults.parseTree, node);
- }
- const suiteSpan = (0, positionUtils_1.convertTextRangeToRange)(suite, parseResults.tokenizerOutput.lines);
- if (preferDedent || (suiteSpan.start.line === suiteSpan.end.line && suite.statements.length > 0)) {
- // Go one more level up.
- const outerContainer = (0, insertionPointUtils_1.getContainer)(suite, /* includeSelf */ false);
- return _getIndentationForNode(parseResults, offset, outerContainer !== null && outerContainer !== void 0 ? outerContainer : parseResults.parseTree, suite);
- }
- return _getIndentationForNode(parseResults, offset, suite, node);
- }
- function _getIndentationForNode(parseResults, offset, container, current) {
- if (container.nodeType === 36 /* Module */) {
- // It is at the module level
- return {
- token: _getFirstTokenOfStatement(parseResults, container, current),
- indentation: 0,
- };
- }
- if (container.nodeType === 63 /* Match */ ||
- _containsNoIndentBeforeFirstStatement(parseResults, offset, container)) {
- const tabSize = _getTabSize(parseResults);
- const outerContainer = (0, insertionPointUtils_1.getContainer)(container, /* includeSelf */ false);
- const result = _getIndentationForNode(parseResults, offset, outerContainer !== null && outerContainer !== void 0 ? outerContainer : parseResults.parseTree, container);
- return {
- token: result.token,
- indentation: result.indentation + tabSize,
- };
- }
- else {
- const tokens = parseResults.tokenizerOutput.tokens;
- return {
- token: _getFirstTokenOfStatement(parseResults, container, current),
- indentation: _getIndentationFromIndentToken(tokens, tokens.getItemAtPosition(container.start)),
- };
- }
- }
- function _containsNoIndentBeforeFirstStatement(parseResults, offset, suite) {
- const statements = suite.statements.filter((s) => s.length > 0);
- if (statements.length === 0) {
- // There is no statement in the suite.
- // ex)
- // def foo():
- // | <= here
- return true;
- }
- if (statements.length === 1) {
- if (statements[0].nodeType !== 47 /* StatementList */ || statements[0].statements.length === 1) {
- if (statements[0].start >= offset) {
- const statementLine = parseResults.tokenizerOutput.lines.getItemAtPosition(statements[0].start);
- const offsetLine = parseResults.tokenizerOutput.lines.getItemAtPosition(offset);
- if (statementLine === offsetLine) {
- // We are calculating indent for only statement in suite.
- // ex)
- // def foo():
- // |pass <= offset before first statement
- return true;
- }
- }
- }
- }
- // If suite contains no indent before first statement, then consider user is in the middle of writing block
- // and parser is in broken state.
- // ex)
- // def foo():
- // while True:
- // | <= here
- // def bar():
- // pass
- //
- // parser will think "def bar" belongs to "while True" with invalid indentation.
- const tokens = parseResults.tokenizerOutput.tokens;
- const start = tokens.getItemAtPosition(suite.start);
- const end = tokens.getItemAtPosition(suite.statements[0].start);
- for (let i = start; i <= end; i++) {
- const token = _getTokenAtIndex(tokens, i);
- if ((token === null || token === void 0 ? void 0 : token.type) === 3 /* Indent */) {
- return false;
- }
- }
- return true;
- }
- function _getFirstTokenOfStatement(parseResults, container, span) {
- const tokens = parseResults.tokenizerOutput.tokens;
- for (const statement of container.statements) {
- if (!textRange_1.TextRange.containsRange(statement, span)) {
- continue;
- }
- return (0, parseTreeUtils_1.getTokenAt)(tokens, statement.start);
- }
- return (0, parseTreeUtils_1.getTokenAt)(tokens, container.start);
- }
- function _getIndentationFromIndentToken(tokens, index) {
- for (let i = index; i < tokens.count; i++) {
- const token = _getTokenAtIndex(tokens, i);
- if ((token === null || token === void 0 ? void 0 : token.type) === 3 /* Indent */) {
- return token.indentAmount;
- }
- }
- // At the module level.
- return 0;
- }
- function _tryHandleMultilineConstructs(parseResults, offset) {
- const tokens = parseResults.tokenizerOutput.tokens;
- const lines = parseResults.tokenizerOutput.lines;
- // Make sure we use next token to get line delta.
- // This is just to handle how tokenizer associates new lines to which token.
- // ex) a = 1 + \
- // | <= here
- // [b] = 2
- const index = _findNextTokenIndex(tokens, offset);
- if (index < 0) {
- return undefined;
- }
- for (let i = index; i > 0; i--) {
- const token = _getTokenAtIndex(tokens, i);
- if (textRange_1.TextRange.getEnd(token) < offset) {
- return undefined;
- }
- const previousToken = _getTokenAtIndex(tokens, i - 1);
- const tokenSpan = token ? (0, positionUtils_1.convertTextRangeToRange)(token, lines) : undefined;
- const previousTokenSpan = previousToken ? (0, positionUtils_1.convertTextRangeToRange)(previousToken, lines) : undefined;
- if (tokenSpan &&
- previousTokenSpan &&
- previousTokenSpan.start.line < tokenSpan.start.line &&
- previousToken.type !== 2 /* NewLine */) {
- return _getIndentationForNextLine(parseResults, previousToken, token, offset);
- }
- }
- return undefined;
- }
- function _tryHandleStringLiterals(parseResults, offset) {
- const tokens = parseResults.tokenizerOutput.tokens;
- const index = tokens.getItemAtPosition(offset);
- if (index < 0) {
- return undefined;
- }
- const token = _findStringToken(tokens, index);
- if (!token || token.type !== 5 /* String */) {
- return undefined;
- }
- const stringToken = token;
- if (!(stringToken.flags & 4 /* Triplicate */)) {
- // We only care """ string literal
- return undefined;
- }
- if (!(stringToken.flags & 65536 /* Unterminated */) &&
- !textRange_1.TextRange.contains((0, parseTreeUtils_1.getStringValueRange)(stringToken), offset)) {
- // ex) We only support these 2 cases.
- // """
- // | <= here
- // or
- // """
- // | <= here
- // """
- return undefined;
- }
- const lines = parseResults.tokenizerOutput.lines;
- const begin = (0, positionUtils_1.convertOffsetToPosition)(token.start, lines);
- const current = (0, positionUtils_1.convertOffsetToPosition)(offset, lines);
- return _getFirstNonBlankLineIndentationFromText(parseResults, current.line, begin.line);
- }
- function _isOpenToken(token) {
- return (token.type === 13 /* OpenParenthesis */ ||
- token.type === 15 /* OpenBracket */ ||
- token.type === 17 /* OpenCurlyBrace */);
- }
- function _isCloseToken(token) {
- return (token.type === 14 /* CloseParenthesis */ ||
- token.type === 16 /* CloseBracket */ ||
- token.type === 18 /* CloseCurlyBrace */);
- }
- function _getIndentationForNextLine(parseResults, prevToken, nextToken, offset) {
- // Get the last token on the same line as the previous token
- const lines = parseResults.tokenizerOutput.lines;
- const lineIndex = (0, positionUtils_1.convertOffsetToPosition)(prevToken.start, lines).line;
- const line = lines.getItemAt(lineIndex);
- const tabSize = _getTabSize(parseResults);
- let token = prevToken;
- // Go backwards through tokens up until the front of the line
- let whitespaceOnly = true;
- let closeCount = 0;
- while (token && token.start >= line.start) {
- if (_isCloseToken(token)) {
- whitespaceOnly = false;
- closeCount += 1;
- }
- else if (_isOpenToken(token) && closeCount === 0) {
- // Special case for parenthesis
- if (token.type === 13 /* OpenParenthesis */ && whitespaceOnly) {
- const baseIndentation = _getIndentation(parseResults, token.start, false).indentation;
- // In PEP 8, this should be this case here:
- // # Add 4 spaces (an extra level of indentation) to distinguish arguments from the rest.
- // def long_function_name(
- // var_one, var_two, var_three,
- // var_four):
- // print(var_one)
- //
- const node = (0, parseTreeUtils_1.findNodeByOffset)(parseResults.parseTree, token.start - 1);
- const funcNode = (0, parseTreeUtils_1.getFirstAncestorOrSelfOfKind)(node, 28 /* Function */);
- if (funcNode &&
- funcNode.nodeType === 28 /* Function */ &&
- (0, positionUtils_1.convertOffsetToPosition)(funcNode.start, lines).line === lineIndex) {
- return baseIndentation + tabSize * 2;
- }
- // Not inside a function, just need one tab. See this in PEP 8
- // # Hanging indents should add a level.
- // foo = long_function_name(
- // var_one, var_two,
- // var_three, var_four)
- return baseIndentation + tabSize;
- }
- else if (whitespaceOnly) {
- // This is the case where the user put a newline right after a (, [, or {. We want
- // to be one tab over from the [.
- const line = (0, positionUtils_1.convertOffsetToPosition)(token.start, lines).line;
- return getIndentationFromText(parseResults, line).indentation + tabSize;
- }
- else {
- // In PEP 8, this should be this case here:
- // # Aligned with opening delimiter.
- // def long_function_name(var_one, var_two,
- // var_three, var_four)
- // + 1 is to accommodate for the parenthesis.
- //
- // This same indentation applies for function calls or just dictionary or list assignments.
- return token.start - line.start + 1;
- }
- }
- else if (_isOpenToken(token) && closeCount > 0) {
- closeCount--;
- whitespaceOnly = false;
- }
- else if (!_isWhitespaceToken(token.type)) {
- // Found a non whitespace token before we returned.
- whitespaceOnly = false;
- }
- token = findNonWhitespaceTokenAtOrBeforeOffset(parseResults.tokenizerOutput.tokens, token.start - 1);
- }
- // No parenthesis found
- return _getFirstNonBlankLineIndentationFromText(parseResults, (0, positionUtils_1.convertOffsetToPosition)(offset, parseResults.tokenizerOutput.lines).line, lineIndex);
- }
- function _getFirstNonBlankLineIndentationFromText(parseResults, currentLine, endingLine) {
- endingLine = Math.max(endingLine, 0);
- for (let i = currentLine; i >= endingLine; i--) {
- const result = getIndentationFromText(parseResults, i);
- if (!_isBlankLine(parseResults, i, result.charOffset)) {
- // Not blank line.
- // ex) [indentation]i = 1
- return result.indentation;
- }
- }
- return getIndentationFromText(parseResults, endingLine).indentation;
- }
- function _findStringToken(tokens, index) {
- const token = _findNonWhitespaceTokenAtOrBeforeIndex(tokens, index);
- if (!token) {
- return undefined;
- }
- return token.type === 5 /* String */ ? token : undefined;
- }
- function findNonWhitespaceTokenAtOrBeforeOffset(tokens, offset) {
- const index = tokens.getItemAtPosition(offset);
- if (index < 0) {
- return undefined;
- }
- return _findNonWhitespaceTokenAtOrBeforeIndex(tokens, index);
- }
- exports.findNonWhitespaceTokenAtOrBeforeOffset = findNonWhitespaceTokenAtOrBeforeOffset;
- function _findNonWhitespaceTokenAtOrBeforeIndex(tokens, index) {
- for (let i = index; i >= 0; i--) {
- const token = _getTokenAtIndex(tokens, i);
- if (!token) {
- break;
- }
- if (_isWhitespaceToken(token.type)) {
- continue;
- }
- return token;
- }
- return undefined;
- }
- function _findNextTokenIndex(tokens, offset) {
- const index = tokens.getItemAtPosition(offset);
- if (index < 0) {
- return index;
- }
- for (let i = index + 1; i < tokens.count; i++) {
- const token = _getTokenAtIndex(tokens, i);
- if ((token === null || token === void 0 ? void 0 : token.type) === 4 /* Dedent */ || (token === null || token === void 0 ? void 0 : token.type) === 2 /* NewLine */) {
- continue;
- }
- return i;
- }
- return tokens.count - 1;
- }
- function _getTokenAtIndex(tokens, index) {
- if (index < 0) {
- return undefined;
- }
- return tokens.getItemAt(index);
- }
- function _shouldDedentAfterKeyword(parseResults, offset) {
- // For now, we won't include all small statements that can put at single line.
- // See parser.ts to see all small statements or see python grammar.
- // ex) def foo(): pass
- const tokens = parseResults.tokenizerOutput.tokens;
- const index = tokens.getItemAtPosition(offset);
- if (index < 0) {
- return false;
- }
- for (let i = index; i >= 0; i--) {
- const token = _getTokenAtIndex(tokens, i);
- if (!token) {
- return false;
- }
- switch (token.type) {
- case 4 /* Dedent */:
- case 2 /* NewLine */:
- case 1 /* EndOfStream */:
- continue;
- case 8 /* Keyword */: {
- const previousToken = _getTokenAtIndex(tokens, i - 1);
- if ((previousToken === null || previousToken === void 0 ? void 0 : previousToken.type) === 10 /* Colon */) {
- // Not for single line construct.
- // ex) def foo(): pass
- return false;
- }
- const keyword = token;
- // Dedent if we found one of these keywords
- if (keyword.keywordType === 30 /* Pass */ ||
- keyword.keywordType === 32 /* Return */ ||
- keyword.keywordType === 5 /* Break */ ||
- keyword.keywordType === 8 /* Continue */ ||
- keyword.keywordType === 31 /* Raise */) {
- return true;
- }
- // Otherwise, unless the keyword can be used as a return/raise value, don't dedent.
- if (keyword.keywordType !== 33 /* True */ &&
- keyword.keywordType !== 15 /* False */ &&
- keyword.keywordType !== 26 /* None */ &&
- keyword.keywordType !== 9 /* Debug */) {
- return false;
- }
- }
- }
- // Dedent if we've found a return or raise statement
- const node = (0, parseTreeUtils_1.findNodeByOffset)(parseResults.parseTree, token.start);
- const returnOrRaise = (0, parseTreeUtils_1.getFirstAncestorOrSelf)(node, (x) => x.nodeType === 44 /* Return */ || x.nodeType === 43 /* Raise */);
- return !!returnOrRaise;
- }
- return false;
- }
- function _isBlankLine(parseResults, line, charOffset) {
- const endingLength = _getLineEndingLength(parseResults, line);
- const lineSpan = parseResults.tokenizerOutput.lines.getItemAt(line);
- return charOffset === lineSpan.length - endingLength;
- }
- function _getLineEndingLength(parseResults, line) {
- let length = 0;
- const range = parseResults.tokenizerOutput.lines.getItemAt(line);
- for (let i = range.length - 1; i >= 0; i--) {
- const charCode = parseResults.text.charCodeAt(range.start + i);
- switch (charCode) {
- case 12 /* FormFeed */:
- case 35 /* Hash */:
- case 10 /* LineFeed */:
- case 13 /* CarriageReturn */:
- length++;
- break;
- default:
- return length;
- }
- }
- return length;
- }
- function getIndentationFromText(parseResults, line, uptoLineOffset) {
- let indentation = 0;
- let charOffset = 0;
- const tabSize = _getTabSize(parseResults);
- const range = parseResults.tokenizerOutput.lines.getItemAt(line);
- for (let i = 0; i < range.length; i++) {
- const charCode = parseResults.text.charCodeAt(range.start + i);
- switch (charCode) {
- case 32 /* Space */:
- charOffset++;
- indentation++;
- break;
- case 9 /* Tab */:
- charOffset++;
- indentation += tabSize;
- break;
- default:
- if (!uptoLineOffset || uptoLineOffset === i) {
- return {
- charOffset,
- indentation,
- };
- }
- // calculate indentation upto line offset given.
- charOffset++;
- indentation++;
- }
- }
- return {
- charOffset,
- indentation,
- };
- }
- exports.getIndentationFromText = getIndentationFromText;
- function _convertTokenStreams(parseResults, span) {
- // Existing token stream contains text and whitespace mixed, making it difficult
- // to process for re-indentation. This will convert those to strictly text and whitespace.
- const tokens = parseResults.tokenizerOutput.tokens;
- let startIndex = Math.max(tokens.getItemAtPosition(span.start), 0);
- const startToken = _getTokenAtIndex(tokens, startIndex);
- if (textRange_1.TextRange.getEnd(startToken) < span.start) {
- // ex) firstToken | <= span start.
- startIndex++;
- }
- let endIndex = Math.min(tokens.getItemAtPosition(textRange_1.TextRange.getEnd(span)), tokens.length - 1);
- const endToken = _getTokenAtIndex(tokens, endIndex);
- if (textRange_1.TextRange.getEnd(span) <= endToken.start) {
- // ex) |< = span end [endToken]
- endIndex--;
- }
- // If the next token is a newline, we want to include it in the range
- // so that we can include comments if there is any.
- if (endIndex < tokens.length - 1 && _isWhitespaceToken(tokens.getItemAt(endIndex + 1).type)) {
- endIndex++;
- }
- const tokenInfoArray = [];
- const lines = parseResults.tokenizerOutput.lines;
- for (let i = startIndex; i <= endIndex; i++) {
- const token = _getTokenAtIndex(tokens, i);
- if (token.comments) {
- for (const comment of token.comments) {
- tokenInfoArray.push({
- start: comment.start,
- length: comment.length,
- range: (0, positionUtils_1.convertTextRangeToRange)(comment, lines),
- text: comment.value,
- kind: 'comment',
- firstTokenOnLine: false,
- multilineDocComment: false,
- });
- }
- }
- if (_isWhitespaceToken(token.type) || token.length === 0) {
- continue;
- }
- tokenInfoArray.push({
- start: token.start,
- length: token.length,
- range: (0, positionUtils_1.convertTextRangeToRange)(token, lines),
- text: parseResults.text.substr(token.start, token.length),
- kind: token.type === 5 /* String */ ? 'string' : 'token',
- firstTokenOnLine: false,
- multilineDocComment: false,
- });
- }
- if (tokenInfoArray.length === 0) {
- return tokenInfoArray;
- }
- tokenInfoArray.sort((a, b) => a.start - b.start);
- // Handle text in whitespace that is not part of token stream.
- let previousInfo = tokenInfoArray[0];
- const additionalTokens = [];
- if (previousInfo.kind === 'comment') {
- // ex) token [#] comment
- const start = startIndex === 0 ? 0 : textRange_1.TextRange.getEnd(_getTokenAtIndex(tokens, startIndex - 1));
- _addTokenInfoIfMatch(parseResults, start, previousInfo.start, 35 /* Hash */, additionalTokens);
- }
- for (let i = 1; i < tokenInfoArray.length; i++) {
- const info = tokenInfoArray[i];
- // Another approach is just blindly go through the range looking for
- // non whitespace char rather than looking for specific cases like below.
- if (previousInfo.kind !== 'comment') {
- for (let whitespaceLine = previousInfo.range.end.line; whitespaceLine < info.range.start.line; whitespaceLine++) {
- const lineTextRange = lines.getItemAt(whitespaceLine);
- const lastCharOffset = lineTextRange.length - _getLineEndingLength(parseResults, whitespaceLine) - 1;
- if (lastCharOffset >= 0) {
- // ex) i = 1 \ <= explicit multiline construct
- // +
- // 2
- const start = lineTextRange.start + lastCharOffset;
- _addTokenInfoIfMatch(parseResults, start, start + 1, 92 /* Backslash */, additionalTokens);
- }
- }
- }
- if (info.kind === 'comment') {
- const start = previousInfo.range.end.line === info.range.start.line
- ? textRange_1.TextRange.getEnd(previousInfo)
- : lines.getItemAt(info.range.start.line).start;
- // ex) token [#] comment
- _addTokenInfoIfMatch(parseResults, start, info.start, 35 /* Hash */, additionalTokens);
- }
- previousInfo = info;
- }
- (0, collectionUtils_1.appendArray)(tokenInfoArray, additionalTokens);
- tokenInfoArray.sort((a, b) => a.start - b.start);
- // Update firstTokenOnLine and multilineDocComment
- previousInfo = tokenInfoArray[0];
- if (startIndex === 0) {
- // It is the first token in the file.
- previousInfo.firstTokenOnLine = true;
- }
- else {
- const previousNonWhitespaceToken = _findNonWhitespaceTokenAtOrBeforeIndex(tokens, startIndex - 1);
- if (previousNonWhitespaceToken) {
- const previousEnd = (0, positionUtils_1.convertOffsetToPosition)(textRange_1.TextRange.getEnd(previousNonWhitespaceToken), lines);
- previousInfo.firstTokenOnLine = previousEnd.line !== previousInfo.range.start.line;
- }
- else {
- previousInfo.firstTokenOnLine = true;
- }
- }
- previousInfo.multilineDocComment = _isMultilineDocComment(parseResults, previousInfo);
- for (let i = 1; i < tokenInfoArray.length; i++) {
- const info = tokenInfoArray[i];
- info.firstTokenOnLine = previousInfo.range.end.line !== info.range.start.line;
- info.multilineDocComment = _isMultilineDocComment(parseResults, info);
- previousInfo = info;
- }
- return tokenInfoArray;
- }
- function _addTokenInfoIfMatch(parseResults, start, end, charCode, tokens) {
- for (let i = start; i < end; i++) {
- if (parseResults.text.charCodeAt(i) === charCode) {
- tokens.push({
- start: i,
- length: 1,
- range: (0, positionUtils_1.convertTextRangeToRange)(textRange_1.TextRange.create(i, 1), parseResults.tokenizerOutput.lines),
- text: String.fromCharCode(charCode),
- kind: 'token',
- firstTokenOnLine: false,
- multilineDocComment: false,
- });
- }
- }
- }
- function _isWhitespaceToken(type) {
- switch (type) {
- case 4 /* Dedent */:
- case 2 /* NewLine */:
- case 3 /* Indent */:
- case 1 /* EndOfStream */:
- return true;
- default:
- return false;
- }
- }
- function _isMultilineDocComment(parseResults, info) {
- var _a, _b;
- if (info.kind !== 'string' || !info.firstTokenOnLine || info.range.start.line === info.range.end.line) {
- return false;
- }
- const node = (0, parseTreeUtils_1.findNodeByOffset)(parseResults.parseTree, info.start);
- if ((node === null || node === void 0 ? void 0 : node.nodeType) !== 49 /* String */ ||
- ((_a = node.parent) === null || _a === void 0 ? void 0 : _a.nodeType) !== 48 /* StringList */ ||
- ((_b = node.parent.parent) === null || _b === void 0 ? void 0 : _b.nodeType) !== 47 /* StatementList */) {
- return false;
- }
- return (0, parseTreeUtils_1.isDocString)(node.parent.parent);
- }
- function _reindentLinesFromText(parseResults, info, indentDelta) {
- const texts = [];
- for (let i = info.range.start.line; i <= info.range.end.line; i++) {
- texts.push(_reindentLineFromText(parseResults, i, indentDelta, i === info.range.end.line ? info : undefined));
- }
- return texts;
- }
- function _reindentLineFromText(parseResults, line, indentDelta, range) {
- const result = getIndentationFromText(parseResults, line);
- if (_isBlankLine(parseResults, line, result.charOffset)) {
- return '';
- }
- let lineRange = parseResults.tokenizerOutput.lines.getItemAt(line);
- if (range) {
- lineRange = textRange_1.TextRange.fromBounds(lineRange.start, Math.min(textRange_1.TextRange.getEnd(range), textRange_1.TextRange.getEnd(lineRange)));
- }
- const text = parseResults.text.substr(lineRange.start + result.charOffset, lineRange.length - result.charOffset);
- return createIndentationString(parseResults, Math.max(result.indentation + indentDelta, 0)) + text;
- }
- function _getTabSize(parseResults) {
- const tab = parseResults.tokenizerOutput.predominantTabSequence;
- const tabLength = tab.length;
- if (tabLength === 1 && tab.charCodeAt(0) === 9 /* Tab */) {
- // Tokenizer will use 8 for Char.Tab and put that info in indentToken's indent size.
- return tokenizer_1.defaultTabSize;
- }
- return tabLength;
- }
- function createIndentationString(parseResults, indentation) {
- const tab = parseResults.tokenizerOutput.predominantTabSequence;
- const tabLength = tab.length;
- if (tabLength === 1 && tab.charCodeAt(0) === 9 /* Tab */) {
- const spaceCount = indentation % tokenizer_1.defaultTabSize;
- const tabCount = (indentation - spaceCount) / tokenizer_1.defaultTabSize;
- return '\t'.repeat(tabCount) + ' '.repeat(spaceCount);
- }
- return ' '.repeat(indentation);
- }
- exports.createIndentationString = createIndentationString;
- //# sourceMappingURL=indentationUtils.js.map