@borgar/fx 4.13.0 → 5.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. package/dist/index-BMr6cTgc.d.cts +1444 -0
  2. package/dist/index-BMr6cTgc.d.ts +1444 -0
  3. package/dist/index.cjs +3054 -0
  4. package/dist/index.cjs.map +1 -0
  5. package/dist/index.d.cts +1 -0
  6. package/dist/index.d.ts +1 -0
  7. package/dist/index.js +2984 -0
  8. package/dist/index.js.map +1 -0
  9. package/dist/xlsx/index.cjs +3120 -0
  10. package/dist/xlsx/index.cjs.map +1 -0
  11. package/dist/xlsx/index.d.cts +55 -0
  12. package/dist/xlsx/index.d.ts +55 -0
  13. package/dist/xlsx/index.js +3049 -0
  14. package/dist/xlsx/index.js.map +1 -0
  15. package/docs/API.md +2959 -718
  16. package/docs/AST_format.md +2 -2
  17. package/eslint.config.mjs +40 -0
  18. package/lib/a1.spec.ts +32 -0
  19. package/lib/a1.ts +26 -0
  20. package/lib/addA1RangeBounds.ts +50 -0
  21. package/lib/addTokenMeta.spec.ts +166 -0
  22. package/lib/{addTokenMeta.js → addTokenMeta.ts} +53 -33
  23. package/lib/astTypes.ts +211 -0
  24. package/lib/cloneToken.ts +29 -0
  25. package/lib/{constants.js → constants.ts} +6 -3
  26. package/lib/fixRanges.spec.ts +220 -0
  27. package/lib/fixRanges.ts +260 -0
  28. package/lib/fromCol.spec.ts +15 -0
  29. package/lib/{fromCol.js → fromCol.ts} +1 -1
  30. package/lib/index.spec.ts +119 -0
  31. package/lib/index.ts +76 -0
  32. package/lib/isNodeType.ts +151 -0
  33. package/lib/isType.spec.ts +208 -0
  34. package/lib/{isType.js → isType.ts} +26 -25
  35. package/lib/lexers/{advRangeOp.js → advRangeOp.ts} +1 -1
  36. package/lib/lexers/{canEndRange.js → canEndRange.ts} +2 -2
  37. package/lib/lexers/{lexBoolean.js → lexBoolean.ts} +25 -6
  38. package/lib/lexers/{lexContext.js → lexContext.ts} +14 -6
  39. package/lib/lexers/{lexError.js → lexError.ts} +3 -3
  40. package/lib/lexers/{lexFunction.js → lexFunction.ts} +3 -2
  41. package/lib/lexers/lexNameFuncCntx.ts +112 -0
  42. package/lib/lexers/{lexNamed.js → lexNamed.ts} +4 -4
  43. package/lib/lexers/{lexNewLine.js → lexNewLine.ts} +3 -2
  44. package/lib/lexers/{lexNumber.js → lexNumber.ts} +4 -3
  45. package/lib/lexers/{lexOperator.js → lexOperator.ts} +5 -4
  46. package/lib/lexers/lexRange.ts +15 -0
  47. package/lib/lexers/{lexRangeA1.js → lexRangeA1.ts} +11 -7
  48. package/lib/lexers/{lexRangeR1C1.js → lexRangeR1C1.ts} +10 -6
  49. package/lib/lexers/{lexRangeTrim.js → lexRangeTrim.ts} +3 -2
  50. package/lib/lexers/{lexRefOp.js → lexRefOp.ts} +4 -3
  51. package/lib/lexers/{lexString.js → lexString.ts} +3 -3
  52. package/lib/lexers/{lexStructured.js → lexStructured.ts} +5 -5
  53. package/lib/lexers/{lexWhitespace.js → lexWhitespace.ts} +3 -2
  54. package/lib/lexers/sets.ts +51 -0
  55. package/lib/mergeRefTokens.spec.ts +141 -0
  56. package/lib/{mergeRefTokens.js → mergeRefTokens.ts} +14 -9
  57. package/lib/nodeTypes.ts +54 -0
  58. package/lib/parse.spec.ts +1410 -0
  59. package/lib/{parser.js → parse.ts} +81 -63
  60. package/lib/parseA1Range.spec.ts +233 -0
  61. package/lib/parseA1Range.ts +206 -0
  62. package/lib/parseA1Ref.spec.ts +337 -0
  63. package/lib/parseA1Ref.ts +115 -0
  64. package/lib/parseR1C1Range.ts +191 -0
  65. package/lib/parseR1C1Ref.spec.ts +323 -0
  66. package/lib/parseR1C1Ref.ts +127 -0
  67. package/lib/parseRef.spec.ts +90 -0
  68. package/lib/parseRef.ts +240 -0
  69. package/lib/{parseSRange.js → parseSRange.ts} +15 -10
  70. package/lib/parseStructRef.spec.ts +168 -0
  71. package/lib/parseStructRef.ts +76 -0
  72. package/lib/stringifyA1Range.spec.ts +72 -0
  73. package/lib/stringifyA1Range.ts +72 -0
  74. package/lib/stringifyA1Ref.spec.ts +64 -0
  75. package/lib/stringifyA1Ref.ts +59 -0
  76. package/lib/{stringifyPrefix.js → stringifyPrefix.ts} +17 -2
  77. package/lib/stringifyR1C1Range.spec.ts +92 -0
  78. package/lib/stringifyR1C1Range.ts +73 -0
  79. package/lib/stringifyR1C1Ref.spec.ts +63 -0
  80. package/lib/stringifyR1C1Ref.ts +67 -0
  81. package/lib/stringifyStructRef.spec.ts +124 -0
  82. package/lib/stringifyStructRef.ts +113 -0
  83. package/lib/stringifyTokens.ts +15 -0
  84. package/lib/toCol.spec.ts +11 -0
  85. package/lib/{toCol.js → toCol.ts} +4 -4
  86. package/lib/tokenTypes.ts +76 -0
  87. package/lib/tokenize-srefs.spec.ts +429 -0
  88. package/lib/tokenize.spec.ts +2103 -0
  89. package/lib/tokenize.ts +346 -0
  90. package/lib/translate.spec.ts +35 -0
  91. package/lib/translateToA1.spec.ts +247 -0
  92. package/lib/translateToA1.ts +231 -0
  93. package/lib/translateToR1C1.spec.ts +227 -0
  94. package/lib/translateToR1C1.ts +145 -0
  95. package/lib/types.ts +179 -0
  96. package/lib/xlsx/index.spec.ts +27 -0
  97. package/lib/xlsx/index.ts +32 -0
  98. package/package.json +45 -31
  99. package/tsconfig.json +28 -0
  100. package/typedoc-ignore-links.ts +17 -0
  101. package/typedoc.json +41 -0
  102. package/.eslintrc +0 -22
  103. package/benchmark/benchmark.js +0 -48
  104. package/benchmark/formulas.json +0 -15677
  105. package/dist/fx.d.ts +0 -823
  106. package/dist/fx.js +0 -2
  107. package/dist/package.json +0 -1
  108. package/lib/a1.js +0 -348
  109. package/lib/a1.spec.js +0 -458
  110. package/lib/addTokenMeta.spec.js +0 -153
  111. package/lib/astTypes.js +0 -96
  112. package/lib/extraTypes.js +0 -74
  113. package/lib/fixRanges.js +0 -104
  114. package/lib/fixRanges.spec.js +0 -171
  115. package/lib/fromCol.spec.js +0 -11
  116. package/lib/index.js +0 -134
  117. package/lib/index.spec.js +0 -67
  118. package/lib/isType.spec.js +0 -168
  119. package/lib/lexer-srefs.spec.js +0 -324
  120. package/lib/lexer.js +0 -264
  121. package/lib/lexer.spec.js +0 -1953
  122. package/lib/lexers/lexRange.js +0 -8
  123. package/lib/lexers/sets.js +0 -38
  124. package/lib/mergeRefTokens.spec.js +0 -121
  125. package/lib/package.json +0 -1
  126. package/lib/parseRef.js +0 -157
  127. package/lib/parseRef.spec.js +0 -71
  128. package/lib/parseStructRef.js +0 -48
  129. package/lib/parseStructRef.spec.js +0 -164
  130. package/lib/parser.spec.js +0 -1208
  131. package/lib/rc.js +0 -341
  132. package/lib/rc.spec.js +0 -403
  133. package/lib/stringifyStructRef.js +0 -80
  134. package/lib/stringifyStructRef.spec.js +0 -182
  135. package/lib/toCol.spec.js +0 -11
  136. package/lib/translate-toA1.spec.js +0 -214
  137. package/lib/translate-toRC.spec.js +0 -197
  138. package/lib/translate.js +0 -239
  139. package/lib/translate.spec.js +0 -21
  140. package/rollup.config.mjs +0 -22
  141. package/tsd.json +0 -12
package/lib/lexer.js DELETED
@@ -1,264 +0,0 @@
1
- import {
2
- FX_PREFIX,
3
- NEWLINE,
4
- NUMBER,
5
- OPERATOR,
6
- REF_NAMED,
7
- UNKNOWN,
8
- WHITESPACE,
9
- FUNCTION,
10
- OPERATOR_TRIM,
11
- REF_RANGE
12
- } from './constants.js';
13
- import { mergeRefTokens } from './mergeRefTokens.js';
14
- import { lexers } from './lexers/sets.js';
15
-
16
/** True when `t` is a token of the given type (a falsy token yields a falsy result). */
function isType (t, type) {
  return t && t.type === type;
}
17
-
18
// Baseline tokenizer settings; caller-supplied options are spread over these.
const defaultOptions = {
  withLocation: false,    // attach [start, end] source offsets to tokens
  mergeRefs: true,        // join reference parts into single tokens
  allowTernary: false,    // permit partial (A1:A style) ranges
  negativeNumbers: true,  // fold unary minus into the following number token
  r1c1: false             // expect R1C1-style rather than A1-style references
};
25
-
26
/** True when the token is "text-like": a named reference or a function name. */
function isTextToken (token) {
  const t = token.type;
  return t === REF_NAMED || t === FUNCTION;
}
32
-
33
/**
 * True when a minus following this token should be read as a binary
 * (subtraction) operator rather than a unary negation.
 */
function causesBinaryMinus (token) {
  const isOp = token && token.type === OPERATOR;
  if (!isOp) {
    // any value-producing token (number, ref, string, ...) ends an operand
    return true;
  }
  // these operators also terminate an operand, so a minus after them is binary
  const v = token.value;
  return v === '%' || v === '}' || v === ')' || v === '#';
}
41
-
42
/**
 * Re-types single-letter `r`/`c` UNKNOWN tokens as named references when they
 * occur inside a LET or LAMBDA call, where they are legal parameter names.
 *
 * @param {Array<object>} tokens The token list (mutated in place).
 * @returns {Array<object>} The same token list.
 */
function fixRCNames (tokens) {
  let withinCall = 0;  // paren depth at which an active LET/LAMBDA call opened
  let parenDepth = 0;
  let lastToken;
  for (const token of tokens) {
    if (token.type === OPERATOR) {
      if (token.value === '(') {
        parenDepth++;
        // guard: the very first token may be '(' so lastToken can be unset
        if (lastToken && lastToken.type === FUNCTION) {
          const v = lastToken.value.toLowerCase();
          if (v === 'lambda' || v === 'let') {
            withinCall = parenDepth;
          }
        }
      }
      else if (token.value === ')') {
        parenDepth--;
        if (parenDepth < withinCall) {
          withinCall = 0;
        }
      }
    }
    // match case-insensitively: the caller (getTokens) also counts 'R'/'C'
    // candidates case-insensitively
    else if (withinCall && token.type === UNKNOWN && /^[rc]$/i.test(token.value)) {
      token.type = REF_NAMED;
    }
    lastToken = token;
  }
  return tokens;
}
71
-
72
/**
 * Core tokenizer: scans `fx` with the supplied lexer functions and returns a
 * token list.
 *
 * Bookkeeping performed on top of the raw lexing:
 * - merges runs of UNKNOWN tokens with adjacent text tokens,
 * - optionally folds a unary minus into the number token it precedes,
 * - re-types `r`/`c` unknowns to names when inside LET/LAMBDA calls,
 * - validates trim operators (only legal between two range references),
 * - optionally merges reference parts into whole-reference tokens.
 *
 * @param {string} fx The formula source.
 * @param {Array<Function>} tokenHandlers Lexers tried in order; each is called
 *   as `(fx, pos, opts)` and returns a token object or a falsy value.
 * @param {object} [options={}] See `tokenize` for the available options.
 * @returns {Array<object>} The list of tokens.
 */
export function getTokens (fx, tokenHandlers, options = {}) {
  const opts = { ...defaultOptions, ...options };
  const { withLocation, mergeRefs, negativeNumbers } = opts;
  const tokens = [];
  let pos = 0;
  let letOrLambda = 0;   // number of LET/LAMBDA calls seen
  let unknownRC = 0;     // number of single 'r'/'c' UNKNOWN tokens seen
  const trimOps = [];    // indexes of OPERATOR_TRIM tokens, resolved below

  let tail0 = null;      // last non-whitespace token
  let tail1 = null;      // penultimate non-whitespace token
  let lastToken = null;  // last token of any type
  const pushToken = token => {
    const isCurrUnknown = token.type === UNKNOWN;
    const isLastUnknown = lastToken && lastToken.type === UNKNOWN;
    if (lastToken && (
      (isCurrUnknown && isLastUnknown) ||
      (isCurrUnknown && isTextToken(lastToken)) ||
      (isLastUnknown && isTextToken(token))
    )) {
      // UNKNOWN tokens "contaminate" sibling text tokens
      lastToken.value += token.value;
      lastToken.type = UNKNOWN;
      if (withLocation) {
        lastToken.loc[1] = token.loc[1];
      }
    }
    else {
      if (token.type === OPERATOR_TRIM) {
        // park trim operators as UNKNOWN until their neighbors are known
        trimOps.push(tokens.length);
        token.type = UNKNOWN;
      }
      tokens.push(token);
      lastToken = token;
      if (token.type !== WHITESPACE && token.type !== NEWLINE) {
        tail1 = tail0;
        tail0 = token;
      }
    }
  };

  if (fx.startsWith('=')) {
    const token = { type: FX_PREFIX, value: '=' };
    if (withLocation) {
      token.loc = [ 0, 1 ];
    }
    pos++;
    pushToken(token);
  }

  const numHandlers = tokenHandlers.length;
  while (pos < fx.length) {
    const startPos = pos;
    let token;
    for (let i = 0; i < numHandlers; i++) {
      token = tokenHandlers[i](fx, pos, opts);
      if (token) {
        pos += token.value.length;
        break;
      }
    }

    if (!token) {
      // nothing matched: consume a single character as UNKNOWN
      token = {
        type: UNKNOWN,
        value: fx[pos]
      };
      pos++;
    }
    if (withLocation) {
      token.loc = [ startPos, pos ];
    }

    // make a note if we found a let/lambda call
    if (lastToken && token.value === '(' && lastToken.type === FUNCTION) {
      if (/^l(?:ambda|et)$/i.test(lastToken.value)) {
        letOrLambda++;
      }
    }
    // make a note if we found a R or C unknown
    if (token.type === UNKNOWN && token.value.length === 1) {
      const valLC = token.value.toLowerCase();
      unknownRC += (valLC === 'r' || valLC === 'c') ? 1 : 0;
    }

    if (negativeNumbers && token.type === NUMBER) {
      const last1 = lastToken;
      // do we have a number preceded by a minus?
      if (last1 && isType(last1, OPERATOR) && last1.value === '-') {
        // missing tail1 means we are at the start of the stream
        if (
          !tail1 ||
          isType(tail1, FX_PREFIX) ||
          !causesBinaryMinus(tail1)
        ) {
          const minus = tokens.pop();
          token.value = '-' + token.value;
          if (token.loc) {
            // ensure offsets are up to date
            token.loc[0] = minus.loc[0];
          }
          // rewind the tail bookkeeping that the pop invalidated;
          // it becomes correct again once we pushToken()
          tail0 = tail1;
          lastToken = tokens[tokens.length - 1];
        }
      }
    }

    pushToken(token);
  }

  // if we encountered both a LAMBDA/LET call, and unknown 'r' or 'c' tokens
  // we'll turn the unknown tokens into names within the call.
  if (unknownRC && letOrLambda) {
    fixRCNames(tokens);
  }

  // Any OPERATOR_TRIM tokens have been indexed already, they now need to be
  // either turned into OPERATORs or UNKNOWNs. Trim operators are only allowed
  // between two REF_RANGE tokens as they are not valid in expressions as full
  // operators.
  for (const index of trimOps) {
    const before = tokens[index - 1];
    const after = tokens[index + 1];
    if (before && before.type === REF_RANGE && after && after.type === REF_RANGE) {
      tokens[index].type = OPERATOR;
    }
    else {
      tokens[index].type = UNKNOWN;
    }
  }

  if (mergeRefs) {
    return mergeRefTokens(tokens);
  }
  return tokens;
}
220
-
221
/**
 * Breaks a string formula into a list of tokens.
 *
 * The returned output will be an array of objects representing the tokens:
 *
 * ```js
 * [
 *   { type: FX_PREFIX, value: '=' },
 *   { type: FUNCTION, value: 'SUM' },
 *   { type: OPERATOR, value: '(' },
 *   { type: REF_RANGE, value: 'A1:B2' },
 *   { type: OPERATOR, value: ')' }
 * ]
 * ```
 *
 * Token types may be found as an Object as the
 * [`tokenTypes` export]{@link tokenTypes} on the package
 * (`import {tokenTypes} from '@borgar/fx';`).
 *
 * To support syntax highlighting as you type, `STRING` tokens are allowed to be
 * "unterminated". For example, the incomplete formula `="Hello world` would be
 * tokenized as:
 *
 * ```js
 * [
 *   { type: FX_PREFIX, value: '=' },
 *   { type: STRING, value: '"Hello world', unterminated: true },
 * ]
 * ```
 *
 * @see tokenTypes
 * @param {string} formula An Excel formula string (an Excel expression).
 * @param {object} [options={}] Options
 * @param {boolean} [options.allowTernary=false] Enables the recognition of ternary ranges in the style of `A1:A` or `A1:1`. These are supported by Google Sheets but not Excel. See: References.md.
 * @param {boolean} [options.negativeNumbers=true] Merges unary minuses with their immediately following number tokens (`-`,`1`) => `-1` (alternatively these will be unary operations in the tree).
 * @param {boolean} [options.r1c1=false] Ranges are expected to be in the R1C1 style format rather than the more popular A1 style.
 * @param {boolean} [options.withLocation=false] Tokens will include source position offsets: `{ loc: [ start, end ] }`
 * @param {boolean} [options.mergeRefs=true] Should ranges be returned as whole references (`Sheet1!A1:B2`) or as separate tokens for each part: (`Sheet1`,`!`,`A1`,`:`,`B2`). This is the same as calling [`mergeRefTokens`](#mergeRefTokens)
 * @param {boolean} [options.xlsx=false] Enables a `[1]Sheet1!A1` or `[1]!name` syntax form for external workbooks found only in XLSX files.
 * @returns {Array<Token>} An array of tokens.
 */
export function tokenize (formula, options = {}) {
  return getTokens(formula, lexers, options);
}