@borgar/fx 4.12.0 → 5.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. package/dist/index-BMr6cTgc.d.cts +1444 -0
  2. package/dist/index-BMr6cTgc.d.ts +1444 -0
  3. package/dist/index.cjs +3054 -0
  4. package/dist/index.cjs.map +1 -0
  5. package/dist/index.d.cts +1 -0
  6. package/dist/index.d.ts +1 -0
  7. package/dist/index.js +2984 -0
  8. package/dist/index.js.map +1 -0
  9. package/dist/xlsx/index.cjs +3120 -0
  10. package/dist/xlsx/index.cjs.map +1 -0
  11. package/dist/xlsx/index.d.cts +55 -0
  12. package/dist/xlsx/index.d.ts +55 -0
  13. package/dist/xlsx/index.js +3049 -0
  14. package/dist/xlsx/index.js.map +1 -0
  15. package/docs/API.md +2959 -718
  16. package/docs/AST_format.md +2 -2
  17. package/eslint.config.mjs +40 -0
  18. package/lib/a1.spec.ts +32 -0
  19. package/lib/a1.ts +26 -0
  20. package/lib/addA1RangeBounds.ts +50 -0
  21. package/lib/addTokenMeta.spec.ts +166 -0
  22. package/lib/{addTokenMeta.js → addTokenMeta.ts} +53 -33
  23. package/lib/astTypes.ts +211 -0
  24. package/lib/cloneToken.ts +29 -0
  25. package/lib/{constants.js → constants.ts} +6 -3
  26. package/lib/fixRanges.spec.ts +220 -0
  27. package/lib/fixRanges.ts +260 -0
  28. package/lib/fromCol.spec.ts +15 -0
  29. package/lib/{fromCol.js → fromCol.ts} +1 -1
  30. package/lib/index.spec.ts +119 -0
  31. package/lib/index.ts +76 -0
  32. package/lib/isNodeType.ts +151 -0
  33. package/lib/isType.spec.ts +208 -0
  34. package/lib/{isType.js → isType.ts} +26 -25
  35. package/lib/lexers/advRangeOp.ts +18 -0
  36. package/lib/lexers/canEndRange.ts +25 -0
  37. package/lib/lexers/lexBoolean.ts +55 -0
  38. package/lib/lexers/lexContext.ts +104 -0
  39. package/lib/lexers/lexError.ts +15 -0
  40. package/lib/lexers/lexFunction.ts +37 -0
  41. package/lib/lexers/lexNameFuncCntx.ts +112 -0
  42. package/lib/lexers/lexNamed.ts +60 -0
  43. package/lib/lexers/lexNewLine.ts +12 -0
  44. package/lib/lexers/lexNumber.ts +48 -0
  45. package/lib/lexers/lexOperator.ts +26 -0
  46. package/lib/lexers/lexRange.ts +15 -0
  47. package/lib/lexers/lexRangeA1.ts +134 -0
  48. package/lib/lexers/lexRangeR1C1.ts +146 -0
  49. package/lib/lexers/lexRangeTrim.ts +26 -0
  50. package/lib/lexers/lexRefOp.ts +19 -0
  51. package/lib/lexers/lexString.ts +22 -0
  52. package/lib/lexers/lexStructured.ts +25 -0
  53. package/lib/lexers/lexWhitespace.ts +31 -0
  54. package/lib/lexers/sets.ts +51 -0
  55. package/lib/mergeRefTokens.spec.ts +141 -0
  56. package/lib/{mergeRefTokens.js → mergeRefTokens.ts} +47 -32
  57. package/lib/nodeTypes.ts +54 -0
  58. package/lib/parse.spec.ts +1410 -0
  59. package/lib/{parser.js → parse.ts} +81 -63
  60. package/lib/parseA1Range.spec.ts +233 -0
  61. package/lib/parseA1Range.ts +206 -0
  62. package/lib/parseA1Ref.spec.ts +337 -0
  63. package/lib/parseA1Ref.ts +115 -0
  64. package/lib/parseR1C1Range.ts +191 -0
  65. package/lib/parseR1C1Ref.spec.ts +323 -0
  66. package/lib/parseR1C1Ref.ts +127 -0
  67. package/lib/parseRef.spec.ts +90 -0
  68. package/lib/parseRef.ts +240 -0
  69. package/lib/parseSRange.ts +240 -0
  70. package/lib/parseStructRef.spec.ts +168 -0
  71. package/lib/parseStructRef.ts +76 -0
  72. package/lib/stringifyA1Range.spec.ts +72 -0
  73. package/lib/stringifyA1Range.ts +72 -0
  74. package/lib/stringifyA1Ref.spec.ts +64 -0
  75. package/lib/stringifyA1Ref.ts +59 -0
  76. package/lib/{stringifyPrefix.js → stringifyPrefix.ts} +17 -2
  77. package/lib/stringifyR1C1Range.spec.ts +92 -0
  78. package/lib/stringifyR1C1Range.ts +73 -0
  79. package/lib/stringifyR1C1Ref.spec.ts +63 -0
  80. package/lib/stringifyR1C1Ref.ts +67 -0
  81. package/lib/stringifyStructRef.spec.ts +124 -0
  82. package/lib/stringifyStructRef.ts +113 -0
  83. package/lib/stringifyTokens.ts +15 -0
  84. package/lib/toCol.spec.ts +11 -0
  85. package/lib/{toCol.js → toCol.ts} +4 -4
  86. package/lib/tokenTypes.ts +76 -0
  87. package/lib/tokenize-srefs.spec.ts +429 -0
  88. package/lib/tokenize.spec.ts +2103 -0
  89. package/lib/tokenize.ts +346 -0
  90. package/lib/translate.spec.ts +35 -0
  91. package/lib/translateToA1.spec.ts +247 -0
  92. package/lib/translateToA1.ts +231 -0
  93. package/lib/translateToR1C1.spec.ts +227 -0
  94. package/lib/translateToR1C1.ts +145 -0
  95. package/lib/types.ts +179 -0
  96. package/lib/xlsx/index.spec.ts +27 -0
  97. package/lib/xlsx/index.ts +32 -0
  98. package/package.json +46 -30
  99. package/tsconfig.json +28 -0
  100. package/typedoc-ignore-links.ts +17 -0
  101. package/typedoc.json +41 -0
  102. package/.eslintrc +0 -22
  103. package/dist/fx.d.ts +0 -823
  104. package/dist/fx.js +0 -2
  105. package/dist/package.json +0 -1
  106. package/lib/a1.js +0 -348
  107. package/lib/a1.spec.js +0 -458
  108. package/lib/addTokenMeta.spec.js +0 -153
  109. package/lib/astTypes.js +0 -96
  110. package/lib/extraTypes.js +0 -74
  111. package/lib/fixRanges.js +0 -104
  112. package/lib/fixRanges.spec.js +0 -170
  113. package/lib/fromCol.spec.js +0 -11
  114. package/lib/index.js +0 -134
  115. package/lib/index.spec.js +0 -67
  116. package/lib/isType.spec.js +0 -168
  117. package/lib/lexer-srefs.spec.js +0 -324
  118. package/lib/lexer.js +0 -283
  119. package/lib/lexer.spec.js +0 -1953
  120. package/lib/lexerParts.js +0 -228
  121. package/lib/mergeRefTokens.spec.js +0 -121
  122. package/lib/package.json +0 -1
  123. package/lib/parseRef.js +0 -157
  124. package/lib/parseRef.spec.js +0 -71
  125. package/lib/parseSRange.js +0 -167
  126. package/lib/parseStructRef.js +0 -48
  127. package/lib/parseStructRef.spec.js +0 -164
  128. package/lib/parser.spec.js +0 -1208
  129. package/lib/rc.js +0 -341
  130. package/lib/rc.spec.js +0 -403
  131. package/lib/stringifyStructRef.js +0 -80
  132. package/lib/stringifyStructRef.spec.js +0 -182
  133. package/lib/toCol.spec.js +0 -11
  134. package/lib/translate-toA1.spec.js +0 -214
  135. package/lib/translate-toRC.spec.js +0 -197
  136. package/lib/translate.js +0 -239
  137. package/lib/translate.spec.js +0 -21
  138. package/rollup.config.mjs +0 -22
  139. package/tsd.json +0 -12
package/lib/lexer.js DELETED
@@ -1,283 +0,0 @@
1
- import {
2
- FX_PREFIX,
3
- NEWLINE,
4
- NUMBER,
5
- OPERATOR,
6
- REF_NAMED,
7
- STRING,
8
- UNKNOWN,
9
- WHITESPACE,
10
- FUNCTION,
11
- OPERATOR_TRIM,
12
- REF_RANGE
13
- } from './constants.js';
14
- import { lexers } from './lexerParts.js';
15
- import { mergeRefTokens } from './mergeRefTokens.js';
16
-
17
// True when `t` is a token of the given `type`; falsy `t` is passed through
// unchanged (so the result is only meaningful in a boolean context).
const isType = (t, type) => (t ? t.type === type : t);
19
// Default tokenizer options (see the `tokenize` JSDoc for full descriptions).
const defaultOptions = {
  // include `loc: [ start, end ]` source offsets on each token
  withLocation: false,
  // join the parts of a reference (`Sheet1`, `!`, `A1`) into single tokens
  mergeRefs: true,
  // allow ternary ranges (`A1:A`) in the style of Google Sheets
  allowTernary: false,
  // merge a unary minus into an immediately following number token
  negativeNumbers: true,
  // expect ranges in R1C1 style rather than A1 style
  r1c1: false
};
26
-
27
// A token is "text-like" when it can merge with adjacent UNKNOWN tokens:
// named references and function names.
const isTextToken = token => {
  const { type } = token;
  return type === REF_NAMED || type === FUNCTION;
};
33
-
34
// Decides whether a minus that follows `token` should read as a binary
// (rather than unary) operator. Any non-operator token causes that, as do
// the "closing" operators `%`, `}`, `)` and `#`.
const causesBinaryMinus = token => {
  if (!isType(token, OPERATOR)) {
    return true;
  }
  const v = token.value;
  return v === '%' || v === '}' || v === ')' || v === '#';
};
42
-
43
/**
 * Converts UNKNOWN single-letter `r`/`c` tokens into REF_NAMED tokens when
 * they occur inside a LET or LAMBDA call, where they are legal parameter
 * names. Mutates the tokens in place and returns the same array.
 *
 * @param {Array<object>} tokens The token stream to patch.
 * @returns {Array<object>} The same (mutated) token array.
 */
function fixRCNames (tokens) {
  let withinCall = 0;
  let parenDepth = 0;
  let lastToken = null;
  for (const token of tokens) {
    if (token.type === OPERATOR) {
      if (token.value === '(') {
        parenDepth++;
        // FIX: guard against `lastToken` being unset — a formula can open
        // with a parenthesis (e.g. `(LET(r,1,r))`), which previously threw
        // a TypeError when dereferencing `lastToken.type`.
        if (lastToken && lastToken.type === FUNCTION) {
          const v = lastToken.value.toLowerCase();
          if (v === 'lambda' || v === 'let') {
            withinCall = parenDepth;
          }
        }
      }
      else if (token.value === ')') {
        parenDepth--;
        if (parenDepth < withinCall) {
          withinCall = 0;
        }
      }
    }
    // NOTE(review): this test is lowercase-only while the caller counts
    // unknown R/C case-insensitively — confirm whether `R`/`C` should match.
    else if (withinCall && token.type === UNKNOWN && /^[rc]$/.test(token.value)) {
      token.type = REF_NAMED;
    }
    lastToken = token;
  }
  return tokens;
}
72
-
73
/**
 * Core tokenizer: runs the supplied lexer functions over a formula string
 * and returns a list of token objects.
 *
 * @param {string} fx The formula source text.
 * @param {Array<Function>} tokenHandlers Lexer functions; each is called with
 *   the remaining source and the options, and returns `{ type, value }` or
 *   a falsy value when it does not match.
 * @param {object} [options={}] See `tokenize` for the available options.
 * @returns {Array<object>} The token stream (merged when `mergeRefs` is on).
 */
export function getTokens (fx, tokenHandlers, options = {}) {
  const opts = Object.assign({}, defaultOptions, options);
  const { withLocation, mergeRefs, negativeNumbers } = opts;
  const tokens = [];
  let pos = 0;
  let letOrLambda = 0;
  let unknownRC = 0;
  // indexes (into `tokens`) of OPERATOR_TRIM tokens, resolved after the scan
  const trimOps = [];

  let tail0 = null; // last non-whitespace token
  let tail1 = null; // penultimate non-whitespace token
  let lastToken = null; // last token
  const pushToken = token => {
    const isCurrUnknown = token.type === UNKNOWN;
    const isLastUnknown = lastToken && lastToken.type === UNKNOWN;
    if (lastToken && (
      (isCurrUnknown && isLastUnknown) ||
      (isCurrUnknown && isTextToken(lastToken)) ||
      (isLastUnknown && isTextToken(token))
    )) {
      // UNKNOWN tokens "contaminate" sibling text tokens: merge into one
      lastToken.value += token.value;
      lastToken.type = UNKNOWN;
      if (withLocation) {
        lastToken.loc[1] = token.loc[1];
      }
    }
    else {
      if (token.type === OPERATOR_TRIM) {
        // remember the position; the type is decided after the full scan
        trimOps.push(tokens.length);
        token.type = UNKNOWN;
      }
      // push token as normally
      tokens.push(token);
      lastToken = token;
      if (token.type !== WHITESPACE && token.type !== NEWLINE) {
        tail1 = tail0;
        tail0 = token;
      }
    }
  };

  // a leading '=' is emitted as an FX_PREFIX token
  if (fx[0] === '=') {
    const token = {
      type: FX_PREFIX,
      value: '=',
      ...(withLocation ? { loc: [ 0, 1 ] } : {})
    };
    pos++;
    pushToken(token);
  }

  while (pos < fx.length) {
    const startPos = pos;
    const s = fx.slice(pos);
    let tokenType = '';
    let tokenValue = '';
    // first matching handler wins
    for (let i = 0; i < tokenHandlers.length; i++) {
      const t = tokenHandlers[i](s, opts);
      if (t) {
        tokenType = t.type;
        tokenValue = t.value;
        pos += tokenValue.length;
        break;
      }
    }

    // no handler matched: consume a single character as UNKNOWN
    if (!tokenType) {
      tokenType = UNKNOWN;
      tokenValue = fx[pos];
      pos++;
    }

    const token = {
      type: tokenType,
      value: tokenValue,
      ...(withLocation ? { loc: [ startPos, pos ] } : {})
    };

    // make a note if we found a let/lambda call
    if (lastToken && lastToken.type === FUNCTION && tokenValue === '(') {
      const lastLC = lastToken.value.toLowerCase();
      if (lastLC === 'lambda' || lastLC === 'let') {
        letOrLambda++;
      }
    }
    // make a note if we found a R or C unknown
    if (tokenType === UNKNOWN) {
      const valLC = tokenValue.toLowerCase();
      unknownRC += (valLC === 'r' || valLC === 'c') ? 1 : 0;
    }

    // check strings for termination (supports highlight-as-you-type)
    if (tokenType === STRING) {
      const l = tokenValue.length;
      if (tokenValue === '""') {
        // common case that IS terminated
      }
      else if (tokenValue === '"' || tokenValue[l - 1] !== '"') {
        token.unterminated = true;
      }
      else if (tokenValue !== '""' && tokenValue[l - 2] === '"') {
        // trailing run of quotes: parity decides termination
        // NOTE(review): `atStart` is a number used with `^` — verify the
        // intended boolean coercion before restructuring this expression
        let p = l - 1;
        while (tokenValue[p] === '"') { p--; }
        const atStart = (p + 1);
        const oddNum = ((l - p + 1) % 2 === 0);
        if (!atStart ^ oddNum) {
          token.unterminated = true;
        }
      }
    }

    if (negativeNumbers && tokenType === NUMBER) {
      const last1 = lastToken;
      // do we have a number preceded by a minus?
      if (last1 && isType(last1, OPERATOR) && last1.value === '-') {
        // missing tail1 means we are at the start of the stream
        if (
          !tail1 ||
          isType(tail1, FX_PREFIX) ||
          !causesBinaryMinus(tail1)
        ) {
          const minus = tokens.pop();
          token.value = '-' + tokenValue;
          if (withLocation) {
            // ensure offsets are up to date
            token.loc[0] = minus.loc[0];
          }
          // next step tries to counter the screwing around with the tailing
          // it should be correct again once we pushToken()
          tail0 = tail1;
          lastToken = tokens[tokens.length - 1];
        }
      }
    }

    pushToken(token);
  }

  // if we encountered both a LAMBDA/LET call, and unknown 'r' or 'c' tokens
  // we'll turn the unknown tokens into names within the call.
  if (unknownRC && letOrLambda) {
    fixRCNames(tokens);
  }

  // Any OPERATOR_TRIM tokens have been indexed already, they now need to be
  // either turned into OPERATORs or UNKNOWNs. Trim operators are only allowed
  // between two REF_RANGE tokens as they are not valid in expressions as full
  // operators.
  for (const index of trimOps) {
    const before = tokens[index - 1];
    // FIX: was `tokens[index - 1]`, which compared `before` against itself
    // and accepted trim operators with a non-range token on the right.
    const after = tokens[index + 1];
    if (before && before.type === REF_RANGE && after && after.type === REF_RANGE) {
      tokens[index].type = OPERATOR;
    }
    else {
      tokens[index].type = UNKNOWN;
    }
  }

  if (mergeRefs) {
    return mergeRefTokens(tokens);
  }

  return tokens;
}
239
-
240
/**
 * Breaks a string formula into a list of tokens.
 *
 * The returned output will be an array of objects representing the tokens:
 *
 * ```js
 * [
 *   { type: FX_PREFIX, value: '=' },
 *   { type: FUNCTION, value: 'SUM' },
 *   { type: OPERATOR, value: '(' },
 *   { type: REF_RANGE, value: 'A1:B2' },
 *   { type: OPERATOR, value: ')' }
 * ]
 * ```
 *
 * Token types may be found as an Object as the
 * [`tokenTypes` export]{@link tokenTypes} on the package
 * (`import {tokenTypes} from '@borgar/fx';`).
 *
 * To support syntax highlighting as you type, `STRING` tokens are allowed to be
 * "unterminated". For example, the incomplete formula `="Hello world` would be
 * tokenized as:
 *
 * ```js
 * [
 *   { type: FX_PREFIX, value: '=' },
 *   { type: STRING, value: '"Hello world', unterminated: true },
 * ]
 * ```
 *
 * @see tokenTypes
 * @param {string} formula An Excel formula string (an Excel expression).
 * @param {object} [options={}] Options
 * @param {boolean} [options.allowTernary=false] Enables the recognition of ternary ranges in the style of `A1:A` or `A1:1`. These are supported by Google Sheets but not Excel. See: References.md.
 * @param {boolean} [options.negativeNumbers=true] Merges unary minuses with their immediately following number tokens (`-`,`1`) => `-1` (alternatively these will be unary operations in the tree).
 * @param {boolean} [options.r1c1=false] Ranges are expected to be in the R1C1 style format rather than the more popular A1 style.
 * @param {boolean} [options.withLocation=false] Tokens will include source position offsets: `{ loc: [ start, end ] }`
 * @param {boolean} [options.mergeRefs=true] Should ranges be returned as whole references (`Sheet1!A1:B2`) or as separate tokens for each part: (`Sheet1`,`!`,`A1`,`:`,`B2`). This is the same as calling [`mergeRefTokens`](#mergeRefTokens)
 * @param {boolean} [options.xlsx=false] Enables a `[1]Sheet1!A1` or `[1]!name` syntax form for external workbooks found only in XLSX files.
 * @returns {Array<Token>} An array of token objects.
 */
export function tokenize (formula, options = {}) {
  return getTokens(formula, lexers, options);
}