@borgar/fx 4.12.0 → 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index-BMr6cTgc.d.cts +1444 -0
- package/dist/index-BMr6cTgc.d.ts +1444 -0
- package/dist/index.cjs +3054 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +1 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +2984 -0
- package/dist/index.js.map +1 -0
- package/dist/xlsx/index.cjs +3120 -0
- package/dist/xlsx/index.cjs.map +1 -0
- package/dist/xlsx/index.d.cts +55 -0
- package/dist/xlsx/index.d.ts +55 -0
- package/dist/xlsx/index.js +3049 -0
- package/dist/xlsx/index.js.map +1 -0
- package/docs/API.md +2959 -718
- package/docs/AST_format.md +2 -2
- package/eslint.config.mjs +40 -0
- package/lib/a1.spec.ts +32 -0
- package/lib/a1.ts +26 -0
- package/lib/addA1RangeBounds.ts +50 -0
- package/lib/addTokenMeta.spec.ts +166 -0
- package/lib/{addTokenMeta.js → addTokenMeta.ts} +53 -33
- package/lib/astTypes.ts +211 -0
- package/lib/cloneToken.ts +29 -0
- package/lib/{constants.js → constants.ts} +6 -3
- package/lib/fixRanges.spec.ts +220 -0
- package/lib/fixRanges.ts +260 -0
- package/lib/fromCol.spec.ts +15 -0
- package/lib/{fromCol.js → fromCol.ts} +1 -1
- package/lib/index.spec.ts +119 -0
- package/lib/index.ts +76 -0
- package/lib/isNodeType.ts +151 -0
- package/lib/isType.spec.ts +208 -0
- package/lib/{isType.js → isType.ts} +26 -25
- package/lib/lexers/advRangeOp.ts +18 -0
- package/lib/lexers/canEndRange.ts +25 -0
- package/lib/lexers/lexBoolean.ts +55 -0
- package/lib/lexers/lexContext.ts +104 -0
- package/lib/lexers/lexError.ts +15 -0
- package/lib/lexers/lexFunction.ts +37 -0
- package/lib/lexers/lexNameFuncCntx.ts +112 -0
- package/lib/lexers/lexNamed.ts +60 -0
- package/lib/lexers/lexNewLine.ts +12 -0
- package/lib/lexers/lexNumber.ts +48 -0
- package/lib/lexers/lexOperator.ts +26 -0
- package/lib/lexers/lexRange.ts +15 -0
- package/lib/lexers/lexRangeA1.ts +134 -0
- package/lib/lexers/lexRangeR1C1.ts +146 -0
- package/lib/lexers/lexRangeTrim.ts +26 -0
- package/lib/lexers/lexRefOp.ts +19 -0
- package/lib/lexers/lexString.ts +22 -0
- package/lib/lexers/lexStructured.ts +25 -0
- package/lib/lexers/lexWhitespace.ts +31 -0
- package/lib/lexers/sets.ts +51 -0
- package/lib/mergeRefTokens.spec.ts +141 -0
- package/lib/{mergeRefTokens.js → mergeRefTokens.ts} +47 -32
- package/lib/nodeTypes.ts +54 -0
- package/lib/parse.spec.ts +1410 -0
- package/lib/{parser.js → parse.ts} +81 -63
- package/lib/parseA1Range.spec.ts +233 -0
- package/lib/parseA1Range.ts +206 -0
- package/lib/parseA1Ref.spec.ts +337 -0
- package/lib/parseA1Ref.ts +115 -0
- package/lib/parseR1C1Range.ts +191 -0
- package/lib/parseR1C1Ref.spec.ts +323 -0
- package/lib/parseR1C1Ref.ts +127 -0
- package/lib/parseRef.spec.ts +90 -0
- package/lib/parseRef.ts +240 -0
- package/lib/parseSRange.ts +240 -0
- package/lib/parseStructRef.spec.ts +168 -0
- package/lib/parseStructRef.ts +76 -0
- package/lib/stringifyA1Range.spec.ts +72 -0
- package/lib/stringifyA1Range.ts +72 -0
- package/lib/stringifyA1Ref.spec.ts +64 -0
- package/lib/stringifyA1Ref.ts +59 -0
- package/lib/{stringifyPrefix.js → stringifyPrefix.ts} +17 -2
- package/lib/stringifyR1C1Range.spec.ts +92 -0
- package/lib/stringifyR1C1Range.ts +73 -0
- package/lib/stringifyR1C1Ref.spec.ts +63 -0
- package/lib/stringifyR1C1Ref.ts +67 -0
- package/lib/stringifyStructRef.spec.ts +124 -0
- package/lib/stringifyStructRef.ts +113 -0
- package/lib/stringifyTokens.ts +15 -0
- package/lib/toCol.spec.ts +11 -0
- package/lib/{toCol.js → toCol.ts} +4 -4
- package/lib/tokenTypes.ts +76 -0
- package/lib/tokenize-srefs.spec.ts +429 -0
- package/lib/tokenize.spec.ts +2103 -0
- package/lib/tokenize.ts +346 -0
- package/lib/translate.spec.ts +35 -0
- package/lib/translateToA1.spec.ts +247 -0
- package/lib/translateToA1.ts +231 -0
- package/lib/translateToR1C1.spec.ts +227 -0
- package/lib/translateToR1C1.ts +145 -0
- package/lib/types.ts +179 -0
- package/lib/xlsx/index.spec.ts +27 -0
- package/lib/xlsx/index.ts +32 -0
- package/package.json +46 -30
- package/tsconfig.json +28 -0
- package/typedoc-ignore-links.ts +17 -0
- package/typedoc.json +41 -0
- package/.eslintrc +0 -22
- package/dist/fx.d.ts +0 -823
- package/dist/fx.js +0 -2
- package/dist/package.json +0 -1
- package/lib/a1.js +0 -348
- package/lib/a1.spec.js +0 -458
- package/lib/addTokenMeta.spec.js +0 -153
- package/lib/astTypes.js +0 -96
- package/lib/extraTypes.js +0 -74
- package/lib/fixRanges.js +0 -104
- package/lib/fixRanges.spec.js +0 -170
- package/lib/fromCol.spec.js +0 -11
- package/lib/index.js +0 -134
- package/lib/index.spec.js +0 -67
- package/lib/isType.spec.js +0 -168
- package/lib/lexer-srefs.spec.js +0 -324
- package/lib/lexer.js +0 -283
- package/lib/lexer.spec.js +0 -1953
- package/lib/lexerParts.js +0 -228
- package/lib/mergeRefTokens.spec.js +0 -121
- package/lib/package.json +0 -1
- package/lib/parseRef.js +0 -157
- package/lib/parseRef.spec.js +0 -71
- package/lib/parseSRange.js +0 -167
- package/lib/parseStructRef.js +0 -48
- package/lib/parseStructRef.spec.js +0 -164
- package/lib/parser.spec.js +0 -1208
- package/lib/rc.js +0 -341
- package/lib/rc.spec.js +0 -403
- package/lib/stringifyStructRef.js +0 -80
- package/lib/stringifyStructRef.spec.js +0 -182
- package/lib/toCol.spec.js +0 -11
- package/lib/translate-toA1.spec.js +0 -214
- package/lib/translate-toRC.spec.js +0 -197
- package/lib/translate.js +0 -239
- package/lib/translate.spec.js +0 -21
- package/rollup.config.mjs +0 -22
- package/tsd.json +0 -12
package/lib/lexerParts.js
DELETED
@@ -1,228 +0,0 @@
-import {
-  OPERATOR,
-  BOOLEAN,
-  ERROR,
-  NUMBER,
-  FUNCTION,
-  NEWLINE,
-  WHITESPACE,
-  STRING,
-  CONTEXT,
-  CONTEXT_QUOTE,
-  REF_RANGE,
-  REF_BEAM,
-  REF_NAMED,
-  REF_TERNARY,
-  REF_STRUCT,
-  MAX_COLS,
-  MAX_ROWS,
-  OPERATOR_TRIM
-} from './constants.js';
-import { fromCol } from './fromCol.js';
-import { parseSRange } from './parseSRange.js';
-
-const re_ERROR = /^#(NAME\?|FIELD!|CALC!|VALUE!|REF!|DIV\/0!|NULL!|NUM!|N\/A|GETTING_DATA\b|SPILL!|UNKNOWN!|FIELD\b|CALC\b|SYNTAX\?|ERROR!|CONNECT!|BLOCKED!|EXTERNAL!)/i;
-const re_OPERATOR = /^(<=|>=|<>|[-+/*^%&<>=]|[{},;]|[()]|@|:|!|#)/;
-const re_BOOLEAN = /^(TRUE|FALSE)\b/i;
-const re_FUNCTION = /^[A-Z_]+[A-Z\d_.]*(?=\()/i;
-const re_NEWLINE = /^\n+/;
-const re_WHITESPACE = /^[ \f\r\t\v\u00a0\u1680\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff]+/;
-const re_STRING = /^"(?:""|[^"])*("|$)/;
-const re_NUMBER = /^(?:\d+(\.\d+)?(?:[eE][+-]?\d+)?|\d+)/;
-const re_CONTEXT = /^(?!!)(\[(?:[^\]])+\])?([0-9A-Za-z._¡¤§¨ª\u00ad¯-\uffff]+)?(?=!)/;
-const re_CONTEXT_QUOTE = /^'(?:''|[^'])*('|$)(?=!)/;
-const re_RANGE_TRIM = /^(\.:\.|\.:|:\.)/;
-
-const rngPart = '\\$?[A-Z]{1,3}\\$?[1-9][0-9]{0,6}';
-const colPart = '\\$?[A-Z]{1,3}';
-const rowPart = '\\$?[1-9][0-9]{0,6}';
-const rangeOp = '\\.?:\\.?';
-const nextNotChar = '(?![a-z0-9_\\u00a1-\\uffff])';
-const re_A1COL = new RegExp(`^${colPart}${rangeOp}${colPart}${nextNotChar}`, 'i');
-const re_A1ROW = new RegExp(`^${rowPart}${rangeOp}${rowPart}${nextNotChar}`, 'i');
-const re_A1RANGE = new RegExp(`^${rngPart}${nextNotChar}`, 'i');
-const re_A1PARTIAL = new RegExp(`^((${colPart}|${rowPart})${rangeOp}${rngPart}|${rngPart}${rangeOp}(${colPart}|${rowPart}))(?![\\w($.])`, 'i');
-const rPart = '(?:R(?:\\[[+-]?\\d+\\]|[1-9][0-9]{0,6})?)';
-const cPart = '(?:C(?:\\[[+-]?\\d+\\]|[1-9][0-9]{0,4})?)';
-const re_RCCOL = new RegExp(`^${cPart}(${rangeOp}${cPart})?${nextNotChar}`, 'i');
-const re_RCROW = new RegExp(`^${rPart}(${rangeOp}${rPart})?${nextNotChar}`, 'i');
-const re_RCRANGE = new RegExp(`^(?:(?=[RC])${rPart}${cPart})${nextNotChar}`, 'i');
-const re_RCPARTIAL = new RegExp(`^(${rPart}${cPart}(${rangeOp}${cPart}|${rangeOp}${rPart})(?![[\\d])|(${rPart}|${cPart})(${rangeOp}${rPart}${cPart}))${nextNotChar}`, 'i');
-
-// The advertized named ranges rules are a bit off from what Excel seems to do:
-// in the "extended range" of chars, it looks like it allows most things above
-// U+00B0 with the range between U+00A0-U+00AF rather random.
-// eslint-disable-next-line
-// const re_NAMED = /^[a-zA-Z\\_¡¤§¨ª\u00ad¯\u00b0-\uffff][a-zA-Z0-9\\_.?¡¤§¨ª\u00ad¯\u00b0-\uffff]{0,254}/i;
-// I've simplified to allowing everything above U+00A1:
-const re_NAMED = /^[a-zA-Z\\_\u00a1-\uffff][a-zA-Z0-9\\_.?\u00a1-\uffff]{0,254}/i;
-
-function makeHandler (type, re) {
-  return str => {
-    const m = re.exec(str);
-    if (m) {
-      return { type: type, value: m[0] };
-    }
-  };
-}
-
-function lexNamed (str) {
-  const m = re_NAMED.exec(str);
-  if (m) {
-    const lc = m[0].toLowerCase();
-    // names starting with \ must be at least 3 char long
-    if (lc[0] === '\\' && m[0].length < 3) {
-      return null;
-    }
-    // single characters R and C are forbidden as names
-    if (lc === 'r' || lc === 'c') {
-      return null;
-    }
-    return { type: REF_NAMED, value: m[0] };
-  }
-}
-
-const re_QUOTED_VALUE = /^'(?:[^[\]]+?)?(?:\[(.+?)\])?(?:[^[\]]+?)'$/;
-const re_QUOTED_VALUE_XLSX = /^'\[(.+?)\]'$/;
-function lexContext (str, options) {
-  const mq = re_CONTEXT_QUOTE.exec(str);
-  if (mq) {
-    const value = mq[0];
-    const isValid = options.xlsx
-      ? re_QUOTED_VALUE_XLSX.test(value) || re_QUOTED_VALUE.test(value)
-      : re_QUOTED_VALUE.test(value);
-    if (isValid) {
-      return { type: CONTEXT_QUOTE, value: value };
-    }
-  }
-  // xlsx xml uses a variant of the syntax that has external references in
-  // bracets. Any of: [1]Sheet1!A1, '[1]Sheet one'!A1, [1]!named
-  // We're only concerned with the non quoted version here as the quoted version
-  // doesn't currently examine what is in the quotes.
-  const m = re_CONTEXT.exec(str);
-  if (m) {
-    const [ , a, b ] = m;
-    const valid = (
-      ((a && b) || b) || // "[a]b!" or "b!" forms
-      (a && !b && options.xlsx) // "[a]" form (allowed in xlsx mode)
-    );
-    if (valid) {
-      return { type: CONTEXT, value: m[0] };
-    }
-  }
-}
-
-function lexStructured (str) {
-  const structData = parseSRange(str);
-  if (structData) {
-    // we have a match for a valid SR
-    let i = structData.length;
-    // skip tailing whitespace
-    while (str[i] === ' ') {
-      i++;
-    }
-    // and ensure that it isn't followed by a !
-    if (str[i] !== '!') {
-      return {
-        type: REF_STRUCT,
-        value: structData.token
-      };
-    }
-  }
-  return null;
-}
-
-const reRCNums = /([RC])(\[?)(-?\d+)/gi;
-const reA1Nums = /(\d+|[a-zA-Z]+)/gi;
-function lexRange (str, options) {
-  let m, t;
-  if (options.r1c1) {
-    // RC notation
-    if (options.allowTernary && (m = re_RCPARTIAL.exec(str))) {
-      t = { type: REF_TERNARY, value: m[0] };
-    }
-    else if ((m = re_RCRANGE.exec(str))) {
-      t = { type: REF_RANGE, value: m[0] };
-    }
-    else if ((m = re_RCROW.exec(str)) || (m = re_RCCOL.exec(str))) {
-      t = { type: REF_BEAM, value: m[0] };
-    }
-    if (t) {
-      reRCNums.lastIndex = 0;
-      while ((m = reRCNums.exec(t.value)) !== null) {
-        const x = (m[1] === 'R' ? MAX_ROWS : MAX_COLS) + (m[2] ? 0 : 1);
-        const val = parseInt(m[3], 10);
-        if (val > x || val < -x) {
-          return null;
-        }
-      }
-      return t;
-    }
-  }
-  else {
-    // A1 notation
-    if (options.allowTernary && (m = re_A1PARTIAL.exec(str))) {
-      t = { type: REF_TERNARY, value: m[0] };
-    }
-    else if ((m = re_A1COL.exec(str)) || (m = re_A1ROW.exec(str))) {
-      t = { type: REF_BEAM, value: m[0] };
-    }
-    else if ((m = re_A1RANGE.exec(str))) {
-      t = { type: REF_RANGE, value: m[0] };
-    }
-    if (t) {
-      reA1Nums.lastIndex = 0;
-      // XXX: can probably optimize this as we know letters can only be 3 at max
-      while ((m = reA1Nums.exec(t.value)) !== null) {
-        if (/^\d/.test(m[1])) { // row
-          if ((parseInt(m[1], 10) - 1) > MAX_ROWS) {
-            return null;
-          }
-        }
-        else if (fromCol(m[1]) > MAX_COLS) {
-          return null;
-        }
-      }
-      return t;
-    }
-  }
-}
-
-function lexRefOp (s, opts) {
-  // in R1C1 mode we only allow !
-  if (opts.r1c1) {
-    return (s[0] === '!')
-      ? { type: OPERATOR, value: s[0] }
-      : null;
-  }
-  // in A1 mode we allow [ '!', ':', '.:', ':.', '.:.']
-  const m = /^(!|\.?:\.?)/.exec(s);
-  if (m) {
-    return { type: OPERATOR, value: m[1] };
-  }
-  return null;
-}
-
-export const lexers = [
-  makeHandler(ERROR, re_ERROR),
-  makeHandler(OPERATOR_TRIM, re_RANGE_TRIM),
-  makeHandler(OPERATOR, re_OPERATOR),
-  makeHandler(FUNCTION, re_FUNCTION),
-  makeHandler(BOOLEAN, re_BOOLEAN),
-  makeHandler(NEWLINE, re_NEWLINE),
-  makeHandler(WHITESPACE, re_WHITESPACE),
-  makeHandler(STRING, re_STRING),
-  lexContext,
-  lexRange,
-  lexStructured,
-  makeHandler(NUMBER, re_NUMBER),
-  lexNamed
-];
-
-export const lexersRefs = [
-  lexRefOp,
-  lexContext,
-  lexRange,
-  lexStructured,
-  lexNamed
-];
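The deleted lexerParts.js above exports two prioritized handler lists (`lexers` and `lexersRefs`); each handler takes the remaining input (plus options) and returns a `{ type, value }` token when its anchored pattern matches at the start of the string. The old lexer.js consumed these lists via `getTokens`/`tokenize` (see parseRef.js further down). The sketch below only illustrates that driving loop under those assumptions; `runLexers` and its fallback token are hypothetical names, not the package's actual implementation.

```js
// Hypothetical driver, for illustration only (not the package's lexer.js).
// Assumes each handler returns { type, value } for a match anchored at the
// start of the input, or null/undefined otherwise.
function runLexers (input, handlers, options = {}) {
  const tokens = [];
  let pos = 0;
  while (pos < input.length) {
    const rest = input.slice(pos);
    let token = null;
    for (const lex of handlers) {
      token = lex(rest, options);
      if (token) { break; }
    }
    if (!token) {
      // no handler matched: emit a single character and keep going
      token = { type: 'unknown', value: rest[0] };
    }
    tokens.push(token);
    pos += token.value.length;
  }
  return tokens;
}
```

Ordering matters in such a loop: in the `lexers` list above, error literals and the trim range operator are tried before the generic operator handler, so `#REF!` and `.:.` are not broken into smaller tokens.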
package/lib/mergeRefTokens.spec.js
DELETED
@@ -1,121 +0,0 @@
-import { CONTEXT, FUNCTION, FX_PREFIX, OPERATOR, REF_RANGE, REF_BEAM, REF_NAMED, REF_TERNARY, UNKNOWN } from './constants.js';
-import { test } from 'tape';
-import { mergeRefTokens } from './mergeRefTokens.js';
-import { tokenize } from './lexer.js';
-
-test('mergeRefTokens basics', t => {
-  const list = tokenize('=SUM([Wb1]Sheet1!A1:B2)', { mergeRefs: false, withLocation: true });
-
-  t.deepEqual(list, [
-    { type: FX_PREFIX, value: '=', loc: [ 0, 1 ] },
-    { type: FUNCTION, value: 'SUM', loc: [ 1, 4 ] },
-    { type: OPERATOR, value: '(', loc: [ 4, 5 ] },
-
-    { type: CONTEXT, value: '[Wb1]Sheet1', loc: [ 5, 16 ] },
-    { type: OPERATOR, value: '!', loc: [ 16, 17 ] },
-    { type: REF_RANGE, value: 'A1', loc: [ 17, 19 ] },
-    { type: OPERATOR, value: ':', loc: [ 19, 20 ] },
-    { type: REF_RANGE, value: 'B2', loc: [ 20, 22 ] },
-
-    { type: OPERATOR, value: ')', loc: [ 22, 23 ] }
-  ]);
-
-  // set IDs on all tokens about to be joined
-  list[3].id = 'id1';
-  list[4].id = 'id2';
-  list[5].id = 'id3';
-  list[6].id = 'id4';
-  list[7].id = 'id5';
-
-  const mergedList = mergeRefTokens(list);
-  t.deepEqual(mergedList, [
-    { type: FX_PREFIX, value: '=', loc: [ 0, 1 ] },
-    { type: FUNCTION, value: 'SUM', loc: [ 1, 4 ] },
-    { type: OPERATOR, value: '(', loc: [ 4, 5 ] },
-    { type: REF_RANGE,
-      id: 'id5', // token has the id of the first one
-      value: '[Wb1]Sheet1!A1:B2',
-      loc: [ 5, 22 ] },
-    { type: OPERATOR, value: ')', loc: [ 22, 23 ] }
-  ]);
-
-  t.end();
-});
-
-test('mergeRefTokens cases', t => {
-  const opts = { mergeRefs: true, allowTernary: true };
-  t.deepEqual(tokenize('A1', opts), [
-    { type: REF_RANGE, value: 'A1' }
-  ]);
-  t.deepEqual(tokenize('A1:A1', opts), [
-    { type: REF_RANGE, value: 'A1:A1' }
-  ]);
-  t.deepEqual(tokenize('A:A', opts), [
-    { type: REF_BEAM, value: 'A:A' }
-  ]);
-  t.deepEqual(tokenize('A1:A', opts), [
-    { type: REF_TERNARY, value: 'A1:A' }
-  ]);
-
-  t.deepEqual(tokenize('\'Sheet1\'!A1', opts), [
-    { type: REF_RANGE, value: '\'Sheet1\'!A1' }
-  ]);
-  t.deepEqual(tokenize('\'Sheet1\'!A:A', opts), [
-    { type: REF_BEAM, value: '\'Sheet1\'!A:A' }
-  ]);
-  t.deepEqual(tokenize('\'Sheet1\'!A1:A', opts), [
-    { type: REF_TERNARY, value: '\'Sheet1\'!A1:A' }
-  ]);
-  t.deepEqual(tokenize('\'Sheet1\'!A1:A', opts), [
-    { type: REF_TERNARY, value: '\'Sheet1\'!A1:A' }
-  ]);
-
-  t.deepEqual(tokenize('Sheet1!A1', opts), [
-    { type: REF_RANGE, value: 'Sheet1!A1' }
-  ]);
-  t.deepEqual(tokenize('Sheet1!A:A', opts), [
-    { type: REF_BEAM, value: 'Sheet1!A:A' }
-  ]);
-  t.deepEqual(tokenize('Sheet1!A1:A', opts), [
-    { type: REF_TERNARY, value: 'Sheet1!A1:A' }
-  ]);
-  t.deepEqual(tokenize('Sheet1!A1:A', opts), [
-    { type: REF_TERNARY, value: 'Sheet1!A1:A' }
-  ]);
-
-  t.deepEqual(tokenize('[WB]Sheet1!A1', opts), [
-    { type: REF_RANGE, value: '[WB]Sheet1!A1' }
-  ]);
-  t.deepEqual(tokenize('[WB]Sheet1!A:A', opts), [
-    { type: REF_BEAM, value: '[WB]Sheet1!A:A' }
-  ]);
-  t.deepEqual(tokenize('[WB]Sheet1!A1:A', opts), [
-    { type: REF_TERNARY, value: '[WB]Sheet1!A1:A' }
-  ]);
-  t.deepEqual(tokenize('[WB]Sheet1!A1:A', opts), [
-    { type: REF_TERNARY, value: '[WB]Sheet1!A1:A' }
-  ]);
-  t.deepEqual(tokenize('[WB]Sheet1!A1.:.C3', opts), [
-    { type: REF_RANGE, value: '[WB]Sheet1!A1.:.C3' }
-  ]);
-
-  t.deepEqual(tokenize('foo', opts), [
-    { type: REF_NAMED, value: 'foo' }
-  ]);
-  t.deepEqual(tokenize('\'quoted\'!foo', opts), [
-    { type: REF_NAMED, value: '\'quoted\'!foo' }
-  ]);
-  t.deepEqual(tokenize('Sheet1!foo', opts), [
-    { type: REF_NAMED, value: 'Sheet1!foo' }
-  ]);
-  t.deepEqual(tokenize('[path]!foo', opts), [
-    { type: UNKNOWN, value: '[path]' },
-    { type: OPERATOR, value: '!' },
-    { type: REF_NAMED, value: 'foo' }
-  ]);
-  t.deepEqual(tokenize('[path]prefix!foo', opts), [
-    { type: REF_NAMED, value: '[path]prefix!foo' }
-  ]);
-
-  t.end();
-});
package/lib/package.json
DELETED
@@ -1 +0,0 @@
-{ "type": "module" }
package/lib/parseRef.js
DELETED
@@ -1,157 +0,0 @@
-import {
-  FX_PREFIX,
-  CONTEXT,
-  CONTEXT_QUOTE,
-  REF_RANGE,
-  REF_TERNARY,
-  REF_NAMED,
-  REF_BEAM,
-  REF_STRUCT,
-  OPERATOR
-} from './constants.js';
-import { lexersRefs } from './lexerParts.js';
-import { getTokens } from './lexer.js';
-
-// Liberally split a context string up into parts.
-// Permits any combination of braced and unbraced items.
-export function splitPrefix (str, stringsOnly = false) {
-  let inBrace = false;
-  let currStr = '';
-  const parts = [];
-  const flush = () => {
-    if (currStr) {
-      parts.push(
-        stringsOnly
-          ? currStr
-          : { value: currStr, braced: inBrace }
-      );
-    }
-    currStr = '';
-  };
-  for (let i = 0; i < str.length; i++) {
-    const char = str[i];
-    if (char === '[') {
-      flush();
-      inBrace = true;
-    }
-    else if (char === ']') {
-      flush();
-      inBrace = false;
-    }
-    else {
-      currStr += char;
-    }
-  }
-  flush();
-  return parts;
-}
-
-function splitContext (contextString) {
-  return { context: splitPrefix(contextString, true) };
-}
-
-function splitContextXlsx (contextString) {
-  const context = {};
-  const ctx = splitPrefix(contextString);
-  if (ctx.length > 1) {
-    context.workbookName = ctx[ctx.length - 2].value;
-    context.sheetName = ctx[ctx.length - 1].value;
-  }
-  else if (ctx.length === 1) {
-    const item = ctx[0];
-    if (item.braced) {
-      context.workbookName = item.value;
-    }
-    else {
-      context.sheetName = item.value;
-    }
-  }
-  return context;
-}
-
-const unquote = d => d.slice(1, -1).replace(/''/g, "'");
-
-const pRangeOp = t => t && (t.value === ':' || t.value === '.:' || t.value === ':.' || t.value === '.:.') && { operator: t.value };
-const pRange = t => t && t.type === REF_RANGE && { r0: t.value };
-const pPartial = t => t && t.type === REF_TERNARY && { r0: t.value };
-const pRange2 = t => t && t.type === REF_RANGE && { r1: t.value };
-const pBang = t => t && t.type === OPERATOR && t.value === '!' && {};
-const pBeam = t => t && t.type === REF_BEAM && { r0: t.value };
-const pStrucured = t => t && t.type === REF_STRUCT && { struct: t.value };
-const pContext = (t, opts) => {
-  const splitter = opts.xlsx ? splitContextXlsx : splitContext;
-  if (t && t.type === CONTEXT) {
-    return splitter(t.value);
-  }
-  if (t && t.type === CONTEXT_QUOTE) {
-    return splitter(unquote(t.value));
-  }
-};
-const pNamed = t => t && t.type === REF_NAMED && { name: t.value };
-
-const validRuns = [
-  [ pPartial ],
-  [ pRange, pRangeOp, pRange2 ],
-  [ pRange ],
-  [ pBeam ],
-  [ pContext, pBang, pPartial ],
-  [ pContext, pBang, pRange, pRangeOp, pRange2 ],
-  [ pContext, pBang, pRange ],
-  [ pContext, pBang, pBeam ]
-];
-
-const validRunsNamed = validRuns.concat([
-  [ pNamed ],
-  [ pContext, pBang, pNamed ],
-  [ pStrucured ],
-  [ pNamed, pStrucured ],
-  [ pContext, pBang, pNamed, pStrucured ]
-]);
-
-export function parseRef (ref, opts) {
-  const options = {
-    withLocation: false,
-    mergeRefs: false,
-    allowTernary: false,
-    allowNamed: true,
-    r1c1: false,
-    xlsx: false,
-    ...opts
-  };
-  const tokens = getTokens(ref, lexersRefs, options);
-  const refData = options.xlsx
-    ? {
-      workbookName: '',
-      sheetName: '',
-      r0: '',
-      r1: '',
-      name: '',
-      operator: ''
-    }
-    : {
-      context: [],
-      r0: '',
-      r1: '',
-      name: '',
-      operator: ''
-    };
-  // discard the "="-prefix if it is there
-  if (tokens.length && tokens[0].type === FX_PREFIX) {
-    tokens.shift();
-  }
-  const runs = options.allowNamed ? validRunsNamed : validRuns;
-  for (let i = 0; i < runs.length; i++) {
-    const data = { ...refData };
-    if (runs[i].length === tokens.length) {
-      const valid = runs[i].every((parse, j) => {
-        const d = parse(tokens[j], options);
-        Object.assign(data, d);
-        return d;
-      });
-      if (valid) {
-        return data;
-      }
-    }
-  }
-  return null;
-}
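Given the validRuns table and the refData defaults in the deleted parseRef.js above, the 4.x return shape can be reconstructed for a few inputs. The snippet below is a sketch of that expected shape, inferred from the code shown rather than captured from the published package; the relative import mirrors the style of the spec file that follows.

```js
import { parseRef } from './parseRef.js';

// Matches the [ pContext, pBang, pRange, pRangeOp, pRange2 ] run:
parseRef('Sheet1!A1:B2');
// expected shape: { context: [ 'Sheet1' ], r0: 'A1', r1: 'B2', name: '', operator: ':' }

// Matches the [ pContext, pBang, pNamed ] run (allowNamed defaults to true):
parseRef('Sheet1!foo');
// expected shape: { context: [ 'Sheet1' ], r0: '', r1: '', name: 'foo', operator: '' }

// No run consists of a lone "!" operator token, so the parse is rejected:
parseRef('!');
// expected: null
```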
package/lib/parseRef.spec.js
DELETED
@@ -1,71 +0,0 @@
-import { test } from 'tape';
-import { splitPrefix } from './parseRef.js';
-
-test('splitPrefix', t => {
-  const testStr = (str, opt, expected) => {
-    t.deepEqual(splitPrefix(str, opt), expected, str);
-  };
-
-  testStr('[foo][bar][baz]', true, [ 'foo', 'bar', 'baz' ]);
-  testStr('foo[bar][baz]', true, [ 'foo', 'bar', 'baz' ]);
-  testStr('[foo]bar[baz]', true, [ 'foo', 'bar', 'baz' ]);
-  testStr('[foo][bar]baz', true, [ 'foo', 'bar', 'baz' ]);
-  testStr('foo[bar]baz', true, [ 'foo', 'bar', 'baz' ]);
-  testStr('[foo]bar[baz]', true, [ 'foo', 'bar', 'baz' ]);
-  testStr('[foo]bar', true, [ 'foo', 'bar' ]);
-  testStr('foo[bar]', true, [ 'foo', 'bar' ]);
-  testStr('[foo][bar]', true, [ 'foo', 'bar' ]);
-  testStr('[foo]', true, [ 'foo' ]);
-  testStr('foo', true, [ 'foo' ]);
-
-  testStr('[foo][bar][baz]', false, [
-    { value: 'foo', braced: true },
-    { value: 'bar', braced: true },
-    { value: 'baz', braced: true }
-  ]);
-  testStr('foo[bar][baz]', false, [
-    { value: 'foo', braced: false },
-    { value: 'bar', braced: true },
-    { value: 'baz', braced: true }
-  ]);
-  testStr('[foo]bar[baz]', false, [
-    { value: 'foo', braced: true },
-    { value: 'bar', braced: false },
-    { value: 'baz', braced: true }
-  ]);
-  testStr('[foo][bar]baz', false, [
-    { value: 'foo', braced: true },
-    { value: 'bar', braced: true },
-    { value: 'baz', braced: false }
-  ]);
-  testStr('foo[bar]baz', false, [
-    { value: 'foo', braced: false },
-    { value: 'bar', braced: true },
-    { value: 'baz', braced: false }
-  ]);
-  testStr('[foo]bar[baz]', false, [
-    { value: 'foo', braced: true },
-    { value: 'bar', braced: false },
-    { value: 'baz', braced: true }
-  ]);
-  testStr('[foo]bar', false, [
-    { value: 'foo', braced: true },
-    { value: 'bar', braced: false }
-  ]);
-  testStr('foo[bar]', false, [
-    { value: 'foo', braced: false },
-    { value: 'bar', braced: true }
-  ]);
-  testStr('[foo][bar]', false, [
-    { value: 'foo', braced: true },
-    { value: 'bar', braced: true }
-  ]);
-  testStr('[foo]', false, [
-    { value: 'foo', braced: true }
-  ]);
-  testStr('foo', false, [
-    { value: 'foo', braced: false }
-  ]);
-
-  t.end();
-});