@borgar/fx 2.1.1 → 3.0.0
- package/README.md +139 -24
- package/References.md +39 -0
- package/dist/fx.js +1 -1
- package/lib/a1.js +152 -87
- package/lib/a1.spec.js +264 -0
- package/lib/addMeta.js +72 -13
- package/lib/{addMeta-test.js → addMeta.spec.js} +39 -6
- package/lib/constants.js +7 -89
- package/lib/fixRanges.js +41 -0
- package/lib/fixRanges.spec.js +111 -0
- package/lib/index.js +9 -6
- package/lib/isType.js +18 -0
- package/lib/lexer.js +99 -70
- package/lib/{lexer-test.js → lexer.spec.js} +445 -142
- package/lib/lexerParts.js +153 -0
- package/lib/mergeRefTokens.js +77 -0
- package/lib/mergeRefTokens.spec.js +118 -0
- package/lib/parseRef.js +44 -40
- package/lib/rc.js +154 -49
- package/lib/rc.spec.js +220 -0
- package/lib/stringifyPrefix.js +21 -0
- package/lib/{translate-toA1-test.js → translate-toA1.spec.js} +20 -2
- package/lib/{translate-toRC-test.js → translate-toRC.spec.js} +18 -1
- package/lib/translate.js +20 -32
- package/package.json +12 -10
- package/lib/a1-test.js +0 -158
- package/lib/quickVerify.js +0 -35
- package/lib/rc-test.js +0 -111
package/lib/lexerParts.js
ADDED

@@ -0,0 +1,153 @@
+import {
+  OPERATOR,
+  BOOLEAN,
+  ERROR,
+  NUMBER,
+  FUNCTION,
+  NEWLINE,
+  WHITESPACE,
+  STRING,
+  CONTEXT,
+  CONTEXT_QUOTE,
+  RANGE,
+  RANGE_BEAM,
+  RANGE_NAMED,
+  RANGE_TERNARY,
+  MAX_COLS,
+  MAX_ROWS
+} from './constants.js';
+import { fromCol } from './a1.js';
+
+const re_ERROR = /^#(NAME\?|FIELD!|CALC!|VALUE!|REF!|DIV\/0!|NULL!|NUM!|N\/A|GETTING_DATA\b|SPILL!|UNKNOWN!|FIELD\b|CALC\b|SYNTAX\?|ERROR!)/i;
+const re_OPERATOR = /^(<=|>=|<>|[-+/*^%&<>=]|[{},;]|[()]|@|:|!|#)/;
+const re_BOOLEAN = /^(TRUE|FALSE)\b/i;
+const re_FUNCTION = /^[A-Z_]+[A-Z\d_.]*(?=\()/i;
+const re_NEWLINE = /^\n+/;
+const re_WHITESPACE = /^[ \f\r\t\v\u00a0\u1680\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff]+/;
+const re_STRING = /^"(?:""|[^"])*("|$)/;
+const re_NUMBER = /^(?:\d+(\.\d+)?(?:[eE][+-]?\d+)?|\d+)/;
+
+const re_CONTEXT = /^(\[(?:[^\]])+\])?([0-9A-Za-z._¡¤§¨ª\u00ad¯-\uffff]+)(?=!)/;
+const re_CONTEXT_QUOTE = /^'(?:''|[^'])*('|$)(?=!)/;
+
+const rngPart = '\\$?[A-Z]{1,3}\\$?[1-9][0-9]{0,6}';
+const colPart = '\\$?[A-Z]{1,3}';
+const rowPart = '\\$?[1-9][0-9]{0,6}';
+const re_A1COL = new RegExp(`^${colPart}:${colPart}`, 'i');
+const re_A1ROW = new RegExp(`^${rowPart}:${rowPart}`, 'i');
+const re_A1RANGE = new RegExp(`^${rngPart}`, 'i');
+const re_A1PARTIAL = new RegExp(`^((${colPart}|${rowPart}):${rngPart}|${rngPart}:(${colPart}|${rowPart}))(?![\\w($.])`, 'i');
+const rPart = '(?:R(?:\\[[+-]?\\d+\\]|[1-9][0-9]{0,6})?)';
+const cPart = '(?:C(?:\\[[+-]?\\d+\\]|[1-9][0-9]{0,4})?)';
+const re_RCCOL = new RegExp(`^${cPart}(:${cPart})?(?=\\W|$)`, 'i');
+const re_RCROW = new RegExp(`^${rPart}(:${rPart})?(?=\\W|$)`, 'i');
+const re_RCRANGE = new RegExp(`^(?:(?=[RC])${rPart}${cPart})`, 'i');
+const re_RCPARTIAL = new RegExp(`^(${rPart}${cPart}(:${cPart}|:${rPart})(?![[\\d])|(${rPart}|${cPart})(:${rPart}${cPart}))(?=\\W|$)`, 'i');
+
+// The advertized named ranges rules are a bit off from what Excel seems to do:
+// in the "extended range" of chars, it looks like it allows most things above
+// U+00B0 with the range between U+00A0-U+00AF rather random.
+// eslint-disable-next-line
+// const re_NAMED = /^[a-zA-Z\\_¡¤§¨ª\u00ad¯\u00b0-\uffff][a-zA-Z0-9\\_.?¡¤§¨ª\u00ad¯\u00b0-\uffff]{0,254}/i;
+// I've simplified to allowing everything above U+00A1:
+const re_NAMED = /^[a-zA-Z\\_\u00a1-\uffff][a-zA-Z0-9\\_.?\u00a1-\uffff]{0,254}/i;
+
+function makeHandler (type, re) {
+  return str => {
+    const m = re.exec(str);
+    if (m) {
+      return { type: type, value: m[0] };
+    }
+  };
+}
+
+const reRCNums = /([RC])(\[?)(-?\d+)/gi;
+const reA1Nums = /(\d+|[a-zA-Z]+)/gi;
+function lexRange (str, options) {
+  let m, t;
+  if (options.r1c1) {
+    // RC notation
+    if (options.allowTernary && (m = re_RCPARTIAL.exec(str))) {
+      t = { type: RANGE_TERNARY, value: m[0] };
+    }
+    else if ((m = re_RCRANGE.exec(str))) {
+      t = { type: RANGE, value: m[0] };
+    }
+    else if ((m = re_RCROW.exec(str)) || (m = re_RCCOL.exec(str))) {
+      t = { type: RANGE_BEAM, value: m[0] };
+    }
+    if (t) {
+      reRCNums.lastIndex = 0;
+      while ((m = reRCNums.exec(t.value)) !== null) {
+        const x = (m[1] === 'R' ? MAX_ROWS : MAX_COLS) + (m[2] ? 1 : 0);
+        const val = parseInt(m[3], 10);
+        if (val >= x || val <= -x) {
+          return null;
+        }
+      }
+      return t;
+    }
+  }
+  else {
+    // A1 notation
+    if (options.allowTernary && (m = re_A1PARTIAL.exec(str))) {
+      t = { type: RANGE_TERNARY, value: m[0] };
+    }
+    else if ((m = re_A1COL.exec(str)) || (m = re_A1ROW.exec(str))) {
+      t = { type: RANGE_BEAM, value: m[0] };
+    }
+    else if ((m = re_A1RANGE.exec(str))) {
+      t = { type: RANGE, value: m[0] };
+    }
+    if (t) {
+      reA1Nums.lastIndex = 0;
+      while ((m = reA1Nums.exec(t.value)) !== null) {
+        if (/^\d/.test(m[1])) { // row
+          if ((parseInt(m[1], 10) - 1) > MAX_ROWS) {
+            return null;
+          }
+        }
+        else if (fromCol(m[1]) > MAX_COLS) {
+          return null;
+        }
+      }
+      return t;
+    }
+  }
+}
+
+function lexRefOp (s, opts) {
+  // in RC mode we only allow !
+  if (opts.r1c1) {
+    return (s[0] === '!')
+      ? { type: OPERATOR, value: s[0] }
+      : null;
+  }
+  // in A1 mode we allow ! and :
+  return (s[0] === '!' || s[0] === ':')
+    ? { type: OPERATOR, value: s[0] }
+    : null;
+}
+
+export const lexers = [
+  makeHandler(ERROR, re_ERROR),
+  makeHandler(OPERATOR, re_OPERATOR),
+  makeHandler(BOOLEAN, re_BOOLEAN),
+  makeHandler(FUNCTION, re_FUNCTION),
+  makeHandler(NEWLINE, re_NEWLINE),
+  makeHandler(WHITESPACE, re_WHITESPACE),
+  makeHandler(STRING, re_STRING),
+  makeHandler(CONTEXT_QUOTE, re_CONTEXT_QUOTE),
+  makeHandler(CONTEXT, re_CONTEXT),
+  lexRange,
+  makeHandler(NUMBER, re_NUMBER),
+  makeHandler(RANGE_NAMED, re_NAMED)
+];
+
+export const lexersRefs = [
+  lexRefOp,
+  makeHandler(CONTEXT_QUOTE, re_CONTEXT_QUOTE),
+  makeHandler(CONTEXT, re_CONTEXT),
+  lexRange,
+  makeHandler(RANGE_NAMED, re_NAMED)
+];
package/lib/mergeRefTokens.js
ADDED

@@ -0,0 +1,77 @@
+import { CONTEXT, CONTEXT_QUOTE, RANGE, RANGE_NAMED, RANGE_BEAM, RANGE_TERNARY, OPERATOR } from './constants.js';
+
+const END = '$';
+
+const validRunsMerge = [
+  [ RANGE, ':', RANGE ],
+  [ RANGE ],
+  [ RANGE_BEAM ],
+  [ RANGE_TERNARY ],
+  [ CONTEXT, '!', RANGE, ':', RANGE ],
+  [ CONTEXT, '!', RANGE ],
+  [ CONTEXT, '!', RANGE_BEAM ],
+  [ CONTEXT, '!', RANGE_TERNARY ],
+  [ CONTEXT_QUOTE, '!', RANGE, ':', RANGE ],
+  [ CONTEXT_QUOTE, '!', RANGE ],
+  [ CONTEXT_QUOTE, '!', RANGE_BEAM ],
+  [ CONTEXT_QUOTE, '!', RANGE_TERNARY ],
+  [ RANGE_NAMED ],
+  [ CONTEXT, '!', RANGE_NAMED ],
+  [ CONTEXT_QUOTE, '!', RANGE_NAMED ]
+];
+
+// valid token runs are converted to a tree structure
+const refPartsTree = {};
+function packList (f, node) {
+  if (f.length) {
+    const key = f[0];
+    node[key] = node[key] || {};
+    packList(f.slice(1), node[key]);
+  }
+  else {
+    node[END] = true;
+  }
+}
+validRunsMerge.forEach(run => packList(run.concat().reverse(), refPartsTree));
+
+// attempt to match a backwards run of tokens from a given point
+// to a path in the tree
+const matcher = (tokens, currNode, anchorIndex, index = 0) => {
+  const token = tokens[anchorIndex - index];
+  if (token) {
+    const key = (token.type === OPERATOR) ? token.value : token.type;
+    if (key in currNode) {
+      return matcher(tokens, currNode[key], anchorIndex, index + 1);
+    }
+  }
+  if (currNode[END]) {
+    // we may end here so this is a match
+    return index;
+  }
+  // no match
+  return 0;
+};
+
+// merge reference tokens as possible in a list of tokens
+export function mergeRefTokens (tokens) {
+  const finalTokens = [];
+  // this seeks backwards because it's really the range part
+  // that controls what can be joined.
+  for (let i = tokens.length - 1; i >= 0; i--) {
+    let token = tokens[i];
+    const valid = matcher(tokens, refPartsTree, i);
+    if (valid) {
+      const toMerge = tokens.slice(i - valid + 1, i + 1);
+      // use the meta properties from the "first" token (right-most token)
+      token = { ...token };
+      token.value = toMerge.map(d => d.value).join('');
+      // adjust the range to include all the text
+      if (token.range && toMerge[0].range) {
+        token.range[0] = toMerge[0].range[0];
+      }
+      i -= valid - 1;
+    }
+    finalTokens.unshift(token);
+  }
+  return finalTokens;
+}
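
To make the matching concrete, here is a reduced sketch of the tree that `packList` builds, using just three of the runs above. The inlined `pack` helper mirrors `packList`, and the commented-out shape is illustrative only (the actual keys are whatever string values the constants hold, with `END` being `'$'`).

```js
// Illustrative only: build the reversed-run tree for three of the runs.
import { CONTEXT, RANGE } from './constants.js';

const miniTree = {};
const pack = (run, node) => run.length
  ? pack(run.slice(1), node[run[0]] = node[run[0]] || {})
  : (node['$'] = true);
[ [ RANGE ], [ RANGE, ':', RANGE ], [ CONTEXT, '!', RANGE ] ]
  .forEach(run => pack(run.concat().reverse(), miniTree));

// Every run, read backwards, starts at a RANGE token, so the tree has one root:
// {
//   [RANGE]: {
//     '$': true,                          // "A1" on its own is complete
//     ':': { [RANGE]: { '$': true } },    // "A1" ":" "B2"
//     '!': { [CONTEXT]: { '$': true } }   // "Sheet1" "!" "A1"
//   }
// }
// matcher() walks this right-to-left from a RANGE token and returns how many
// tokens form a valid reference (0 if none), which mergeRefTokens then joins.
```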
package/lib/mergeRefTokens.spec.js
ADDED

@@ -0,0 +1,118 @@
+import { CONTEXT, FUNCTION, FX_PREFIX, OPERATOR, RANGE, RANGE_BEAM, RANGE_NAMED, RANGE_TERNARY, UNKNOWN } from './constants.js';
+import { test } from 'tape';
+import { mergeRefTokens } from './mergeRefTokens.js';
+import { tokenize } from './lexer.js';
+
+test('mergeRefTokens basics', t => {
+  const list = tokenize('=SUM([Wb1]Sheet1!A1:B2)', { mergeRanges: false, emitRanges: true });
+
+  t.deepEqual(list, [
+    { type: FX_PREFIX, value: '=', range: [ 0, 1 ] },
+    { type: FUNCTION, value: 'SUM', range: [ 1, 4 ] },
+    { type: OPERATOR, value: '(', range: [ 4, 5 ] },
+
+    { type: CONTEXT, value: '[Wb1]Sheet1', range: [ 5, 16 ] },
+    { type: OPERATOR, value: '!', range: [ 16, 17 ] },
+    { type: RANGE, value: 'A1', range: [ 17, 19 ] },
+    { type: OPERATOR, value: ':', range: [ 19, 20 ] },
+    { type: RANGE, value: 'B2', range: [ 20, 22 ] },
+
+    { type: OPERATOR, value: ')', range: [ 22, 23 ] }
+  ]);
+
+  // set IDs on all tokens about to be joined
+  list[3].id = 'id1';
+  list[4].id = 'id2';
+  list[5].id = 'id3';
+  list[6].id = 'id4';
+  list[7].id = 'id5';
+
+  const mergedList = mergeRefTokens(list);
+  t.deepEqual(mergedList, [
+    { type: FX_PREFIX, value: '=', range: [ 0, 1 ] },
+    { type: FUNCTION, value: 'SUM', range: [ 1, 4 ] },
+    { type: OPERATOR, value: '(', range: [ 4, 5 ] },
+    { type: RANGE,
+      id: 'id5', // token has the id of the first one
+      value: '[Wb1]Sheet1!A1:B2',
+      range: [ 5, 22 ] },
+    { type: OPERATOR, value: ')', range: [ 22, 23 ] }
+  ]);
+
+  t.end();
+});
+
+test('mergeRefTokens cases', t => {
+  const opts = { mergeRanges: true, allowTernary: true };
+  t.deepEqual(tokenize('A1', opts), [
+    { type: RANGE, value: 'A1' }
+  ]);
+  t.deepEqual(tokenize('A1:A1', opts), [
+    { type: RANGE, value: 'A1:A1' }
+  ]);
+  t.deepEqual(tokenize('A:A', opts), [
+    { type: RANGE_BEAM, value: 'A:A' }
+  ]);
+  t.deepEqual(tokenize('A1:A', opts), [
+    { type: RANGE_TERNARY, value: 'A1:A' }
+  ]);
+
+  t.deepEqual(tokenize('\'Sheet1\'!A1', opts), [
+    { type: RANGE, value: '\'Sheet1\'!A1' }
+  ]);
+  t.deepEqual(tokenize('\'Sheet1\'!A:A', opts), [
+    { type: RANGE_BEAM, value: '\'Sheet1\'!A:A' }
+  ]);
+  t.deepEqual(tokenize('\'Sheet1\'!A1:A', opts), [
+    { type: RANGE_TERNARY, value: '\'Sheet1\'!A1:A' }
+  ]);
+  t.deepEqual(tokenize('\'Sheet1\'!A1:A', opts), [
+    { type: RANGE_TERNARY, value: '\'Sheet1\'!A1:A' }
+  ]);
+
+  t.deepEqual(tokenize('Sheet1!A1', opts), [
+    { type: RANGE, value: 'Sheet1!A1' }
+  ]);
+  t.deepEqual(tokenize('Sheet1!A:A', opts), [
+    { type: RANGE_BEAM, value: 'Sheet1!A:A' }
+  ]);
+  t.deepEqual(tokenize('Sheet1!A1:A', opts), [
+    { type: RANGE_TERNARY, value: 'Sheet1!A1:A' }
+  ]);
+  t.deepEqual(tokenize('Sheet1!A1:A', opts), [
+    { type: RANGE_TERNARY, value: 'Sheet1!A1:A' }
+  ]);
+
+  t.deepEqual(tokenize('[WB]Sheet1!A1', opts), [
+    { type: RANGE, value: '[WB]Sheet1!A1' }
+  ]);
+  t.deepEqual(tokenize('[WB]Sheet1!A:A', opts), [
+    { type: RANGE_BEAM, value: '[WB]Sheet1!A:A' }
+  ]);
+  t.deepEqual(tokenize('[WB]Sheet1!A1:A', opts), [
+    { type: RANGE_TERNARY, value: '[WB]Sheet1!A1:A' }
+  ]);
+  t.deepEqual(tokenize('[WB]Sheet1!A1:A', opts), [
+    { type: RANGE_TERNARY, value: '[WB]Sheet1!A1:A' }
+  ]);
+
+  t.deepEqual(tokenize('foo', opts), [
+    { type: RANGE_NAMED, value: 'foo' }
+  ]);
+  t.deepEqual(tokenize('\'quoted\'!foo', opts), [
+    { type: RANGE_NAMED, value: '\'quoted\'!foo' }
+  ]);
+  t.deepEqual(tokenize('Sheet1!foo', opts), [
+    { type: RANGE_NAMED, value: 'Sheet1!foo' }
+  ]);
+  t.deepEqual(tokenize('[path]!foo', opts), [
+    { type: UNKNOWN, value: '[path]' },
+    { type: OPERATOR, value: '!' },
+    { type: RANGE_NAMED, value: 'foo' }
+  ]);
+  t.deepEqual(tokenize('[path]prefix!foo', opts), [
+    { type: RANGE_NAMED, value: '[path]prefix!foo' }
+  ]);
+
+  t.end();
+});
package/lib/parseRef.js
CHANGED

@@ -1,63 +1,67 @@
-import { …
+import {
+  FX_PREFIX,
+  CONTEXT,
+  CONTEXT_QUOTE,
+  RANGE,
+  RANGE_TERNARY,
+  RANGE_NAMED,
+  RANGE_BEAM,
+  OPERATOR
+} from './constants.js';
+import { lexersRefs } from './lexerParts.js';
 import { getTokens } from './lexer.js';
 
+function splitContext (contextString) {
+  const m = /(?:\[(.+?)\])?([^[\]]+?)$/.exec(contextString);
+  if (m) {
+    const [ , a, b ] = m;
+    const context = [ a, b ].filter(Boolean);
+    return { context };
+  }
+}
+
 const unquote = d => d.slice(1, -1).replace(/''/g, "'");
 
 const pRangeOp = t => t && t.value === ':' && {};
 const pRange = t => t && t.type === RANGE && { r0: t.value };
+const pPartial = t => t && t.type === RANGE_TERNARY && { r0: t.value };
 const pRange2 = t => t && t.type === RANGE && { r1: t.value };
-const pBang = t => t && t.value === '!' && {};
+const pBang = t => t && t.type === OPERATOR && t.value === '!' && {};
 const pBeam = t => t && t.type === RANGE_BEAM && { r0: t.value };
-const …
-
-
-const pNamed = t => t && t.type === RANGE_NAMED && { name: t.value };
-
-const pQuoted = t => {
-  if (t && t.type === PATH_QUOTE) {
-    const m = /(?:\[(.+?)\])?([^[\]]+?)$/.exec(unquote(t.value));
-    if (m) {
-      const [ , file, sheet ] = m;
-      if (!sheet || /^[^:\\/?*[\]]{0,31}$/.test(sheet)) {
-        return {
-          workbookName: file || '',
-          sheetName: sheet || ''
-        };
-      }
-    }
-  }
+const pContext = t => {
+  if (t && t.type === CONTEXT) { return splitContext(t.value); }
+  if (t && t.type === CONTEXT_QUOTE) { return splitContext(unquote(t.value)); }
 };
-const …
+const pNamed = t => t && t.type === RANGE_NAMED && { name: t.value };
 
 const validRuns = [
-  [ …
+  [ pPartial ],
   [ pRange, pRangeOp, pRange2 ],
+  [ pRange ],
   [ pBeam ],
-  [ …
-  [ …
-  [ …
-  [ …
-  [ pSheet, pBang, pRange, pRangeOp, pRange2 ],
-  [ pSheet, pBang, pBeam ],
-  [ pFile, pSheet, pBang, pRange ],
-  [ pFile, pSheet, pBang, pRange, pRangeOp, pRange2 ],
-  [ pFile, pSheet, pBang, pBeam ]
+  [ pContext, pBang, pPartial ],
+  [ pContext, pBang, pRange, pRangeOp, pRange2 ],
+  [ pContext, pBang, pRange ],
+  [ pContext, pBang, pBeam ]
 ];
 
 const validRunsNamed = validRuns.concat([
   [ pNamed ],
-  [ …
-  [ pQuoted2, pBang, pNamed ]
+  [ pContext, pBang, pNamed ]
 ]);
 
-export function parseRef (ref, allow_named = true, tokenHandlers = []) {
-  const …
+export function parseRef (ref, opts) {
+  const options = {
     emitRanges: false,
-    mergeRanges: false
-  …
+    mergeRanges: false,
+    allowTernary: false,
+    allowNamed: true,
+    r1c1: false,
+    ...opts
+  };
+  const tokens = getTokens(ref, lexersRefs, options);
   const refData = {
-    …
-    workbookName: '',
+    context: [],
     r0: '',
     r1: '',
     name: ''
@@ -66,7 +70,7 @@ export function parseRef (ref, allow_named = true, tokenHandlers = []) {
   if (tokens.length && tokens[0].type === FX_PREFIX) {
     tokens.shift();
   }
-  const runs = …
+  const runs = options.allowNamed ? validRunsNamed : validRuns;
   for (let i = 0; i < runs.length; i++) {
     const data = { ...refData };
     if (runs[i].length === tokens.length) {