numbers-parser 4.17.0.post1__py3-none-any.whl → 4.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,548 +0,0 @@
- import re
-
- from numbers_parser.xrefs import CellRange, CellRangeType
-
-
- def parse_numbers_range(model: object, range_str: str) -> CellRange:
-     """
-     Parse a cell range string in Numbers format.
-
-     Args:
-         range_str (str): The Numbers cell range string.
-
-     Returns:
-         CellRange: The parsed range with zero-offset start and end column/row
-             numbers, booleans indicating whether the references are absolute
-             or relative, and any sheet or table name.
-
-     """
-
-     def col_to_index(col_str: str) -> int:
-         """Convert Excel-like column letters to a zero-based index."""
-         col = 0
-         for expn, char in enumerate(reversed(col_str)):
-             col += (ord(char) - ord("A") + 1) * (26**expn)
-         return col - 1
-
-     def parse_row_range(model: object, match: re.Match[str]) -> CellRange:
-         """Parse row range format (e.g., '1:2' or '$1:$2')."""
-         return CellRange(
-             model=model,
-             row_start_is_abs=match.group(1) == "$",
-             row_start=int(match.group(2)) - 1,
-             row_end_is_abs=match.group(3) == "$",
-             row_end=int(match.group(4)) - 1,
-             range_type=CellRangeType.ROW_RANGE,
-         )
-
-     def parse_col_range(model: object, match: re.Match[str]) -> CellRange:
-         """Parse column range format (e.g., 'A:C' or '$E:$F')."""
-         return CellRange(
-             model=model,
-             col_start_is_abs=match.group(1) == "$",
-             col_start=col_to_index(match.group(2)),
-             col_end_is_abs=match.group(3) == "$",
-             col_end=col_to_index(match.group(4)),
-             range_type=CellRangeType.COL_RANGE,
-         )
-
-     def parse_full_range(model: object, match: re.Match[str]) -> CellRange:
-         """Parse full range format (e.g., 'A1:C4' or '$A3:$B3')."""
-         return CellRange(
-             model=model,
-             col_start_is_abs=match.group(1) == "$",
-             col_start=col_to_index(match.group(2)),
-             row_start_is_abs=match.group(3) == "$",
-             row_start=int(match.group(4)) - 1,
-             col_end_is_abs=match.group(5) == "$",
-             col_end=col_to_index(match.group(6)),
-             row_end_is_abs=match.group(7) == "$",
-             row_end=int(match.group(8)) - 1,
-             range_type=CellRangeType.RANGE,
-         )
-
-     def parse_named_range(model: object, match: re.Match[str]) -> CellRange:
-         """Parse a named range (e.g., 'cats:dogs' from 'Table::cats:dogs')."""
-         return CellRange(
-             model=model,
-             row_start_is_abs=match.group(1) == "$",
-             row_start=match.group(2),
-             row_end_is_abs=match.group(3) == "$",
-             row_end=match.group(4),
-             range_type=CellRangeType.NAMED_RANGE,
-         )
-
-     def parse_single_cell(model: object, match: re.Match[str]) -> CellRange:
-         """Parse single cell format (e.g., 'A1' or '$B$3')."""
-         return CellRange(
-             model=model,
-             col_start_is_abs=match.group(1) == "$",
-             col_start=col_to_index(match.group(2)),
-             row_start_is_abs=match.group(3) == "$",
-             row_start=int(match.group(4)) - 1,
-             range_type=CellRangeType.CELL,
-         )
-
-     def parse_named_row_column(model: object, match: re.Match[str]) -> CellRange:
-         """Parse a named row or column (e.g., 'cats' from 'Table::cats')."""
-         return CellRange(
-             model=model,
-             row_start_is_abs=match.group(1) == "$",
-             row_start=match.group(2),
-             range_type=CellRangeType.NAMED_ROW_COLUMN,
-         )
-
-     parts = range_str.split("::")
-     if len(parts) == 3:
-         name_scope_1, name_scope_2, ref = parts
-     elif len(parts) == 2:
-         name_scope_1, name_scope_2, ref = "", parts[0], parts[1]
-     else:
-         name_scope_1, name_scope_2, ref = "", "", parts[0]
-
-     patterns = [
-         (r"(\$?)(\d+):(\$?)(\d+)", parse_row_range),
-         (r"(\$?)([A-Z]+):(\$?)([A-Z]+)", parse_col_range),
-         (r"(\$?)([A-Z]+)(\$?)(\d+):(\$?)([A-Z]+)(\$?)(\d+)", parse_full_range),
-         (r"(\$?)([A-Z]+)(\$?)(\d+)", parse_single_cell),
-         (r"(\$?)([^:]+):(\$?)(.*)", parse_named_range),
-         (r"(\$?)(.*)", parse_named_row_column),
-     ]
-
-     # The function never falls through without returning: row and column names
-     # can be any string, so parse_named_row_column() acts as a catch-all.
-     for pattern, handler in patterns:  # noqa: RET503 # pragma: no branch
-         if match := re.match(pattern, ref):
-             result = handler(model, match)
-             result.name_scope_1 = name_scope_1
-             result.name_scope_2 = name_scope_2
-             return result
-
-
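As a quick illustration of how the patterns above resolve a reference (a sketch only, not part of the package diff; it assumes a suitable `model` object and that CellRange exposes the attributes used in the constructor calls above):

    # Hypothetical usage of parse_numbers_range(); `model` is assumed to exist.
    rng = parse_numbers_range(model, "Sheet 1::Table 1::$A$1:C4")
    # "Sheet 1" and "Table 1" end up in rng.name_scope_1 / rng.name_scope_2, and the
    # reference matches the full-range pattern, so with zero-based offsets:
    #   rng.col_start == 0 and rng.row_start == 0   (both marked absolute)
    #   rng.col_end == 2 and rng.row_end == 3       (both relative)
    #   rng.range_type == CellRangeType.RANGE
    # col_to_index() is the usual base-26 letter conversion, e.g. "AB" -> 27.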
- # The Tokenizer and Token classes are taken from the openpyxl library, which is
- # licensed under the MIT License. The original source code can be found at:
- #
- # https://github.com/gleeda/openpyxl/blob/master/openpyxl/formula/tokenizer.py
- #
- # Copyright (c) 2010 openpyxl
- #
- # The openpyxl tokenizer is based on the Javascript tokenizer originally found at
- # http://ewbi.blogs.com/develops/2004/12/excel_formula_p.html written by Eric
- # Bachtal, and now archived by the Internet Archive at the following URL:
- #
- # https://archive.is/OCsys
-
-
- class TokenizerError(Exception):
-     """Base class for all Tokenizer errors."""
-
-
- class Tokenizer:
-     """
-     A tokenizer for Excel worksheet formulae.
-
-     Converts a unicode string representing an Excel formula (in A1 notation)
-     into a sequence of `Token` objects.
-
-     `formula`: The unicode string to tokenize
-
-     Tokenizer defines a method `.parse()` to parse the formula into tokens,
-     which can then be accessed through the `.items` attribute.
-
-     """
-
-     SN_RE = re.compile("^[1-9](\\.[0-9]+)?E$")  # Scientific notation
-     WSPACE_RE = re.compile(" +")
-     STRING_REGEXES = {  # noqa: RUF012
-         # Inside a string, all characters are treated as literals, except for
-         # the quote character used to start the string. That character, when
-         # doubled, is treated as a single character in the string. If an
-         # unmatched quote appears, the string is terminated.
-         '"': re.compile('"(?:[^"]*"")*[^"]*"(?!")'),
-         # Single-quoted string includes an optional sequence to match
-         # range names such as 'start':'finish' including quoted strings
-         # such as ''10%''.
-         "'": re.compile(r"(?:'[^']*(?:''[^']*)*')(?:\s*:\s*'[^']*(?:''[^']*)*')*"),
-     }
-     ERROR_CODES = ("#NULL!", "#DIV/0!", "#VALUE!", "#REF!", "#NAME?", "#NUM!", "#N/A")
-     TOKEN_ENDERS = ",;})+-*/^&=><%×÷≥≤≠"  # Each of these characters marks the  # noqa: S105
-     # end of an operand token
-
-     def __init__(self, formula):
-         self.formula = formula
-         self.items = []
-         self.token_stack = []  # Used to keep track of arrays, functions, and
-         # parentheses
-         self.offset = 0  # How many chars have we read
-         self.token = []  # Used to build up token values char by char
-         self.parse()
-
-     def __repr__(self):
-         item_str = ",".join([repr(token) for token in self.items])
-         return f"[{item_str}]"
-
-     def parse(self):
-         """Populate self.items with the tokens from the formula."""
-         consumers = (
-             ("\"'", self.parse_string),
-             # ("[", self.parse_brackets),
-             ("#", self.parse_error),
-             ("+-*/^&=><%×÷≥≤≠", self.parse_operator),
-             ("{(", self.parse_opener),
-             (")}", self.parse_closer),
-             (";,", self.parse_separator),
-         )
-         dispatcher = {}  # maps chars to the specific parsing function
-         for chars, consumer in consumers:
-             dispatcher.update(dict.fromkeys(chars, consumer))
-         while self.offset < len(self.formula):
-             if self.check_scientific_notation():  # May consume one character
-                 continue
-             curr_char = self.formula[self.offset]
-             if curr_char in self.TOKEN_ENDERS:
-                 self.save_token()
-             if curr_char in dispatcher:
-                 self.offset += dispatcher[curr_char]()
-             else:
-                 # TODO: this can probably be sped up using a regex to get to
-                 # the next interesting character
-                 self.token.append(curr_char)
-                 self.offset += 1
-         self.save_token()
-
-     def parse_string(self):
-         """
-         Parse a "-delimited string or '-delimited link.
-
-         The offset must be pointing to either a single quote ("'") or double
-         quote ('"') character. The strings are parsed according to Excel
-         rules where, to escape the delimiter, you just double it up. E.g.,
-         "abc""def" in Excel is parsed as 'abc"def' in Python.
-
-         Returns the number of characters matched. (Does not update
-         self.offset)
-
-         """
-         self.assert_empty_token()
-         delim = self.formula[self.offset]
-         # if delim not in ('"', "'"):
-         #     msg = f"Invalid string delimiter: {delim}"
-         #     raise TokenizerError(msg)
-         regex = self.STRING_REGEXES[delim]
-         match = regex.match(self.formula[self.offset :])
-         if match is None:
-             subtype = "string" if delim == '"' else "link"
-             msg = f"Reached end of formula while parsing {subtype} in {self.formula}"
-             raise TokenizerError(msg)
-         match = match.group(0)
-         if delim == '"' or (delim.startswith("'") and delim.endswith("'")):
-             self.items.append(Token.make_operand(match))
-         else:
-             self.token.append(match)
-         return len(match)
-
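The double-quote pattern above is easy to sanity-check on its own (illustrative only; the regex literal is copied from STRING_REGEXES):

    import re

    # Same pattern as STRING_REGEXES['"'] above.
    string_re = re.compile('"(?:[^"]*"")*[^"]*"(?!")')
    # The doubled quote stays inside the literal and matching stops at the first
    # unescaped closing quote:
    string_re.match('"abc""def" & "x"').group(0)  # -> '"abc""def"'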
-     # def parse_brackets(self):
-     #     """
-     #     Consume all the text between square brackets [].

-     #     Returns the number of characters matched. (Does not update
-     #     self.offset)

-     #     """
-     #     if self.formula[self.offset] != "[":
-     #         msg = f"Expected '[', found: {self.formula[self.offset]}"
-     #         raise TokenizerError(msg)
-     #     right = self.formula.find("]", self.offset) + 1
-     #     if right == 0:
-     #         msg = f"Encountered unmatched '[' in '{self.formula}'"
-     #         raise TokenizerError(msg)
-     #     self.token.append(self.formula[self.offset : right])
-     #     return right - self.offset
-
-     def parse_error(self):
-         """
-         Consume the text following a '#' as an error.
-
-         Looks for a match in self.ERROR_CODES and returns the number of
-         characters matched. (Does not update self.offset)
-
-         """
-         self.assert_empty_token()
-         if self.formula[self.offset] != "#":
-             msg = f"Expected '#', found: {self.formula[self.offset]}"
-             raise TokenizerError(msg)
-         subformula = self.formula[self.offset :]
-         for err in self.ERROR_CODES:
-             if subformula.startswith(err):
-                 self.items.append(Token.make_operand(err))
-                 return len(err)
-         msg = f"Invalid error code at position {self.offset} in '{self.formula}'"
-         raise TokenizerError(msg)
-
-     def parse_operator(self):
-         """
-         Consume the characters constituting an operator.
-
-         Returns the number of characters consumed. (Does not update
-         self.offset)
-
-         """
-         if self.formula[self.offset : self.offset + 2] in (">=", "<=", "<>", "≥", "≤", "≠"):
-             self.items.append(
-                 Token(
-                     self.formula[self.offset : self.offset + 2],
-                     Token.OP_IN,
-                 ),
-             )
-             return 2
-         curr_char = self.formula[self.offset]  # guaranteed to be 1 char
-         if curr_char == "%":
-             token = Token("%", Token.OP_POST)
-         elif curr_char in "*/^&=><×÷≥≤≠":
-             token = Token(curr_char, Token.OP_IN)
-         # From here on, curr_char is guaranteed to be in '+-'
-         elif not self.items:
-             token = Token(curr_char, Token.OP_PRE)
-         else:
-             prev = self.items[-1]
-             is_infix = prev.subtype == Token.CLOSE or prev.type in (Token.OP_POST, Token.OPERAND)
-             token = Token(curr_char, Token.OP_IN) if is_infix else Token(curr_char, Token.OP_PRE)
-         self.items.append(token)
-         return 1
-
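The prefix/infix decision for '+' and '-' depends only on the previously emitted token, which is easy to see on a small input (a sketch, assuming the Tokenizer class above is importable from this module):

    # Assumes Tokenizer from the module shown in this diff is in scope.
    items = Tokenizer("-A1+B2").items
    # The leading '-' has no preceding token, so it becomes OPERATOR-PREFIX; the '+'
    # follows the operand "A1", so it becomes OPERATOR-INFIX:
    #   OPERATOR-PREFIX(,'-'), OPERAND(RANGE,'A1'), OPERATOR-INFIX(,'+'), OPERAND(RANGE,'B2')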
-     def parse_opener(self):
-         """
-         Consumes a ( or { character.
-
-         Returns the number of characters consumed. (Does not update
-         self.offset)
-
-         """
-         if self.formula[self.offset] not in ("(", "{"):
-             msg = f"Expected '(' or '{{', found: {self.formula[self.offset]}"
-             raise TokenizerError(msg)
-         if self.formula[self.offset] == "{":
-             self.assert_empty_token()
-             token = Token.make_subexp("{")
-         elif self.token:
-             token_value = "".join(self.token) + "("
-             del self.token[:]
-             token = Token.make_subexp(token_value)
-         else:
-             token = Token.make_subexp("(")
-         self.items.append(token)
-         self.token_stack.append(token)
-         return 1
-
-     def parse_closer(self):
-         """
-         Consumes a } or ) character.
-
-         Returns the number of characters consumed. (Does not update
-         self.offset)
-
-         """
-         if self.formula[self.offset] not in (")", "}"):
-             msg = f"Expected ')' or '}}', found: {self.formula[self.offset]}"
-             raise TokenizerError(msg)
-         token = self.token_stack.pop().get_closer()
-         if token.value != self.formula[self.offset]:
-             msg = f"Mismatched ( and {{ pair in '{self.formula}'"
-             raise TokenizerError(msg)
-         self.items.append(token)
-         return 1
-
-     def parse_separator(self):
-         """
-         Consumes a ; or , character.
-
-         Returns the number of characters consumed. (Does not update
-         self.offset)
-
-         """
-         curr_char = self.formula[self.offset]
-         if curr_char not in (";", ","):
-             msg = f"Expected ';' or ',', found: {curr_char}"
-             raise TokenizerError(msg)
-         if curr_char == ";":
-             token = Token.make_separator(";")
-         else:
-             try:
-                 top_type = self.token_stack[-1].type
-             except IndexError:
-                 token = Token(",", Token.OP_IN)  # Range Union operator
-             else:
-                 if top_type == Token.PAREN:
-                     token = Token(",", Token.OP_IN)  # Range Union operator
-                 else:
-                     token = Token.make_separator(",")
-         self.items.append(token)
-         return 1
-
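Whether ',' is an argument separator or the range union operator depends on what is on top of the subexpression stack (a sketch, assuming the Tokenizer class above is importable from this module):

    # Assumes Tokenizer from the module shown in this diff is in scope.
    Tokenizer("SUM(A1,B2)").items  # top of stack is a FUNC token -> ',' is SEP/ARG
    Tokenizer("(A1,B2)").items     # top of stack is a PAREN token -> ',' is OPERATOR-INFIX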
-     def check_scientific_notation(self):
-         """
-         Consumes a + or - character if part of a number in sci. notation.
-
-         Returns True if the character was consumed and self.offset was
-         updated, False otherwise.
-
-         """
-         curr_char = self.formula[self.offset]
-         if curr_char in "+-" and len(self.token) >= 1 and self.SN_RE.match("".join(self.token)):
-             self.token.append(curr_char)
-             self.offset += 1
-             return True
-         return False
-
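This check is what keeps an exponent sign from being split off as an operator (a sketch, assuming the Tokenizer class above is importable from this module):

    # Assumes Tokenizer from the module shown in this diff is in scope.
    Tokenizer("1.5E+10").items  # one OPERAND/NUMBER token: '1.5E+10'
    Tokenizer("1.5+10").items   # NUMBER '1.5', OPERATOR-INFIX '+', NUMBER '10'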
-     def assert_empty_token(self):
-         """
-         Ensure that there's no token currently being parsed.
-
-         If there are unconsumed token contents, it means we hit an unexpected
-         token transition. In this case, we raise a TokenizerError.
-
-         """
-         if self.token:
-             msg = f"Unexpected character at position {self.offset} in '{self.formula}'"
-             raise TokenizerError(msg)
-
-     def save_token(self):
-         """If there's a token being parsed, add it to the item list."""
-         if self.token:
-             self.items.append(Token.make_operand("".join(self.token)))
-             del self.token[:]
-
-
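Putting the pieces together, the token stream for a small formula looks like this (a sketch, assuming the Tokenizer class above is importable from this module; output wrapped here for readability):

    # Assumes Tokenizer from the module shown in this diff is in scope.
    tok = Tokenizer('SUM(A1:B2,"a""b")%')
    print(tok)
    # Based on the rules above this prints:
    #   [FUNC(OPEN,'SUM('),OPERAND(RANGE,'A1:B2'),SEP(ARG,','),
    #    OPERAND(TEXT,'"a""b"'),FUNC(CLOSE,')'),OPERATOR-POSTFIX(,'%')]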
- class Token:
-     """
-     A token in an Excel formula.
-
-     Tokens have three attributes:
-
-     * `value`: The string value parsed that led to this token
-     * `type`: A string identifying the type of token
-     * `subtype`: A string identifying the subtype of the token (optional, and
-       defaults to "")
-
-     """
-
-     __slots__ = ["num_args", "subtype", "type", "value"]
-
-     LITERAL = "LITERAL"
-     OPERAND = "OPERAND"
-     FUNC = "FUNC"
-     ARRAY = "ARRAY"
-     PAREN = "PAREN"
-     SEP = "SEP"
-     OP_PRE = "OPERATOR-PREFIX"
-     OP_IN = "OPERATOR-INFIX"
-     OP_POST = "OPERATOR-POSTFIX"
-     WSPACE = "WHITE-SPACE"
-
-     def __init__(self, value, type_, subtype=""):
-         self.value = value
-         self.type = type_
-         self.subtype = subtype
-         self.num_args = 0
-
-     def __repr__(self):
-         return f"{self.type}({self.subtype},'{self.value}')"
-
-     # Literal operands:
-     #
-     # Literal operands are always of type 'OPERAND' and can be of subtype
-     # 'TEXT' (for text strings), 'NUMBER' (for all numeric types), 'LOGICAL'
-     # (for TRUE and FALSE), 'ERROR' (for literal error values), or 'RANGE'
-     # (for all range references).
-
-     TEXT = "TEXT"
-     NUMBER = "NUMBER"
-     LOGICAL = "LOGICAL"
-     ERROR = "ERROR"
-     RANGE = "RANGE"
-
-     @classmethod
-     def make_operand(cls, value):
-         """Create an operand token."""
-         if value.startswith('"'):
-             subtype = cls.TEXT
-         elif value.startswith("#"):
-             subtype = cls.ERROR
-         elif value in ("TRUE", "FALSE"):
-             subtype = cls.LOGICAL
-         else:
-             try:
-                 float(value)
-                 subtype = cls.NUMBER
-             except ValueError:
-                 subtype = cls.RANGE
-         return cls(value, cls.OPERAND, subtype)
-
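The operand subtypes can be checked directly (illustrative only; each literal is classified by the rules in make_operand() above):

    # Assumes Token from the module shown in this diff is in scope.
    Token.make_operand('"cats"').subtype   # TEXT    (starts with a double quote)
    Token.make_operand("#REF!").subtype    # ERROR   (starts with '#')
    Token.make_operand("FALSE").subtype    # LOGICAL
    Token.make_operand("1.5E-3").subtype   # NUMBER  (parses as a float)
    Token.make_operand("A1:B2").subtype    # RANGE   (everything else)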
-     # Subexpressions
-     #
-     # There are 3 types of `Subexpressions`: functions, array literals, and
-     # parentheticals. Subexpressions have 'OPEN' and 'CLOSE' tokens. 'OPEN'
-     # is used when parsing the initial expression token (i.e., '(' or '{')
-     # and 'CLOSE' is used when parsing the closing expression token ('}' or
-     # ')').
-
-     OPEN = "OPEN"
-     CLOSE = "CLOSE"
-
-     @classmethod
-     def make_subexp(cls, value, func=False):
-         """
-         Create a subexpression token.
-
-         `value`: The value of the token
-         `func`: If True, force the token to be of type FUNC
-
-         """
-         if value[-1] not in ("{", "}", "(", ")"):
-             msg = f"Invalid subexpression value: {value}"
-             raise TokenizerError(msg)
-         if func:
-             if not re.match(".+\\(|\\)", value):
-                 msg = f"Invalid function subexpression value: {value}"
-                 raise TokenizerError(msg)
-             type_ = Token.FUNC
-         elif value in "{}":
-             type_ = Token.ARRAY
-         elif value in "()":
-             type_ = Token.PAREN
-         else:
-             type_ = Token.FUNC
-         subtype = cls.CLOSE if value in ")}" else cls.OPEN
-         return cls(value, type_, subtype)
-
-     def get_closer(self):
-         """Return a closing token that matches this token's type."""
-         if self.type not in (self.FUNC, self.ARRAY, self.PAREN):
-             msg = f"Invalid token type for closer: {self.type}"
-             raise TokenizerError(msg)
-         if self.subtype != self.OPEN:
-             msg = f"Invalid token subtype for closer: {self.subtype}"
-             raise TokenizerError(msg)
-         value = "}" if self.type == self.ARRAY else ")"
-         return self.make_subexp(value, func=self.type == self.FUNC)
-
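Openers keep the function name when there is one, and get_closer() returns the matching closing token (illustrative only, using the classmethods above):

    # Assumes Token from the module shown in this diff is in scope.
    opener = Token.make_subexp("IF(")   # FUNC token, subtype OPEN
    closer = opener.get_closer()        # FUNC token, subtype CLOSE, value ')'
    Token.make_subexp("{").type         # -> Token.ARRAY
    Token.make_subexp("(").type         # -> Token.PAREN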
-     # Separator tokens
-     #
-     # Argument separators always have type 'SEP' and can have one of two
-     # subtypes: 'ARG', 'ROW'. 'ARG' is used for the ',' token, when used to
-     # delimit either function arguments or array elements. 'ROW' is used for
-     # the ';' token, which is always used to delimit rows in an array
-     # literal.
-
-     ARG = "ARG"
-     ROW = "ROW"
-
-     @classmethod
-     def make_separator(cls, value):
-         """Create a separator token."""
-         if value not in (",", ";"):
-             msg = f"Invalid separator value: {value}"
-             raise TokenizerError(msg)
-
-         subtype = cls.ARG if value == "," else cls.ROW
-         return cls(value, cls.SEP, subtype)