pbi-parsers 0.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. pbi_parsers/__init__.py +9 -0
  2. pbi_parsers/base/__init__.py +7 -0
  3. pbi_parsers/base/lexer.py +127 -0
  4. pbi_parsers/base/tokens.py +61 -0
  5. pbi_parsers/dax/__init__.py +22 -0
  6. pbi_parsers/dax/exprs/__init__.py +107 -0
  7. pbi_parsers/dax/exprs/_base.py +46 -0
  8. pbi_parsers/dax/exprs/_utils.py +45 -0
  9. pbi_parsers/dax/exprs/add_sub.py +73 -0
  10. pbi_parsers/dax/exprs/add_sub_unary.py +72 -0
  11. pbi_parsers/dax/exprs/array.py +75 -0
  12. pbi_parsers/dax/exprs/column.py +56 -0
  13. pbi_parsers/dax/exprs/comparison.py +76 -0
  14. pbi_parsers/dax/exprs/concatenation.py +73 -0
  15. pbi_parsers/dax/exprs/div_mul.py +75 -0
  16. pbi_parsers/dax/exprs/exponent.py +67 -0
  17. pbi_parsers/dax/exprs/function.py +102 -0
  18. pbi_parsers/dax/exprs/hierarchy.py +68 -0
  19. pbi_parsers/dax/exprs/identifier.py +46 -0
  20. pbi_parsers/dax/exprs/ins.py +67 -0
  21. pbi_parsers/dax/exprs/keyword.py +60 -0
  22. pbi_parsers/dax/exprs/literal_number.py +46 -0
  23. pbi_parsers/dax/exprs/literal_string.py +45 -0
  24. pbi_parsers/dax/exprs/logical.py +76 -0
  25. pbi_parsers/dax/exprs/measure.py +44 -0
  26. pbi_parsers/dax/exprs/none.py +30 -0
  27. pbi_parsers/dax/exprs/parens.py +61 -0
  28. pbi_parsers/dax/exprs/returns.py +76 -0
  29. pbi_parsers/dax/exprs/table.py +51 -0
  30. pbi_parsers/dax/exprs/variable.py +68 -0
  31. pbi_parsers/dax/formatter.py +215 -0
  32. pbi_parsers/dax/lexer.py +222 -0
  33. pbi_parsers/dax/main.py +63 -0
  34. pbi_parsers/dax/parser.py +66 -0
  35. pbi_parsers/dax/tokens.py +54 -0
  36. pbi_parsers/dax/utils.py +120 -0
  37. pbi_parsers/pq/__init__.py +17 -0
  38. pbi_parsers/pq/exprs/__init__.py +98 -0
  39. pbi_parsers/pq/exprs/_base.py +33 -0
  40. pbi_parsers/pq/exprs/_utils.py +31 -0
  41. pbi_parsers/pq/exprs/add_sub.py +59 -0
  42. pbi_parsers/pq/exprs/add_sub_unary.py +57 -0
  43. pbi_parsers/pq/exprs/and_or_expr.py +60 -0
  44. pbi_parsers/pq/exprs/array.py +53 -0
  45. pbi_parsers/pq/exprs/arrow.py +50 -0
  46. pbi_parsers/pq/exprs/column.py +42 -0
  47. pbi_parsers/pq/exprs/comparison.py +62 -0
  48. pbi_parsers/pq/exprs/concatenation.py +61 -0
  49. pbi_parsers/pq/exprs/div_mul.py +59 -0
  50. pbi_parsers/pq/exprs/each.py +41 -0
  51. pbi_parsers/pq/exprs/ellipsis_expr.py +28 -0
  52. pbi_parsers/pq/exprs/function.py +63 -0
  53. pbi_parsers/pq/exprs/identifier.py +77 -0
  54. pbi_parsers/pq/exprs/if_expr.py +70 -0
  55. pbi_parsers/pq/exprs/is_expr.py +54 -0
  56. pbi_parsers/pq/exprs/keyword.py +40 -0
  57. pbi_parsers/pq/exprs/literal_number.py +31 -0
  58. pbi_parsers/pq/exprs/literal_string.py +31 -0
  59. pbi_parsers/pq/exprs/meta.py +54 -0
  60. pbi_parsers/pq/exprs/negation.py +52 -0
  61. pbi_parsers/pq/exprs/none.py +22 -0
  62. pbi_parsers/pq/exprs/not_expr.py +39 -0
  63. pbi_parsers/pq/exprs/parens.py +43 -0
  64. pbi_parsers/pq/exprs/record.py +58 -0
  65. pbi_parsers/pq/exprs/row.py +54 -0
  66. pbi_parsers/pq/exprs/row_index.py +57 -0
  67. pbi_parsers/pq/exprs/statement.py +67 -0
  68. pbi_parsers/pq/exprs/try_expr.py +55 -0
  69. pbi_parsers/pq/exprs/type_expr.py +78 -0
  70. pbi_parsers/pq/exprs/variable.py +52 -0
  71. pbi_parsers/pq/formatter.py +13 -0
  72. pbi_parsers/pq/lexer.py +219 -0
  73. pbi_parsers/pq/main.py +63 -0
  74. pbi_parsers/pq/parser.py +65 -0
  75. pbi_parsers/pq/tokens.py +81 -0
  76. pbi_parsers-0.7.8.dist-info/METADATA +66 -0
  77. pbi_parsers-0.7.8.dist-info/RECORD +78 -0
  78. pbi_parsers-0.7.8.dist-info/WHEEL +4 -0
@@ -0,0 +1,9 @@
+ from . import dax, pq
+
+ __version__ = "0.7.8"
+
+
+ __all__ = [
+     "dax",
+     "pq",
+ ]
@@ -0,0 +1,7 @@
+ from .lexer import BaseLexer
+ from .tokens import BaseToken
+
+ __all__ = [
+     "BaseLexer",
+     "BaseToken",
+ ]
@@ -0,0 +1,127 @@
+ from collections.abc import Callable
+
+ from .tokens import BaseToken
+
+ MAX_POSITION = 1_000_000
+
+
+ class BaseLexer:
+     source: str
+     start_position: int
+     current_position: int
+     tokens: list[BaseToken]
+
+     def __init__(self, source: str) -> None:
+         self.source = source
+         self.start_position = 0
+         self.current_position = 0
+         self.tokens = []
+
+     def scan_helper(self) -> BaseToken:
+         """Contains the orchestration logic for converting source text into tokens."""
+         msg = "Subclasses should implement the scan_helper method."
+         raise NotImplementedError(msg)
+
+     def match(
+         self,
+         matcher: Callable[[str], bool] | str,
+         chunk: int = 1,
+         *,
+         case_insensitive: bool = True,
+     ) -> bool:
+         """Match a string or a callable matcher against the current position in the source.
+
+         Args:
+         ----
+             matcher (Callable[[str], bool] | str): A string to match or a callable that
+                 takes a string and returns a boolean.
+             chunk (int): The number of characters to check from the current position.
+             case_insensitive (bool): If True, perform a case-insensitive match __only__ for strings.
+
+         """
+         if isinstance(matcher, str):
+             chunk = len(matcher)
+
+         string_chunk = self.peek(chunk)
+         if not string_chunk:
+             return False
+
+         if isinstance(matcher, str):
+             if case_insensitive:
+                 string_chunk = string_chunk.lower()
+                 matcher = matcher.lower()
+             if string_chunk == matcher:
+                 self.advance(chunk)
+                 return True
+             return False
+
+         if matcher(string_chunk):
+             self.advance(chunk)
+             return True
+         return False
+
+     def peek(self, chunk: int = 1) -> str:
+         """Returns the next chunk of text from the current position. Defaults to a single character.
+
+         Args:
+             chunk (int): The number of characters to return from the current position.
+
+         Returns:
+             str: The next chunk of text from the current position.
+
+         """
+         return (
+             self.source[self.current_position : self.current_position + chunk]
+             if self.current_position < len(self.source)
+             else ""
+         )
+
+     def remaining(self) -> str:
+         """Returns the remaining text from the current position to the end of the source.
+
+         Only used for testing and debugging purposes.
+
+         Returns:
+             str: The remaining text from the current position to the end of the source.
+
+         """
+         return self.source[self.current_position :]
+
+     def advance(self, chunk: int = 1) -> None:
+         """Advances the current position by the specified chunk size.
+
+         Generally used alongside peek to consume characters.
+
+         Args:
+             chunk (int): The number of characters to advance the current position.
+
+         Raises:
+             ValueError: If the current position exceeds a predefined MAX_POSITION (1,000,000 characters).
+                 This is to avoid errors in the lexer causing the process to hang.
+
+         """
+         if self.current_position > MAX_POSITION:
+             msg = f"Current position exceeds {MAX_POSITION:,} characters."
+             raise ValueError(msg)
+         self.current_position += chunk
+
+     def scan(self) -> tuple[BaseToken, ...]:
+         """Repeatedly calls scan_helper until the end of the source is reached.
+
+         Returns:
+             tuple[BaseToken, ...]: A tuple of tokens scanned from the source.
+
+         """
+         while not self.at_end():
+             self.tokens.append(self.scan_helper())
+         return tuple(self.tokens)
+
+     def at_end(self) -> bool:
+         """Checks if the current position is at (or beyond) the end of the source.
+
+         Returns:
+             bool: True if the current position is at or beyond the end of the source,
+                 False otherwise.
+
+         """
+         return self.current_position >= len(self.source)
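
To make the contract above concrete, here is a minimal sketch of a BaseLexer subclass. It is not part of the package: the ToyTokenType enum and the scan_helper body are invented for illustration; only match, peek, advance, scan, and the BaseToken/TextSlice types come from the code in this diff.

    from enum import Enum, auto

    from pbi_parsers.base import BaseLexer, BaseToken
    from pbi_parsers.base.tokens import TextSlice


    class ToyTokenType(Enum):  # hypothetical token set, not in the package
        NUMBER = auto()
        PLUS = auto()
        OTHER = auto()


    class ToyLexer(BaseLexer):
        def scan_helper(self) -> BaseToken:
            start = self.current_position
            if self.match("+"):
                tok_type = ToyTokenType.PLUS
            elif self.match(str.isdigit):
                while self.match(str.isdigit):  # consume the rest of the number
                    pass
                tok_type = ToyTokenType.NUMBER
            else:
                self.advance()  # always make progress so scan() terminates
                tok_type = ToyTokenType.OTHER
            return BaseToken(tok_type, TextSlice(self.source, start, self.current_position))


    print(ToyLexer("12+34").scan())
    # (Token(type=NUMBER, text='12'), Token(type=PLUS, text='+'), Token(type=NUMBER, text='34'))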
@@ -0,0 +1,61 @@
+ from dataclasses import dataclass, field
+ from typing import Any
+
+
+ @dataclass
+ class TextSlice:
+     full_text: str = ""
+     start: int = -1
+     end: int = -1
+
+     def __eq__(self, other: object) -> bool:
+         """Checks equality based on the text slice."""
+         if not isinstance(other, TextSlice):
+             return NotImplemented
+         return self.full_text == other.full_text and self.start == other.start and self.end == other.end
+
+     def __hash__(self) -> int:
+         """Returns a hash based on the text slice."""
+         return hash((self.full_text, self.start, self.end))
+
+     def __repr__(self) -> str:
+         """Returns a string representation of the TextSlice."""
+         return f"TextSlice(text='{self.get_text()}', start={self.start}, end={self.end})"
+
+     def get_text(self) -> str:
+         """Returns the text slice."""
+         return self.full_text[self.start : self.end]
+
+
+ @dataclass
+ class BaseToken:
+     tok_type: Any
+     text_slice: TextSlice = field(default_factory=TextSlice)
+
+     def __repr__(self) -> str:
+         pretty_text = self.text_slice.get_text().replace("\n", "\\n").replace("\r", "\\r")
+         return f"Token(type={self.tok_type.name}, text='{pretty_text}')"
+
+     @property
+     def text(self) -> str:
+         """Returns the text underlying the token."""
+         return self.text_slice.get_text()
+
+     def position(self) -> tuple[int, int]:
+         """Returns the start and end positions of the token.
+
+         Returns:
+             tuple[int, int]: A tuple containing the start and end positions of the token within the source text.
+
+         """
+         return self.text_slice.start, self.text_slice.end
+
+     def __eq__(self, other: object) -> bool:
+         """Checks equality based on token type and text slice."""
+         if not isinstance(other, BaseToken):
+             return NotImplemented
+         return self.tok_type == other.tok_type and self.text_slice == other.text_slice
+
+     def __hash__(self) -> int:
+         """Returns a hash based on token type and text slice."""
+         return hash((self.tok_type, self.text_slice))
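
A quick illustration (values invented) of the design here: TextSlice keeps a reference to the whole source plus offsets, so every token shares one string and get_text slices lazily instead of copying substrings at lex time.

    from pbi_parsers.base.tokens import TextSlice

    s = TextSlice(full_text="1 + 2", start=4, end=5)
    assert s.get_text() == "2"
    print(repr(s))  # TextSlice(text='2', start=4, end=5)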
@@ -0,0 +1,22 @@
+ from . import exprs, utils
+ from .exprs import Expression
+ from .formatter import Formatter
+ from .lexer import Lexer
+ from .main import format_expression, to_ast
+ from .parser import Parser
+ from .tokens import Token, TokenType
+ from .utils import highlight_section
+
+ __all__ = [
+     "Expression",
+     "Formatter",
+     "Lexer",
+     "Parser",
+     "Token",
+     "TokenType",
+     "exprs",
+     "format_expression",
+     "highlight_section",
+     "to_ast",
+     "utils",
+ ]
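
For orientation, a hedged sketch of the re-exported entry points. This diff does not show the signatures in main.py; the sketch assumes to_ast and format_expression each accept a DAX source string.

    from pbi_parsers import dax

    ast = dax.to_ast("1 + 2")            # assumed: DAX source string -> Expression tree
    print(ast.pprint())                   # pretty-printed tree, per Expression.pprint
    print(dax.format_expression("1+2"))   # assumed: DAX source string -> formatted text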
@@ -0,0 +1,107 @@
+ from typing import TYPE_CHECKING
+
+ from ._base import Expression
+ from .add_sub import AddSubExpression
+ from .add_sub_unary import AddSubUnaryExpression
+ from .array import ArrayExpression
+ from .column import ColumnExpression
+ from .comparison import ComparisonExpression
+ from .concatenation import ConcatenationExpression
+ from .div_mul import DivMulExpression
+ from .exponent import ExponentExpression
+ from .function import FunctionExpression
+ from .hierarchy import HierarchyExpression
+ from .identifier import IdentifierExpression
+ from .ins import InExpression
+ from .keyword import KeywordExpression
+ from .literal_number import LiteralNumberExpression
+ from .literal_string import LiteralStringExpression
+ from .logical import LogicalExpression
+ from .measure import MeasureExpression
+ from .none import NoneExpression
+ from .parens import ParenthesesExpression
+ from .returns import ReturnExpression
+ from .table import TableExpression
+ from .variable import VariableExpression
+
+ if TYPE_CHECKING:
+     from pbi_parsers.dax.parser import Parser
+
+ # Bool/AddSub/DivMul must be in this order to ensure correct precedence, and they must come before all other expressions.
+ # The column expression must come before the table and identifier expressions to ensure correct precedence.
+ # The identifier expression must come before the table expression to ensure correct precedence.
+
+ # Operator precedence (https://learn.microsoft.com/en-us/dax/dax-operator-reference), from tightest to loosest binding:
+ #   unary +, -
+ #   ^
+ #   *, /
+ #   +, -
+ #   &
+ #   &&, || (note: not specified in the docs, so this is a guess)
+ #   =, ==, <>, <, <=, >, >=
+ #   IN (note: not specified in the docs, so this is a guess)
+ #   NOT
+
+ EXPRESSION_HIERARCHY = (
+     # Operators, must come first
+     InExpression,
+     LogicalExpression,
+     ComparisonExpression,
+     ConcatenationExpression,
+     AddSubExpression,
+     DivMulExpression,
+     ExponentExpression,
+     AddSubUnaryExpression,
+     # For performance, the ones with a defined prefix
+     ReturnExpression,  # must come before VariableExpression
+     VariableExpression,
+     ParenthesesExpression,
+     ArrayExpression,
+     FunctionExpression,
+     MeasureExpression,
+     HierarchyExpression,
+     ColumnExpression,
+     KeywordExpression,
+     IdentifierExpression,
+     TableExpression,  # Technically, it's partially ambiguous with IdentifierExpression
+     LiteralStringExpression,
+     LiteralNumberExpression,
+ )
+
+
+ def any_expression_match(parser: "Parser", skip_first: int = 0) -> Expression | None:
+     """Matches any expression type.
+
+     This is a utility function to simplify the matching process in other expressions.
+     """
+     for expr in EXPRESSION_HIERARCHY[skip_first:]:
+         if match := expr.match(parser):
+             return match
+     return None
+
+
+ __all__ = [
+     "AddSubExpression",
+     "AddSubUnaryExpression",
+     "ArrayExpression",
+     "ColumnExpression",
+     "ComparisonExpression",
+     "ConcatenationExpression",
+     "DivMulExpression",
+     "ExponentExpression",
+     "Expression",
+     "FunctionExpression",
+     "HierarchyExpression",
+     "IdentifierExpression",
+     "InExpression",
+     "KeywordExpression",
+     "LiteralNumberExpression",
+     "LiteralStringExpression",
+     "LogicalExpression",
+     "MeasureExpression",
+     "NoneExpression",
+     "ParenthesesExpression",
+     "ReturnExpression",
+     "TableExpression",
+     "VariableExpression",
+ ]
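
The tuple ordering does the work of a precedence-climbing parser: any_expression_match scans from loosest to tightest binding, and each operator class re-enters it with skip_first to restrict its operands to tighter-binding expressions. A hedged illustration, assuming the to_ast entry point and an approximate pprint shape:

    from pbi_parsers.dax import to_ast

    # AddSubExpression matches its left side with skip_first=index+1, so the left
    # operand can only be a tighter-binding expression such as DivMulExpression.
    print(to_ast("1 + 2 * 3").pprint())
    # Expected shape (illustrative, not exact output):
    # Add (
    #     left: 1,
    #     right: Mul ( left: 2, right: 3 )
    # )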
@@ -0,0 +1,46 @@
+ from typing import TYPE_CHECKING, Any
+
+ from pbi_parsers.dax.tokens import TokenType
+
+ if TYPE_CHECKING:
+     from pbi_parsers.dax.parser import Parser
+
+
+ class Expression:
+     pre_comments: list[Any] = []
+     post_comments: list[Any] = []
+
+     def pprint(self) -> str:
+         msg = "Subclasses should implement this method."
+         raise NotImplementedError(msg)
+
+     @classmethod
+     def match(cls, parser: "Parser") -> "Expression | None":
+         """Attempt to match the current tokens to this expression type.
+
+         Returns an instance of the expression if matched, otherwise None.
+         """
+         msg = "Subclasses should implement this method."
+         raise NotImplementedError(msg)
+
+     @staticmethod
+     def match_tokens(parser: "Parser", match_tokens: list[TokenType]) -> bool:
+         return all(parser.peek(i).tok_type == token_type for i, token_type in enumerate(match_tokens))
+
+     def __repr__(self) -> str:
+         return self.pprint()
+
+     def children(self) -> list["Expression"]:
+         """Returns a list of child expressions."""
+         msg = "This method should be implemented by subclasses."
+         raise NotImplementedError(msg)
+
+     def position(self) -> tuple[int, int]:
+         """Returns the start and end positions of the expression in the source code."""
+         msg = "This method should be implemented by subclasses."
+         raise NotImplementedError(msg)
+
+     def full_text(self) -> str:
+         """Returns the full text of the expression."""
+         msg = "This method should be implemented by subclasses."
+         raise NotImplementedError(msg)
@@ -0,0 +1,45 @@
+ from collections.abc import Callable
+ from typing import ParamSpec, TypeVar
+
+ from pbi_parsers.dax.exprs._base import Expression
+ from pbi_parsers.dax.parser import Parser
+ from pbi_parsers.dax.tokens import TokenType
+
+ P = ParamSpec("P")  # Represents the parameters of the decorated function
+ R = TypeVar("R")  # Represents the return type of the decorated function
+
+
+ def lexer_reset(func: Callable[P, R]) -> Callable[P, R]:
+     def lexer_reset_inner(*args: P.args, **kwargs: P.kwargs) -> R:
+         parser = args[1]
+         if not isinstance(parser, Parser):
+             msg = f"Expected the second argument to be a Parser instance, got {type(parser)}"
+             raise TypeError(msg)
+         idx = parser.index
+
+         pre_comments = []
+         while parser.peek().tok_type in {TokenType.SINGLE_LINE_COMMENT, TokenType.MULTI_LINE_COMMENT}:
+             pre_comments.append(parser.consume())
+
+         # Memoization: skip re-parsing when this match was already attempted at this index
+         cached_val, cached_index = parser.cache.get((idx, id(func)), (None, -1))
+         if cached_val is not None:
+             parser.index = cached_index
+             return cached_val
+
+         ret = func(*args, **kwargs)
+
+         post_comments = []
+         while parser.peek().tok_type in {TokenType.SINGLE_LINE_COMMENT, TokenType.MULTI_LINE_COMMENT}:
+             post_comments.append(parser.consume())
+
+         if isinstance(ret, Expression):
+             ret.pre_comments = pre_comments
+             ret.post_comments = post_comments
+
+         parser.cache[idx, id(func)] = (ret, parser.index)
+         if ret is None:
+             parser.index = idx
+         return ret
+
+     return lexer_reset_inner
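
The decorator above combines packrat-style memoization with a backtracking reset. A self-contained sketch (not package code; ToyParser is a hypothetical stand-in for the real Parser) of the core idea, with the cache keyed on (token index, rule identity) and storing (result, post-parse index). Caching only successes is a behaviorally equivalent simplification, since the original treats a cached None as a miss:

    from typing import Any, Callable


    class ToyParser:  # hypothetical stand-in for pbi_parsers.dax.parser.Parser
        def __init__(self) -> None:
            self.index = 0
            self.cache: dict[tuple[int, int], tuple[Any, int]] = {}


    def memoized_match(rule: Callable[[ToyParser], Any], parser: ToyParser) -> Any:
        key = (parser.index, id(rule))
        if key in parser.cache:
            result, parser.index = parser.cache[key]  # replay saved result and position
            return result
        start = parser.index
        result = rule(parser)
        if result is not None:
            parser.cache[key] = (result, parser.index)  # successes are replayable
        else:
            parser.index = start  # failed match: rewind, mirroring lexer_reset
        return result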
@@ -0,0 +1,73 @@
+ import textwrap
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.dax.tokens import Token, TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+
+ if TYPE_CHECKING:
+     from pbi_parsers.dax.parser import Parser
+
+
+ class AddSubExpression(Expression):
+     """Represents an addition or subtraction expression.
+
+     Examples:
+         1 + 2
+         func() - 3
+
+     """
+
+     operator: Token
+     left: Expression
+     right: Expression
+
+     def __init__(self, operator: Token, left: Expression, right: Expression) -> None:
+         self.operator = operator
+         self.left = left
+         self.right = right
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "AddSubExpression | None":
+         from . import EXPRESSION_HIERARCHY, any_expression_match  # noqa: PLC0415
+
+         skip_index = EXPRESSION_HIERARCHY.index(AddSubExpression)
+
+         left_term = any_expression_match(parser=parser, skip_first=skip_index + 1)
+         operator = parser.consume()
+
+         if not left_term:
+             return None
+         if operator.tok_type not in {
+             TokenType.PLUS_SIGN,
+             TokenType.MINUS_SIGN,
+         }:
+             return None
+
+         right_term = any_expression_match(parser=parser, skip_first=skip_index)
+         if right_term is None:
+             msg = f"Expected a right term after operator {operator.text}, found: {parser.peek()}"
+             raise ValueError(msg)
+         return AddSubExpression(operator=operator, left=left_term, right=right_term)
+
+     def pprint(self) -> str:
+         op_str = "Add" if self.operator.text == "+" else "Sub"
+         left_str = textwrap.indent(self.left.pprint(), " " * 10).lstrip()
+         right_str = textwrap.indent(self.right.pprint(), " " * 11).lstrip()
+         return f"""
+ {op_str} (
+     left: {left_str},
+     right: {right_str}
+ )""".strip()
+
+     def children(self) -> list[Expression]:
+         """Returns a list of child expressions."""
+         return [self.left, self.right]
+
+     def position(self) -> tuple[int, int]:
+         return self.left.position()[0], self.right.position()[1]
+
+     def full_text(self) -> str:
+         return self.operator.text_slice.full_text
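
Note a consequence of the skip_first arithmetic above: the left operand excludes AddSubExpression (skip_index + 1) while the right operand includes it (skip_index), so chained + and - nest to the right, e.g. 1 - 2 + 3 parses as Sub(1, Add(2, 3)). A hedged check, assuming the to_ast entry point and an approximate pprint shape:

    from pbi_parsers.dax import to_ast

    print(to_ast("1 - 2 + 3").pprint())
    # Illustrative shape, not exact output:
    # Sub (
    #     left: 1,
    #     right: Add ( left: 2, right: 3 )
    # )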
@@ -0,0 +1,72 @@
+ import textwrap
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.dax.tokens import Token, TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+
+ if TYPE_CHECKING:
+     from pbi_parsers.dax.parser import Parser
+
+
+ class AddSubUnaryExpression(Expression):
+     """Represents a unary plus or minus expression.
+
+     Examples:
+         +2
+         -func()
+
+     """
+
+     operator: Token
+     number: Expression
+
+     def __init__(self, operator: Token, number: Expression) -> None:
+         self.operator = operator
+         self.number = number
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "AddSubUnaryExpression | None":
+         from . import EXPRESSION_HIERARCHY, any_expression_match  # noqa: PLC0415
+
+         skip_index = EXPRESSION_HIERARCHY.index(
+             AddSubUnaryExpression,
+         )  # intentionally inclusive of self to allow +-++- chains
+
+         operator = parser.consume()
+
+         if operator.tok_type not in {
+             TokenType.PLUS_SIGN,
+             TokenType.MINUS_SIGN,
+         }:
+             return None
+
+         # Handle chained +-++-+ prefixes
+         number: Expression | None = any_expression_match(
+             parser=parser,
+             skip_first=skip_index,
+         )
+         if number is None:
+             msg = f"Expected a right term after operator {operator.text}, found: {parser.peek()}"
+             raise ValueError(msg)
+         return AddSubUnaryExpression(operator=operator, number=number)
+
+     def pprint(self) -> str:
+         number = textwrap.indent(self.number.pprint(), " " * 12).lstrip()
+         return f"""
+ Number (
+     sign: {self.operator.text},
+     number: {number},
+ )""".strip()
+
+     def children(self) -> list[Expression]:
+         """Returns a list of child expressions."""
+         return [self.number]
+
+     def position(self) -> tuple[int, int]:
+         return self.operator.text_slice.start, self.number.position()[1]
+
+     def full_text(self) -> str:
+         return self.operator.text_slice.full_text
1
+ import textwrap
2
+ from typing import TYPE_CHECKING
3
+
4
+ from pbi_parsers.dax.tokens import Token, TokenType
5
+
6
+ from ._base import Expression
7
+ from ._utils import lexer_reset
8
+
9
+ if TYPE_CHECKING:
10
+ from pbi_parsers.dax.parser import Parser
11
+
12
+
13
+ class ArrayExpression(Expression):
14
+ """Represents an array expression.
15
+
16
+ Examples:
17
+ {1, 2, 3}
18
+ {func(), 4, 5}
19
+
20
+ """
21
+
22
+ elements: list[Expression]
23
+ braces: tuple[Token, Token]
24
+
25
+ def __init__(self, elements: list[Expression], braces: tuple[Token, Token]) -> None:
26
+ self.elements: list[Expression] = elements
27
+ self.braces = braces
28
+
29
+ def pprint(self) -> str:
30
+ elements = ",\n".join(element.pprint() for element in self.elements)
31
+ elements = textwrap.indent(elements, " " * 14)[14:]
32
+ return f"""
33
+ Array (
34
+ elements: {elements}
35
+ ) """.strip()
36
+
37
+ @classmethod
38
+ @lexer_reset
39
+ def match(cls, parser: "Parser") -> "ArrayExpression | None":
40
+ from . import any_expression_match # noqa: PLC0415
41
+
42
+ left_brace = parser.consume()
43
+ if left_brace.tok_type != TokenType.LEFT_CURLY_BRACE:
44
+ return None
45
+
46
+ elements: list[Expression] = []
47
+
48
+ while not cls.match_tokens(parser, [TokenType.RIGHT_CURLY_BRACE]):
49
+ # We gotta handle operators next :(
50
+ element = any_expression_match(parser)
51
+ if element is not None:
52
+ elements.append(element)
53
+ else:
54
+ msg = f"Unexpected token sequence: {parser.peek()}, {parser.index}"
55
+ raise ValueError(msg)
56
+
57
+ if not cls.match_tokens(parser, [TokenType.RIGHT_CURLY_BRACE]):
58
+ assert parser.consume().tok_type == TokenType.COMMA
59
+
60
+ right_brace = parser.consume()
61
+ if right_brace.tok_type != TokenType.RIGHT_CURLY_BRACE:
62
+ msg = f"Expected a right curly brace, found: {right_brace}"
63
+ raise ValueError(msg)
64
+
65
+ return ArrayExpression(elements=elements, braces=(left_brace, right_brace))
66
+
67
+ def children(self) -> list[Expression]:
68
+ """Returns a list of child expressions."""
69
+ return self.elements
70
+
71
+ def position(self) -> tuple[int, int]:
72
+ return self.braces[0].text_slice.start, self.braces[1].text_slice.end
73
+
74
+ def full_text(self) -> str:
75
+ return self.braces[0].text_slice.full_text
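
Finally, because every expression exposes children(), generic AST traversal falls out of the design. A sketch (not package code; it assumes the to_ast entry point and that leaf nodes return empty child lists):

    from pbi_parsers.dax import to_ast
    from pbi_parsers.dax.exprs import Expression


    def walk(node: Expression):
        """Yield node and all of its descendants, depth-first."""
        yield node
        for child in node.children():
            yield from walk(child)


    for node in walk(to_ast("{1, 2, 3}")):  # parses an ArrayExpression per the class above
        print(type(node).__name__, node.position())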