pbi-parsers 0.7.8 (pbi_parsers-0.7.8-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. pbi_parsers/__init__.py +9 -0
  2. pbi_parsers/base/__init__.py +7 -0
  3. pbi_parsers/base/lexer.py +127 -0
  4. pbi_parsers/base/tokens.py +61 -0
  5. pbi_parsers/dax/__init__.py +22 -0
  6. pbi_parsers/dax/exprs/__init__.py +107 -0
  7. pbi_parsers/dax/exprs/_base.py +46 -0
  8. pbi_parsers/dax/exprs/_utils.py +45 -0
  9. pbi_parsers/dax/exprs/add_sub.py +73 -0
  10. pbi_parsers/dax/exprs/add_sub_unary.py +72 -0
  11. pbi_parsers/dax/exprs/array.py +75 -0
  12. pbi_parsers/dax/exprs/column.py +56 -0
  13. pbi_parsers/dax/exprs/comparison.py +76 -0
  14. pbi_parsers/dax/exprs/concatenation.py +73 -0
  15. pbi_parsers/dax/exprs/div_mul.py +75 -0
  16. pbi_parsers/dax/exprs/exponent.py +67 -0
  17. pbi_parsers/dax/exprs/function.py +102 -0
  18. pbi_parsers/dax/exprs/hierarchy.py +68 -0
  19. pbi_parsers/dax/exprs/identifier.py +46 -0
  20. pbi_parsers/dax/exprs/ins.py +67 -0
  21. pbi_parsers/dax/exprs/keyword.py +60 -0
  22. pbi_parsers/dax/exprs/literal_number.py +46 -0
  23. pbi_parsers/dax/exprs/literal_string.py +45 -0
  24. pbi_parsers/dax/exprs/logical.py +76 -0
  25. pbi_parsers/dax/exprs/measure.py +44 -0
  26. pbi_parsers/dax/exprs/none.py +30 -0
  27. pbi_parsers/dax/exprs/parens.py +61 -0
  28. pbi_parsers/dax/exprs/returns.py +76 -0
  29. pbi_parsers/dax/exprs/table.py +51 -0
  30. pbi_parsers/dax/exprs/variable.py +68 -0
  31. pbi_parsers/dax/formatter.py +215 -0
  32. pbi_parsers/dax/lexer.py +222 -0
  33. pbi_parsers/dax/main.py +63 -0
  34. pbi_parsers/dax/parser.py +66 -0
  35. pbi_parsers/dax/tokens.py +54 -0
  36. pbi_parsers/dax/utils.py +120 -0
  37. pbi_parsers/pq/__init__.py +17 -0
  38. pbi_parsers/pq/exprs/__init__.py +98 -0
  39. pbi_parsers/pq/exprs/_base.py +33 -0
  40. pbi_parsers/pq/exprs/_utils.py +31 -0
  41. pbi_parsers/pq/exprs/add_sub.py +59 -0
  42. pbi_parsers/pq/exprs/add_sub_unary.py +57 -0
  43. pbi_parsers/pq/exprs/and_or_expr.py +60 -0
  44. pbi_parsers/pq/exprs/array.py +53 -0
  45. pbi_parsers/pq/exprs/arrow.py +50 -0
  46. pbi_parsers/pq/exprs/column.py +42 -0
  47. pbi_parsers/pq/exprs/comparison.py +62 -0
  48. pbi_parsers/pq/exprs/concatenation.py +61 -0
  49. pbi_parsers/pq/exprs/div_mul.py +59 -0
  50. pbi_parsers/pq/exprs/each.py +41 -0
  51. pbi_parsers/pq/exprs/ellipsis_expr.py +28 -0
  52. pbi_parsers/pq/exprs/function.py +63 -0
  53. pbi_parsers/pq/exprs/identifier.py +77 -0
  54. pbi_parsers/pq/exprs/if_expr.py +70 -0
  55. pbi_parsers/pq/exprs/is_expr.py +54 -0
  56. pbi_parsers/pq/exprs/keyword.py +40 -0
  57. pbi_parsers/pq/exprs/literal_number.py +31 -0
  58. pbi_parsers/pq/exprs/literal_string.py +31 -0
  59. pbi_parsers/pq/exprs/meta.py +54 -0
  60. pbi_parsers/pq/exprs/negation.py +52 -0
  61. pbi_parsers/pq/exprs/none.py +22 -0
  62. pbi_parsers/pq/exprs/not_expr.py +39 -0
  63. pbi_parsers/pq/exprs/parens.py +43 -0
  64. pbi_parsers/pq/exprs/record.py +58 -0
  65. pbi_parsers/pq/exprs/row.py +54 -0
  66. pbi_parsers/pq/exprs/row_index.py +57 -0
  67. pbi_parsers/pq/exprs/statement.py +67 -0
  68. pbi_parsers/pq/exprs/try_expr.py +55 -0
  69. pbi_parsers/pq/exprs/type_expr.py +78 -0
  70. pbi_parsers/pq/exprs/variable.py +52 -0
  71. pbi_parsers/pq/formatter.py +13 -0
  72. pbi_parsers/pq/lexer.py +219 -0
  73. pbi_parsers/pq/main.py +63 -0
  74. pbi_parsers/pq/parser.py +65 -0
  75. pbi_parsers/pq/tokens.py +81 -0
  76. pbi_parsers-0.7.8.dist-info/METADATA +66 -0
  77. pbi_parsers-0.7.8.dist-info/RECORD +78 -0
  78. pbi_parsers-0.7.8.dist-info/WHEEL +4 -0
@@ -0,0 +1,120 @@ pbi_parsers/dax/utils.py
+ from typing import TypeVar
+
+ import jinja2
+ from colorama import Fore, Style
+
+ from .exprs import Expression
+ from .tokens import Token
+
+ T = TypeVar("T", bound=Expression)
+
+
+ def find_all(ast: Expression, class_type: type[T] | tuple[type[T], ...]) -> list[T]:
+     """Find all instances of a specific class type in the AST."""
+     ret = []
+     for child in ast.children():
+         if isinstance(child, class_type):
+             ret.append(child)
+         ret.extend(find_all(child, class_type))
+     return ret
+
+
+ CONSOLE = jinja2.Template("""
+ {%- for i, section_line in enumerate(lines) -%}
+ {%- if i in highlights %}
+ {{ Style.BRIGHT }}{{ Fore.CYAN }}{{ i }} |{{ Style.RESET_ALL }} {{ section_line }}
+ {{ " " * (highlights[i][0]) }}{{ Style.BRIGHT }}{{ Fore.YELLOW }}{{ "^" * (highlights[i][1] - highlights[i][0]) }}{{ Style.RESET_ALL }}
+ {%- elif i >= section_boundary_lines[0] and i <= section_boundary_lines[1] %}
+ {{ i }} | {{ section_line }}
+ {%- endif %}
+ {%- endfor %}
+ """)
+ HTML = jinja2.Template("""
+ <div>
+ {% for i, section_line in enumerate(section_lines) %}
+ <span class="{{ "" if i == 0 or i == section_lines|length - 1 else "highlighted" }}">{{ starting_line + i }} |</span>
+ <span>{{ section_line }}</span>
+ {% endfor %}
+ </div>
+ """)  # noqa: E501
+
+
+ class Context:
+     position: tuple[int, int]
+     full_text: str
+
+     def __init__(self, position: tuple[int, int], full_text: str) -> None:
+         self.position = position
+         self.full_text = full_text
+
+     def __repr__(self) -> str:
+         return self.to_console()
+
+     @staticmethod
+     def _get_highlighted_text(
+         lines: list[str],
+         position: tuple[int, int],
+     ) -> dict[int, tuple[int, int]]:
+         highlight_line_dict: dict[int, tuple[int, int]] = {}
+
+         remaining_start, remaining_end = position
+         for i, line in enumerate(lines):
+             if len(line) > remaining_start and remaining_end > 0:
+                 buffer = len(str(i)) + 3
+                 highlight_line_dict[i] = (
+                     buffer + remaining_start,
+                     buffer + min(remaining_end, len(line)),
+                 )
+             remaining_start -= len(line) + 1  # +1 for the newline character
+             remaining_end -= len(line) + 1
+         return highlight_line_dict
+
+     def to_console(self, context_lines: int = 2) -> str:
+         """Render the context for console output."""
+         lines = self.full_text.split("\n")
+         starting_line = self.full_text.count("\n", 0, self.position[0])
+         final_line = self.full_text.count("\n", 0, self.position[1])
+         section_boundary_lines = (max(starting_line - context_lines, 0), min(final_line + context_lines, len(lines)))
+         highlights = self._get_highlighted_text(lines, self.position)
+         return CONSOLE.render(
+             lines=lines,
+             section_boundary_lines=section_boundary_lines,
+             highlights=highlights,
+             enumerate=enumerate,
+             Style=Style,
+             Fore=Fore,
+         )
+
+     def to_dict(self) -> dict[str, str | tuple[int, int]]:
+         """Convert the context to a dictionary."""
+         return {
+             "position": self.position,
+             "full_text": self.full_text,
+         }
+
+     def to_html(self) -> str:
+         """Render the context for HTML output."""
+         lines = self.full_text.split("\n")
+         starting_line = self.full_text.count("\n", 0, self.position[0]) + 1
+         final_line = self.full_text.count("\n", 0, self.position[1]) + 1
+
+         section_lines = lines[starting_line - 2 : final_line + 1]
+         return HTML.render(
+             section_lines=section_lines,
+             enumerate=enumerate,
+             starting_line=starting_line,
+             Style=Style,
+             Fore=Fore,
+         )
+
+
+ def highlight_section(node: Expression | Token | list[Token] | list[Expression]) -> Context:
+     if isinstance(node, list):
+         position = (node[0].position()[0], node[-1].position()[1])
+         first_node = node[0]
+         full_text = first_node.text_slice.full_text if isinstance(first_node, Token) else first_node.full_text()
+         return Context(position, full_text)
+
+     position = node.position()
+     full_text = node.text_slice.full_text if isinstance(node, Token) else node.full_text()
+     return Context(position, full_text)
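A hedged usage sketch for the helpers in this file (find_all, highlight_section, Context.to_console). The to_ast entry point and the FunctionExpression class are assumed by analogy with the pq package; their DAX-side definitions (pbi_parsers/dax/main.py and pbi_parsers/dax/exprs/function.py) are listed above but not shown in this excerpt.

from pbi_parsers.dax import to_ast                      # assumed export, mirroring pbi_parsers.pq
from pbi_parsers.dax.exprs import FunctionExpression    # assumed class name; dax/exprs/function.py is listed above
from pbi_parsers.dax.utils import find_all, highlight_section

ast = to_ast("SUM ( Sales[Amount] )")                   # assumed: DAX source text in, Expression out
for call in find_all(ast, FunctionExpression):          # recursively collect every function call in the AST
    print(highlight_section(call).to_console())         # caret-underlined snippet with line numbers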
@@ -0,0 +1,17 @@ pbi_parsers/pq/__init__.py
+ from .exprs import Expression
+ from .formatter import Formatter
+ from .lexer import Lexer
+ from .main import format_expression, to_ast
+ from .parser import Parser
+ from .tokens import Token, TokenType
+
+ __all__ = [
+     "Expression",
+     "Formatter",
+     "Lexer",
+     "Parser",
+     "Token",
+     "TokenType",
+     "format_expression",
+     "to_ast",
+ ]
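A hedged sketch of how these re-exports are likely used. format_expression and to_ast come from .main, whose code is not part of this excerpt, so the call shapes below (Power Query / M source text in, an Expression or formatted text out) are assumptions.

from pbi_parsers.pq import format_expression, to_ast

source = "Table.SelectRows(Source, each [Amount] > 100)"
ast = to_ast(source)              # assumed signature: source text -> Expression
print(ast.pprint())               # pprint() is the Expression API defined in pq/exprs/_base.py below
print(format_expression(source))  # assumed: lex, parse, and pretty-print the expression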
@@ -0,0 +1,98 @@ pbi_parsers/pq/exprs/__init__.py
+ from typing import TYPE_CHECKING
+
+ from ._base import Expression
+ from .add_sub import AddSubExpression
+ from .add_sub_unary import AddSubUnaryExpression
+ from .and_or_expr import AndOrExpression
+ from .array import ArrayExpression
+ from .arrow import ArrowExpression
+ from .column import ColumnExpression
+ from .comparison import ComparisonExpression
+ from .concatenation import ConcatenationExpression
+ from .div_mul import DivMulExpression
+ from .each import EachExpression
+ from .ellipsis_expr import EllipsisExpression
+ from .function import FunctionExpression
+ from .identifier import BracketedIdentifierExpression, IdentifierExpression
+ from .if_expr import IfExpression
+ from .is_expr import IsExpression
+ from .keyword import KeywordExpression
+ from .literal_number import LiteralNumberExpression
+ from .literal_string import LiteralStringExpression
+ from .meta import MetaExpression
+ from .negation import NegationExpression
+ from .not_expr import NotExpression
+ from .parens import ParenthesesExpression
+ from .record import RecordExpression
+ from .row import RowExpression
+ from .row_index import RowIndexExpression
+ from .statement import StatementExpression
+ from .try_expr import TryExpression
+ from .type_expr import TypingExpression
+ from .variable import VariableExpression
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+ """
+ operators > all
+
+ operators:
+     if, comparison, concatenation, add/sub, div/mul, meta, add/sub, unary
+
+ variable > identifier
+ row > identifier
+ function > identifier
+ typing > keyword
+ arrow > parentheses
+ """
+ EXPRESSION_HIERARCHY: tuple[type[Expression], ...] = (
+     IfExpression,
+     AndOrExpression,
+     ComparisonExpression,
+     ConcatenationExpression,
+     AddSubExpression,
+     DivMulExpression,
+     IsExpression,
+     MetaExpression,
+     NegationExpression,
+     AddSubUnaryExpression,
+     RowIndexExpression,
+     # above are left-associative operators
+     NotExpression,
+     EllipsisExpression,
+     ArrowExpression,
+     TryExpression,
+     ParenthesesExpression,
+     StatementExpression,
+     ColumnExpression,
+     EachExpression,
+     ArrayExpression,
+     FunctionExpression,
+     VariableExpression,
+     RowExpression,
+     TypingExpression,
+     KeywordExpression,
+     IdentifierExpression,
+     RecordExpression,
+     BracketedIdentifierExpression,
+     LiteralStringExpression,
+     LiteralNumberExpression,
+ )
+
+
+ def any_expression_match(parser: "Parser", skip_first: int = 0) -> Expression | None:
+     """Matches any expression type.
+
+     This is a utility function to simplify the matching process in other expressions.
+     """
+     for expr in EXPRESSION_HIERARCHY[skip_first:]:
+         if match := expr.match(parser):
+             return match
+     return None
+
+
+ __all__ = [
+     "Expression",
+     "any_expression_match",
+ ]
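This module fixes operator precedence purely by the order of EXPRESSION_HIERARCHY: any_expression_match tries the rules top-down, and each binary rule (as seen in add_sub.py below) asks for its left operand with skip_first set to its own index plus one, so it cannot re-enter itself without consuming input. A minimal standalone sketch of that idea follows; the names are hypothetical and it is not part of pbi_parsers.

RULES: list = []

def match_number(tokens, pos, _own_index):
    # Lowest rule in the toy hierarchy: a bare integer token.
    if pos < len(tokens) and tokens[pos].isdigit():
        return int(tokens[pos]), pos + 1
    return None

def match_add(tokens, pos, own_index):
    # Left operand: only rules *below* this one, so "+" cannot left-recurse.
    left = match_any(tokens, pos, skip_first=own_index + 1)
    if left is None:
        return None
    left_value, nxt = left
    if nxt >= len(tokens) or tokens[nxt] != "+":
        return None
    # Right operand: this rule is allowed again, so "+" chains to the right.
    right = match_any(tokens, nxt + 1, skip_first=own_index)
    if right is None:
        return None
    right_value, end = right
    return ("+", left_value, right_value), end

RULES.extend([match_add, match_number])

def match_any(tokens, pos=0, skip_first=0):
    for index in range(skip_first, len(RULES)):
        result = RULES[index](tokens, pos, index)
        if result is not None:
            return result
    return None

print(match_any("1 + 2 + 3".split()))  # (('+', 1, ('+', 2, 3)), 5)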
@@ -0,0 +1,33 @@ pbi_parsers/pq/exprs/_base.py
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.pq.tokens import TokenType
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+
+ class Expression:
+     def pprint(self) -> str:
+         msg = "Subclasses should implement this method."
+         raise NotImplementedError(msg)
+
+     @classmethod
+     def match(cls, parser: "Parser") -> "Expression | None":
+         """Attempt to match the current tokens to this expression type.
+
+         Returns an instance of the expression if matched, otherwise None.
+         """
+         msg = "Subclasses should implement this method."
+         raise NotImplementedError(msg)
+
+     @staticmethod
+     def match_tokens(parser: "Parser", match_tokens: list[TokenType]) -> bool:
+         return all(parser.peek(i).tok_type == token_type for i, token_type in enumerate(match_tokens))
+
+     def __repr__(self) -> str:
+         return self.pprint()
+
+     def children(self) -> list["Expression"]:
+         """Returns a list of child expressions."""
+         msg = "This method should be implemented by subclasses."
+         raise NotImplementedError(msg)
@@ -0,0 +1,31 @@ pbi_parsers/pq/exprs/_utils.py
+ from collections.abc import Callable
+ from typing import ParamSpec, TypeVar
+
+ from pbi_parsers.pq.parser import Parser
+
+ P = ParamSpec("P")  # Represents the parameters of the decorated function
+ R = TypeVar("R")  # Represents the return type of the decorated function
+
+
+ def lexer_reset(func: Callable[P, R]) -> Callable[P, R]:
+     def lexer_reset_inner(*args: P.args, **kwargs: P.kwargs) -> R:
+         parser = args[1]
+         if not isinstance(parser, Parser):
+             msg = f"Expected the second argument to be a Parser instance, got {type(parser)}"
+             raise TypeError(msg)
+         idx = parser.index
+
+         # Speed up of a bazillion
+         cached_val, cached_index = parser.cache.get((idx, id(func)), (None, -1))
+         if cached_val is not None:
+             parser.index = cached_index
+             return cached_val
+
+         ret = func(*args, **kwargs)
+
+         parser.cache[idx, id(func)] = (ret, parser.index)
+         if ret is None:
+             parser.index = idx
+         return ret
+
+     return lexer_reset_inner
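A condensed, standalone restatement of the pattern above (MiniParser is hypothetical; only .index and .cache mirror what lexer_reset actually touches). A failed attempt rewinds the cursor, and a repeated attempt at the same index replays the cached result together with the cursor position it previously ended at.

from collections.abc import Callable

class MiniParser:
    def __init__(self, tokens: list[str]) -> None:
        self.tokens = tokens
        self.index = 0
        self.cache: dict[tuple[int, int], tuple[object, int]] = {}

def reset_on_failure(rule: Callable[[MiniParser], object]) -> Callable[[MiniParser], object]:
    def wrapper(parser: MiniParser) -> object:
        start = parser.index
        cached = parser.cache.get((start, id(rule)))
        if cached is not None and cached[0] is not None:
            parser.index = cached[1]      # jump to where the cached match ended
            return cached[0]
        result = rule(parser)
        parser.cache[start, id(rule)] = (result, parser.index)
        if result is None:
            parser.index = start          # rewind so the next alternative retries from here
        return result
    return wrapper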
@@ -0,0 +1,59 @@ pbi_parsers/pq/exprs/add_sub.py
+ import textwrap
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.pq.tokens import Token, TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+
+ class AddSubExpression(Expression):
+     """Represents an addition or subtraction expression."""
+
+     operator: Token
+     left: Expression
+     right: Expression
+
+     def __init__(self, operator: Token, left: Expression, right: Expression) -> None:
+         self.operator = operator
+         self.left = left
+         self.right = right
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "AddSubExpression | None":
+         from . import EXPRESSION_HIERARCHY, any_expression_match  # noqa: PLC0415
+
+         skip_index = EXPRESSION_HIERARCHY.index(AddSubExpression)
+
+         left_term = any_expression_match(parser=parser, skip_first=skip_index + 1)
+         operator = parser.consume()
+
+         if not left_term:
+             return None
+         if operator.tok_type not in {TokenType.PLUS_SIGN, TokenType.MINUS_SIGN}:
+             return None
+
+         right_term = any_expression_match(parser=parser, skip_first=skip_index)
+         if right_term is None:
+             msg = f"Expected a right term after operator {operator.text}, found: {parser.peek()}"
+             raise ValueError(msg)
+         return AddSubExpression(operator=operator, left=left_term, right=right_term)
+
+     def pprint(self) -> str:
+         op_str = "Add" if self.operator.text == "+" else "Sub"
+         left_str = textwrap.indent(self.left.pprint(), " " * 10).lstrip()
+         right_str = textwrap.indent(self.right.pprint(), " " * 10).lstrip()
+         return f"""
+ {op_str} (
+     operator: {self.operator.text},
+     left: {left_str},
+     right: {right_str}
+ )""".strip()
+
+     def children(self) -> list[Expression]:
+         """Returns a list of child expressions."""
+         return [self.left, self.right]
@@ -0,0 +1,57 @@ pbi_parsers/pq/exprs/add_sub_unary.py
+ import textwrap
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.pq.tokens import Token, TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+
+ class AddSubUnaryExpression(Expression):
+     """Represents a unary plus or minus (sign) expression."""
+
+     operator: Token
+     number: Expression
+
+     def __init__(self, operator: Token, number: Expression) -> None:
+         self.operator = operator
+         self.number = number
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "AddSubUnaryExpression | None":
+         from . import EXPRESSION_HIERARCHY, any_expression_match  # noqa: PLC0415
+
+         skip_index = EXPRESSION_HIERARCHY.index(
+             AddSubUnaryExpression,
+         )  # intentionally inclusive of self to allow +-++- chains
+
+         operator = parser.consume()
+
+         if operator.tok_type not in {TokenType.PLUS_SIGN, TokenType.MINUS_SIGN}:
+             return None
+
+         # Handle chained +-++-+ prefixes
+         number: Expression | None = any_expression_match(
+             parser=parser,
+             skip_first=skip_index,
+         )
+         if number is None:
+             msg = f"Expected a number after operator {operator.text}, found: {parser.peek()}"
+             raise ValueError(msg)
+         return AddSubUnaryExpression(operator=operator, number=number)
+
+     def pprint(self) -> str:
+         number = textwrap.indent(self.number.pprint(), " " * 12).lstrip()
+         return f"""
+ Number (
+     sign: {self.operator.text},
+     number: {number},
+ )""".strip()
+
+     def children(self) -> list[Expression]:
+         """Returns a list of child expressions."""
+         return [self.number]
@@ -0,0 +1,60 @@ pbi_parsers/pq/exprs/and_or_expr.py
+ import textwrap
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.pq.tokens import Token, TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+
+ class AndOrExpression(Expression):
+     """Represents an AND or OR expression."""
+
+     operator: Token
+     left: Expression
+     right: Expression
+
+     def __init__(self, operator: Token, left: Expression, right: Expression) -> None:
+         self.operator = operator
+         self.left = left
+         self.right = right
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "AndOrExpression | None":
+         from . import EXPRESSION_HIERARCHY, any_expression_match  # noqa: PLC0415
+
+         skip_index = EXPRESSION_HIERARCHY.index(AndOrExpression)
+
+         left_term = any_expression_match(parser=parser, skip_first=skip_index + 1)
+         operator = parser.consume()
+
+         if not left_term:
+             return None
+         if operator.tok_type not in {
+             TokenType.AND,
+             TokenType.OR,
+         }:
+             return None
+
+         right_term = any_expression_match(parser=parser, skip_first=skip_index)
+         if right_term is None:
+             msg = f"Expected a right term after operator {operator.text}, found: {parser.peek()}"
+             raise ValueError(msg)
+         return AndOrExpression(operator=operator, left=left_term, right=right_term)
+
+     def pprint(self) -> str:
+         left_str = textwrap.indent(self.left.pprint(), " " * 10)[10:]
+         right_str = textwrap.indent(self.right.pprint(), " " * 10)[10:]
+         return f"""
+ {self.operator.text} (
+     left: {left_str},
+     right: {right_str}
+ )""".strip()
+
+     def children(self) -> list[Expression]:
+         """Returns a list of child expressions."""
+         return [self.left, self.right]
@@ -0,0 +1,53 @@ pbi_parsers/pq/exprs/array.py
+ import textwrap
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.pq.tokens import TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+
+ class ArrayExpression(Expression):
+     elements: list[Expression]
+
+     def __init__(self, elements: list[Expression]) -> None:
+         self.elements: list[Expression] = elements
+
+     def pprint(self) -> str:
+         elements = ",\n".join(element.pprint() for element in self.elements)
+         elements = textwrap.indent(elements, " " * 14)[14:]
+         return f"""
+ Array (
+     elements: {elements}
+ ) """.strip()
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "ArrayExpression | None":
+         from . import any_expression_match  # noqa: PLC0415
+
+         if parser.consume().tok_type != TokenType.LEFT_CURLY_BRACE:
+             return None
+
+         elements: list[Expression] = []
+
+         while not cls.match_tokens(parser, [TokenType.RIGHT_CURLY_BRACE]):
+             # We gotta handle operators next :(
+             element = any_expression_match(parser)
+             if element is not None:
+                 elements.append(element)
+             else:
+                 msg = f"Unexpected token sequence: {parser.peek()}, {parser.index}"
+                 raise ValueError(msg)
+
+             if not cls.match_tokens(parser, [TokenType.RIGHT_CURLY_BRACE]):
+                 assert parser.consume().tok_type == TokenType.COMMA
+         _right_brace = parser.consume()
+         return ArrayExpression(elements=elements)
+
+     def children(self) -> list[Expression]:
+         """Returns a list of child expressions."""
+         return self.elements
@@ -0,0 +1,50 @@ pbi_parsers/pq/exprs/arrow.py
+ import textwrap
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.pq.tokens import TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+ from .parens import ParenthesesExpression
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+
+ class ArrowExpression(Expression):
+     inputs: ParenthesesExpression
+     function_body: Expression
+
+     def __init__(self, inputs: ParenthesesExpression, function_body: Expression) -> None:
+         self.inputs = inputs
+         self.function_body = function_body
+
+     def pprint(self) -> str:
+         inputs = textwrap.indent(self.inputs.pprint(), " " * 10)[10:]
+         function_body = textwrap.indent(self.function_body.pprint(), " " * 10)[10:]
+         return f"""
+ Arrow (
+     inputs: {inputs},
+     function_body: {function_body}
+ )""".strip()
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "ArrowExpression | None":
+         from . import any_expression_match  # noqa: PLC0415
+
+         inputs = ParenthesesExpression.match(parser)
+         if inputs is None:
+             return None
+
+         if parser.consume().tok_type != TokenType.LAMBDA_ARROW:
+             return None
+         function_body = any_expression_match(parser)
+         if function_body is None:
+             return None
+
+         return ArrowExpression(inputs=inputs, function_body=function_body)
+
+     def children(self) -> list[Expression]:
+         """Returns a list of child expressions."""
+         return [self.inputs, self.function_body]
@@ -0,0 +1,42 @@ pbi_parsers/pq/exprs/column.py
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.pq.tokens import Token, TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+
+ class ColumnExpression(Expression):
+     name: Token
+
+     def __init__(self, name: Token) -> None:
+         self.name = name
+
+     def pprint(self) -> str:
+         return f"Column ({self.name.text})"
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "ColumnExpression | None":
+         if cls.match_tokens(
+             parser,
+             [
+                 TokenType.LEFT_BRACKET,
+                 TokenType.UNQUOTED_IDENTIFIER,
+                 TokenType.RIGHT_BRACKET,
+             ],
+         ):
+             _l_bracket, name, _r_bracket = (
+                 parser.consume(),
+                 parser.consume(),
+                 parser.consume(),
+             )
+             return ColumnExpression(name=name)
+         return None
+
+     def children(self) -> list[Expression]:  # noqa: PLR6301
+         """Returns a list of child expressions."""
+         return []
@@ -0,0 +1,62 @@ pbi_parsers/pq/exprs/comparison.py
+ import textwrap
+ from typing import TYPE_CHECKING
+
+ from pbi_parsers.pq.tokens import Token, TokenType
+
+ from ._base import Expression
+ from ._utils import lexer_reset
+
+ if TYPE_CHECKING:
+     from pbi_parsers.pq.parser import Parser
+
+
+ class ComparisonExpression(Expression):
+     """Represents a comparison expression."""
+
+     operator: Token
+     left: Expression
+     right: Expression
+
+     def __init__(self, operator: Token, left: Expression, right: Expression) -> None:
+         self.operator = operator
+         self.left = left
+         self.right = right
+
+     @classmethod
+     @lexer_reset
+     def match(cls, parser: "Parser") -> "ComparisonExpression | None":
+         from . import EXPRESSION_HIERARCHY, any_expression_match  # noqa: PLC0415
+
+         skip_index = EXPRESSION_HIERARCHY.index(ComparisonExpression)
+
+         left_term = any_expression_match(parser=parser, skip_first=skip_index + 1)
+         operator = parser.consume()
+
+         if not left_term:
+             return None
+         if operator.tok_type not in {
+             TokenType.EQUAL_SIGN,
+             TokenType.COMPARISON_OPERATOR,
+             TokenType.NOT_EQUAL_SIGN,
+         }:
+             return None
+
+         right_term = any_expression_match(parser=parser, skip_first=skip_index)
+         if right_term is None:
+             msg = f"Expected a right term after operator {operator.text}, found: {parser.peek()}"
+             raise ValueError(msg)
+         return ComparisonExpression(operator=operator, left=left_term, right=right_term)
+
+     def pprint(self) -> str:
+         left_str = textwrap.indent(self.left.pprint(), " " * 10)[10:]
+         right_str = textwrap.indent(self.right.pprint(), " " * 10)[10:]
+         return f"""
+ Bool (
+     operator: {self.operator.text},
+     left: {left_str},
+     right: {right_str}
+ )""".strip()
+
+     def children(self) -> list[Expression]:
+         """Returns a list of child expressions."""
+         return [self.left, self.right]