pbi-parsers 0.7.12__py3-none-any.whl → 0.7.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pbi_parsers/__init__.py +1 -1
- pbi_parsers/base/lexer.py +32 -33
- pbi_parsers/base/tokens.py +18 -13
- pbi_parsers/dax/exprs/_base.py +13 -13
- pbi_parsers/dax/exprs/_utils.py +11 -0
- pbi_parsers/dax/exprs/add_sub.py +10 -10
- pbi_parsers/dax/exprs/add_sub_unary.py +10 -10
- pbi_parsers/dax/exprs/array.py +13 -13
- pbi_parsers/dax/exprs/column.py +12 -12
- pbi_parsers/dax/exprs/comparison.py +10 -10
- pbi_parsers/dax/exprs/concatenation.py +10 -10
- pbi_parsers/dax/exprs/div_mul.py +10 -10
- pbi_parsers/dax/exprs/exponent.py +10 -10
- pbi_parsers/dax/exprs/function.py +31 -31
- pbi_parsers/dax/exprs/hierarchy.py +13 -13
- pbi_parsers/dax/exprs/identifier.py +9 -9
- pbi_parsers/dax/exprs/ins.py +10 -10
- pbi_parsers/dax/exprs/keyword.py +9 -9
- pbi_parsers/dax/exprs/literal_number.py +9 -8
- pbi_parsers/dax/exprs/literal_string.py +8 -8
- pbi_parsers/dax/exprs/logical.py +10 -10
- pbi_parsers/dax/exprs/measure.py +8 -8
- pbi_parsers/dax/exprs/none.py +9 -9
- pbi_parsers/dax/exprs/parens.py +12 -12
- pbi_parsers/dax/exprs/returns.py +17 -17
- pbi_parsers/dax/exprs/table.py +11 -11
- pbi_parsers/dax/exprs/variable.py +14 -14
- pbi_parsers/dax/formatter.py +8 -8
- pbi_parsers/dax/lexer.py +97 -97
- pbi_parsers/dax/main.py +16 -16
- pbi_parsers/dax/parser.py +8 -8
- pbi_parsers/dax/utils.py +19 -19
- {pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/METADATA +13 -1
- {pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/RECORD +36 -35
- pbi_parsers-0.7.20.dist-info/licenses/LICENSE +21 -0
- {pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/WHEEL +0 -0
pbi_parsers/__init__.py
CHANGED
pbi_parsers/base/lexer.py
CHANGED
@@ -17,10 +17,33 @@ class BaseLexer:
         self.current_position = 0
         self.tokens = []
 
-    def scan_helper(self) -> BaseToken:
-        """Contains the orchestration logic for converting tokens into expressions."""
-        msg = "Subclasses should implement match_tokens method."
-        raise NotImplementedError(msg)
+    def advance(self, chunk: int = 1) -> None:
+        """Advances the current position by the specified chunk size.
+
+        Generally used alongside peek to consume characters.
+
+        Args:
+            chunk (int): The number of characters to advance the current position.
+
+        Raises:
+            ValueError: If the current position exceeds a predefined MAX_POSITION (1,000,000 characters).
+                This is to avoid errors with the lexer causing the process to hang
+
+        """
+        if self.current_position > MAX_POSITION:
+            msg = f"Current position exceeds {MAX_POSITION:,} characters."
+            raise ValueError(msg)
+        self.current_position += chunk
+
+    def at_end(self) -> bool:
+        """Checks if the current position is at (or beyond) the end of the source.
+
+        Returns:
+            bool: True if the current position is at or beyond the end of the source, False
+                otherwise.
+
+        """
+        return self.current_position >= len(self.source)
 
     def match(
         self,
@@ -32,7 +55,6 @@ class BaseLexer:
         """Match a string or a callable matcher against the current position in the source.
 
         Args:
-        ----
             matcher (Callable[[str], bool] | str): A string to match or a callable that
                 takes a string and returns a boolean.
             chunk (int): The number of characters to check from the current position.
@@ -61,7 +83,7 @@ class BaseLexer:
         return False
 
     def peek(self, chunk: int = 1) -> str:
-        """Returns the next
+        """Returns the next section of text from the current position of length `chunk`. Defaults to a single character.
 
         Args:
            chunk (int): The number of characters to return from the current position.
@@ -87,24 +109,6 @@ class BaseLexer:
         """
         return self.source[self.current_position :]
 
-    def advance(self, chunk: int = 1) -> None:
-        """Advances the current position by the specified chunk size.
-
-        Generally used alongside peek to consume characters.
-
-        Args:
-            chunk (int): The number of characters to advance the current position.
-
-        Raises:
-            ValueError: If the current position exceeds a predefined MAX_POSITION (1,000,000 characters).
-                This is to avoid errors with the lexer causing the process to hang
-
-        """
-        if self.current_position > MAX_POSITION:
-            msg = f"Current position exceeds {MAX_POSITION:,} characters."
-            raise ValueError(msg)
-        self.current_position += chunk
-
     def scan(self) -> tuple[BaseToken, ...]:
         """Repeatedly calls scan_helper until the end of the source is reached.
 
@@ -116,12 +120,7 @@ class BaseLexer:
             self.tokens.append(self.scan_helper())
         return tuple(self.tokens)
 
-    def at_end(self) -> bool:
-        """Checks if the current position is at (or beyond) the end of the source.
-
-        Returns:
-            bool: True if the current position is at or beyond the end of the source, False
-                otherwise.
-
-        """
-        return self.current_position >= len(self.source)
+    def scan_helper(self) -> BaseToken:
+        """Contains the orchestration logic for converting tokens into expressions."""
+        msg = "Subclasses should implement match_tokens method."
+        raise NotImplementedError(msg)
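Note: the relocated advance/at_end/peek trio is the whole cursor protocol the lexer is built on. A minimal standalone sketch of that pattern (illustration only, not the package's BaseLexer; MAX_POSITION mirrors the guard described in the docstring above):

# Standalone sketch of the cursor pattern documented above; not pbi_parsers code.
MAX_POSITION = 1_000_000  # safety limit, per the advance() docstring


class CursorSketch:
    def __init__(self, source: str) -> None:
        self.source = source
        self.current_position = 0

    def peek(self, chunk: int = 1) -> str:
        # Next `chunk` characters from the current position (shorter near the end).
        return self.source[self.current_position : self.current_position + chunk]

    def advance(self, chunk: int = 1) -> None:
        # Consume characters, refusing to run past the safety limit.
        if self.current_position > MAX_POSITION:
            raise ValueError(f"Current position exceeds {MAX_POSITION:,} characters.")
        self.current_position += chunk

    def at_end(self) -> bool:
        return self.current_position >= len(self.source)


cursor = CursorSketch("SUM(1)")
while not cursor.at_end():
    character = cursor.peek()
    cursor.advance()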
pbi_parsers/base/tokens.py
CHANGED
@@ -32,15 +32,20 @@ class BaseToken:
     tok_type: Any
     text_slice: TextSlice = field(default_factory=TextSlice)
 
+    def __eq__(self, other: object) -> bool:
+        """Checks equality based on token type and text slice."""
+        if not isinstance(other, BaseToken):
+            return NotImplemented
+        return self.tok_type == other.tok_type and self.text_slice == other.text_slice
+
+    def __hash__(self) -> int:
+        """Returns a hash based on token type and text slice."""
+        return hash((self.tok_type, self.text_slice))
+
     def __repr__(self) -> str:
         pretty_text = self.text_slice.get_text().replace("\n", "\\n").replace("\r", "\\r")
         return f"Token(type={self.tok_type.name}, text='{pretty_text}')"
 
-    @property
-    def text(self) -> str:
-        """Returns the text underlying the token."""
-        return self.text_slice.get_text()
-
     def position(self) -> tuple[int, int]:
         """Returns the start and end positions of the token.
 
@@ -50,12 +55,12 @@ class BaseToken:
         """
         return self.text_slice.start, self.text_slice.end
 
-    def __eq__(self, other: object) -> bool:
-        """Checks equality based on token type and text slice."""
-        if not isinstance(other, BaseToken):
-            return NotImplemented
-        return self.tok_type == other.tok_type and self.text_slice == other.text_slice
+    @property
+    def text(self) -> str:
+        """Returns the text underlying the token.
 
-    def __hash__(self) -> int:
-        """Returns a hash based on token type and text slice."""
-        return hash((self.tok_type, self.text_slice))
+        Returns:
+            str: The text of the token as a string.
+
+        """
+        return self.text_slice.get_text()
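Note: pairing __hash__ with __eq__ matters because Python makes a class unhashable as soon as it overrides __eq__ without __hash__; deriving both from the same (tok_type, text_slice) pair keeps tokens usable in sets and as dict keys. A minimal sketch of the same idea (illustration only, not the package's BaseToken):

# Sketch: equality and hashing derived from the same fields, so equal tokens
# collapse to a single set entry. Illustration only, not pbi_parsers code.
class TokenSketch:
    def __init__(self, tok_type: str, text: str) -> None:
        self.tok_type = tok_type
        self.text = text

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, TokenSketch):
            return NotImplemented
        return (self.tok_type, self.text) == (other.tok_type, other.text)

    def __hash__(self) -> int:
        return hash((self.tok_type, self.text))


assert len({TokenSketch("NUMBER", "1"), TokenSketch("NUMBER", "1")}) == 1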
pbi_parsers/dax/exprs/_base.py
CHANGED
@@ -10,8 +10,17 @@ class Expression:
     pre_comments: list[Any] = []
     post_comments: list[Any] = []
 
-    def pprint(self) -> str:
-        msg = "This method should be implemented by subclasses."
+    def __repr__(self) -> str:
+        return self.pprint()
+
+    def children(self) -> list["Expression"]:
+        """Returns a list of child expressions."""
+        msg = "This method should be implemented by subclasses."
+        raise NotImplementedError(msg)
+
+    def full_text(self) -> str:
+        """Returns the full text of the expression."""
+        msg = "This method should be implemented by subclasses."
         raise NotImplementedError(msg)
 
     @classmethod
@@ -27,20 +36,11 @@ class Expression:
     def match_tokens(parser: "Parser", match_tokens: list[TokenType]) -> bool:
         return all(parser.peek(i).tok_type == token_type for i, token_type in enumerate(match_tokens))
 
-    def __repr__(self) -> str:
-        return self.pprint()
-
-    def children(self) -> list["Expression"]:
-        """Returns a list of child expressions."""
-        msg = "This method should be implemented by subclasses."
-        raise NotImplementedError(msg)
-
     def position(self) -> tuple[int, int]:
         """Returns the start and end positions of the expression in the source code."""
         msg = "This method should be implemented by subclasses."
         raise NotImplementedError(msg)
 
-    def full_text(self) -> str:
-        """Returns the full text of the expression."""
-        msg = "This method should be implemented by subclasses."
+    def pprint(self) -> str:
+        msg = "Subclasses should implement this method."
         raise NotImplementedError(msg)
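Note: with children() part of the base interface, walking a parsed tree needs nothing but that one method. A sketch of a depth-first traversal over any object that follows the Expression contract above (illustration only, not pbi_parsers code):

from typing import Any, Iterator


def walk(expr: Any) -> Iterator[Any]:
    # Depth-first, pre-order walk; assumes only the children() contract shown above.
    yield expr
    for child in expr.children():
        yield from walk(child)


# e.g. node_count = sum(1 for _ in walk(parsed_expression))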
pbi_parsers/dax/exprs/_utils.py
CHANGED
@@ -10,6 +10,17 @@ R = TypeVar("R") # Represents the return type of the decorated function
 
 
 def lexer_reset(func: Callable[P, R]) -> Callable[P, R]:
+    """Decorator to reset the lexer state before and after parsing an expression.
+
+    This decorator performs the following actions:
+    1. Collects pre-comments before parsing.
+    2. Caches the result of the parsing function to avoid redundant parsing.
+    3. Collects post-comments after parsing.
+
+    The caching is required since the operator precedence otherwise leads to all other expressions being
+    called multiple times.
+    """
+
     def lexer_reset_inner(*args: P.args, **kwargs: P.kwargs) -> R:
         parser = args[1]
         if not isinstance(parser, Parser):
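Note: the new docstring explains why lexer_reset caches: with precedence climbing, low-precedence rules re-try the same sub-expression at the same token position many times, so uncached matching repeats work across every rule. A standalone sketch of position-keyed (packrat-style) memoisation, the idea the docstring describes; the names and the parser.index attribute are assumptions for illustration, not pbi_parsers internals:

from typing import Any, Callable


def cache_by_position(func: Callable[..., Any]) -> Callable[..., Any]:
    cache: dict[tuple[str, int], tuple[Any, int]] = {}

    def wrapper(cls: Any, parser: Any) -> Any:  # mirrors the (cls, parser) call shape
        key = (func.__qualname__, parser.index)  # assumes the parser exposes its position
        if key not in cache:
            result = func(cls, parser)
            cache[key] = (result, parser.index)  # remember where the rule stopped
        result, end_index = cache[key]
        parser.index = end_index  # on a cache hit, replay the consumed tokens
        return result

    return wrapper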
pbi_parsers/dax/exprs/add_sub.py
CHANGED
@@ -28,6 +28,13 @@ class AddSubExpression:
         self.left = left
         self.right = right
 
+    def children(self) -> list[Expression]:
+        """Returns a list of child expressions."""
+        return [self.left, self.right]
+
+    def full_text(self) -> str:
+        return self.operator.text_slice.full_text
+
     @classmethod
     @lexer_reset
     def match(cls, parser: "Parser") -> "AddSubExpression | None":
@@ -52,6 +59,9 @@ class AddSubExpression:
             raise ValueError(msg)
         return AddSubExpression(operator=operator, left=left_term, right=right_term)
 
+    def position(self) -> tuple[int, int]:
+        return self.left.position()[0], self.right.position()[1]
+
     def pprint(self) -> str:
         op_str = "Add" if self.operator.text == "+" else "Sub"
         left_str = textwrap.indent(self.left.pprint(), " " * 10).lstrip()
@@ -61,13 +71,3 @@ class AddSubExpression:
     left: {left_str},
     right: {right_str}
 )""".strip()
-
-    def children(self) -> list[Expression]:
-        """Returns a list of child expressions."""
-        return [self.left, self.right]
-
-    def position(self) -> tuple[int, int]:
-        return self.left.position()[0], self.right.position()[1]
-
-    def full_text(self) -> str:
-        return self.operator.text_slice.full_text
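Note: the same three accessors (children, full_text, position) move above match in each of the expression modules that follow; for a binary node the span runs from the start of the left child to the end of the right child, so the raw DAX text of any node can be sliced straight out of the original source. A sketch, assuming only the position() contract shown above:

def node_text(source: str, node) -> str:
    # `source` is the string that was lexed; `node` is any expression exposing position().
    start, end = node.position()
    return source[start:end]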
pbi_parsers/dax/exprs/add_sub_unary.py
CHANGED
@@ -26,6 +26,13 @@ class AddSubUnaryExpression:
         self.operator = operator
         self.number = number
 
+    def children(self) -> list[Expression]:
+        """Returns a list of child expressions."""
+        return [self.number]
+
+    def full_text(self) -> str:
+        return self.operator.text_slice.full_text
+
     @classmethod
     @lexer_reset
     def match(cls, parser: "Parser") -> "AddSubUnaryExpression | None":
@@ -53,6 +60,9 @@ class AddSubUnaryExpression:
             raise ValueError(msg)
         return AddSubUnaryExpression(operator=operator, number=number)
 
+    def position(self) -> tuple[int, int]:
+        return self.operator.text_slice.start, self.number.position()[1]
+
     def pprint(self) -> str:
         number = textwrap.indent(self.number.pprint(), " " * 12).lstrip()
         return f"""
@@ -60,13 +70,3 @@ Number (
     sign: {self.operator.text},
     number: {number},
 )""".strip()
-
-    def children(self) -> list[Expression]:
-        """Returns a list of child expressions."""
-        return [self.number]
-
-    def position(self) -> tuple[int, int]:
-        return self.operator.text_slice.start, self.number.position()[1]
-
-    def full_text(self) -> str:
-        return self.operator.text_slice.full_text
pbi_parsers/dax/exprs/array.py
CHANGED
@@ -26,13 +26,12 @@ class ArrayExpression:
         self.elements: list[Expression] = elements
         self.braces = braces
 
-    def pprint(self) -> str:
-        elements = ",\n".join(element.pprint() for element in self.elements)
-        elements = textwrap.indent(elements, " " * 14)[14:]
-        return f"""
-Array (
-    elements: {elements}
-) """.strip()
+    def children(self) -> list[Expression]:
+        """Returns a list of child expressions."""
+        return self.elements
+
+    def full_text(self) -> str:
+        return self.braces[0].text_slice.full_text
 
     @classmethod
     @lexer_reset
@@ -64,12 +63,13 @@ Array (
 
         return ArrayExpression(elements=elements, braces=(left_brace, right_brace))
 
-    def children(self) -> list[Expression]:
-        """Returns a list of child expressions."""
-        return self.elements
-
     def position(self) -> tuple[int, int]:
         return self.braces[0].text_slice.start, self.braces[1].text_slice.end
 
-    def full_text(self) -> str:
-        return self.braces[0].text_slice.full_text
+    def pprint(self) -> str:
+        elements = ",\n".join(element.pprint() for element in self.elements)
+        elements = textwrap.indent(elements, " " * 14)[14:]
+        return f"""
+Array (
+    elements: {elements}
+) """.strip()
pbi_parsers/dax/exprs/column.py
CHANGED
@@ -25,12 +25,12 @@ class ColumnExpression:
         self.table = table
         self.column = column
 
-    def pprint(self) -> str:
-        return f"""
-Column (
-    {self.table.text},
-    {self.column.text}
-)""".strip()
+    def children(self) -> list[Expression]:  # noqa: PLR6301
+        """Returns a list of child expressions."""
+        return []
+
+    def full_text(self) -> str:
+        return self.table.text_slice.full_text
 
     @classmethod
     @lexer_reset
@@ -45,12 +45,12 @@ Column (
             return None
         return ColumnExpression(table=table, column=column)
 
-    def children(self) -> list[Expression]:  # noqa: PLR6301
-        """Returns a list of child expressions."""
-        return []
-
     def position(self) -> tuple[int, int]:
         return self.table.text_slice.start, self.column.text_slice.end
 
-    def full_text(self) -> str:
-        return self.table.text_slice.full_text
+    def pprint(self) -> str:
+        return f"""
+Column (
+    {self.table.text},
+    {self.column.text}
+)""".strip()
pbi_parsers/dax/exprs/comparison.py
CHANGED
@@ -30,6 +30,13 @@ class ComparisonExpression:
         self.left = left
         self.right = right
 
+    def children(self) -> list[Expression]:
+        """Returns a list of child expressions."""
+        return [self.left, self.right]
+
+    def full_text(self) -> str:
+        return self.operator.text_slice.full_text
+
     @classmethod
     @lexer_reset
     def match(cls, parser: "Parser") -> "ComparisonExpression | None":
@@ -55,6 +62,9 @@ class ComparisonExpression:
             raise ValueError(msg)
         return ComparisonExpression(operator=operator, left=left_term, right=right_term)
 
+    def position(self) -> tuple[int, int]:
+        return self.left.position()[0], self.right.position()[1]
+
     def pprint(self) -> str:
         left_str = textwrap.indent(self.left.pprint(), " " * 10)[10:]
         right_str = textwrap.indent(self.right.pprint(), " " * 10)[10:]
@@ -64,13 +74,3 @@ Bool (
     left: {left_str},
     right: {right_str}
 )""".strip()
-
-    def children(self) -> list[Expression]:
-        """Returns a list of child expressions."""
-        return [self.left, self.right]
-
-    def position(self) -> tuple[int, int]:
-        return self.left.position()[0], self.right.position()[1]
-
-    def full_text(self) -> str:
-        return self.operator.text_slice.full_text
pbi_parsers/dax/exprs/concatenation.py
CHANGED
@@ -28,6 +28,13 @@ class ConcatenationExpression:
         self.left = left
         self.right = right
 
+    def children(self) -> list[Expression]:
+        """Returns a list of child expressions."""
+        return [self.left, self.right]
+
+    def full_text(self) -> str:
+        return self.operator.text_slice.full_text
+
     @classmethod
     @lexer_reset
     def match(cls, parser: "Parser") -> "ConcatenationExpression | None":
@@ -53,6 +60,9 @@ class ConcatenationExpression:
             right=right_term,
         )
 
+    def position(self) -> tuple[int, int]:
+        return self.left.position()[0], self.right.position()[1]
+
     def pprint(self) -> str:
         left_str = textwrap.indent(self.left.pprint(), " " * 10).lstrip()
         right_str = textwrap.indent(self.right.pprint(), " " * 10).lstrip()
@@ -61,13 +71,3 @@ Concat (
     left: {left_str},
     right: {right_str}
 )""".strip()
-
-    def children(self) -> list[Expression]:
-        """Returns a list of child expressions."""
-        return [self.left, self.right]
-
-    def position(self) -> tuple[int, int]:
-        return self.left.position()[0], self.right.position()[1]
-
-    def full_text(self) -> str:
-        return self.operator.text_slice.full_text
pbi_parsers/dax/exprs/div_mul.py
CHANGED
@@ -30,6 +30,13 @@ class DivMulExpression:
         self.left = left
         self.right = right
 
+    def children(self) -> list[Expression]:
+        """Returns a list of child expressions."""
+        return [self.left, self.right]
+
+    def full_text(self) -> str:
+        return self.operator.text_slice.full_text
+
     @classmethod
     @lexer_reset
     def match(cls, parser: "Parser") -> "DivMulExpression | None":
@@ -51,6 +58,9 @@ class DivMulExpression:
            raise ValueError(msg)
        return DivMulExpression(operator=operator, left=left_term, right=right_term)
 
+    def position(self) -> tuple[int, int]:
+        return self.left.position()[0], self.right.position()[1]
+
     def pprint(self) -> str:
         op_str = {
             TokenType.MULTIPLY_SIGN: "Mul",
@@ -63,13 +73,3 @@ class DivMulExpression:
     left: {left_str},
     right: {right_str}
 )""".strip()
-
-    def children(self) -> list[Expression]:
-        """Returns a list of child expressions."""
-        return [self.left, self.right]
-
-    def position(self) -> tuple[int, int]:
-        return self.left.position()[0], self.right.position()[1]
-
-    def full_text(self) -> str:
-        return self.operator.text_slice.full_text
pbi_parsers/dax/exprs/exponent.py
CHANGED
@@ -26,6 +26,13 @@ class ExponentExpression:
         self.base = base
         self.power = power
 
+    def children(self) -> list[Expression]:
+        """Returns a list of child expressions."""
+        return [self.base, self.power]
+
+    def full_text(self) -> str:
+        return self.base.full_text()
+
     @classmethod
     @lexer_reset
     def match(cls, parser: "Parser") -> "ExponentExpression | None":
@@ -47,6 +54,9 @@ class ExponentExpression:
            raise ValueError(msg)
        return ExponentExpression(base=base, power=power)
 
+    def position(self) -> tuple[int, int]:
+        return self.base.position()[0], self.power.position()[1]
+
     def pprint(self) -> str:
         base_str = textwrap.indent(self.base.pprint(), " " * 10).lstrip()
         power_str = textwrap.indent(self.power.pprint(), " " * 10).lstrip()
@@ -55,13 +65,3 @@ Exponent (
     base: {base_str},
     power: {power_str}
 )""".strip()
-
-    def children(self) -> list[Expression]:
-        """Returns a list of child expressions."""
-        return [self.base, self.power]
-
-    def position(self) -> tuple[int, int]:
-        return self.base.position()[0], self.power.position()[1]
-
-    def full_text(self) -> str:
-        return self.base.full_text()
pbi_parsers/dax/exprs/function.py
CHANGED
@@ -29,30 +29,15 @@ class FunctionExpression:
         self.args = args
         self.parens = parens
 
-    def pprint(self) -> str:
-        args = ",\n".join(arg.pprint() for arg in self.args)
-        args = textwrap.indent(args, " " * 10)[10:]
-        return f"""
-Function (
-    name: {"".join(x.text for x in self.name_parts)},
-    args: {args}
-) """.strip()
-
-    @classmethod
-    def _match_function_name(cls, parser: "Parser") -> list[Token] | None:
-        name_parts = [parser.consume()]
-        if name_parts[0].tok_type != TokenType.UNQUOTED_IDENTIFIER:
-            return None
+    def children(self) -> list[Expression]:
+        """Returns a list of child expressions."""
+        return self.args
 
-        while parser.peek().tok_type != TokenType.LEFT_PAREN:
-            period, name = parser.consume(), parser.consume()
-            if name.tok_type != TokenType.UNQUOTED_IDENTIFIER:
-                return None
-            if period.tok_type != TokenType.PERIOD:
-                return None
-            name_parts.extend((period, name))
+    def full_text(self) -> str:
+        return self.parens[0].text_slice.full_text
 
-        return name_parts
+    def function_name(self) -> str:
+        return "".join(x.text for x in self.name_parts)
 
     @classmethod
     @lexer_reset
@@ -88,15 +73,30 @@ Function (
 
         return FunctionExpression(name_parts=name_parts, args=args, parens=(left_paren, right_paren))
 
-    def function_name(self) -> str:
-        return "".join(x.text for x in self.name_parts)
-
-    def children(self) -> list[Expression]:
-        """Returns a list of child expressions."""
-        return self.args
-
     def position(self) -> tuple[int, int]:
         return self.parens[0].text_slice.start, self.parens[1].text_slice.end
 
-    def full_text(self) -> str:
-        return self.parens[0].text_slice.full_text
+    def pprint(self) -> str:
+        args = ",\n".join(arg.pprint() for arg in self.args)
+        args = textwrap.indent(args, " " * 10)[10:]
+        return f"""
+Function (
+    name: {"".join(x.text for x in self.name_parts)},
+    args: {args}
+) """.strip()
+
+    @classmethod
+    def _match_function_name(cls, parser: "Parser") -> list[Token] | None:
+        name_parts = [parser.consume()]
+        if name_parts[0].tok_type != TokenType.UNQUOTED_IDENTIFIER:
+            return None
+
+        while parser.peek().tok_type != TokenType.LEFT_PAREN:
+            period, name = parser.consume(), parser.consume()
+            if name.tok_type != TokenType.UNQUOTED_IDENTIFIER:
+                return None
+            if period.tok_type != TokenType.PERIOD:
+                return None
+            name_parts.extend((period, name))
+
+        return name_parts
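Note: _match_function_name accepts dotted function names by consuming PERIOD/UNQUOTED_IDENTIFIER pairs until it reaches the opening parenthesis. A standalone sketch of that rule over a plain (kind, text) token list, for illustration only (not the package's parser):

def match_dotted_name(tokens: list[tuple[str, str]]) -> list[str] | None:
    # IDENT (PERIOD IDENT)* followed by LEFT_PAREN; returns the name parts or None.
    if not tokens or tokens[0][0] != "UNQUOTED_IDENTIFIER":
        return None
    parts = [tokens[0][1]]
    i = 1
    while i < len(tokens) and tokens[i][0] != "LEFT_PAREN":
        if i + 1 >= len(tokens) or tokens[i][0] != "PERIOD" or tokens[i + 1][0] != "UNQUOTED_IDENTIFIER":
            return None
        parts.extend([".", tokens[i + 1][1]])
        i += 2
    return parts


assert match_dotted_name([("UNQUOTED_IDENTIFIER", "CALCULATE"), ("LEFT_PAREN", "(")]) == ["CALCULATE"]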
pbi_parsers/dax/exprs/hierarchy.py
CHANGED
@@ -27,13 +27,12 @@ class HierarchyExpression:
         self.column = column
         self.level = level
 
-    def pprint(self) -> str:
-        return f"""
-Hierarchy (
-    table: {self.table.text},
-    column: {self.column.text},
-    level: {self.level.text}
-)""".strip()
+    def children(self) -> list[Expression]:  # noqa: PLR6301
+        """Returns a list of child expressions."""
+        return []
+
+    def full_text(self) -> str:
+        return self.table.text_slice.full_text
 
     @classmethod
     @lexer_reset
@@ -57,12 +56,13 @@ Hierarchy (
             return None
         return HierarchyExpression(table=table, column=column, level=level)
 
-    def children(self) -> list[Expression]:  # noqa: PLR6301
-        """Returns a list of child expressions."""
-        return []
-
     def position(self) -> tuple[int, int]:
         return self.table.text_slice.start, self.level.text_slice.end
 
-    def full_text(self) -> str:
-        return self.table.text_slice.full_text
+    def pprint(self) -> str:
+        return f"""
+Hierarchy (
+    table: {self.table.text},
+    column: {self.column.text},
+    level: {self.level.text}
+)""".strip()
pbi_parsers/dax/exprs/identifier.py
CHANGED
@@ -23,9 +23,12 @@ class IdentifierExpression:
    def __init__(self, name: Token) -> None:
        self.name = name
 
-    def pprint(self) -> str:
-        return f"""
-Identifier ({self.name.text})""".strip()
+    def children(self) -> list[Expression]:  # noqa: PLR6301
+        """Returns a list of child expressions."""
+        return []
+
+    def full_text(self) -> str:
+        return self.name.text_slice.full_text
 
     @classmethod
     @lexer_reset
@@ -35,12 +38,9 @@ Identifier ({self.name.text})""".strip()
             return None
         return IdentifierExpression(name=name)
 
-    def children(self) -> list[Expression]:  # noqa: PLR6301
-        """Returns a list of child expressions."""
-        return []
-
     def position(self) -> tuple[int, int]:
         return self.name.text_slice.start, self.name.text_slice.end
 
-    def full_text(self) -> str:
-        return self.name.text_slice.full_text
+    def pprint(self) -> str:
+        return f"""
+Identifier ({self.name.text})""".strip()
|