pbi-parsers 0.7.12__py3-none-any.whl → 0.7.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pbi_parsers/__init__.py +1 -1
- pbi_parsers/base/lexer.py +32 -33
- pbi_parsers/base/tokens.py +18 -13
- pbi_parsers/dax/exprs/_base.py +13 -13
- pbi_parsers/dax/exprs/_utils.py +11 -0
- pbi_parsers/dax/exprs/add_sub.py +10 -10
- pbi_parsers/dax/exprs/add_sub_unary.py +10 -10
- pbi_parsers/dax/exprs/array.py +13 -13
- pbi_parsers/dax/exprs/column.py +12 -12
- pbi_parsers/dax/exprs/comparison.py +10 -10
- pbi_parsers/dax/exprs/concatenation.py +10 -10
- pbi_parsers/dax/exprs/div_mul.py +10 -10
- pbi_parsers/dax/exprs/exponent.py +10 -10
- pbi_parsers/dax/exprs/function.py +31 -31
- pbi_parsers/dax/exprs/hierarchy.py +13 -13
- pbi_parsers/dax/exprs/identifier.py +9 -9
- pbi_parsers/dax/exprs/ins.py +10 -10
- pbi_parsers/dax/exprs/keyword.py +9 -9
- pbi_parsers/dax/exprs/literal_number.py +9 -8
- pbi_parsers/dax/exprs/literal_string.py +8 -8
- pbi_parsers/dax/exprs/logical.py +10 -10
- pbi_parsers/dax/exprs/measure.py +8 -8
- pbi_parsers/dax/exprs/none.py +9 -9
- pbi_parsers/dax/exprs/parens.py +12 -12
- pbi_parsers/dax/exprs/returns.py +17 -17
- pbi_parsers/dax/exprs/table.py +11 -11
- pbi_parsers/dax/exprs/variable.py +14 -14
- pbi_parsers/dax/formatter.py +8 -8
- pbi_parsers/dax/lexer.py +97 -97
- pbi_parsers/dax/main.py +16 -16
- pbi_parsers/dax/parser.py +8 -8
- pbi_parsers/dax/utils.py +19 -19
- {pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/METADATA +13 -1
- {pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/RECORD +36 -35
- pbi_parsers-0.7.20.dist-info/licenses/LICENSE +21 -0
- {pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/WHEEL +0 -0
pbi_parsers/dax/lexer.py
CHANGED

```diff
@@ -7,9 +7,6 @@ WHITESPACE = ["\n", "\r", "\t", " ", "\f", "\v"]
 
 
 class Lexer(BaseLexer):
-    def scan(self) -> tuple[Token]:
-        return super().scan()  # type: ignore[override]
-
     def create_token(self, tok_type: TokenType, start_pos: int) -> Token:
         """Create a new token with the given type and text."""
         text_slice = TextSlice(
@@ -19,6 +16,51 @@ class Lexer(BaseLexer):
         )
         return Token(tok_type=tok_type, text_slice=text_slice)
 
+    def scan(self) -> tuple[Token]:
+        return super().scan()  # type: ignore[override]
+
+    def scan_helper(self) -> Token:
+        start_pos: int = self.current_position
+
+        if not self.peek():
+            return Token()
+
+        for candidate_func in (
+            self._match_in,
+            self._match_keyword,
+            self._match_whitespace,
+            self._match_var,
+            self._match_return,
+            self._match_period,
+            self._match_number_literal,
+            self._match_unquoted_identifier,
+            self._match_single_quoted_identifier,
+            self._match_bracketed_identifier,
+            self._match_string_literal,
+            self._match_single_line_comment,
+            self._match_multi_line_comment,
+            self._match_token,
+        ):
+            match_candidate = candidate_func(start_pos)
+            if match_candidate:
+                return match_candidate
+
+        msg = f"Unexpected character: {self.peek()} at position {self.current_position}"
+        raise ValueError(msg)
+
+    def _match_bracketed_identifier(self, start_pos: int) -> Token | None:
+        if self.match("["):
+            while self.match(lambda c: c != "]"):
+                pass
+            if self.match("]"):
+                return self.create_token(
+                    tok_type=TokenType.BRACKETED_IDENTIFIER,
+                    start_pos=start_pos,
+                )
+            msg = "Unterminated bracketed identifier"
+            raise ValueError(msg)
+        return None
+
     def _match_in(self, start_pos: int) -> Token | None:
         if self.match(
             "in ",
@@ -41,28 +83,29 @@ class Lexer(BaseLexer):
             )
         return None
 
-    def _match_whitespace(self, start_pos: int) -> Token | None:
-        if self.match(lambda c: c in WHITESPACE):
-            while self.match(lambda c: c in WHITESPACE):
-                pass
-            return self.create_token(
-                tok_type=TokenType.WHITESPACE,
-                start_pos=start_pos,
-            )
-        return None
+    def _match_multi_line_comment(self, start_pos: int) -> Token | None:
+        if not self.match("/*"):
+            return None
 
-    def _match_var(self, start_pos: int) -> Token | None:
-        if self.match("var", case_insensitive=True):
-            return self.create_token(
-                tok_type=TokenType.VARIABLE,
-                start_pos=start_pos,
-            )
-        return None
+        while not self.at_end():
+            if self.match("*/", chunk=2):
+                return self.create_token(
+                    tok_type=TokenType.MULTI_LINE_COMMENT,
+                    start_pos=start_pos,
+                )
+            self.advance()
 
-    def _match_return(self, start_pos: int) -> Token | None:
-        if self.match("return", case_insensitive=True):
+        msg = "Unterminated multi-line comment"
+        raise ValueError(msg)
+
+    def _match_number_literal(self, start_pos: int) -> Token | None:
+        if self.match(
+            lambda c: c.isdigit() or c == ".",
+        ):  # must come before unquoted identifier to avoid conflict
+            while self.match(lambda c: c.isdigit() or c in {".", "e", "E"}):
+                pass
             return self.create_token(
-                tok_type=TokenType.RETURN,
+                tok_type=TokenType.NUMBER_LITERAL,
                 start_pos=start_pos,
             )
         return None
@@ -76,24 +119,20 @@ class Lexer(BaseLexer):
             )
         return None
 
-    def _match_number_literal(self, start_pos: int) -> Token | None:
-        if self.match(
-            lambda c: c.isdigit() or c == ".",
-        ):  # must come before unquoted identifier to avoid conflict
-            while self.match(lambda c: c.isdigit() or c in {".", "e", "E"}):
-                pass
+    def _match_return(self, start_pos: int) -> Token | None:
+        if self.match("return", case_insensitive=True):
             return self.create_token(
-                tok_type=TokenType.NUMBER_LITERAL,
+                tok_type=TokenType.RETURN,
                 start_pos=start_pos,
             )
         return None
 
-    def _match_unquoted_identifier(self, start_pos: int) -> Token | None:
-        if self.match(lambda c: c.isalnum() or c == "_"):
-            while self.match(lambda c: c.isalnum() or c == "_"):
+    def _match_single_line_comment(self, start_pos: int) -> Token | None:
+        if self.match("//") or self.match("--"):
+            while self.match(lambda c: c not in {"\n", ""}):
                 pass
             return self.create_token(
-                tok_type=TokenType.UNQUOTED_IDENTIFIER,
+                tok_type=TokenType.SINGLE_LINE_COMMENT,
                 start_pos=start_pos,
             )
         return None
@@ -111,19 +150,6 @@ class Lexer(BaseLexer):
             raise ValueError(msg)
         return None
 
-    def _match_bracketed_identifier(self, start_pos: int) -> Token | None:
-        if self.match("["):
-            while self.match(lambda c: c != "]"):
-                pass
-            if self.match("]"):
-                return self.create_token(
-                    tok_type=TokenType.BRACKETED_IDENTIFIER,
-                    start_pos=start_pos,
-                )
-            msg = "Unterminated bracketed identifier"
-            raise ValueError(msg)
-        return None
-
     def _match_string_literal(self, start_pos: int) -> Token | None:
         if self.match('"'):
             while self.match(lambda c: c != '"') or self.match('""'):
@@ -137,31 +163,6 @@ class Lexer(BaseLexer):
             raise ValueError(msg)
         return None
 
-    def _match_single_line_comment(self, start_pos: int) -> Token | None:
-        if self.match("//") or self.match("--"):
-            while self.match(lambda c: c not in {"\n", ""}):
-                pass
-            return self.create_token(
-                tok_type=TokenType.SINGLE_LINE_COMMENT,
-                start_pos=start_pos,
-            )
-        return None
-
-    def _match_multi_line_comment(self, start_pos: int) -> Token | None:
-        if not self.match("/*"):
-            return None
-
-        while not self.at_end():
-            if self.match("*/", chunk=2):
-                return self.create_token(
-                    tok_type=TokenType.MULTI_LINE_COMMENT,
-                    start_pos=start_pos,
-                )
-            self.advance()
-
-        msg = "Unterminated multi-line comment"
-        raise ValueError(msg)
-
     def _match_token(self, start_pos: int) -> Token | None:
         fixed_character_mapping = {
             "(": TokenType.LEFT_PAREN,
@@ -192,31 +193,30 @@ class Lexer(BaseLexer):
             return self.create_token(tok_type=token_type, start_pos=start_pos)
         return None
 
-    def scan_helper(self) -> Token:
-        start_pos: int = self.current_position
-
-        if not self.peek():
-            return Token()
+    def _match_unquoted_identifier(self, start_pos: int) -> Token | None:
+        if self.match(lambda c: c.isalnum() or c == "_"):
+            while self.match(lambda c: c.isalnum() or c == "_"):
+                pass
+            return self.create_token(
+                tok_type=TokenType.UNQUOTED_IDENTIFIER,
+                start_pos=start_pos,
+            )
+        return None
 
-        for candidate_func in (
-            self._match_in,
-            self._match_keyword,
-            self._match_whitespace,
-            self._match_var,
-            self._match_return,
-            self._match_period,
-            self._match_number_literal,
-            self._match_unquoted_identifier,
-            self._match_single_quoted_identifier,
-            self._match_bracketed_identifier,
-            self._match_string_literal,
-            self._match_single_line_comment,
-            self._match_multi_line_comment,
-            self._match_token,
-        ):
-            match_candidate = candidate_func(start_pos)
-            if match_candidate:
-                return match_candidate
+    def _match_var(self, start_pos: int) -> Token | None:
+        if self.match("var", case_insensitive=True):
+            return self.create_token(
+                tok_type=TokenType.VARIABLE,
+                start_pos=start_pos,
+            )
+        return None
 
-        msg = f"Unexpected character: {self.peek()} at position {self.current_position}"
-        raise ValueError(msg)
+    def _match_whitespace(self, start_pos: int) -> Token | None:
+        if self.match(lambda c: c in WHITESPACE):
+            while self.match(lambda c: c in WHITESPACE):
+                pass
+            return self.create_token(
+                tok_type=TokenType.WHITESPACE,
+                start_pos=start_pos,
+            )
+        return None
```
pbi_parsers/dax/main.py
CHANGED

```diff
@@ -7,6 +7,22 @@ from .parser import Parser
 from .tokens import Token, TokenType
 
 
+def format_expression(text: str) -> str:
+    """Formats a DAX expression string into a more readable format.
+
+    Args:
+        text (str): The DAX expression to format.
+
+    Returns:
+        str: The formatted DAX expression.
+
+    """
+    ast = to_ast(text)
+    if ast is None:
+        return text
+    return Formatter(ast).format()
+
+
 def remove_non_executing_tokens(tokens: Iterable[Token]) -> list[Token]:
     """Removes tokens that are not executed in the DAX expression.
 
@@ -45,19 +61,3 @@ def to_ast(text: str) -> Expression | None:
     tokens = remove_non_executing_tokens(tokens)
     parser = Parser(tokens)
     return parser.to_ast()
-
-
-def format_expression(text: str) -> str:
-    """Formats a DAX expression string into a more readable format.
-
-    Args:
-        text (str): The DAX expression to format.
-
-    Returns:
-        str: The formatted DAX expression.
-
-    """
-    ast = to_ast(text)
-    if ast is None:
-        return text
-    return Formatter(ast).format()
```
pbi_parsers/dax/parser.py
CHANGED

```diff
@@ -18,6 +18,14 @@ class Parser:
         self.index = 0
         self.cache = {}
 
+    def consume(self) -> Token:
+        """Returns the next token and advances the index."""
+        if self.index >= len(self.__tokens):
+            return EOF_TOKEN
+        ret = self.__tokens[self.index]
+        self.index += 1
+        return ret
+
     def peek(self, forward: int = 0) -> Token:
         """Peek at the next token without advancing the index.
 
@@ -56,11 +64,3 @@ class Parser:
             raise ValueError(msg)
         assert self.peek().tok_type == TokenType.EOF
         return ret
-
-    def consume(self) -> Token:
-        """Returns the next token and advances the index."""
-        if self.index >= len(self.__tokens):
-            return EOF_TOKEN
-        ret = self.__tokens[self.index]
-        self.index += 1
-        return ret
```
pbi_parsers/dax/utils.py
CHANGED

```diff
@@ -50,25 +50,6 @@ class Context:
     def __repr__(self) -> str:
         return self.to_console()
 
-    @staticmethod
-    def _get_highlighted_text(
-        lines: list[str],
-        position: tuple[int, int],
-    ) -> dict[int, tuple[int, int]]:
-        highlight_line_dict: dict[int, tuple[int, int]] = {}
-
-        remaining_start, remaining_end = position
-        for i, line in enumerate(lines):
-            if len(line) > remaining_start and remaining_end > 0:
-                buffer = len(str(i)) + 3
-                highlight_line_dict[i] = (
-                    buffer + remaining_start,
-                    buffer + min(remaining_end, len(line)),
-                )
-            remaining_start -= len(line) + 1  # +1 for the newline character
-            remaining_end -= len(line) + 1
-        return highlight_line_dict
-
     def to_console(self, context_lines: int = 2) -> str:
         """Render the context for console output."""
         lines = self.full_text.split("\n")
@@ -107,6 +88,25 @@ class Context:
             Fore=Fore,
         )
 
+    @staticmethod
+    def _get_highlighted_text(
+        lines: list[str],
+        position: tuple[int, int],
+    ) -> dict[int, tuple[int, int]]:
+        highlight_line_dict: dict[int, tuple[int, int]] = {}
+
+        remaining_start, remaining_end = position
+        for i, line in enumerate(lines):
+            if len(line) > remaining_start and remaining_end > 0:
+                buffer = len(str(i)) + 3
+                highlight_line_dict[i] = (
+                    buffer + remaining_start,
+                    buffer + min(remaining_end, len(line)),
+                )
+            remaining_start -= len(line) + 1  # +1 for the newline character
+            remaining_end -= len(line) + 1
+        return highlight_line_dict
+
 
 def highlight_section(node: Expression | Token | list[Token] | list[Expression]) -> Context:
     if isinstance(node, list):
```
{pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/METADATA
CHANGED

```diff
@@ -1,12 +1,17 @@
 Metadata-Version: 2.4
 Name: pbi_parsers
-Version: 0.7.12
+Version: 0.7.20
 Summary: Power BI lexer, parsers, and formatters for DAX and M (Power Query) languages
+License-File: LICENSE
 Requires-Python: >=3.11.0
+Requires-Dist: colorama>=0.4.6
 Requires-Dist: jinja2>=3.1.6
 Provides-Extra: dev
 Requires-Dist: build>=1.2.2; extra == 'dev'
+Requires-Dist: coverage; extra == 'dev'
+Requires-Dist: coveralls; extra == 'dev'
 Requires-Dist: pre-commit>=3.8.0; extra == 'dev'
+Requires-Dist: pytest; extra == 'dev'
 Requires-Dist: ruff>=0.12.7; extra == 'dev'
 Provides-Extra: docs
 Requires-Dist: mkdocs-material>=9.6.16; extra == 'docs'
@@ -16,6 +21,13 @@ Description-Content-Type: text/markdown
 
 # Overview
 
+[![…]](https://pepy.tech/projects/pbi-parsers)
+![…]
+[![…]](https://coveralls.io/github/douglassimonsen/pbi_parsers?branch=main)
+![…]
+[![…]](https://app.fossa.com/projects/git%2Bgithub.com%2Fdouglassimonsen%2Fpbi_parsers?ref=badge_shield&issueType=license)
+[![…]](https://app.fossa.com/projects/git%2Bgithub.com%2Fdouglassimonsen%2Fpbi_parsers?ref=badge_shield&issueType=security)
+
 Based on [Crafting Interpreters](https://timothya.com/pdfs/crafting-interpreters.pdf). Library provides lexers, parsers, and formatters for DAX and Power Query (M) languages. Designed to support code introspection and analysis, not execution. This enables developement of [ruff](https://github.com/astral-sh/ruff)-equivalent tools for DAX and Power Query. It also enables extracting metadata from DAX and Power Query code, such PQ source types (Excel, SQL, etc.) and DAX lineage dependencies.
 
 For more information, see the [docs](https://douglassimonsen.github.io/pbi_parsers/)
```

(The badge image URLs were stripped during extraction; only the link targets survive, marked `…` above.)
{pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/RECORD
CHANGED

```diff
@@ -1,39 +1,39 @@
-pbi_parsers/__init__.py,sha256=…
+pbi_parsers/__init__.py,sha256=Rqu3rm5WOjscQ2G4HTN4n5fZAbp90nFZHbH4IE2WbAY,92
 pbi_parsers/base/__init__.py,sha256=U7QpzFFD9A4wK3ZHin6xg5fPgTca0y5KC-O7nrZ-flM,115
-pbi_parsers/base/lexer.py,sha256=…
-pbi_parsers/base/tokens.py,sha256=…
+pbi_parsers/base/lexer.py,sha256=5iOkdYzJ9wGwz7I4rDlc7slrRNUPzi0oFFkDxN1d62M,4180
+pbi_parsers/base/tokens.py,sha256=slIVl4673xXomqHMgrn1ApHs8YRvcC2yQgs1zfSpe1U,2220
 pbi_parsers/dax/__init__.py,sha256=w8tfYFRwfjndq-QNnYQO3cu8fri4-OlG-edxUAKunF4,479
-pbi_parsers/dax/formatter.py,sha256=…
-pbi_parsers/dax/lexer.py,sha256=…
-pbi_parsers/dax/main.py,sha256=…
-pbi_parsers/dax/parser.py,sha256=…
+pbi_parsers/dax/formatter.py,sha256=jOFnwcgQGIzsmi5sfkKoB_pFEGjDPd8E_pwMPwudmy4,7674
+pbi_parsers/dax/lexer.py,sha256=2_pERJSrSYd8VujOe9TxJa9R2Ex8mvP-bCotH7uVBZY,8025
+pbi_parsers/dax/main.py,sha256=FG35XCAPEooXoJShSgOnmQ0py-h_MPtOfnLpQWy61is,1657
+pbi_parsers/dax/parser.py,sha256=QLKrIBcxZ26TGhTHpeKcTGEHEHUDLC6IgpxxrdJzdek,1821
 pbi_parsers/dax/tokens.py,sha256=nY1laCbL8vwALpJ4jcd8k4lAscqOwqdw3dFuj4_KKVk,1234
-pbi_parsers/dax/utils.py,sha256=…
+pbi_parsers/dax/utils.py,sha256=OURPa-b6Ldn0_KKXPdLIPA3Zdc12OfbbFd2X5SocCek,4402
 pbi_parsers/dax/exprs/__init__.py,sha256=OUfiXzZYp5HkTPE9x595MMxpsgG1IvsED8p8spAKZuk,3432
-pbi_parsers/dax/exprs/_base.py,sha256=…
-pbi_parsers/dax/exprs/_utils.py,sha256=…
-pbi_parsers/dax/exprs/add_sub.py,sha256=…
-pbi_parsers/dax/exprs/add_sub_unary.py,sha256=…
-pbi_parsers/dax/exprs/array.py,sha256=…
-pbi_parsers/dax/exprs/column.py,sha256=…
-pbi_parsers/dax/exprs/comparison.py,sha256=…
-pbi_parsers/dax/exprs/concatenation.py,sha256=…
-pbi_parsers/dax/exprs/div_mul.py,sha256=…
-pbi_parsers/dax/exprs/exponent.py,sha256=…
-pbi_parsers/dax/exprs/function.py,sha256=…
-pbi_parsers/dax/exprs/hierarchy.py,sha256=…
-pbi_parsers/dax/exprs/identifier.py,sha256=…
-pbi_parsers/dax/exprs/ins.py,sha256=…
-pbi_parsers/dax/exprs/keyword.py,sha256=…
-pbi_parsers/dax/exprs/literal_number.py,sha256=…
-pbi_parsers/dax/exprs/literal_string.py,sha256=…
-pbi_parsers/dax/exprs/logical.py,sha256=…
-pbi_parsers/dax/exprs/measure.py,sha256=…
-pbi_parsers/dax/exprs/none.py,sha256=…
-pbi_parsers/dax/exprs/parens.py,sha256=…
-pbi_parsers/dax/exprs/returns.py,sha256=…
-pbi_parsers/dax/exprs/table.py,sha256=…
-pbi_parsers/dax/exprs/variable.py,sha256=…
+pbi_parsers/dax/exprs/_base.py,sha256=KIlP2AxtZVJGy49ANZfotYnBKt9-EDKOuTx2k48k9cc,1634
+pbi_parsers/dax/exprs/_utils.py,sha256=BxxRCtsqpL9t330ZfBnkm1lq7B_ejAoG89Ot2bLYJig,2123
+pbi_parsers/dax/exprs/add_sub.py,sha256=O-1DGXr4F4WWG1PyL1xra_pFR0ZxFo-_y-e6SioHrQc,2241
+pbi_parsers/dax/exprs/add_sub_unary.py,sha256=OhLF_AFxatvbI7X_Tj3_wQ0gjsnmuwkMkmcCzsECsK4,2003
+pbi_parsers/dax/exprs/array.py,sha256=-bt7o1aBaa7svW0ohQYIeRQxkw4gRz-s8RwO0EMPD6I,2336
+pbi_parsers/dax/exprs/column.py,sha256=A1CC-RU7ysXXlmtr5MMUiCuPkNQH3dRXld5Hj_l0poc,1500
+pbi_parsers/dax/exprs/comparison.py,sha256=GEZxSFDkNlD0MaEtvcvtHyquQ4BJj387a74zd6T5D8o,2217
+pbi_parsers/dax/exprs/concatenation.py,sha256=ECjBISCPTNWe6ctBP4DyWqBpUOKsItMqYX-SRvbbpB0,2153
+pbi_parsers/dax/exprs/div_mul.py,sha256=sJFmvRFc_dNj1wF31HHDtqNhY3dFnkAKXqg8WikNjkk,2316
+pbi_parsers/dax/exprs/exponent.py,sha256=Q3hYnuxC2nPC6eThlFeqYVZwiDsjhvwfxsakmPMkaOc,1919
+pbi_parsers/dax/exprs/function.py,sha256=jU5YZWN6Ss6_EhnMWuFZRl7OUVSUbVGIEoiKf7V_-eo,3518
+pbi_parsers/dax/exprs/hierarchy.py,sha256=FrswrQRXywdXLl_WO-U9EcjJlW3Pt2QUbPe9Wrqs-j8,1853
+pbi_parsers/dax/exprs/identifier.py,sha256=qpeExFboV-lFibrg7KfgV8zBC1iSR1T2xM0y5qxLzXw,1151
+pbi_parsers/dax/exprs/ins.py,sha256=YZUSdr4uYh806IEaKlcnSZVHeF_8rwkw1tAyWDWNDPM,1913
+pbi_parsers/dax/exprs/keyword.py,sha256=teXIai1kBnjtGax1vxCDg2inKvbqE3YaZv9LUSCdZSQ,1803
+pbi_parsers/dax/exprs/literal_number.py,sha256=C-S-e9azT2JuezHLuQlSPlBnRQOTOnBGOQDnTIIoInc,1204
+pbi_parsers/dax/exprs/literal_string.py,sha256=rJPnQ3knD7KDbBb9aWNH_O7A45rTP6QDmSCdU1FSnvA,1188
+pbi_parsers/dax/exprs/logical.py,sha256=3m0N1A4VjO-uOROEuaUnXqLGwPOliuEx2ty_SB81qMQ,2197
+pbi_parsers/dax/exprs/measure.py,sha256=IGmpA46agktmk8YJYDMLMXjl-6PqZ8yuTQVZ5R8v_n8,1143
+pbi_parsers/dax/exprs/none.py,sha256=bBP356B1ya8XCynIq1qwwEC0zrXZphSoff_8xq5sfSU,1018
+pbi_parsers/dax/exprs/parens.py,sha256=LYtlPrhvxc7x9YnKG_5d4fqiVW7rzfvTkEuTNco800s,1927
+pbi_parsers/dax/exprs/returns.py,sha256=YgEJ71-0D-hKLtiTuYjYAN15L07PJcCuNpL6W0BlyRY,2358
+pbi_parsers/dax/exprs/table.py,sha256=YXzKdGd97sIOoMSejV3pEFTjJ_zURen7d1id-XCn8hY,1185
+pbi_parsers/dax/exprs/variable.py,sha256=8jQZiHbzHYNcWMD33228fNvOMWphBJDM2EWApWzVoRA,1836
 pbi_parsers/pq/__init__.py,sha256=zYkl7_XQwBeNLAucRd0vgbIHO9kcnFaXy-zhgZdQ7xc,359
 pbi_parsers/pq/formatter.py,sha256=gcqj_aP8o5V10ULi5hdGhy3aAOy829jTKAfzH4mZewA,358
 pbi_parsers/pq/lexer.py,sha256=YOo4chz1N06FLO7cU4-wSoemIzfwG30NeUSJhJB-yOE,8093
@@ -73,6 +73,7 @@ pbi_parsers/pq/exprs/statement.py,sha256=JSg48pGAU3Ka2pt4lzVsYlVOqGeF_ARGm8Ajf0l
 pbi_parsers/pq/exprs/try_expr.py,sha256=UcnqfA-t9S1LVrKqeNUT8n4JJcO-ZQZoJrxAdjJ-GMA,1692
 pbi_parsers/pq/exprs/type_expr.py,sha256=hH5ubrIJaxwQsopNJHUZ4ByS1rHEgv2Tf8ocYqSukXM,2570
 pbi_parsers/pq/exprs/variable.py,sha256=wp4t0QHIGA264sXnWp7XVe1H8MJzMIOaoLNBQe-dfNk,1602
-pbi_parsers-0.7.12.dist-info/METADATA,sha256=…
-pbi_parsers-0.7.12.dist-info/WHEEL,sha256=…
-pbi_parsers-0.7.12.dist-info/RECORD,,
+pbi_parsers-0.7.20.dist-info/METADATA,sha256=Ktv-4OMg0GWMMRjX43nxVJuexxO2EuTugZ_m_UaNmWg,2762
+pbi_parsers-0.7.20.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pbi_parsers-0.7.20.dist-info/licenses/LICENSE,sha256=Sn0IfXOE4B0iL9lZXmGmRuTGyJeCtefxcfws0bLjp2g,1072
+pbi_parsers-0.7.20.dist-info/RECORD,,
```

(The removed 0.7.12 hashes were truncated in the source and are marked `…`.)
pbi_parsers-0.7.20.dist-info/licenses/LICENSE
ADDED

```diff
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 douglassimonsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
```
{pbi_parsers-0.7.12.dist-info → pbi_parsers-0.7.20.dist-info}/WHEEL
File without changes