pbi-parsers 0.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. pbi_parsers/__init__.py +9 -0
  2. pbi_parsers/base/__init__.py +7 -0
  3. pbi_parsers/base/lexer.py +127 -0
  4. pbi_parsers/base/tokens.py +61 -0
  5. pbi_parsers/dax/__init__.py +22 -0
  6. pbi_parsers/dax/exprs/__init__.py +107 -0
  7. pbi_parsers/dax/exprs/_base.py +46 -0
  8. pbi_parsers/dax/exprs/_utils.py +45 -0
  9. pbi_parsers/dax/exprs/add_sub.py +73 -0
  10. pbi_parsers/dax/exprs/add_sub_unary.py +72 -0
  11. pbi_parsers/dax/exprs/array.py +75 -0
  12. pbi_parsers/dax/exprs/column.py +56 -0
  13. pbi_parsers/dax/exprs/comparison.py +76 -0
  14. pbi_parsers/dax/exprs/concatenation.py +73 -0
  15. pbi_parsers/dax/exprs/div_mul.py +75 -0
  16. pbi_parsers/dax/exprs/exponent.py +67 -0
  17. pbi_parsers/dax/exprs/function.py +102 -0
  18. pbi_parsers/dax/exprs/hierarchy.py +68 -0
  19. pbi_parsers/dax/exprs/identifier.py +46 -0
  20. pbi_parsers/dax/exprs/ins.py +67 -0
  21. pbi_parsers/dax/exprs/keyword.py +60 -0
  22. pbi_parsers/dax/exprs/literal_number.py +46 -0
  23. pbi_parsers/dax/exprs/literal_string.py +45 -0
  24. pbi_parsers/dax/exprs/logical.py +76 -0
  25. pbi_parsers/dax/exprs/measure.py +44 -0
  26. pbi_parsers/dax/exprs/none.py +30 -0
  27. pbi_parsers/dax/exprs/parens.py +61 -0
  28. pbi_parsers/dax/exprs/returns.py +76 -0
  29. pbi_parsers/dax/exprs/table.py +51 -0
  30. pbi_parsers/dax/exprs/variable.py +68 -0
  31. pbi_parsers/dax/formatter.py +215 -0
  32. pbi_parsers/dax/lexer.py +222 -0
  33. pbi_parsers/dax/main.py +63 -0
  34. pbi_parsers/dax/parser.py +66 -0
  35. pbi_parsers/dax/tokens.py +54 -0
  36. pbi_parsers/dax/utils.py +120 -0
  37. pbi_parsers/pq/__init__.py +17 -0
  38. pbi_parsers/pq/exprs/__init__.py +98 -0
  39. pbi_parsers/pq/exprs/_base.py +33 -0
  40. pbi_parsers/pq/exprs/_utils.py +31 -0
  41. pbi_parsers/pq/exprs/add_sub.py +59 -0
  42. pbi_parsers/pq/exprs/add_sub_unary.py +57 -0
  43. pbi_parsers/pq/exprs/and_or_expr.py +60 -0
  44. pbi_parsers/pq/exprs/array.py +53 -0
  45. pbi_parsers/pq/exprs/arrow.py +50 -0
  46. pbi_parsers/pq/exprs/column.py +42 -0
  47. pbi_parsers/pq/exprs/comparison.py +62 -0
  48. pbi_parsers/pq/exprs/concatenation.py +61 -0
  49. pbi_parsers/pq/exprs/div_mul.py +59 -0
  50. pbi_parsers/pq/exprs/each.py +41 -0
  51. pbi_parsers/pq/exprs/ellipsis_expr.py +28 -0
  52. pbi_parsers/pq/exprs/function.py +63 -0
  53. pbi_parsers/pq/exprs/identifier.py +77 -0
  54. pbi_parsers/pq/exprs/if_expr.py +70 -0
  55. pbi_parsers/pq/exprs/is_expr.py +54 -0
  56. pbi_parsers/pq/exprs/keyword.py +40 -0
  57. pbi_parsers/pq/exprs/literal_number.py +31 -0
  58. pbi_parsers/pq/exprs/literal_string.py +31 -0
  59. pbi_parsers/pq/exprs/meta.py +54 -0
  60. pbi_parsers/pq/exprs/negation.py +52 -0
  61. pbi_parsers/pq/exprs/none.py +22 -0
  62. pbi_parsers/pq/exprs/not_expr.py +39 -0
  63. pbi_parsers/pq/exprs/parens.py +43 -0
  64. pbi_parsers/pq/exprs/record.py +58 -0
  65. pbi_parsers/pq/exprs/row.py +54 -0
  66. pbi_parsers/pq/exprs/row_index.py +57 -0
  67. pbi_parsers/pq/exprs/statement.py +67 -0
  68. pbi_parsers/pq/exprs/try_expr.py +55 -0
  69. pbi_parsers/pq/exprs/type_expr.py +78 -0
  70. pbi_parsers/pq/exprs/variable.py +52 -0
  71. pbi_parsers/pq/formatter.py +13 -0
  72. pbi_parsers/pq/lexer.py +219 -0
  73. pbi_parsers/pq/main.py +63 -0
  74. pbi_parsers/pq/parser.py +65 -0
  75. pbi_parsers/pq/tokens.py +81 -0
  76. pbi_parsers-0.7.8.dist-info/METADATA +66 -0
  77. pbi_parsers-0.7.8.dist-info/RECORD +78 -0
  78. pbi_parsers-0.7.8.dist-info/WHEEL +4 -0
pbi_parsers/pq/lexer.py ADDED
@@ -0,0 +1,219 @@
+ import string
+
+ from pbi_parsers.base import BaseLexer
+ from pbi_parsers.base.tokens import TextSlice
+
+ from .tokens import Token, TokenType
+
+ WHITESPACE = ["\n", "\r", "\t", " ", "\f", "\v"]
+ KEYWORDS = ("null", "true", "false")
+ RESERVED_WORDS = (
+     ("type", TokenType.TYPE),
+     ("let", TokenType.LET),
+     ("if", TokenType.IF),
+     ("then", TokenType.THEN),
+     ("else", TokenType.ELSE),
+     ("each", TokenType.EACH),
+     ("meta", TokenType.META),
+     ("nullable", TokenType.NULLABLE),
+     ("try", TokenType.TRY),
+     ("otherwise", TokenType.OTHERWISE),
+     ("and", TokenType.AND),
+     ("or", TokenType.OR),
+     ("not", TokenType.NOT),
+     ("in", TokenType.IN),
+     ("is", TokenType.IS),
+     ("as", TokenType.AS),
+ )
+
+
+ class Lexer(BaseLexer):
+     def scan(self) -> tuple[Token]:
+         return super().scan()  # type: ignore[override]
+
+     def create_token(self, tok_type: TokenType, start_pos: int) -> Token:
+         """Create a new token with the given type and text."""
+         text_slice = TextSlice(
+             full_text=self.source,
+             start=start_pos,
+             end=self.current_position,
+         )
+         return Token(tok_type=tok_type, text_slice=text_slice)
+
+     def _match_type_literal(self, start_pos: int) -> Token | None:
+         for c in ("int64.type", "currency.type"):
+             if self.match(c, case_insensitive=True):
+                 return self.create_token(
+                     tok_type=TokenType.TYPE_LITERAL,
+                     start_pos=start_pos,
+                 )
+         return None
+
+     def _match_reserved_words(self, start_pos: int) -> Token | None:
+         for name, token_type in RESERVED_WORDS:
+             if self.match(name, case_insensitive=True):
+                 if not self.peek().isalpha():
+                     return self.create_token(tok_type=token_type, start_pos=start_pos)
+                 # if the next character is an alpha character, it is not a keyword
+                 # but an identifier, so we need to backtrack
+                 self.advance(-len(name))
+         return None
+
+     def _match_keyword(self, start_pos: int) -> Token | None:
+         for keyword in KEYWORDS:
+             if self.match(keyword, case_insensitive=True):
+                 return self.create_token(tok_type=TokenType.KEYWORD, start_pos=start_pos)
+         return None
+
+     def _match_hash_identifier(self, start_pos: int) -> Token | None:
+         if self.match('#"'):
+             while self.match(lambda c: c != '"') or self.match('""'):
+                 pass
+             if self.match('"'):
+                 return self.create_token(
+                     tok_type=TokenType.HASH_IDENTIFIER,
+                     start_pos=start_pos,
+                 )
+             msg = f"Unterminated string literal at positions: {start_pos} to {self.current_position}"
+             raise ValueError(msg)
+
+         if self.match("#"):
+             while self.match(lambda c: c in string.ascii_letters + string.digits + "_"):
+                 pass
+             return self.create_token(
+                 tok_type=TokenType.HASH_IDENTIFIER,
+                 start_pos=start_pos,
+             )
+
+         return None
+
+     def _match_string_literal(self, start_pos: int) -> Token | None:
+         if self.match('"'):
+             while self.match(lambda c: c != '"') or self.match('""'):
+                 pass
+             if self.match('"'):
+                 return self.create_token(
+                     tok_type=TokenType.STRING_LITERAL,
+                     start_pos=start_pos,
+                 )
+             msg = f"Unterminated string literal at positions: {start_pos} to {self.current_position}"
+             raise ValueError(msg)
+
+         return None
+
+     def _match_whitespace(self, start_pos: int) -> Token | None:
+         if self.match(lambda c: c in WHITESPACE):
+             while self.match(lambda c: c in WHITESPACE):
+                 pass
+             return self.create_token(
+                 tok_type=TokenType.WHITESPACE,
+                 start_pos=start_pos,
+             )
+         return None
+
+     def _match_ellipsis(self, start_pos: int) -> Token | None:
+         if self.match("..."):
+             return self.create_token(
+                 tok_type=TokenType.ELLIPSIS,
+                 start_pos=start_pos,
+             )
+         return None
+
+     def _match_period(self, start_pos: int) -> Token | None:
+         if self.match("."):
+             return self.create_token(
+                 tok_type=TokenType.PERIOD,
+                 start_pos=start_pos,
+             )
+         return None
+
+     def _match_number_literal(self, start_pos: int) -> Token | None:
+         if self.match(
+             lambda c: c.isdigit() or c == ".",
+         ):  # must come before unquoted identifier to avoid conflict
+             while self.match(lambda c: c.isdigit() or c in {".", "e", "E"}):
+                 pass
+             return self.create_token(
+                 tok_type=TokenType.NUMBER_LITERAL,
+                 start_pos=start_pos,
+             )
+         return None
+
+     def _match_unquoted_identifier(self, start_pos: int) -> Token | None:
+         if self.match(lambda c: c.isalnum() or c == "_"):
+             while self.match(lambda c: c.isalnum() or c == "_"):
+                 pass
+             return self.create_token(
+                 tok_type=TokenType.UNQUOTED_IDENTIFIER,
+                 start_pos=start_pos,
+             )
+         return None
+
+     def _match_single_line_comment(self, start_pos: int) -> Token | None:
+         if self.match("//") or self.match("--"):
+             while self.match(lambda c: c not in {"\n", ""}):
+                 pass
+             return self.create_token(
+                 tok_type=TokenType.SINGLE_LINE_COMMENT,
+                 start_pos=start_pos,
+             )
+         return None
+
+     def _match_token(self, start_pos: int) -> Token | None:
+         fixed_character_mapping = {
+             "=>": TokenType.LAMBDA_ARROW,
+             ">=": TokenType.COMPARISON_OPERATOR,
+             "=": TokenType.EQUAL_SIGN,
+             "(": TokenType.LEFT_PAREN,
+             ")": TokenType.RIGHT_PAREN,
+             "{": TokenType.LEFT_CURLY_BRACE,
+             "}": TokenType.RIGHT_CURLY_BRACE,
+             ",": TokenType.COMMA,
+             "[": TokenType.LEFT_BRACKET,
+             "]": TokenType.RIGHT_BRACKET,
+             "<>": TokenType.NOT_EQUAL_SIGN,
+             "+": TokenType.PLUS_SIGN,
+             "-": TokenType.MINUS_SIGN,
+             "*": TokenType.MULTIPLY_SIGN,
+             "/": TokenType.DIVIDE_SIGN,
+             ">": TokenType.COMPARISON_OPERATOR,
+             "&": TokenType.CONCATENATION_OPERATOR,
+             "!": TokenType.EXCLAMATION_POINT,
+         }
+
+         for char, token_type in fixed_character_mapping.items():
+             if self.match(char):
+                 return self.create_token(
+                     tok_type=token_type,
+                     start_pos=start_pos,
+                 )
+         return None
+
+     def scan_helper(self) -> Token:
+         start_pos: int = self.current_position
+
+         if not self.peek():
+             return Token()
+
+         for candidate_func in (
+             self._match_type_literal,
+             self._match_reserved_words,
+             # keywords have to be checked after the above tokens because "null" blocks "nullable"
+             self._match_keyword,
+             self._match_hash_identifier,
+             self._match_string_literal,
+             self._match_whitespace,
+             self._match_ellipsis,
+             self._match_period,
+             self._match_number_literal,
+             self._match_unquoted_identifier,
+             self._match_hash_identifier,
+             self._match_single_line_comment,
+             self._match_token,
+         ):
+             match_candidate = candidate_func(start_pos)
+             if match_candidate:
+                 return match_candidate
+
+         msg = f"Unexpected character '{self.peek()}' at position {self.current_position}"
+         raise ValueError(msg)
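Note (editorial, not part of the diff): the lexer tries each `_match_*` method in the fixed order listed in `scan_helper` and returns the first token produced, so ordering encodes precedence — reserved words before bare keywords, ellipsis before period, numbers before unquoted identifiers. A minimal sketch of driving it directly, assuming pbi_parsers 0.7.8 is installed; everything beyond `Lexer(text).scan()` and `Token.tok_type` is inferred from the code shown above:

```python
# Hedged example: tokenize a small M expression with the pq Lexer above.
from pbi_parsers.pq.lexer import Lexer

tokens = Lexer("let x = 1 in x").scan()
print([tok.tok_type.name for tok in tokens])
# Likely: LET, WHITESPACE, UNQUOTED_IDENTIFIER, WHITESPACE, EQUAL_SIGN,
# WHITESPACE, NUMBER_LITERAL, WHITESPACE, IN, WHITESPACE, UNQUOTED_IDENTIFIER
# (a trailing EOF token may also appear, depending on BaseLexer.scan).
```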
pbi_parsers/pq/main.py ADDED
@@ -0,0 +1,63 @@
+ from collections.abc import Iterable
+
+ from .exprs._base import Expression
+ from .formatter import Formatter
+ from .lexer import Lexer
+ from .parser import Parser
+ from .tokens import Token, TokenType
+
+
+ def remove_non_executing_tokens(tokens: Iterable[Token]) -> list[Token]:
+     """Removes tokens that are not executed in the M expression.
+
+     Args:
+         tokens (Iterable[Token]): Iterable of tokens to filter.
+
+     Returns:
+         list[Token]: Filtered list of tokens that are executed.
+
+     """
+     return list(
+         filter(
+             lambda x: x.tok_type
+             not in {
+                 TokenType.WHITESPACE,
+                 TokenType.SINGLE_LINE_COMMENT,
+                 TokenType.MULTI_LINE_COMMENT,
+             },
+             tokens,
+         ),
+     )
+
+
+ def to_ast(text: str) -> Expression | None:
+     """Converts an M expression string into an AST (Abstract Syntax Tree).
+
+     Args:
+         text (str): The M expression to parse.
+
+     Returns:
+         Expression | None: when matched, returns the root node of the AST representing the M expression.
+             When not matched, returns None.
+
+     """
+     tokens = Lexer(text).scan()
+     tokens = remove_non_executing_tokens(tokens)
+     parser = Parser(tokens)
+     return parser.to_ast()
+
+
+ def format_expression(text: str) -> str:
+     """Formats an M expression string into a more readable format.
+
+     Args:
+         text (str): The M expression to format.
+
+     Returns:
+         str: The formatted M expression.
+
+     """
+     ast = to_ast(text)
+     if ast is None:
+         return text
+     return Formatter(ast).format()
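Taken together, `main.py` defines the pq pipeline: lex, drop whitespace and comment tokens, parse, and optionally pretty-print. A hedged usage sketch; the exact formatted output depends on `Formatter`, which is not shown in this diff:

```python
# Hedged example: the public helpers from pbi_parsers.pq.main in use.
from pbi_parsers.pq.main import format_expression, to_ast

ast = to_ast("let x = 1 in x")
print(type(ast).__name__)  # root Expression subclass (exact class name not shown in this diff)

# format_expression() falls back to returning the input unchanged when to_ast() yields None:
print(format_expression("let x = 1 in x"))
```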
pbi_parsers/pq/parser.py ADDED
@@ -0,0 +1,65 @@
+ from typing import TYPE_CHECKING, Any
+
+ if TYPE_CHECKING:
+     from .exprs import Expression
+ from .tokens import Token, TokenType
+
+ EOF_TOKEN = Token()
+
+
+ class Parser:
+     __tokens: list[Token]
+     index: int = 0
+     cache: dict[Any, Any]
+
+     def __init__(self, tokens: list[Token]) -> None:
+         self.__tokens = tokens
+         self.index = 0
+         self.cache = {}
+
+     def peek(self, forward: int = 0) -> Token:
+         """Peek at the next token without advancing the index.
+
+         Args:
+             forward (int): How many tokens to look ahead. Defaults to 0.
+
+         Returns:
+             Token: The token at the current index + forward.
+
+         """
+         if self.index + forward >= len(self.__tokens):
+             return EOF_TOKEN
+         return self.__tokens[self.index + forward]
+
+     def remaining(self) -> list[Token]:
+         """Returns the remaining tokens from the current index.
+
+         Returns:
+             list[Token]: The list of tokens from the current index to the end.
+
+         """
+         return self.__tokens[self.index :]
+
+     def to_ast(self) -> "Expression | None":
+         """Parse the tokens and return the root expression.
+
+         Raises:
+             ValueError: If no valid expression is found in the token stream.
+
+         """
+         from .exprs import any_expression_match  # noqa: PLC0415
+
+         ret = any_expression_match(self)
+         if ret is None:
+             msg = "No valid expression found in the token stream."
+             raise ValueError(msg)
+         assert self.peek().tok_type == TokenType.EOF
+         return ret
+
+     def consume(self) -> Token:
+         """Returns the next token and advances the index."""
+         if self.index >= len(self.__tokens):
+             return EOF_TOKEN
+         ret = self.__tokens[self.index]
+         self.index += 1
+         return ret
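The parser is a plain cursor over the token list: `peek` looks ahead without moving, `consume` returns the current token and advances, and both return the shared `EOF_TOKEN` sentinel once the index runs past the end, so callers never need bounds checks. A small sketch of those semantics, using only modules shown in this diff:

```python
# Hedged example: Parser cursor semantics (peek does not advance; consume does).
from pbi_parsers.pq.lexer import Lexer
from pbi_parsers.pq.main import remove_non_executing_tokens
from pbi_parsers.pq.parser import Parser

tokens = remove_non_executing_tokens(Lexer("1 + 2").scan())
parser = Parser(tokens)

first = parser.peek()    # look at the next token without advancing
same = parser.consume()  # return that same token and advance the index
assert first is same
print(len(parser.remaining()))  # tokens still left to parse
```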
pbi_parsers/pq/tokens.py ADDED
@@ -0,0 +1,81 @@
+ from dataclasses import dataclass
+ from enum import Enum
+
+ from pbi_parsers.base import BaseToken
+
+
+ class TokenType(Enum):
+     LET = 1
+     EOF = 2
+     KEYWORD = 3
+     WHITESPACE = 4
+     UNQUOTED_IDENTIFIER = 5
+     QUOTED_IDENTIFER = 6
+     EQUAL_SIGN = 7
+     PERIOD = 8
+     LEFT_PAREN = 9
+     RIGHT_PAREN = 10
+     STRING_LITERAL = 11
+     LEFT_CURLY_BRACE = 12
+     RIGHT_CURLY_BRACE = 13
+     NUMBER_LITERAL = 14
+     COMMA = 15
+     LEFT_BRACKET = 16
+     RIGHT_BRACKET = 17
+     NOT_EQUAL_SIGN = 18
+     LAMBDA_ARROW = 19
+     PLUS_SIGN = 20
+     MINUS_SIGN = 21
+     MULTIPLY_SIGN = 22
+     DIVIDE_SIGN = 23
+     SINGLE_QUOTED_IDENTIFIER = 24
+     HASH_IDENTIFIER = 25
+     IN = 26
+     TYPE = 27
+     TYPE_LITERAL = 28
+     COMPARISON_OPERATOR = 29
+     IF = 31
+     ELSE = 32
+     THEN = 33
+     EACH = 34
+     META = 35
+     CONCATENATION_OPERATOR = 36
+     NULLABLE = 37
+     TRY = 38
+     OTHERWISE = 39
+     AND = 40
+     OR = 41
+     SINGLE_LINE_COMMENT = 42
+     MULTI_LINE_COMMENT = 43
+     ELLIPSIS = 44
+     NOT = 45
+     IS = 46
+     AS = 47
+     EXCLAMATION_POINT = 48
+
+
+ @dataclass
+ class Token(BaseToken):
+     tok_type: TokenType = TokenType.EOF
+
+
+ # These are tokens that could also be used as identifiers in expressions.
+ TEXT_TOKENS = (
+     TokenType.KEYWORD,
+     TokenType.LET,
+     TokenType.IN,
+     TokenType.TYPE,
+     TokenType.IF,
+     TokenType.ELSE,
+     TokenType.THEN,
+     TokenType.EACH,
+     TokenType.META,
+     TokenType.NULLABLE,
+     TokenType.TRY,
+     TokenType.OTHERWISE,
+     TokenType.AND,
+     TokenType.OR,
+     TokenType.NOT,
+     TokenType.IS,
+     TokenType.AS,
+ )
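Per the comment in the file, `TEXT_TOKENS` whitelists token types the parser may also treat as plain identifier text, since M allows most reserved words to appear as identifiers in some expression contexts. A one-line check using only the definitions above (the `Token` defaults are taken from the dataclass as shown):

```python
# Hedged example: TokenType.TYPE is reserved, yet listed as usable identifier text.
from pbi_parsers.pq.tokens import TEXT_TOKENS, Token, TokenType

tok = Token(tok_type=TokenType.TYPE)
print(tok.tok_type in TEXT_TOKENS)  # True
```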
pbi_parsers-0.7.8.dist-info/METADATA ADDED
@@ -0,0 +1,66 @@
+ Metadata-Version: 2.4
+ Name: pbi_parsers
+ Version: 0.7.8
+ Summary: Power BI lexer, parsers, and formatters for DAX and M (Power Query) languages
+ Requires-Python: >=3.11.0
+ Requires-Dist: jinja2>=3.1.6
+ Provides-Extra: dev
+ Requires-Dist: build>=1.2.2; extra == 'dev'
+ Requires-Dist: pre-commit>=3.8.0; extra == 'dev'
+ Requires-Dist: ruff>=0.12.7; extra == 'dev'
+ Provides-Extra: docs
+ Requires-Dist: mkdocs-material>=9.6.16; extra == 'docs'
+ Requires-Dist: mkdocs>=1.6.1; extra == 'docs'
+ Requires-Dist: mkdocstrings-python>=0.30.0; extra == 'docs'
+ Description-Content-Type: text/markdown
+
+ # Overview
+
+ Based on [Crafting Interpreters](https://timothya.com/pdfs/crafting-interpreters.pdf), this library provides lexers, parsers, and formatters for the DAX and Power Query (M) languages. It is designed to support code introspection and analysis, not execution. This enables development of [ruff](https://github.com/astral-sh/ruff)-equivalent tools for DAX and Power Query, as well as extraction of metadata from DAX and Power Query code, such as PQ source types (Excel, SQL, etc.) and DAX lineage dependencies.
+
+ For more information, see the [docs](https://douglassimonsen.github.io/pbi_parsers/).
+
+ # Installation
+
+ ```shell
+ python -m pip install pbi_parsers
+ ```
+
+ # Dev Instructions
+
+ ## Set Up
+
+ ```shell
+ python -m venv venv
+ venv\Scripts\activate
+ python -m pip install .
+ pre-commit install
+ ```
+
+ ## Running the Documentation Server
+
+ ```shell
+ python -m pip install .[docs]
+ mkdocs serve -f docs/mkdocs.yml
+ ```
+
+ ## Deploy Docs to GitHub Pages
+
+ ```shell
+ mkdocs gh-deploy --clean -f docs/mkdocs.yml
+ ```
+
+ ## Testing
+
+ ```shell
+ pip install -e .
+ ```
+
+ ## Build Wheel
+
+ ```shell
+ python -m build .
+ ```
pbi_parsers-0.7.8.dist-info/RECORD ADDED
@@ -0,0 +1,78 @@
+ pbi_parsers/__init__.py,sha256=PjPoEsO9-jbGlmhpRxRgZdP4Xle5nERf4DeZu01DqvI,91
+ pbi_parsers/base/__init__.py,sha256=U7QpzFFD9A4wK3ZHin6xg5fPgTca0y5KC-O7nrZ-flM,115
+ pbi_parsers/base/lexer.py,sha256=9QonQzr-oLUdfW9XKWtQDLf2ZjalHXdTK-CyIX1AtN8,4174
+ pbi_parsers/base/tokens.py,sha256=UPQqN21k9i55oEG-seW7msppnv35vHt23JOwYbVcVa4,2135
+ pbi_parsers/dax/__init__.py,sha256=w8tfYFRwfjndq-QNnYQO3cu8fri4-OlG-edxUAKunF4,479
+ pbi_parsers/dax/formatter.py,sha256=Ywbm3g_5GWcZJN6Eu8oGx_XdMuohqiCO77nMMB-P2us,7674
+ pbi_parsers/dax/lexer.py,sha256=EsGjpVAS9HqRg0cP7j6Ro5WPd7zj7CldbLsuiw2fm0k,8025
+ pbi_parsers/dax/main.py,sha256=mkkhW_lXv2sbRuQiOwNhF7RtQS5FdZLSdG8A5MOw-Wg,1657
+ pbi_parsers/dax/parser.py,sha256=FtVL0KtP9ccisLElGIg0bR3dyVwFnLYWDXBGkjc6GUg,1821
+ pbi_parsers/dax/tokens.py,sha256=nY1laCbL8vwALpJ4jcd8k4lAscqOwqdw3dFuj4_KKVk,1234
+ pbi_parsers/dax/utils.py,sha256=ip5ALyKEPTNq38FrD0GzVhSTMbWTX9BdfQE1HI99mpY,4402
+ pbi_parsers/dax/exprs/__init__.py,sha256=OUfiXzZYp5HkTPE9x595MMxpsgG1IvsED8p8spAKZuk,3432
+ pbi_parsers/dax/exprs/_base.py,sha256=5CH82uz8q-gkeIsrcFHPR0-8mNMdHJed82Lc8DqwG9M,1634
+ pbi_parsers/dax/exprs/_utils.py,sha256=zPDkfQjWlW2VMPYJSvXCpP7KfJlrn15Zz6fdMZNa3Os,1669
+ pbi_parsers/dax/exprs/add_sub.py,sha256=Yy-Zuvf0Pg9PeN1J0-fufw7Q7bgaIcCM_MweR7dquxM,2241
+ pbi_parsers/dax/exprs/add_sub_unary.py,sha256=21Sw3MJEcRIsv3OrT1C7Z9aqNa9-xnQR3ZZHSo2MJb8,2003
+ pbi_parsers/dax/exprs/array.py,sha256=r7W21dS3_ZoegVA88DqbXvJJPrmjVw56Jyqg36h_7fE,2336
+ pbi_parsers/dax/exprs/column.py,sha256=jH0QRNOfXTfwJpQhimCZzph-wCJQzzTp3se1w924hQg,1500
+ pbi_parsers/dax/exprs/comparison.py,sha256=I2Ei00jBCb83qCvVgViIbX44QPUBJuafRp3MBAtZvms,2217
+ pbi_parsers/dax/exprs/concatenation.py,sha256=ntzHPT171yJhzZOaHJAev1_Yq6XKFp-RkQAqcrlTrYo,2153
+ pbi_parsers/dax/exprs/div_mul.py,sha256=PyDFs1BBUg6CS0TXG6-AIVN5qa_siaMeQsHOM9mUlg8,2316
+ pbi_parsers/dax/exprs/exponent.py,sha256=vJ-PjkbzFFHhGXBU27DIhD4mY2wgsFxUJXYVjL0Oafc,1919
+ pbi_parsers/dax/exprs/function.py,sha256=Vzo21E9jMuzpww6f1ulwNqxjfJp5eDpKYGsa17AEm_Q,3518
+ pbi_parsers/dax/exprs/hierarchy.py,sha256=dNFBf1tKoal8GSi9SkT76RaMc4hPzmfSMGybgPo0CPE,1853
+ pbi_parsers/dax/exprs/identifier.py,sha256=EZ3fAQMof2ccjF6qq2tE4J2SmlZZ624NbbYmdtBUDdQ,1151
+ pbi_parsers/dax/exprs/ins.py,sha256=TENRz4YQHFe1UBLfFgzsaBryphe8lQR70FJmi6yw40w,1913
+ pbi_parsers/dax/exprs/keyword.py,sha256=CrMAgGfcIREsQNWNgJKF5RhYNDLSrpVlZhG3vcjB96U,1803
+ pbi_parsers/dax/exprs/literal_number.py,sha256=W0Uh2DU4BnDPtfo94xye3KAWETJIeiPOertQlcXxuek,1189
+ pbi_parsers/dax/exprs/literal_string.py,sha256=8Qad6332-5Gb3qPxPIQYRfWU8WbFEmrsWH7bcfhpq_c,1188
+ pbi_parsers/dax/exprs/logical.py,sha256=cDDZjkaKK9DMV0GxGSNuZ4VzyQz520ozl2mjYzJ-8cU,2197
+ pbi_parsers/dax/exprs/measure.py,sha256=g0HLMxD29icBgCJ-R9enRrJgQrXrih0iz4CNcjZxMg8,1143
+ pbi_parsers/dax/exprs/none.py,sha256=3a1S3CW9TEfutgUxtimGwCkDpZNWJniCDcYHErPHc-s,1018
+ pbi_parsers/dax/exprs/parens.py,sha256=9c7KF4t_Cw0Co-Smw6EiF2CGBtbqILHxqQxQjn9dlTI,1927
+ pbi_parsers/dax/exprs/returns.py,sha256=C-j3O4qM8q-ptftXCbyUYRlZ9btUn5RGin4f09zOg9s,2358
+ pbi_parsers/dax/exprs/table.py,sha256=AWXtlvV1jkeeeGCnLn0qK7AFGILUCFkUMzYBJuQ3-yg,1185
+ pbi_parsers/dax/exprs/variable.py,sha256=hRbd42D2Uu8AfE9wyUh0uvt0sV7PfAZChMBU2ixKFiM,1836
+ pbi_parsers/pq/__init__.py,sha256=zYkl7_XQwBeNLAucRd0vgbIHO9kcnFaXy-zhgZdQ7xc,359
+ pbi_parsers/pq/formatter.py,sha256=gcqj_aP8o5V10ULi5hdGhy3aAOy829jTKAfzH4mZewA,358
+ pbi_parsers/pq/lexer.py,sha256=YOo4chz1N06FLO7cU4-wSoemIzfwG30NeUSJhJB-yOE,8093
+ pbi_parsers/pq/main.py,sha256=4k5ZT-dRv5g2jjFgL1ckSpLR36wzClxe1YjiiIiBMu8,1649
+ pbi_parsers/pq/parser.py,sha256=Fy8cqAGvGv1oVg4vYWJAGHZSWimEJ3wTtL5eqIkfOA8,1885
+ pbi_parsers/pq/tokens.py,sha256=tll_fijLQ2reUJZIgyTW_a6ewuxiw9dva4dH9zx4GZ0,1637
+ pbi_parsers/pq/exprs/__init__.py,sha256=wV-G51GagUAkA6_uVjsNA5JskO2JN3xXJPjKtzCH5rU,2845
+ pbi_parsers/pq/exprs/_base.py,sha256=GcfWW3rannZBvw4LyjdiGbWGJ1nctw-m5k6LGkX7Wk4,1118
+ pbi_parsers/pq/exprs/_utils.py,sha256=kUCWSzCSy7HMKOWcGjmO4R1WiYHP23afXmq67A0ZAXY,1065
+ pbi_parsers/pq/exprs/add_sub.py,sha256=6h3OwaX87Xy82JV-inXpzFfdqoHhfZjiKpdaIJ39Tyk,1980
+ pbi_parsers/pq/exprs/add_sub_unary.py,sha256=n_EMG1aNAuDjm95HJCgs28CUifeqzJzD8I388z7r1ok,1771
+ pbi_parsers/pq/exprs/and_or_expr.py,sha256=wM8xufdnFgP8cuppubvF8ZpfyjPcPsrewNEt76gRWOs,1889
+ pbi_parsers/pq/exprs/array.py,sha256=xdpeE9lUNDCWN7Nq9nT3QOOJ5RTrcruk4DKtwvwxXiI,1742
+ pbi_parsers/pq/exprs/arrow.py,sha256=ANbvVIdEWlRlwv9lI5S7nuIKFv8lNfJ4qwjguCVtOuE,1546
+ pbi_parsers/pq/exprs/column.py,sha256=fAKj0chGrlNaYJ6IVq6Vr9D3Ggq7OMrTiU4CE4xmuNc,1131
+ pbi_parsers/pq/exprs/comparison.py,sha256=5OlvJEMu5QNjWNbnALMPVYzlcch7TNh8HxzDX3ZgLgg,2010
+ pbi_parsers/pq/exprs/concatenation.py,sha256=DgQWQCIb6JiqvkHGYKs76dOFhvUES7tenI4XijaAdy4,1941
+ pbi_parsers/pq/exprs/div_mul.py,sha256=wgkGtcTDolPzsyqCZzv4Dus8vUvEkJZswrCeTYmO_14,1980
+ pbi_parsers/pq/exprs/each.py,sha256=8CM__HL1lzTR3cBlJyr9wnGYJLOFYqdoHNyToj4jIYc,1142
+ pbi_parsers/pq/exprs/ellipsis_expr.py,sha256=_nPSaVwUKoVYUd_SzNI9rhSUUJf2x-0gBo3-6indNPM,807
+ pbi_parsers/pq/exprs/function.py,sha256=UyqpgfJNDGggMFJCippt-6Nmzoy-cMBO9DpPudxwAYQ,2025
+ pbi_parsers/pq/exprs/identifier.py,sha256=xTFpKrgiy6romsHbS9nNJ1kOgRbbfyUSzAY_jxXWVso,2492
+ pbi_parsers/pq/exprs/if_expr.py,sha256=UZjOCndG0-vurYdNwZF7nA_hCTMiqQBNRNEE9V8Kp1Y,2084
+ pbi_parsers/pq/exprs/is_expr.py,sha256=kn0YqSSXi8_4sFKECv5M3Kc_fCz0cbQ8xQAIs4i_Kdw,1679
+ pbi_parsers/pq/exprs/keyword.py,sha256=4QxiiBe_f8VL1XW6bVL9GkTgZyH5_3MtGOLU_URmG-4,1263
+ pbi_parsers/pq/exprs/literal_number.py,sha256=XMFAZ1Csx-Du9DNfC-P0OUKofMZqmMdQjo6SQtOGgoY,872
+ pbi_parsers/pq/exprs/literal_string.py,sha256=49QAUxWkpmX4gVuSElKifVJC6T_ATqdDjYRu5PjmlbM,872
+ pbi_parsers/pq/exprs/meta.py,sha256=mNHqZl1iX23u6ML53aihf0t1Lkb5ts0tT8vbqumplgg,1668
+ pbi_parsers/pq/exprs/negation.py,sha256=PFUHKc6v09ToK2tqUcI6NavA0lC21fDijSZAVq3Z2Lc,1552
+ pbi_parsers/pq/exprs/none.py,sha256=Mg8RO43vcrkhQ0aKF_DriB5eY5G0oHONb35tnNHGeGk,717
+ pbi_parsers/pq/exprs/not_expr.py,sha256=O1bENFowAIH9_cWfbbWYKVbNbXlLN1ekKU5NT4dnt4k,970
+ pbi_parsers/pq/exprs/parens.py,sha256=M6NSjBzTNGnQ6bMDtEJwAIMUFLd8w1evNLjx8rAM7QM,1265
+ pbi_parsers/pq/exprs/record.py,sha256=gdtXC5wSdrah_ob7bna-hoAFDviLH-DS9h6N3xBORvk,1836
+ pbi_parsers/pq/exprs/row.py,sha256=_NJfM2GhOd_8qN_HonD_KMEjfJEXcTKI8rP_Ab6TDVY,1531
+ pbi_parsers/pq/exprs/row_index.py,sha256=qYlJWt8yUGio36yKOgVwQ8TGYNeA37yszX5Xwqhj4ME,1695
+ pbi_parsers/pq/exprs/statement.py,sha256=JSg48pGAU3Ka2pt4lzVsYlVOqGeF_ARGm8Ajf0lB1VY,2208
+ pbi_parsers/pq/exprs/try_expr.py,sha256=UcnqfA-t9S1LVrKqeNUT8n4JJcO-ZQZoJrxAdjJ-GMA,1692
+ pbi_parsers/pq/exprs/type_expr.py,sha256=hH5ubrIJaxwQsopNJHUZ4ByS1rHEgv2Tf8ocYqSukXM,2570
+ pbi_parsers/pq/exprs/variable.py,sha256=wp4t0QHIGA264sXnWp7XVe1H8MJzMIOaoLNBQe-dfNk,1602
+ pbi_parsers-0.7.8.dist-info/METADATA,sha256=NFK2jTKIQVQc_7SATdO-DdxEcW2WSVqe-83nhvsACUQ,1658
+ pbi_parsers-0.7.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ pbi_parsers-0.7.8.dist-info/RECORD,,
pbi_parsers-0.7.8.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any