pbi-parsers 0.8.0__py3-none-any.whl → 0.8.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pbi_parsers/__init__.py CHANGED
@@ -1,6 +1,6 @@
  from . import dax, pq
 
- __version__ = "0.8.0"
+ __version__ = "0.8.2"
 
 
  __all__ = [
pbi_parsers/base/lexer.py CHANGED
@@ -118,6 +118,9 @@ class BaseLexer:
          """
          while not self.at_end():
              self.tokens.append(self.scan_helper())
+         for a_tok, b_tok in zip(self.tokens, self.tokens[1:], strict=False):
+             a_tok.next_token = b_tok
+             b_tok.prior_token = a_tok
          return tuple(self.tokens)
 
      def scan_helper(self) -> BaseToken:
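The new loop links the freshly scanned tokens into a doubly linked list: zipping the token list against itself offset by one yields each adjacent (token, successor) pair, so both back-references are set in a single pass. A minimal sketch of the same idiom, with hypothetical names that are not part of pbi_parsers:

# Pairwise-linking idiom: zip a list against itself offset by one to
# visit each adjacent (a, b) pair. Node is illustrative only.
from dataclasses import dataclass, field

@dataclass
class Node:
    value: str
    prior: "Node | None" = field(repr=False, default=None)
    next: "Node | None" = field(repr=False, default=None)

nodes = [Node("VAR"), Node(" "), Node("x")]
for a, b in zip(nodes, nodes[1:]):  # pairs: (n0, n1), (n1, n2)
    a.next = b
    b.prior = a

assert nodes[1].prior is nodes[0] and nodes[1].next is nodes[2]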
pbi_parsers/base/tokens.py CHANGED
@@ -31,6 +31,8 @@ class TextSlice:
  class BaseToken:
      tok_type: Any
      text_slice: TextSlice = field(default_factory=TextSlice)
+     prior_token: "BaseToken | None" = field(repr=False, default=None)
+     next_token: "BaseToken | None" = field(repr=False, default=None)
 
      def __eq__(self, other: object) -> bool:
          """Checks equality based on token type and text slice."""
@@ -64,3 +66,131 @@ class BaseToken:
 
          """
          return self.text_slice.get_text()
+ 
+     def add_token_before(self, text: str, tok_type: Any) -> None:
+         """Adds a token before the current token in the linked list.
+ 
+         Args:
+             text (str): The text to add before the current token.
+             tok_type (Any): The type of the token to add.
+ 
+         """
+         new_global_text = (
+             self.text_slice.full_text[: self.text_slice.start]
+             + text
+             + self.text_slice.full_text[self.text_slice.start :]
+         )
+         self._update_full_text(new_global_text)
+ 
+         length = len(text)
+         tok = BaseToken(
+             tok_type=tok_type,
+             text_slice=TextSlice(
+                 full_text=new_global_text,
+                 start=self.text_slice.start,
+                 end=self.text_slice.start + length,
+             ),
+             prior_token=self.prior_token,
+             next_token=self,
+         )
+         if self.prior_token:
+             self.prior_token.next_token = tok
+         self.prior_token = tok
+ 
+         # prior_token because we need to update the current token's position as well
+         curr_tok = self.prior_token
+         while curr_tok := curr_tok.next_token:
+             curr_tok.text_slice.start += length
+             curr_tok.text_slice.end += length
+ 
+     def add_token_after(self, text: str, tok_type: Any) -> None:
+         """Adds a token after the current token in the linked list.
+ 
+         Args:
+             text (str): The text to add after the current token.
+             tok_type (Any): The type of the token to add.
+ 
+         """
+         new_global_text = (
+             self.text_slice.full_text[: self.text_slice.end] + text + self.text_slice.full_text[self.text_slice.end :]
+         )
+         self._update_full_text(new_global_text)
+ 
+         length = len(text)
+         tok = BaseToken(
+             tok_type=tok_type,
+             text_slice=TextSlice(
+                 full_text=new_global_text,
+                 start=self.text_slice.end,
+                 end=self.text_slice.end + length,
+             ),
+             prior_token=self,
+             next_token=self.next_token,
+         )
+         if self.next_token:
+             self.next_token.prior_token = tok
+         self.next_token = tok
+ 
+         # start at the new token (self.next_token) so only the tokens after it are shifted
+         curr_tok = self.next_token
+         while curr_tok := curr_tok.next_token:
+             curr_tok.text_slice.start += length
+             curr_tok.text_slice.end += length
+ 
+     def remove(self) -> None:
+         """Removes the current token from the linked list."""
+         length = len(self.text)
+         new_global_text = (
+             self.text_slice.full_text[: self.text_slice.start] + self.text_slice.full_text[self.text_slice.end :]
+         )
+         self._update_full_text(new_global_text)
+ 
+         curr_tok = self
+         while curr_tok := curr_tok.next_token:
+             curr_tok.text_slice.start -= length
+             curr_tok.text_slice.end -= length
+ 
+         if self.prior_token:
+             self.prior_token.next_token = self.next_token
+         if self.next_token:
+             self.next_token.prior_token = self.prior_token
+         self.prior_token = None
+         self.next_token = None
+ 
+     def replace(self, new_text: str) -> None:
+         """Replaces the text of the current token with new text.
+ 
+         Args:
+             new_text (str): The new text to replace the current token's text.
+ 
+         """
+         len_diff = len(new_text) - len(self.text)
+         new_global_text = (
+             self.text_slice.full_text[: self.text_slice.start]
+             + new_text
+             + self.text_slice.full_text[self.text_slice.end :]
+         )
+         self._update_full_text(new_global_text)
+ 
+         # Adjust this token's end and the positions of subsequent tokens
+         self.text_slice.end += len_diff
+         current = self.next_token
+         while current:
+             current.text_slice.start += len_diff
+             current.text_slice.end += len_diff
+             current = current.next_token
+ 
+     def _update_full_text(self, new_full_text: str) -> None:
+         """Propagates the new full text to this token and every linked token.
+ 
+         Args:
+             new_full_text (str): The new full text to set on each token's text slice.
+ 
+         """
+         self.text_slice.full_text = new_full_text
+         curr_tok = self
+         while curr_tok := curr_tok.next_token:
+             curr_tok.text_slice.full_text = new_full_text
+         curr_tok = self
+         while curr_tok := curr_tok.prior_token:
+             curr_tok.text_slice.full_text = new_full_text
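Taken together, these methods let a token edit the source text it was lexed from while keeping every linked token's slice consistent: _update_full_text propagates the new source string in both directions of the list, and the position walk shifts only tokens at or after the edit point. A hedged walk-through built on the classes shown above (it assumes TextSlice(full_text, start, end) accepts positional arguments, as from_str below suggests, and that manually wired tokens behave like lexer output):

# Manually build the linked token chain for the source "a+b".
# tok_type is Any on the base class, so plain strings work here.
from pbi_parsers.base.tokens import BaseToken, TextSlice

text = "a+b"
a = BaseToken(tok_type="ident", text_slice=TextSlice(text, 0, 1))
plus = BaseToken(tok_type="op", text_slice=TextSlice(text, 1, 2))
b = BaseToken(tok_type="ident", text_slice=TextSlice(text, 2, 3))
a.next_token, plus.prior_token = plus, a
plus.next_token, b.prior_token = b, plus

plus.replace("-")             # source becomes "a-b"; same length, nothing shifts
a.add_token_after(" ", "ws")  # source becomes "a -b"; '-' and 'b' shift right by 1
assert b.text_slice.full_text == "a -b"
assert (b.text_slice.start, b.text_slice.end) == (3, 4)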
pbi_parsers/dax/tokens.py CHANGED
@@ -2,6 +2,7 @@ from dataclasses import dataclass
  from enum import Enum, auto
 
  from pbi_parsers.base import BaseToken
+ from pbi_parsers.base.tokens import TextSlice
 
 
  class TokenType(Enum):
@@ -39,6 +40,8 @@ class TokenType(Enum):
      UNQUOTED_IDENTIFIER = auto()
      VARIABLE = auto()
      WHITESPACE = auto()
+     UNKNOWN = auto()
+     """unknown is used when someone replaces a token with a str"""
 
 
  KEYWORD_MAPPING = {
@@ -52,3 +55,17 @@ KEYWORD_MAPPING = {
  @dataclass
  class Token(BaseToken):
      tok_type: TokenType = TokenType.EOF
+ 
+     @staticmethod
+     def from_str(value: str, tok_type: TokenType = TokenType.UNKNOWN) -> "Token":
+         tok_type = KEYWORD_MAPPING.get(value, tok_type)
+         return Token(
+             tok_type=tok_type,
+             text_slice=TextSlice(value, 0, len(value)),
+         )
+ 
+     def add_token_before(self, text: str, tok_type: TokenType) -> None:
+         super().add_token_before(text, tok_type)
+ 
+     def add_token_after(self, text: str, tok_type: TokenType) -> None:
+         super().add_token_after(text, tok_type)
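The new from_str helper turns a bare string into a standalone Token whose slice spans the whole string, consulting KEYWORD_MAPPING first so known keywords keep their proper type. A small sketch (it assumes "foo" is not a KEYWORD_MAPPING key; the mapping's full contents are not shown in this diff):

from pbi_parsers.dax.tokens import Token, TokenType

# No mapping match, no explicit type: falls back to the UNKNOWN default.
tok = Token.from_str("foo")
assert tok.tok_type is TokenType.UNKNOWN
assert tok.text == "foo"  # slice covers the whole string

# An explicit tok_type overrides the UNKNOWN default.
ident = Token.from_str("foo", tok_type=TokenType.UNQUOTED_IDENTIFIER)
assert ident.tok_type is TokenType.UNQUOTED_IDENTIFIER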
pbi_parsers/pq/tokens.py CHANGED
@@ -3,6 +3,8 @@ from enum import Enum
 
  from pbi_parsers.base import BaseToken
 
+ from ..base.tokens import TextSlice
+ 
 
  class TokenType(Enum):
      LET = 1
@@ -52,12 +54,27 @@ class TokenType(Enum):
      IS = 46
      AS = 47
      EXCLAMATION_POINT = 48
+     UNKNOWN = 99
+     """unknown is used when someone replaces a token with a str"""
 
 
  @dataclass
  class Token(BaseToken):
      tok_type: TokenType = TokenType.EOF
 
+     @staticmethod
+     def from_str(value: str, tok_type: TokenType = TokenType.UNKNOWN) -> "Token":
+         return Token(
+             tok_type=tok_type,
+             text_slice=TextSlice(value, 0, len(value)),
+         )
+ 
+     def add_token_before(self, text: str, tok_type: TokenType) -> None:
+         super().add_token_before(text, tok_type)
+ 
+     def add_token_after(self, text: str, tok_type: TokenType) -> None:
+         super().add_token_after(text, tok_type)
+ 
 
  # These are tokens that could also be used as identifiers in expressions.
  TEXT_TOKENS = (
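Note the asymmetry with the DAX version: the Power Query from_str performs no keyword lookup, so the caller must supply the type. The add_token_before/add_token_after overrides in both modules add nothing behaviorally; they only narrow tok_type from Any to the module's TokenType for type checkers. A short sketch using only enum members visible in this diff:

from pbi_parsers.pq.tokens import Token, TokenType

kw = Token.from_str("as", tok_type=TokenType.AS)
assert kw.tok_type is TokenType.AS
# No keyword lookup here: omitting tok_type yields UNKNOWN even for "as".
assert Token.from_str("as").tok_type is TokenType.UNKNOWN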
pbi_parsers-0.8.0.dist-info/METADATA → pbi_parsers-0.8.2.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pbi_parsers
- Version: 0.8.0
+ Version: 0.8.2
  Summary: Power BI lexer, parsers, and formatters for DAX and M (Power Query) languages
  Project-URL: Homepage, https://github.com/douglassimonsen/pbi_parsers
  Project-URL: Documentation, https://douglassimonsen.github.io/pbi_parsers/
pbi_parsers-0.8.0.dist-info/RECORD → pbi_parsers-0.8.2.dist-info/RECORD RENAMED
@@ -1,13 +1,13 @@
- pbi_parsers/__init__.py,sha256=cFx9LLUu1MM9UFcbhuA1oauYK8JiPDwU0ohEoJnrI_I,91
+ pbi_parsers/__init__.py,sha256=o6tdbz0lucvp3ShZEGQdvolSfiDl90As5VtF8yWSzBc,91
  pbi_parsers/base/__init__.py,sha256=U7QpzFFD9A4wK3ZHin6xg5fPgTca0y5KC-O7nrZ-flM,115
- pbi_parsers/base/lexer.py,sha256=5iOkdYzJ9wGwz7I4rDlc7slrRNUPzi0oFFkDxN1d62M,4180
- pbi_parsers/base/tokens.py,sha256=slIVl4673xXomqHMgrn1ApHs8YRvcC2yQgs1zfSpe1U,2220
+ pbi_parsers/base/lexer.py,sha256=Rl2cWlySJblFHvwW8oMVAjnCh83Zjo4N7jW3pkRZZO0,4335
+ pbi_parsers/base/tokens.py,sha256=c1PIAU5oRxZbI74SY5KT3AZ3WrcukzXtuiLPrZU4f2o,7017
  pbi_parsers/dax/__init__.py,sha256=w8tfYFRwfjndq-QNnYQO3cu8fri4-OlG-edxUAKunF4,479
  pbi_parsers/dax/formatter.py,sha256=jOFnwcgQGIzsmi5sfkKoB_pFEGjDPd8E_pwMPwudmy4,7674
  pbi_parsers/dax/lexer.py,sha256=2_pERJSrSYd8VujOe9TxJa9R2Ex8mvP-bCotH7uVBZY,8025
  pbi_parsers/dax/main.py,sha256=FG35XCAPEooXoJShSgOnmQ0py-h_MPtOfnLpQWy61is,1657
  pbi_parsers/dax/parser.py,sha256=QLKrIBcxZ26TGhTHpeKcTGEHEHUDLC6IgpxxrdJzdek,1821
- pbi_parsers/dax/tokens.py,sha256=nY1laCbL8vwALpJ4jcd8k4lAscqOwqdw3dFuj4_KKVk,1234
+ pbi_parsers/dax/tokens.py,sha256=vWkHbxWg_tU_vkzCayrK6K8W2CQPPghY0sshRwFAvoc,1886
  pbi_parsers/dax/utils.py,sha256=OURPa-b6Ldn0_KKXPdLIPA3Zdc12OfbbFd2X5SocCek,4402
  pbi_parsers/dax/exprs/__init__.py,sha256=OUfiXzZYp5HkTPE9x595MMxpsgG1IvsED8p8spAKZuk,3432
  pbi_parsers/dax/exprs/_base.py,sha256=bMHLICgAUOqAKl_S9d6V8kk62cqM1jynUY2-gBJlEcs,2732
@@ -39,7 +39,7 @@ pbi_parsers/pq/formatter.py,sha256=gcqj_aP8o5V10ULi5hdGhy3aAOy829jTKAfzH4mZewA,3
  pbi_parsers/pq/lexer.py,sha256=YOo4chz1N06FLO7cU4-wSoemIzfwG30NeUSJhJB-yOE,8093
  pbi_parsers/pq/main.py,sha256=4k5ZT-dRv5g2jjFgL1ckSpLR36wzClxe1YjiiIiBMu8,1649
  pbi_parsers/pq/parser.py,sha256=Fy8cqAGvGv1oVg4vYWJAGHZSWimEJ3wTtL5eqIkfOA8,1885
- pbi_parsers/pq/tokens.py,sha256=tll_fijLQ2reUJZIgyTW_a6ewuxiw9dva4dH9zx4GZ0,1637
+ pbi_parsers/pq/tokens.py,sha256=WHaAIvGaoj22b-YbuFF_t0h5fLdaia3ZdNTGRXK8PiA,2237
  pbi_parsers/pq/exprs/__init__.py,sha256=wV-G51GagUAkA6_uVjsNA5JskO2JN3xXJPjKtzCH5rU,2845
  pbi_parsers/pq/exprs/_base.py,sha256=GcfWW3rannZBvw4LyjdiGbWGJ1nctw-m5k6LGkX7Wk4,1118
  pbi_parsers/pq/exprs/_utils.py,sha256=kUCWSzCSy7HMKOWcGjmO4R1WiYHP23afXmq67A0ZAXY,1065
@@ -73,7 +73,7 @@ pbi_parsers/pq/exprs/statement.py,sha256=JSg48pGAU3Ka2pt4lzVsYlVOqGeF_ARGm8Ajf0l
  pbi_parsers/pq/exprs/try_expr.py,sha256=UcnqfA-t9S1LVrKqeNUT8n4JJcO-ZQZoJrxAdjJ-GMA,1692
  pbi_parsers/pq/exprs/type_expr.py,sha256=hH5ubrIJaxwQsopNJHUZ4ByS1rHEgv2Tf8ocYqSukXM,2570
  pbi_parsers/pq/exprs/variable.py,sha256=wp4t0QHIGA264sXnWp7XVe1H8MJzMIOaoLNBQe-dfNk,1602
- pbi_parsers-0.8.0.dist-info/METADATA,sha256=o3D15LUYr8IlrK-TNjptPgLFlSvwHF7BqmkXfOPFJtA,2906
- pbi_parsers-0.8.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- pbi_parsers-0.8.0.dist-info/licenses/LICENSE,sha256=Sn0IfXOE4B0iL9lZXmGmRuTGyJeCtefxcfws0bLjp2g,1072
- pbi_parsers-0.8.0.dist-info/RECORD,,
+ pbi_parsers-0.8.2.dist-info/METADATA,sha256=lw6AvVHgL0ERGlP7LEZtl343oezahwNwDJ4FJCXFbGM,2906
+ pbi_parsers-0.8.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ pbi_parsers-0.8.2.dist-info/licenses/LICENSE,sha256=Sn0IfXOE4B0iL9lZXmGmRuTGyJeCtefxcfws0bLjp2g,1072
+ pbi_parsers-0.8.2.dist-info/RECORD,,