omlish-0.0.0.dev104-py3-none-any.whl → omlish-0.0.0.dev105-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- omlish/__about__.py +2 -2
- omlish/fnpipes.py +20 -2
- omlish/formats/json/__init__.py +2 -0
- omlish/formats/json/cli/cli.py +144 -108
- omlish/formats/json/cli/parsing.py +82 -0
- omlish/formats/json/cli/processing.py +44 -0
- omlish/formats/json/cli/rendering.py +92 -0
- omlish/formats/json/consts.py +11 -1
- omlish/formats/json/render.py +68 -26
- omlish/formats/json/stream/build.py +2 -2
- omlish/formats/json/stream/render.py +32 -29
- omlish/io/trampoline.py +0 -4
- omlish/specs/jmespath/ast.py +35 -30
- omlish/specs/jmespath/exceptions.py +7 -4
- omlish/specs/jmespath/functions.py +1 -0
- omlish/specs/jmespath/lexer.py +31 -20
- omlish/specs/jmespath/parser.py +98 -93
- omlish/specs/jmespath/scope.py +2 -0
- omlish/specs/jmespath/visitor.py +13 -8
- omlish/text/random.py +7 -0
- {omlish-0.0.0.dev104.dist-info → omlish-0.0.0.dev105.dist-info}/METADATA +1 -1
- {omlish-0.0.0.dev104.dist-info → omlish-0.0.0.dev105.dist-info}/RECORD +27 -23
- /omlish/{collections/_io_abc.py → io/_abc.py} +0 -0
- {omlish-0.0.0.dev104.dist-info → omlish-0.0.0.dev105.dist-info}/LICENSE +0 -0
- {omlish-0.0.0.dev104.dist-info → omlish-0.0.0.dev105.dist-info}/WHEEL +0 -0
- {omlish-0.0.0.dev104.dist-info → omlish-0.0.0.dev105.dist-info}/entry_points.txt +0 -0
- {omlish-0.0.0.dev104.dist-info → omlish-0.0.0.dev105.dist-info}/top_level.txt +0 -0
omlish/specs/jmespath/parser.py
CHANGED
@@ -23,6 +23,7 @@ A few notes on the implementation.
 import random
 import typing as ta
 
+from ... import check
 from . import ast
 from . import exceptions
 from . import lexer
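The new `check` import drives most of this file's changes: every `self._lookahead_token(...)` result is now run through `check.not_none(...)` so that the `Token | None` values in the lookahead buffer are narrowed to `Token` before being subscripted. A minimal sketch of what a `not_none` helper of this shape does (the real omlish implementation may differ in details):

    import typing as ta

    T = ta.TypeVar('T')

    def not_none(v: T | None) -> T:
        # Fail fast instead of letting a None propagate; for static type
        # checkers, the return type narrows from T | None down to T.
        if v is None:
            raise ValueError('must not be None')
        return v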
@@ -76,16 +77,18 @@ class Parser:
     _PROJECTION_STOP = 10
 
     # The _MAX_SIZE most recent expressions are cached in _CACHE dict.
-    _CACHE: dict = {}  # noqa
+    _CACHE: ta.ClassVar[dict[str, 'ParsedResult']] = {}  # noqa
     _MAX_SIZE = 128
 
-    def __init__(self, lookahead=2):
-        self.tokenizer = None
-        self._tokens = [None] * lookahead
+    def __init__(self, lookahead: int = 2) -> None:
+        super().__init__()
+
+        self._tokenizer: ta.Iterable[lexer.Token] | None = None
+        self._tokens: list[lexer.Token | None] = [None] * lookahead
         self._buffer_size = lookahead
         self._index = 0
 
-    def parse(self, expression, options=None):
+    def parse(self, expression: str, options: visitor.Options | None = None) -> 'ParsedResult':
         cached = self._CACHE.get(expression)
         if cached is not None:
             return cached
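`_CACHE` is class-level state shared by every `Parser` instance, so re-parsing a hot expression is just a dict lookup. The eviction side isn't shown in this hunk; a rough sketch of the overall pattern, with the policy hedged (the `import random` at the top of the file suggests random eviction, as in upstream jmespath.py, but this diff doesn't confirm it — `_CachingParser` below is purely illustrative):

    import random

    class _CachingParser:
        _CACHE: dict = {}   # class-level: shared across all instances
        _MAX_SIZE = 128

        def parse(self, expression: str):
            cached = self._CACHE.get(expression)
            if cached is not None:
                return cached
            parsed = self._do_parse(expression)  # stand-in for the real parse
            self._CACHE[expression] = parsed
            if len(self._CACHE) > self._MAX_SIZE:
                # Hypothetical policy: drop a random half to stay bounded.
                for key in random.sample(sorted(self._CACHE), self._MAX_SIZE // 2):
                    del self._CACHE[key]
            return parsed

        def _do_parse(self, expression: str):
            return ('ast', expression)  # placeholder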
@@ -98,7 +101,7 @@ class Parser:
 
         return parsed_result
 
-    def _do_parse(self, expression, options=None):
+    def _do_parse(self, expression: str, options: visitor.Options | None = None) -> 'ParsedResult':
         try:
             return self._parse(expression, options)
 
@@ -114,15 +117,15 @@ class Parser:
         e.expression = expression
         raise
 
-    def _parse(self, expression, options=None):
-        self.tokenizer = lexer.Lexer().tokenize(expression, options)
-        self._tokens = list(self.tokenizer)
+    def _parse(self, expression: str, options: visitor.Options | None = None) -> 'ParsedResult':
+        self._tokenizer = lexer.Lexer().tokenize(expression, options)
+        self._tokens = list(self._tokenizer)
         self._index = 0
 
         parsed = self._expression(binding_power=0)
 
         if self._current_token() != 'eof':
-            t = self._lookahead_token(0)
+            t = check.not_none(self._lookahead_token(0))
             raise exceptions.ParseError(
                 t['start'],
                 t['value'],
@@ -132,8 +135,8 @@ class Parser:
 
         return ParsedResult(expression, parsed)
 
-    def _expression(self, binding_power=0):
-        left_token = self._lookahead_token(0)
+    def _expression(self, binding_power: int = 0) -> ast.Node:
+        left_token = check.not_none(self._lookahead_token(0))
 
         self._advance()
 
@@ -143,7 +146,7 @@ class Parser:
             self._error_nud_token,
         )
 
-        left = nud_function(left_token)
+        left = nud_function(left_token)  # noqa
 
         current_token = self._current_token()
         while binding_power < self.BINDING_POWER[current_token]:
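The `nud_function(left_token)` call and the `while binding_power < self.BINDING_POWER[current_token]` loop above are the core of a Pratt (top-down operator-precedence) parser: a token's "nud" builds a prefix expression, and its "led" extends an existing left-hand side for as long as the next operator binds tighter than the current binding power. A self-contained toy version of the same loop:

    # Toy Pratt parser over (type, value) tokens; numbers only have a
    # "nud", operators only act as "led"s via the while loop.
    BINDING_POWER = {'eof': 0, '+': 10, '*': 20}

    def parse_expr(tokens, pos=0, bp=0):
        kind, value = tokens[pos]       # "nud": a number starts an expression
        left, pos = value, pos + 1
        while bp < BINDING_POWER[tokens[pos][0]]:   # "led": operator consumes `left`
            op = tokens[pos][0]
            right, pos = parse_expr(tokens, pos + 1, BINDING_POWER[op])
            left = (op, left, right)
        return left, pos

    tokens = [('num', 1), ('+', None), ('num', 2), ('*', None), ('num', 3), ('eof', None)]
    print(parse_expr(tokens)[0])  # ('+', 1, ('*', 2, 3)) -- '*' binds tighter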
@@ -153,7 +156,7 @@ class Parser:
                 None,
             )
             if led is None:
-                error_token = self._lookahead_token(0)
+                error_token = check.not_none(self._lookahead_token(0))
                 self._error_led_token(error_token)
 
             else:
@@ -163,29 +166,29 @@ class Parser:
 
         return left
 
-    def _token_nud_literal(self, token):
+    def _token_nud_literal(self, token: lexer.Token) -> ast.Node:
         return ast.literal(token['value'])
 
-    def _token_nud_variable(self, token):
+    def _token_nud_variable(self, token: lexer.Token) -> ast.Node:
         return ast.variable_ref(token['value'][1:])
 
-    def _token_nud_unquoted_identifier(self, token):
+    def _token_nud_unquoted_identifier(self, token: lexer.Token) -> ast.Node:
         if token['value'] == 'let' and self._current_token() == 'variable':
             return self._parse_let_expression()
         else:
             return ast.field(token['value'])
 
-    def _parse_let_expression(self):
+    def _parse_let_expression(self) -> ast.Node:
         bindings = []
         while True:
-            var_token = self._lookahead_token(0)
+            var_token = check.not_none(self._lookahead_token(0))
             # Strip off the '$'.
             varname = var_token['value'][1:]
             self._advance()
             self._match('assign')
             assign_expr = self._expression()
             bindings.append(ast.assign(varname, assign_expr))
-            if self._is_in_keyword(self._lookahead_token(0)):
+            if self._is_in_keyword(check.not_none(self._lookahead_token(0))):
                 self._advance()
                 break
             else:
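The `let` handling above implements JMESPath lexical scoping: one or more `$var = expr` bindings followed by `in` and a body expression. Assuming this build wires the feature through the lexer and interpreter as well, a query would look like:

    from omlish.specs.jmespath import parser

    # Bind $base, then evaluate the body with the binding in scope.
    print(parser.search('let $base = a in $base.b', {'a': {'b': 42}}))  # 42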
@@ -193,18 +196,18 @@ class Parser:
         expr = self._expression()
         return ast.let_expression(bindings, expr)
 
-    def _is_in_keyword(self, token):
+    def _is_in_keyword(self, token: lexer.Token) -> bool:
         return (
             token['type'] == 'unquoted_identifier' and
             token['value'] == 'in'
         )
 
-    def _token_nud_quoted_identifier(self, token):
+    def _token_nud_quoted_identifier(self, token: lexer.Token) -> ast.Node:
         field = ast.field(token['value'])
 
         # You can't have a quoted identifier as a function name.
         if self._current_token() == 'lparen':
-            t = self._lookahead_token(0)
+            t = check.not_none(self._lookahead_token(0))
             raise exceptions.ParseError(
                 0,
                 t['value'],
@@ -214,7 +217,7 @@ class Parser:
 
         return field
 
-    def _token_nud_star(self, token):
+    def _token_nud_star(self, token: lexer.Token) -> ast.Node:
         left = ast.identity()
         if self._current_token() == 'rbracket':
             right = ast.identity()
@@ -222,34 +225,34 @@ class Parser:
             right = self._parse_projection_rhs(self.BINDING_POWER['star'])
         return ast.value_projection(left, right)
 
-    def _token_nud_filter(self, token):
+    def _token_nud_filter(self, token: lexer.Token) -> ast.Node:
         return self._token_led_filter(ast.identity())
 
-    def _token_nud_lbrace(self, token):
+    def _token_nud_lbrace(self, token: lexer.Token) -> ast.Node:
         return self._parse_multi_select_hash()
 
-    def _token_nud_lparen(self, token):
+    def _token_nud_lparen(self, token: lexer.Token) -> ast.Node:
         expression = self._expression()
         self._match('rparen')
         return expression
 
-    def _token_nud_minus(self, token):
+    def _token_nud_minus(self, token: lexer.Token) -> ast.Node:
         return self._parse_arithmetic_unary(token)
 
-    def _token_nud_plus(self, token):
+    def _token_nud_plus(self, token: lexer.Token) -> ast.Node:
         return self._parse_arithmetic_unary(token)
 
-    def _token_nud_flatten(self, token):
+    def _token_nud_flatten(self, token: lexer.Token) -> ast.Node:
         left = ast.flatten(ast.identity())
         right = self._parse_projection_rhs(
             self.BINDING_POWER['flatten'])
         return ast.projection(left, right)
 
-    def _token_nud_not(self, token):
+    def _token_nud_not(self, token: lexer.Token) -> ast.Node:
         expr = self._expression(self.BINDING_POWER['not'])
         return ast.not_expression(expr)
 
-    def _token_nud_lbracket(self, token):
+    def _token_nud_lbracket(self, token: lexer.Token) -> ast.Node:
         if self._current_token() in ['number', 'colon']:
             right = self._parse_index_expression()
             # We could optimize this and remove the identity() node. We don't really need an index_expression node, we
@@ -265,7 +268,7 @@ class Parser:
         else:
             return self._parse_multi_select_list()
 
-    def _parse_index_expression(self):
+    def _parse_index_expression(self) -> ast.Node:
         # We're here:
         # [<current>
         #  ^
@@ -275,12 +278,12 @@ class Parser:
 
         else:
             # Parse the syntax [number]
-            node = ast.index(self._lookahead_token(0)['value'])
+            node = ast.index(check.not_none(self._lookahead_token(0))['value'])
             self._advance()
             self._match('rbracket')
             return node
 
-    def _parse_slice_expression(self):
+    def _parse_slice_expression(self) -> ast.Node:
         # [start:end:step]
         # Where start, end, and step are optional. The last colon is optional as well.
         parts = [None, None, None]
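`_parse_slice_expression` accepts `[start:end:step]` with every part optional, mirroring Python slice semantics. For example (module path per this package's layout):

    from omlish.specs.jmespath import parser

    data = list(range(10))
    print(parser.search('[1:4]', data))   # [1, 2, 3]
    print(parser.search('[::2]', data))   # [0, 2, 4, 6, 8]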
@@ -290,32 +293,32 @@ class Parser:
             if current_token == 'colon':  # noqa
                 index += 1
                 if index == 3:
-                    self._raise_parse_error_for_token(self._lookahead_token(0), 'syntax error')
+                    self._raise_parse_error_for_token(check.not_none(self._lookahead_token(0)), 'syntax error')
                 self._advance()
 
             elif current_token == 'number':  # noqa
-                parts[index] = self._lookahead_token(0)['value']
+                parts[index] = check.not_none(self._lookahead_token(0))['value']
                 self._advance()
 
             else:
-                self._raise_parse_error_for_token(self._lookahead_token(0), 'syntax error')
+                self._raise_parse_error_for_token(check.not_none(self._lookahead_token(0)), 'syntax error')
 
             current_token = self._current_token()
 
         self._match('rbracket')
         return ast.slice(*parts)
 
-    def _token_nud_current(self, token):
+    def _token_nud_current(self, token: lexer.Token) -> ast.Node:
         return ast.current_node()
 
-    def _token_nud_root(self, token):
+    def _token_nud_root(self, token: lexer.Token) -> ast.Node:
         return ast.root_node()
 
-    def _token_nud_expref(self, token):
+    def _token_nud_expref(self, token: lexer.Token) -> ast.Node:
         expression = self._expression(self.BINDING_POWER['expref'])
         return ast.expref(expression)
 
-    def _token_led_dot(self, left):
+    def _token_led_dot(self, left: ast.Node) -> ast.Node:
         if self._current_token() != 'star':
             right = self._parse_dot_rhs(self.BINDING_POWER['dot'])
             if left['type'] == 'subexpression':
@@ -331,24 +334,24 @@ class Parser:
             right = self._parse_projection_rhs(self.BINDING_POWER['dot'])
             return ast.value_projection(left, right)
 
-    def _token_led_pipe(self, left):
+    def _token_led_pipe(self, left: ast.Node) -> ast.Node:
         right = self._expression(self.BINDING_POWER['pipe'])
         return ast.pipe(left, right)
 
-    def _token_led_or(self, left):
+    def _token_led_or(self, left: ast.Node) -> ast.Node:
         right = self._expression(self.BINDING_POWER['or'])
         return ast.or_expression(left, right)
 
-    def _token_led_and(self, left):
+    def _token_led_and(self, left: ast.Node) -> ast.Node:
         right = self._expression(self.BINDING_POWER['and'])
         return ast.and_expression(left, right)
 
-    def _token_led_lparen(self, left):
+    def _token_led_lparen(self, left: ast.Node) -> ast.Node:
         if left['type'] != 'field':
             # 0 - first func arg or closing paren.
             # -1 - '(' token
             # -2 - invalid function "name".
-            prev_t = self._lookahead_token(-2)
+            prev_t = check.not_none(self._lookahead_token(-2))
             raise exceptions.ParseError(
                 prev_t['start'],
                 prev_t['value'],
@@ -368,7 +371,7 @@ class Parser:
         function_node = ast.function_expression(name, args)
         return function_node
 
-    def _token_led_filter(self, left):
+    def _token_led_filter(self, left: ast.Node) -> ast.Node:
         # Filters are projections.
         condition = self._expression(0)
         self._match('rbracket')
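`_token_led_lparen` turns `field(` into a function expression, and the error branch above it is why only plain identifiers can name functions (a quoted identifier followed by `(` is rejected back in `_token_nud_quoted_identifier`). For example:

    from omlish.specs.jmespath import parser

    print(parser.search('length(@)', [1, 2, 3]))  # 3 -- `length` parses as a function
    # '"length"(@)' would instead raise a ParseError: quoted identifiers
    # are always fields, never function names.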
@@ -378,52 +381,52 @@ class Parser:
         right = self._parse_projection_rhs(self.BINDING_POWER['filter'])
         return ast.filter_projection(left, right, condition)
 
-    def _token_led_eq(self, left):
+    def _token_led_eq(self, left: ast.Node) -> ast.Node:
         return self._parse_comparator(left, 'eq')
 
-    def _token_led_ne(self, left):
+    def _token_led_ne(self, left: ast.Node) -> ast.Node:
         return self._parse_comparator(left, 'ne')
 
-    def _token_led_gt(self, left):
+    def _token_led_gt(self, left: ast.Node) -> ast.Node:
         return self._parse_comparator(left, 'gt')
 
-    def _token_led_gte(self, left):
+    def _token_led_gte(self, left: ast.Node) -> ast.Node:
         return self._parse_comparator(left, 'gte')
 
-    def _token_led_lt(self, left):
+    def _token_led_lt(self, left: ast.Node) -> ast.Node:
         return self._parse_comparator(left, 'lt')
 
-    def _token_led_lte(self, left):
+    def _token_led_lte(self, left: ast.Node) -> ast.Node:
         return self._parse_comparator(left, 'lte')
 
-    def _token_led_div(self, left):
+    def _token_led_div(self, left: ast.Node) -> ast.Node:
         return self._parse_arithmetic(left, 'div')
 
-    def _token_led_divide(self, left):
+    def _token_led_divide(self, left: ast.Node) -> ast.Node:
         return self._parse_arithmetic(left, 'divide')
 
-    def _token_led_minus(self, left):
+    def _token_led_minus(self, left: ast.Node) -> ast.Node:
         return self._parse_arithmetic(left, 'minus')
 
-    def _token_led_modulo(self, left):
+    def _token_led_modulo(self, left: ast.Node) -> ast.Node:
         return self._parse_arithmetic(left, 'modulo')
 
-    def _token_led_multiply(self, left):
+    def _token_led_multiply(self, left: ast.Node) -> ast.Node:
         return self._parse_arithmetic(left, 'multiply')
 
-    def _token_led_plus(self, left):
+    def _token_led_plus(self, left: ast.Node) -> ast.Node:
         return self._parse_arithmetic(left, 'plus')
 
-    def _token_led_star(self, left):
+    def _token_led_star(self, left: ast.Node) -> ast.Node:
         return self._parse_arithmetic(left, 'multiply')
 
-    def _token_led_flatten(self, left):
+    def _token_led_flatten(self, left: ast.Node) -> ast.Node:
         left = ast.flatten(left)
         right = self._parse_projection_rhs(self.BINDING_POWER['flatten'])
         return ast.projection(left, right)
 
-    def _token_led_lbracket(self, left):
-        token = self._lookahead_token(0)
+    def _token_led_lbracket(self, left: ast.Node) -> ast.Node:
+        token = check.not_none(self._lookahead_token(0))
         if token['type'] in ['number', 'colon']:
             right = self._parse_index_expression()
             if left['type'] == 'index_expression':
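The comparator leds (`eq` through `lte`) all funnel into `_parse_comparator`, which is what filter projections rely on; the arithmetic leds funnel into `_parse_arithmetic` the same way. For example:

    from omlish.specs.jmespath import parser

    data = {'items': [{'n': 1}, {'n': 2}, {'n': 3}]}
    # The 'gt' comparator inside a filter projection:
    print(parser.search('items[?n > `1`].n', data))  # [2, 3]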
@@ -442,7 +445,7 @@ class Parser:
             right = self._parse_projection_rhs(self.BINDING_POWER['star'])
             return ast.projection(left, right)
 
-    def _project_if_slice(self, left, right):
+    def _project_if_slice(self, left: ast.Node, right: ast.Node) -> ast.Node:
         index_expr = ast.index_expression([left, right])
         if right['type'] == 'slice':
             return ast.projection(
@@ -452,20 +455,20 @@ class Parser:
         else:
             return index_expr
 
-    def _parse_comparator(self, left, comparator):
+    def _parse_comparator(self, left: ast.Node, comparator: str) -> ast.Node:
         right = self._expression(self.BINDING_POWER[comparator])
         return ast.comparator(comparator, left, right)
 
-    def _parse_arithmetic_unary(self, token):
+    def _parse_arithmetic_unary(self, token: lexer.Token) -> ast.Node:
         expression = self._expression(self.BINDING_POWER[token['type']])
         return ast.arithmetic_unary(token['type'], expression)
 
-    def _parse_arithmetic(self, left, operator):
+    def _parse_arithmetic(self, left: ast.Node, operator: str) -> ast.Node:
         right = self._expression(self.BINDING_POWER[operator])
         return ast.arithmetic(operator, left, right)
 
-    def _parse_multi_select_list(self):
-        expressions = []
+    def _parse_multi_select_list(self) -> ast.Node:
+        expressions: list[ast.Node] = []
         while True:
             expression = self._expression()
             expressions.append(expression)
@@ -476,10 +479,10 @@ class Parser:
         self._match('rbracket')
         return ast.multi_select_list(expressions)
 
-    def _parse_multi_select_hash(self):
+    def _parse_multi_select_hash(self) -> ast.Node:
         pairs = []
         while True:
-            key_token = self._lookahead_token(0)
+            key_token = check.not_none(self._lookahead_token(0))
 
             # Before getting the token value, verify it's an identifier.
             self._match_multiple_tokens(token_types=['quoted_identifier', 'unquoted_identifier'])
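Multi-select lists and hashes rebuild new structure from the current node, which is what `_parse_multi_select_list` and `_parse_multi_select_hash` parse:

    from omlish.specs.jmespath import parser

    data = {'name': 'a', 'size': 1, 'extra': True}
    print(parser.search('{name: name, size: size}', data))  # {'name': 'a', 'size': 1}
    print(parser.search('[name, size]', data))              # ['a', 1]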
@@ -500,7 +503,7 @@ class Parser:
 
         return ast.multi_select_dict(nodes=pairs)
 
-    def _parse_projection_rhs(self, binding_power):
+    def _parse_projection_rhs(self, binding_power: int) -> ast.Node:
         # Parse the right hand side of the projection.
         if self.BINDING_POWER[self._current_token()] < self._PROJECTION_STOP:
             # BP of 10 are all the tokens that stop a projection.
@@ -517,11 +520,11 @@ class Parser:
             right = self._parse_dot_rhs(binding_power)
 
         else:
-            self._raise_parse_error_for_token(self._lookahead_token(0), 'syntax error')
+            self._raise_parse_error_for_token(check.not_none(self._lookahead_token(0)), 'syntax error')
 
         return right
 
-    def _parse_dot_rhs(self, binding_power):
+    def _parse_dot_rhs(self, binding_power: int) -> ast.Node:
         # From the grammar:
         # expression '.' ( identifier /
         #                  multi-select-list /
@@ -544,13 +547,13 @@ class Parser:
             return self._parse_multi_select_hash()
 
         else:
-            t = self._lookahead_token(0)
+            t = check.not_none(self._lookahead_token(0))
             allowed = ['quoted_identifier', 'unquoted_identifier', 'lbracket', 'lbrace']
             msg = f'Expecting: {allowed}, got: {t["type"]}'
             self._raise_parse_error_for_token(t, msg)
             raise RuntimeError  # noqa
 
-    def _error_nud_token(self, token):
+    def _error_nud_token(self, token: lexer.Token) -> ta.NoReturn:
         if token['type'] == 'eof':
             raise exceptions.IncompleteExpressionError(
                 token['start'],
@@ -560,10 +563,10 @@ class Parser:
 
         self._raise_parse_error_for_token(token, 'invalid token')
 
-    def _error_led_token(self, token):
+    def _error_led_token(self, token: lexer.Token) -> ta.NoReturn:
         self._raise_parse_error_for_token(token, 'invalid token')
 
-    def _match(self, token_type=None):
+    def _match(self, token_type: str | None = None) -> None:
         # inline'd self._current_token()
         if self._current_token() == token_type:
             # inline'd self._advance()
@@ -571,24 +574,24 @@ class Parser:
         else:
             self._raise_parse_error_maybe_eof(token_type, self._lookahead_token(0))
 
-    def _match_multiple_tokens(self, token_types):
+    def _match_multiple_tokens(self, token_types: ta.Container[str]) -> None:
         if self._current_token() not in token_types:
             self._raise_parse_error_maybe_eof(token_types, self._lookahead_token(0))
         self._advance()
 
-    def _advance(self):
+    def _advance(self) -> None:
         self._index += 1
 
-    def _current_token(self):
-        return self._tokens[self._index]['type']
+    def _current_token(self) -> str:
+        return check.not_none(self._tokens[self._index])['type']
 
-    def _lookahead(self, number):
-        return self._tokens[self._index + number]['type']
+    def _lookahead(self, number: int) -> str:
+        return check.not_none(self._tokens[self._index + number])['type']
 
-    def _lookahead_token(self, number):
+    def _lookahead_token(self, number: int) -> lexer.Token | None:
         return self._tokens[self._index + number]
 
-    def _raise_parse_error_for_token(self, token, reason) -> ta.NoReturn:
+    def _raise_parse_error_for_token(self, token: lexer.Token, reason: str) -> ta.NoReturn:
         lex_position = token['start']
         actual_value = token['value']
         actual_type = token['type']
@@ -631,15 +634,17 @@ class Parser:
 
 
 class ParsedResult:
-    def __init__(self, expression, parsed):
+    def __init__(self, expression: str, parsed: ast.Node) -> None:
+        super().__init__()
+
         self.expression = expression
         self.parsed = parsed
 
-    def search(self, value, options=None):
+    def search(self, value: ta.Any, options: visitor.Options | None = None) -> ta.Any:
         evaluator = visitor.TreeInterpreter(options)
         return evaluator.evaluate(self.parsed, value)
 
-    def _render_dot_file(self):
+    def _render_dot_file(self) -> str:
         """
         Render the parsed AST as a dot file.
 
@@ -652,13 +657,13 @@ class ParsedResult:
         contents = renderer.visit(self.parsed)
         return contents
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return repr(self.parsed)
 
 
-def compile(expression, options=None):  # noqa
+def compile(expression: str, options: visitor.Options | None = None) -> ParsedResult:  # noqa
     return Parser().parse(expression, options=options)
 
 
-def search(expression, data, options=None):
+def search(expression: str, data: ta.Any, options: visitor.Options | None = None) -> ta.Any:
     return compile(expression, options).search(data, options=options)
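`compile` and `search` are the module-level entry points: `compile` parses once (hitting the class-level cache) and returns a reusable `ParsedResult`, while `search` is the one-shot convenience wrapper:

    from omlish.specs.jmespath import parser

    expr = parser.compile('a.b')                   # parse once, reuse
    print(expr.search({'a': {'b': 1}}))            # 1
    print(parser.search('a.b', {'a': {'b': 2}}))   # 2 -- parse + search in one call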
omlish/specs/jmespath/scope.py
CHANGED
@@ -9,6 +9,8 @@ class ScopedChainDict:
     """
 
    def __init__(self, *scopes):
+        super().__init__()
+
         # The scopes are evaluated starting at the top of the stack (the most recently pushed scope via .push_scope()).
         # If we use a normal list() and push/pop scopes by adding/removing to the end of the list, we'd have to always
         # call reversed(self._scopes) whenever we resolve a key, because the end of the list is the top of the stack.
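The comment explains the design choice: scopes are stored so the most recently pushed scope is consulted first, avoiding a `reversed()` call on every lookup. A minimal sketch of the idea (`MiniScopedChainDict` is illustrative, not the omlish class; the constructor's argument order here is an assumption):

    class MiniScopedChainDict:
        # Index 0 is the top of the stack, so lookups scan in shadowing order.
        def __init__(self, *scopes):
            self._scopes = list(reversed(scopes))

        def __getitem__(self, key):
            for scope in self._scopes:
                if key in scope:
                    return scope[key]
            raise KeyError(key)

        def push_scope(self, scope):
            self._scopes.insert(0, scope)

        def pop_scope(self):
            self._scopes.pop(0)

    d = MiniScopedChainDict({'x': 1})
    d.push_scope({'x': 2})
    print(d['x'])  # 2 -- the inner binding shadows the outer one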
omlish/specs/jmespath/visitor.py
CHANGED
@@ -4,17 +4,18 @@ import typing as ta
 
 from . import exceptions
 from . import functions
+from .ast import Node
 from .scope import ScopedChainDict
 
 
-def _equals(x, y):
+def _equals(x: ta.Any, y: ta.Any) -> bool:
     if _is_special_number_case(x, y):
         return False
     else:
         return x == y
 
 
-def _is_special_number_case(x, y):
+def _is_special_number_case(x: ta.Any, y: ta.Any) -> bool | None:
     # We need to special case comparing 0 or 1 to True/False. While normally comparing any integer other than 0/1 to
     # True/False will always return False. However 0/1 have this:
     # >>> 0 == True
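The special case exists because Python's `bool` is a subclass of `int`, so plain `==` conflates JSON numbers and booleans:

    # The quirk _equals() guards against:
    print(0 == False)  # True in Python
    print(1 == True)   # True in Python
    # JMESPath must keep 0/1 distinct from false/true, so
    # _is_special_number_case() short-circuits these pairs to unequal.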
@@ -39,13 +40,13 @@ def _is_special_number_case(x, y):
     return None
 
 
-def _is_comparable(x):
+def _is_comparable(x: ta.Any) -> bool:
     # The spec doesn't officially support string types yet, but enough people are relying on this behavior that it's
     # been added back. This should eventually become part of the official spec.
     return _is_actual_number(x) or isinstance(x, str)
 
 
-def _is_actual_number(x):
+def _is_actual_number(x: ta.Any) -> bool:
     # We need to handle python's quirkiness with booleans, specifically:
     #
     # >>> isinstance(False, int)
@@ -62,10 +63,12 @@ class Options:
 
     def __init__(
             self,
-            dict_cls=None,
+            dict_cls: type | None = None,
             custom_functions=None,
-            enable_legacy_literals=False,
-    ):
+            enable_legacy_literals: bool = False,
+    ) -> None:
+        super().__init__()
+
         #: The class to use when creating a dict. The interpreter may create dictionaries during the evaluation of a
         # Jmespath expression. For example, a multi-select hash will create a dictionary. By default we use a dict()
         # type. You can set this value to change what dict type is used. The most common reason you would change this
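`Options.dict_cls` lets callers control the mapping type the interpreter instantiates, e.g. for multi-select hashes. An example using an ordered mapping (per the docstring shown above):

    import collections

    from omlish.specs.jmespath import parser, visitor

    opts = visitor.Options(dict_cls=collections.OrderedDict)
    print(parser.search('{b: b, a: a}', {'a': 1, 'b': 2}, options=opts))
    # OrderedDict([('b', 2), ('a', 1)])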
@@ -82,6 +85,7 @@ class Options:
 
 class _Expression:
     def __init__(self, expression, interpreter):
+        super().__init__()
         self.expression = expression
         self.interpreter = interpreter
 
@@ -91,6 +95,7 @@ class _Expression:
 
 class Visitor:
     def __init__(self):
+        super().__init__()
         self._method_cache = {}
 
     def visit(self, node, *args, **kwargs):
@@ -156,7 +161,7 @@ class TreeInterpreter(Visitor):
     def default_visit(self, node, *args, **kwargs):
         raise NotImplementedError(node['type'])
 
-    def evaluate(self, ast, root):
+    def evaluate(self, ast, root: Node) -> ta.Any:
         self._root = root
         return self.visit(ast, root)
 
omlish/text/random.py
ADDED