omlish 0.0.0.dev46__py3-none-any.whl → 0.0.0.dev48__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,664 @@
1
+ """
2
+ Top down operator precedence parser.
3
+
4
+ This is an implementation of Vaughan R. Pratt's "Top Down Operator Precedence" parser.
5
+ (http://dl.acm.org/citation.cfm?doid=512927.512931).
6
+
7
+ These are some additional resources that help explain the general idea behind a Pratt parser:
8
+
9
+ * http://effbot.org/zone/simple-top-down-parsing.htm
10
+ * http://javascript.crockford.com/tdop/tdop.html
11
+
12
+ A few notes on the implementation.
13
+
14
+ * All the nud/led tokens are on the Parser class itself, and are dispatched using getattr(). This keeps all the parsing
15
+ logic contained to a single class.
16
+ * We use two passes through the data. One to create a list of tokens, then one pass through the tokens to create the
17
+ AST. While the lexer actually yields tokens, we convert it to a list so we can easily implement two tokens of
18
+ lookahead. A previous implementation used a fixed circular buffer, but it was significantly slower. Also, the
19
+ average jmespath expression typically does not have a large number of tokens so this is not an issue. And
20
+ interestingly enough, creating a token list first is actually faster than consuming from the token iterator one token
21
+ at a time.
22
+ """
23
+ import random
24
+ import typing as ta
25
+
26
+ from . import ast
27
+ from . import exceptions
28
+ from . import lexer
29
+ from . import visitor
30
+
31
+
32
class Parser:
    """
    Top down operator precedence (Pratt) parser for jmespath expressions.

    Tokens that can begin an expression are handled by ``_token_nud_<type>`` methods ("null denotation"); tokens that
    extend an expression to their left are handled by ``_token_led_<type>`` methods ("left denotation"). Both are
    resolved by name with getattr() in _expression().
    """

    # Left binding power for each token type; higher binds more tightly. Token types with a power below
    # _PROJECTION_STOP terminate a projection.
    BINDING_POWER: ta.Mapping[str, int] = {
        'eof': 0,
        'variable': 0,
        'assign': 0,
        'unquoted_identifier': 0,
        'quoted_identifier': 0,
        'literal': 0,
        'rbracket': 0,
        'rparen': 0,
        'comma': 0,
        'rbrace': 0,
        'number': 0,
        'current': 0,
        'root': 0,
        'expref': 0,
        'colon': 0,
        'pipe': 1,
        'or': 2,
        'and': 3,
        'eq': 5,
        'gt': 5,
        'lt': 5,
        'gte': 5,
        'lte': 5,
        'ne': 5,
        'minus': 6,
        'plus': 6,
        'div': 7,
        'divide': 7,
        'modulo': 7,
        'multiply': 7,
        'flatten': 9,
        # Everything above stops a projection.
        'star': 20,
        'filter': 21,
        'dot': 40,
        'not': 45,
        'lbrace': 50,
        'lbracket': 55,
        'lparen': 60,
    }

    # The maximum binding power for a token that can stop a projection.
    _PROJECTION_STOP = 10

    # The _MAX_SIZE most recent expressions are cached in _CACHE dict.
    _CACHE: dict = {}  # noqa
    _MAX_SIZE = 128

    def __init__(self, lookahead=2):
        # _tokens is pre-sized for *lookahead* tokens here, but _parse() replaces it with the full token list.
        self.tokenizer = None
        self._tokens = [None] * lookahead
        self._buffer_size = lookahead
        self._index = 0

    def parse(self, expression, options=None):
        """
        Parse *expression* into a ParsedResult, consulting the class-level cache first.

        NOTE(review): the cache is keyed on the expression string only — *options* is ignored on a cache hit, so the
        first parse's result is reused even if later calls pass different options. Confirm this is intended.
        """

        cached = self._CACHE.get(expression)
        if cached is not None:
            return cached

        parsed_result = self._do_parse(expression, options)

        self._CACHE[expression] = parsed_result
        if len(self._CACHE) > self._MAX_SIZE:
            self._free_cache_entries()

        return parsed_result

    def _do_parse(self, expression, options=None):
        # Attach the source expression to any lexer/parse error before propagating it.
        try:
            return self._parse(expression, options)

        except exceptions.LexerError as e:
            e.expression = expression
            raise

        except exceptions.IncompleteExpressionError as e:
            e.set_expression(expression)
            raise

        except exceptions.ParseError as e:
            e.expression = expression
            raise

    def _parse(self, expression, options=None):
        # Tokenize the whole expression up front (see module docstring), then parse from the token list.
        self.tokenizer = lexer.Lexer().tokenize(expression, options)
        self._tokens = list(self.tokenizer)
        self._index = 0

        parsed = self._expression(binding_power=0)

        # A complete parse must consume everything up to the 'eof' token.
        if self._current_token() != 'eof':
            t = self._lookahead_token(0)
            raise exceptions.ParseError(
                t['start'],
                t['value'],
                t['type'],
                f'Unexpected token: {t["value"]}',
            )

        return ParsedResult(expression, parsed)

    def _expression(self, binding_power=0):
        """Core Pratt loop: parse one expression whose operators bind more tightly than *binding_power*."""

        left_token = self._lookahead_token(0)

        self._advance()

        # Dispatch on the token type; unknown prefix tokens fall back to the error handler.
        nud_function = getattr(
            self,
            f'_token_nud_{left_token["type"]}',
            self._error_nud_token,
        )

        left = nud_function(left_token)

        current_token = self._current_token()
        while binding_power < self.BINDING_POWER[current_token]:
            led = getattr(
                self,
                f'_token_led_{current_token}',
                None,
            )
            if led is None:
                error_token = self._lookahead_token(0)
                self._error_led_token(error_token)

            else:
                self._advance()
                left = led(left)
                current_token = self._current_token()

        return left

    def _token_nud_literal(self, token):
        return ast.literal(token['value'])

    def _token_nud_variable(self, token):
        # Strip the leading '$' from the variable name.
        return ast.variable_ref(token['value'][1:])

    def _token_nud_unquoted_identifier(self, token):
        # 'let' is only a keyword when followed by a variable; otherwise it is an ordinary field name.
        if token['value'] == 'let' and self._current_token() == 'variable':
            return self._parse_let_expression()
        else:
            return ast.field(token['value'])

    def _parse_let_expression(self):
        """Parse ``let $a = <expr>, $b = <expr> in <expr>`` (the leading 'let' is already consumed)."""

        bindings = []
        while True:
            var_token = self._lookahead_token(0)
            # Strip off the '$'.
            varname = var_token['value'][1:]
            self._advance()
            self._match('assign')
            assign_expr = self._expression()
            bindings.append(ast.assign(varname, assign_expr))
            if self._is_in_keyword(self._lookahead_token(0)):
                self._advance()
                break
            else:
                self._match('comma')
        expr = self._expression()
        return ast.let_expression(bindings, expr)

    def _is_in_keyword(self, token):
        # 'in' is not a lexer-level keyword; it arrives as an unquoted identifier.
        return (
            token['type'] == 'unquoted_identifier' and
            token['value'] == 'in'
        )

    def _token_nud_quoted_identifier(self, token):
        field = ast.field(token['value'])

        # You can't have a quoted identifier as a function name.
        if self._current_token() == 'lparen':
            t = self._lookahead_token(0)
            raise exceptions.ParseError(
                0,
                t['value'],
                t['type'],
                'Quoted identifier not allowed for function names.',
            )

        return field

    def _token_nud_star(self, token):
        # A leading '*' is a value projection off the current node.
        left = ast.identity()
        if self._current_token() == 'rbracket':
            right = ast.identity()
        else:
            right = self._parse_projection_rhs(self.BINDING_POWER['star'])
        return ast.value_projection(left, right)

    def _token_nud_filter(self, token):
        # A leading filter expression filters the current node.
        return self._token_led_filter(ast.identity())

    def _token_nud_lbrace(self, token):
        return self._parse_multi_select_hash()

    def _token_nud_lparen(self, token):
        # Parenthesized sub-expression.
        expression = self._expression()
        self._match('rparen')
        return expression

    def _token_nud_minus(self, token):
        # Unary minus.
        return self._parse_arithmetic_unary(token)

    def _token_nud_plus(self, token):
        # Unary plus.
        return self._parse_arithmetic_unary(token)

    def _token_nud_flatten(self, token):
        # A leading '[]' flattens the current node and projects over the result.
        left = ast.flatten(ast.identity())
        right = self._parse_projection_rhs(
            self.BINDING_POWER['flatten'])
        return ast.projection(left, right)

    def _token_nud_not(self, token):
        expr = self._expression(self.BINDING_POWER['not'])
        return ast.not_expression(expr)

    def _token_nud_lbracket(self, token):
        if self._current_token() in ['number', 'colon']:
            right = self._parse_index_expression()
            # We could optimize this and remove the identity() node. We don't really need an index_expression node, we
            # can just emit an index node here if we're not dealing with a slice.
            return self._project_if_slice(ast.identity(), right)

        elif self._current_token() == 'star' and self._lookahead(1) == 'rbracket':
            self._advance()
            self._advance()
            right = self._parse_projection_rhs(self.BINDING_POWER['star'])
            return ast.projection(ast.identity(), right)

        else:
            return self._parse_multi_select_list()

    def _parse_index_expression(self):
        # We're here:
        # [<current>
        #  ^
        #  | current token
        if (self._lookahead(0) == 'colon' or self._lookahead(1) == 'colon'):
            return self._parse_slice_expression()

        else:
            # Parse the syntax [number]
            node = ast.index(self._lookahead_token(0)['value'])
            self._advance()
            self._match('rbracket')
            return node

    def _parse_slice_expression(self):
        # [start:end:step]
        # Where start, end, and step are optional. The last colon is optional as well.
        parts = [None, None, None]
        index = 0
        current_token = self._current_token()
        while current_token != 'rbracket' and index < 3:  # noqa
            if current_token == 'colon':  # noqa
                index += 1
                if index == 3:
                    # A third colon means more than three slice parts: invalid.
                    self._raise_parse_error_for_token(self._lookahead_token(0), 'syntax error')
                self._advance()

            elif current_token == 'number':  # noqa
                parts[index] = self._lookahead_token(0)['value']
                self._advance()

            else:
                self._raise_parse_error_for_token(self._lookahead_token(0), 'syntax error')

            current_token = self._current_token()

        self._match('rbracket')
        return ast.slice(*parts)

    def _token_nud_current(self, token):
        return ast.current_node()

    def _token_nud_root(self, token):
        return ast.root_node()

    def _token_nud_expref(self, token):
        # '&expr' - expression reference.
        expression = self._expression(self.BINDING_POWER['expref'])
        return ast.expref(expression)

    def _token_led_dot(self, left):
        if self._current_token() != 'star':
            right = self._parse_dot_rhs(self.BINDING_POWER['dot'])
            if left['type'] == 'subexpression':
                # Flatten chained dots into one subexpression node instead of nesting.
                left['children'].append(right)
                return left

            else:
                return ast.subexpression([left, right])

        else:
            # We're creating a projection.
            self._advance()
            right = self._parse_projection_rhs(self.BINDING_POWER['dot'])
            return ast.value_projection(left, right)

    def _token_led_pipe(self, left):
        right = self._expression(self.BINDING_POWER['pipe'])
        return ast.pipe(left, right)

    def _token_led_or(self, left):
        right = self._expression(self.BINDING_POWER['or'])
        return ast.or_expression(left, right)

    def _token_led_and(self, left):
        right = self._expression(self.BINDING_POWER['and'])
        return ast.and_expression(left, right)

    def _token_led_lparen(self, left):
        # A '(' following anything other than a bare field name is an invalid function call.
        if left['type'] != 'field':
            #  0 - first func arg or closing paren.
            # -1 - '(' token
            # -2 - invalid function "name".
            prev_t = self._lookahead_token(-2)
            raise exceptions.ParseError(
                prev_t['start'],
                prev_t['value'],
                prev_t['type'],
                f"Invalid function name '{prev_t['value']}'",
            )

        name = left['value']
        args = []
        while self._current_token() != 'rparen':
            expression = self._expression()
            if self._current_token() == 'comma':
                self._match('comma')
            args.append(expression)
        self._match('rparen')

        function_node = ast.function_expression(name, args)
        return function_node

    def _token_led_filter(self, left):
        # Filters are projections.
        condition = self._expression(0)
        self._match('rbracket')
        if self._current_token() == 'flatten':
            right = ast.identity()
        else:
            right = self._parse_projection_rhs(self.BINDING_POWER['filter'])
        return ast.filter_projection(left, right, condition)

    def _token_led_eq(self, left):
        return self._parse_comparator(left, 'eq')

    def _token_led_ne(self, left):
        return self._parse_comparator(left, 'ne')

    def _token_led_gt(self, left):
        return self._parse_comparator(left, 'gt')

    def _token_led_gte(self, left):
        return self._parse_comparator(left, 'gte')

    def _token_led_lt(self, left):
        return self._parse_comparator(left, 'lt')

    def _token_led_lte(self, left):
        return self._parse_comparator(left, 'lte')

    def _token_led_div(self, left):
        return self._parse_arithmetic(left, 'div')

    def _token_led_divide(self, left):
        return self._parse_arithmetic(left, 'divide')

    def _token_led_minus(self, left):
        return self._parse_arithmetic(left, 'minus')

    def _token_led_modulo(self, left):
        return self._parse_arithmetic(left, 'modulo')

    def _token_led_multiply(self, left):
        return self._parse_arithmetic(left, 'multiply')

    def _token_led_plus(self, left):
        return self._parse_arithmetic(left, 'plus')

    def _token_led_star(self, left):
        # Infix '*' is multiplication; prefix '*' (a projection) is handled by _token_nud_star.
        return self._parse_arithmetic(left, 'multiply')

    def _token_led_flatten(self, left):
        left = ast.flatten(left)
        right = self._parse_projection_rhs(self.BINDING_POWER['flatten'])
        return ast.projection(left, right)

    def _token_led_lbracket(self, left):
        token = self._lookahead_token(0)
        if token['type'] in ['number', 'colon']:
            right = self._parse_index_expression()
            if left['type'] == 'index_expression':
                # Optimization: if the left node is an index expr, we can avoid creating another node and instead just
                # add the right node as a child of the left.
                left['children'].append(right)
                return left

            else:
                return self._project_if_slice(left, right)

        else:
            # We have a projection
            self._match('star')
            self._match('rbracket')
            right = self._parse_projection_rhs(self.BINDING_POWER['star'])
            return ast.projection(left, right)

    def _project_if_slice(self, left, right):
        # Slices are projections; plain indexes are not.
        index_expr = ast.index_expression([left, right])
        if right['type'] == 'slice':
            return ast.projection(
                index_expr,
                self._parse_projection_rhs(self.BINDING_POWER['star']),
            )
        else:
            return index_expr

    def _parse_comparator(self, left, comparator):
        right = self._expression(self.BINDING_POWER[comparator])
        return ast.comparator(comparator, left, right)

    def _parse_arithmetic_unary(self, token):
        expression = self._expression(self.BINDING_POWER[token['type']])
        return ast.arithmetic_unary(token['type'], expression)

    def _parse_arithmetic(self, left, operator):
        right = self._expression(self.BINDING_POWER[operator])
        return ast.arithmetic(operator, left, right)

    def _parse_multi_select_list(self):
        # '[expr, expr, ...]' - the leading '[' has already been consumed.
        expressions = []
        while True:
            expression = self._expression()
            expressions.append(expression)
            if self._current_token() == 'rbracket':
                break
            else:
                self._match('comma')
        self._match('rbracket')
        return ast.multi_select_list(expressions)

    def _parse_multi_select_hash(self):
        # '{key: expr, ...}' - the leading '{' has already been consumed.
        pairs = []
        while True:
            key_token = self._lookahead_token(0)

            # Before getting the token value, verify it's an identifier.
            self._match_multiple_tokens(token_types=['quoted_identifier', 'unquoted_identifier'])
            key_name = key_token['value']

            self._match('colon')
            value = self._expression(0)

            node = ast.key_val_pair(key_name=key_name, node=value)

            pairs.append(node)
            if self._current_token() == 'comma':
                self._match('comma')

            elif self._current_token() == 'rbrace':
                self._match('rbrace')
                break

        return ast.multi_select_dict(nodes=pairs)

    def _parse_projection_rhs(self, binding_power):
        # Parse the right hand side of the projection.
        if self.BINDING_POWER[self._current_token()] < self._PROJECTION_STOP:
            # BP of 10 are all the tokens that stop a projection.
            right = ast.identity()

        elif self._current_token() == 'lbracket':
            right = self._expression(binding_power)

        elif self._current_token() == 'filter':
            right = self._expression(binding_power)

        elif self._current_token() == 'dot':
            self._match('dot')
            right = self._parse_dot_rhs(binding_power)

        else:
            self._raise_parse_error_for_token(self._lookahead_token(0), 'syntax error')

        return right

    def _parse_dot_rhs(self, binding_power):
        # From the grammar:
        # expression '.' ( identifier /
        #                  multi-select-list /
        #                  multi-select-hash /
        #                  function-expression /
        #                  * )
        # In terms of tokens that means that after a '.', you can have:
        lookahead = self._current_token()

        # Common case "foo.bar", so first check for an identifier.
        if lookahead in ['quoted_identifier', 'unquoted_identifier', 'star']:
            return self._expression(binding_power)

        elif lookahead == 'lbracket':
            self._match('lbracket')
            return self._parse_multi_select_list()

        elif lookahead == 'lbrace':
            self._match('lbrace')
            return self._parse_multi_select_hash()

        else:
            t = self._lookahead_token(0)
            allowed = ['quoted_identifier', 'unquoted_identifier', 'lbracket', 'lbrace']
            msg = f'Expecting: {allowed}, got: {t["type"]}'
            self._raise_parse_error_for_token(t, msg)
            # Unreachable: _raise_parse_error_for_token always raises.
            raise RuntimeError  # noqa

    def _error_nud_token(self, token):
        # Fallback nud handler: the token cannot begin an expression.
        if token['type'] == 'eof':
            raise exceptions.IncompleteExpressionError(
                token['start'],
                token['value'],
                token['type'],
            )

        self._raise_parse_error_for_token(token, 'invalid token')

    def _error_led_token(self, token):
        # Called when a token inside an expression has no led handler.
        self._raise_parse_error_for_token(token, 'invalid token')

    def _match(self, token_type=None):
        # Consume the current token if it has the expected type, otherwise raise.
        if self._current_token() == token_type:
            self._advance()
        else:
            self._raise_parse_error_maybe_eof(token_type, self._lookahead_token(0))

    def _match_multiple_tokens(self, token_types):
        # Like _match(), but accepts any of several token types.
        if self._current_token() not in token_types:
            self._raise_parse_error_maybe_eof(token_types, self._lookahead_token(0))
        self._advance()

    def _advance(self):
        self._index += 1

    def _current_token(self):
        # Type of the token at the current position.
        return self._tokens[self._index]['type']  # type: ignore

    def _lookahead(self, number):
        # Type of the token *number* positions ahead.
        return self._tokens[self._index + number]['type']  # noqa

    def _lookahead_token(self, number):
        # Full token dict *number* positions ahead (may be negative to look behind).
        return self._tokens[self._index + number]

    def _raise_parse_error_for_token(self, token, reason) -> ta.NoReturn:
        lex_position = token['start']
        actual_value = token['value']
        actual_type = token['type']
        raise exceptions.ParseError(
            lex_position,
            actual_value,
            actual_type,
            reason,
        )

    def _raise_parse_error_maybe_eof(self, expected_type, token):
        # At eof the expression simply ended early -> IncompleteExpressionError; otherwise ParseError.
        lex_position = token['start']
        actual_value = token['value']
        actual_type = token['type']
        if actual_type == 'eof':
            raise exceptions.IncompleteExpressionError(
                lex_position,
                actual_value,
                actual_type,
            )

        message = f'Expecting: {expected_type}, got: {actual_type}'
        raise exceptions.ParseError(
            lex_position,
            actual_value,
            actual_type,
            message,
        )

    def _free_cache_entries(self):
        # Evict a random half of the cache to bound its size.
        keys = list(self._CACHE.keys())
        for key in random.sample(keys, min(len(keys), int(self._MAX_SIZE / 2))):
            self._CACHE.pop(key, None)

    @classmethod
    def purge(cls):
        """Clear the expression compilation cache."""

        cls._CACHE.clear()
631
+
632
+
633
class ParsedResult:
    """Pairs a jmespath expression string with its parsed AST and knows how to evaluate it."""

    def __init__(self, expression, parsed):
        self.expression = expression
        self.parsed = parsed

    def search(self, value, options=None):
        """Evaluate the parsed AST against *value* and return the result."""

        return visitor.TreeInterpreter(options).evaluate(self.parsed, value)

    def _render_dot_file(self):
        """
        Render the parsed AST as a dot file.

        Note that this is marked as an internal method because the AST is an implementation detail and is subject to
        change. This method can be used to help troubleshoot or for development purposes, but is not considered part of
        the public supported API. Use at your own risk.
        """

        return visitor.GraphvizVisitor().visit(self.parsed)

    def __repr__(self):
        return repr(self.parsed)
657
+
658
+
659
def compile(expression, options=None):  # noqa
    """Parse *expression* and return a reusable ParsedResult (cached across calls)."""

    parser = Parser()
    return parser.parse(expression, options=options)
661
+
662
+
663
def search(expression, data, options=None):
    """Compile *expression* and evaluate it against *data* in one step."""

    compiled = compile(expression, options)
    return compiled.search(data, options=options)
@@ -0,0 +1,35 @@
1
+ import collections
2
+
3
+
4
class ScopedChainDict:
    """
    Dictionary that can delegate lookups to multiple dicts. This provides a basic get/set dict interface that is backed
    by multiple dicts. Each dict is searched from the top most (most recently pushed) scope dict until a match is
    found.
    """

    def __init__(self, *scopes):
        # Scopes are resolved starting from the most recently pushed one. A deque lets push_scope() prepend in O(1)
        # via appendleft(), so plain iteration already visits scopes top-of-stack first without a reversed() call on
        # every lookup (which a list with end-appends would require).
        self._scopes = collections.deque(scopes)

    def __getitem__(self, key):
        # Return the value from the topmost scope containing *key*; KeyError if no scope has it.
        for layer in self._scopes:
            if key in layer:
                return layer[key]
        raise KeyError(key)

    def get(self, key, default=None):
        # Non-raising lookup: fall back to *default* when no scope contains *key*.
        try:
            value = self[key]
        except KeyError:
            value = default
        return value

    def push_scope(self, scope):
        # Make *scope* the new topmost (first-searched) scope.
        self._scopes.appendleft(scope)

    def pop_scope(self):
        # Discard the topmost scope.
        self._scopes.popleft()