omlish 0.0.0.dev47__py3-none-any.whl → 0.0.0.dev49__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.
@@ -32,6 +32,8 @@ from . import visitor
 class Parser:
     BINDING_POWER: ta.Mapping[str, int] = {
         'eof': 0,
+        'variable': 0,
+        'assign': 0,
         'unquoted_identifier': 0,
         'quoted_identifier': 0,
         'literal': 0,
@@ -41,6 +43,7 @@ class Parser:
         'rbrace': 0,
         'number': 0,
         'current': 0,
+        'root': 0,
         'expref': 0,
         'colon': 0,
         'pipe': 1,
@@ -52,6 +55,12 @@ class Parser:
         'gte': 5,
         'lte': 5,
         'ne': 5,
+        'minus': 6,
+        'plus': 6,
+        'div': 7,
+        'divide': 7,
+        'modulo': 7,
+        'multiply': 7,
         'flatten': 9,
         # Everything above stops a projection.
         'star': 20,
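
Note: the new entries slot unary/binary arithmetic between the comparators (5) and 'flatten' (9), so arithmetic binds tighter than comparisons and 'a + b * c' parses as 'a + (b * c)'. A minimal sketch (not this package's code) of the precedence-climbing loop such binding powers drive, with nud/led/advance/current_token_type as assumed helper names:

    def expression(parser, rbp=0):
        # nud: handle a token in prefix/operand position ('literal', 'minus', ...)
        left = parser.nud(parser.advance())
        # led: keep folding infix operators while they bind tighter than the context
        while rbp < parser.BINDING_POWER[parser.current_token_type()]:
            left = parser.led(parser.advance(), left)
        return left
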
@@ -76,12 +85,12 @@ class Parser:
         self._buffer_size = lookahead
         self._index = 0
 
-    def parse(self, expression):
+    def parse(self, expression, options=None):
         cached = self._CACHE.get(expression)
         if cached is not None:
             return cached
 
-        parsed_result = self._do_parse(expression)
+        parsed_result = self._do_parse(expression, options)
 
         self._CACHE[expression] = parsed_result
         if len(self._CACHE) > self._MAX_SIZE:
@@ -89,9 +98,9 @@ class Parser:
 
         return parsed_result
 
-    def _do_parse(self, expression):
+    def _do_parse(self, expression, options=None):
         try:
-            return self._parse(expression)
+            return self._parse(expression, options)
 
         except exceptions.LexerError as e:
             e.expression = expression
@@ -105,8 +114,8 @@ class Parser:
             e.expression = expression
             raise
 
-    def _parse(self, expression):
-        self.tokenizer = lexer.Lexer().tokenize(expression)
+    def _parse(self, expression, options=None):
+        self.tokenizer = lexer.Lexer().tokenize(expression, options)
         self._tokens = list(self.tokenizer)
         self._index = 0
 
@@ -157,8 +166,38 @@ class Parser:
     def _token_nud_literal(self, token):
         return ast.literal(token['value'])
 
+    def _token_nud_variable(self, token):
+        return ast.variable_ref(token['value'][1:])
+
     def _token_nud_unquoted_identifier(self, token):
-        return ast.field(token['value'])
+        if token['value'] == 'let' and self._current_token() == 'variable':
+            return self._parse_let_expression()
+        else:
+            return ast.field(token['value'])
+
+    def _parse_let_expression(self):
+        bindings = []
+        while True:
+            var_token = self._lookahead_token(0)
+            # Strip off the '$'.
+            varname = var_token['value'][1:]
+            self._advance()
+            self._match('assign')
+            assign_expr = self._expression()
+            bindings.append(ast.assign(varname, assign_expr))
+            if self._is_in_keyword(self._lookahead_token(0)):
+                self._advance()
+                break
+            else:
+                self._match('comma')
+        expr = self._expression()
+        return ast.let_expression(bindings, expr)
+
+    def _is_in_keyword(self, token):
+        return (
+            token['type'] == 'unquoted_identifier' and
+            token['value'] == 'in'
+        )
 
     def _token_nud_quoted_identifier(self, token):
         field = ast.field(token['value'])
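
Note: 'let' is not a reserved word; the unquoted-identifier nud only branches into _parse_let_expression when the identifier is literally 'let' and the next token is a variable. Assuming the module-level search helper shown later in this diff (the full import path is a guess from the relative imports), the accepted form is one or more comma-separated bindings followed by 'in' and a body:

    from omlish.specs.jmespath.parser import search  # assumed path

    data = {'user': {'first': 'Ada', 'last': 'Lovelace'}}
    result = search('let $u = user in [$u.first, $u.last]', data)
    # expected: ['Ada', 'Lovelace']
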
@@ -194,6 +233,12 @@ class Parser:
         self._match('rparen')
         return expression
 
+    def _token_nud_minus(self, token):
+        return self._parse_arithmetic_unary(token)
+
+    def _token_nud_plus(self, token):
+        return self._parse_arithmetic_unary(token)
+
     def _token_nud_flatten(self, token):
         left = ast.flatten(ast.identity())
         right = self._parse_projection_rhs(
@@ -263,6 +308,9 @@ class Parser:
     def _token_nud_current(self, token):
         return ast.current_node()
 
+    def _token_nud_root(self, token):
+        return ast.root_node()
+
     def _token_nud_expref(self, token):
         expression = self._expression(self.BINDING_POWER['expref'])
         return ast.expref(expression)
@@ -348,6 +396,27 @@ class Parser:
     def _token_led_lte(self, left):
         return self._parse_comparator(left, 'lte')
 
+    def _token_led_div(self, left):
+        return self._parse_arithmetic(left, 'div')
+
+    def _token_led_divide(self, left):
+        return self._parse_arithmetic(left, 'divide')
+
+    def _token_led_minus(self, left):
+        return self._parse_arithmetic(left, 'minus')
+
+    def _token_led_modulo(self, left):
+        return self._parse_arithmetic(left, 'modulo')
+
+    def _token_led_multiply(self, left):
+        return self._parse_arithmetic(left, 'multiply')
+
+    def _token_led_plus(self, left):
+        return self._parse_arithmetic(left, 'plus')
+
+    def _token_led_star(self, left):
+        return self._parse_arithmetic(left, 'multiply')
+
     def _token_led_flatten(self, left):
         left = ast.flatten(left)
         right = self._parse_projection_rhs(self.BINDING_POWER['flatten'])
@@ -387,6 +456,14 @@ class Parser:
         right = self._expression(self.BINDING_POWER[comparator])
         return ast.comparator(comparator, left, right)
 
+    def _parse_arithmetic_unary(self, token):
+        expression = self._expression(self.BINDING_POWER[token['type']])
+        return ast.arithmetic_unary(token['type'], expression)
+
+    def _parse_arithmetic(self, left, operator):
+        right = self._expression(self.BINDING_POWER[operator])
+        return ast.arithmetic(operator, left, right)
+
     def _parse_multi_select_list(self):
         expressions = []
         while True:
@@ -542,7 +619,8 @@ class Parser:
         )
 
     def _free_cache_entries(self):
-        for key in random.sample(list(self._CACHE.keys()), int(self._MAX_SIZE / 2)):
+        keys = list(self._CACHE.keys())
+        for key in random.sample(keys, min(len(keys), int(self._MAX_SIZE / 2))):
            self._CACHE.pop(key, None)
 
    @classmethod
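
Note: random.sample raises ValueError whenever the requested sample size exceeds the population, so the old one-liner could only evict safely when the cache held at least _MAX_SIZE / 2 entries; the min() clamp removes that precondition. A standalone illustration:

    import random

    cache = {'a': 1, 'b': 2}  # fewer entries than half of a typical _MAX_SIZE
    keys = list(cache.keys())
    # random.sample(keys, 64) would raise ValueError here; the clamped call cannot:
    for key in random.sample(keys, min(len(keys), 64)):
        cache.pop(key, None)
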
@@ -558,9 +636,8 @@ class ParsedResult:
         self.parsed = parsed
 
     def search(self, value, options=None):
-        interpreter = visitor.TreeInterpreter(options)
-        result = interpreter.visit(self.parsed, value)
-        return result
+        evaluator = visitor.TreeInterpreter(options)
+        return evaluator.evaluate(self.parsed, value)
 
     def _render_dot_file(self):
         """
@@ -579,9 +656,9 @@
         return repr(self.parsed)
 
 
-def compile(expression):  # noqa
-    return Parser().parse(expression)
+def compile(expression, options=None):  # noqa
+    return Parser().parse(expression, options=options)
 
 
 def search(expression, data, options=None):
-    return Parser().parse(expression).search(data, options=options)
+    return compile(expression, options).search(data, options=options)
@@ -0,0 +1,35 @@
+import collections
+
+
+class ScopedChainDict:
+    """
+    Dictionary that can delegate lookups to multiple dicts. This provides a basic get/set dict interface that is backed
+    by multiple dicts. Each dict is searched from the top most (most recently pushed) scope dict until a match is
+    found.
+    """
+
+    def __init__(self, *scopes):
+        # The scopes are evaluated starting at the top of the stack (the most recently pushed scope via .push_scope()).
+        # If we use a normal list() and push/pop scopes by adding/removing to the end of the list, we'd have to always
+        # call reversed(self._scopes) whenever we resolve a key, because the end of the list is the top of the stack.
+        # To avoid this, we're using a deque so we can append to the front of the list via .appendleft() in constant
+        # time, and iterate over scopes without having to do so with a reversed() call each time.
+        self._scopes = collections.deque(scopes)
+
+    def __getitem__(self, key):
+        for scope in self._scopes:
+            if key in scope:
+                return scope[key]
+        raise KeyError(key)
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def push_scope(self, scope):
+        self._scopes.appendleft(scope)
+
+    def pop_scope(self):
+        self._scopes.popleft()
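
Note: this hunk adds a new module (the visitor below imports it as 'from .scope import ScopedChainDict') that backs the interpreter's variable scopes. A quick usage sketch of the class as defined above:

    scopes = ScopedChainDict({'x': 1})
    scopes.push_scope({'x': 2, 'y': 3})  # the newest scope shadows older ones
    assert scopes['x'] == 2 and scopes['y'] == 3
    scopes.pop_scope()                   # popping a scope unshadows its keys
    assert scopes['x'] == 1
    assert scopes.get('y') is None       # misses fall through to the default
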
@@ -2,7 +2,9 @@ import numbers
 import operator
 import typing as ta
 
+from . import exceptions
 from . import functions
+from .scope import ScopedChainDict
 
 
 def _equals(x, y):
@@ -58,7 +60,12 @@ def _is_actual_number(x):
 class Options:
     """Options to control how a Jmespath function is evaluated."""
 
-    def __init__(self, dict_cls=None, custom_functions=None):
+    def __init__(
+            self,
+            dict_cls=None,
+            custom_functions=None,
+            enable_legacy_literals=False,
+    ):
         #: The class to use when creating a dict. The interpreter may create dictionaries during the evaluation of a
         # Jmespath expression. For example, a multi-select hash will create a dictionary. By default we use a dict()
         # type. You can set this value to change what dict type is used. The most common reason you would change this
@@ -66,6 +73,12 @@ class Options:
         self.dict_cls = dict_cls
         self.custom_functions = custom_functions
 
+        #: The flag to enable pre-JEP-12 literal compatibility.
+        # JEP-12 deprecates `foo` -> "foo" syntax.
+        # Valid expressions MUST use: `"foo"` -> "foo"
+        # Setting this flag to `True` enables support for legacy syntax.
+        self.enable_legacy_literals = enable_legacy_literals
+
 
 class _Expression:
     def __init__(self, expression, interpreter):
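
Note: as the comment above says, JEP-12 restricts backtick literals to valid JSON, so a bare `foo` only lexes as the string "foo" when this compatibility flag is set. A hedged sketch (import paths are assumptions, as elsewhere in these notes):

    from omlish.specs.jmespath.parser import search    # assumed path
    from omlish.specs.jmespath.visitor import Options  # assumed path

    legacy = Options(enable_legacy_literals=True)
    result = search('`foo`', {}, options=legacy)  # 'foo' under pre-JEP-12 rules
    # Without the flag, backticks must contain JSON: `"foo"`, `[1, 2]`, `null`, ...
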
@@ -104,6 +117,20 @@ class TreeInterpreter(Visitor):
 
     _EQUALITY_OPS: ta.Sequence[str] = ['eq', 'ne']
 
+    _ARITHMETIC_UNARY_FUNC: ta.Mapping[str, ta.Callable] = {
+        'minus': operator.neg,
+        'plus': lambda x: x,
+    }
+
+    _ARITHMETIC_FUNC: ta.Mapping[str, ta.Callable] = {
+        'div': operator.floordiv,
+        'divide': operator.truediv,
+        'minus': operator.sub,
+        'modulo': operator.mod,
+        'multiply': operator.mul,
+        'plus': operator.add,
+    }
+
     MAP_TYPE = dict
 
     def __init__(self, options=None):
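
Note: the two division entries differ on purpose: 'div' maps to floor division while 'divide' maps to true division, with the rest being the usual operator-module equivalents.

    import operator

    assert operator.floordiv(7, 2) == 3   # 'div': integer result
    assert operator.truediv(7, 2) == 3.5  # 'divide': float result
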
@@ -123,13 +150,22 @@
         else:
             self._functions = functions.Functions()
 
+        self._root = None
+        self._scope = ScopedChainDict()
+
     def default_visit(self, node, *args, **kwargs):
         raise NotImplementedError(node['type'])
 
+    def evaluate(self, ast, root):
+        self._root = root
+        return self.visit(ast, root)
+
     def visit_subexpression(self, node, value):
         result = value
         for child in node['children']:
             result = self.visit(child, result)
+            if result is None:
+                return None
         return result
 
     def visit_field(self, node, value):
@@ -157,9 +193,25 @@
             return None
         return comparator_func(left, right)
 
+    def visit_arithmetic_unary(self, node, value):
+        operation = self._ARITHMETIC_UNARY_FUNC[node['value']]
+        return operation(
+            self.visit(node['children'][0], value),
+        )
+
+    def visit_arithmetic(self, node, value):
+        operation = self._ARITHMETIC_FUNC[node['value']]
+        return operation(
+            self.visit(node['children'][0], value),
+            self.visit(node['children'][1], value),
+        )
+
     def visit_current(self, node, value):
         return value
 
+    def visit_root(self, node, value):
+        return self._root
+
     def visit_expref(self, node, value):
         return _Expression(node['children'][0], self)
 
@@ -222,8 +274,15 @@
         return result
 
     def visit_slice(self, node, value):
+        if isinstance(value, str):
+            start = node['children'][0]
+            end = node['children'][1]
+            step = node['children'][2]
+            return value[start:end:step]
+
         if not isinstance(value, list):
             return None
+
         s = slice(*node['children'])
         return value[s]
 
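
Note: visit_slice previously fell through to 'return None' for any non-list input, so slicing a string produced nothing; the new branch applies ordinary Python slicing, including negative steps. With the same assumed import as the earlier notes:

    from omlish.specs.jmespath.parser import search  # assumed path

    assert search('[0:3]', 'foobar') == 'foo'  # was None before this change
    assert search('[::-1]', 'abc') == 'cba'    # step values work too
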
@@ -234,9 +293,6 @@
         return node['value']
 
     def visit_multi_select_dict(self, node, value):
-        if value is None:
-            return None
-
         collected = self._dict_cls()
         for child in node['children']:
             collected[child['value']] = self.visit(child, value)
@@ -244,9 +300,6 @@
         return collected
 
     def visit_multi_select_list(self, node, value):
-        if value is None:
-            return None
-
         collected = []
         for child in node['children']:
             collected.append(self.visit(child, value))
@@ -286,9 +339,20 @@
 
     def visit_projection(self, node, value):
         base = self.visit(node['children'][0], value)
+
+        allow_string = False
+        first_child = node['children'][0]
+        if first_child['type'] == 'index_expression':
+            nested_children = first_child['children']
+            if len(nested_children) > 1 and nested_children[1]['type'] == 'slice':
+                allow_string = True
+
+        if isinstance(base, str) and allow_string:
+            # Projections are really sub-expressions in disguise: evaluate the rhs when the lhs is a sliced string.
+            return self.visit(node['children'][1], base)
+
         if not isinstance(base, list):
             return None
-
         collected = []
         for element in base:
             current = self.visit(node['children'][1], element)
@@ -297,6 +361,27 @@
 
         return collected
 
+    def visit_let_expression(self, node, value):
+        *bindings, expr = node['children']
+        scope = {}
+        for assign in bindings:
+            scope.update(self.visit(assign, value))
+        self._scope.push_scope(scope)
+        result = self.visit(expr, value)
+        self._scope.pop_scope()
+        return result
+
+    def visit_assign(self, node, value):
+        name = node['value']
+        value = self.visit(node['children'][0], value)
+        return {name: value}
+
+    def visit_variable_ref(self, node, value):
+        try:
+            return self._scope[node['value']]
+        except KeyError:
+            raise exceptions.UndefinedVariableError(node['value'])  # noqa
+
     def visit_value_projection(self, node, value):
         base = self.visit(node['children'][0], value)
         try:
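
Note: each let pushes exactly one scope for all of its bindings and pops it once the body has evaluated, so nested lets shadow lexically and referencing a variable outside any binding raises UndefinedVariableError. A hedged end-to-end sketch:

    from omlish.specs.jmespath.parser import search  # assumed path

    data = {'a': 1, 'b': 2}
    assert search('let $x = a in let $x = b in $x', data) == 2  # inner binding wins
    # search('$x', data)  # no enclosing let: raises exceptions.UndefinedVariableError
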
@@ -17,6 +17,12 @@ from .metadata import ( # noqa
 )
 
 from .parse import (  # noqa
+    DEFAULT_KEYWORD_SUPERTYPES,
+    DEFAULT_KEYWORD_TYPES,
+    DEFAULT_KEYWORD_TYPES_BY_TAG,
+    DEFAULT_PARSER,
+    Parser,
+    build_keyword_types_by_tag,
     parse_keyword,
     parse_keywords,
 )
@@ -14,7 +14,7 @@ KeywordT = ta.TypeVar('KeywordT', bound='Keyword')
 ##
 
 
-class Keyword(lang.Abstract, lang.PackageSealed):
+class Keyword(lang.Abstract):
     tag: ta.ClassVar[str]
 
     def __init_subclass__(cls, *, tag: str | None = None, **kwargs: ta.Any) -> None:
@@ -6,7 +6,7 @@ from .base import StrKeyword
 ##
 
 
-class CoreKeyword(Keyword, lang.Abstract):
+class CoreKeyword(Keyword, lang.Abstract, lang.Sealed):
     pass
 
 
@@ -6,7 +6,7 @@ from .base import StrKeyword
 ##
 
 
-class MetadataKeyword(Keyword, lang.Abstract):
+class MetadataKeyword(Keyword, lang.Abstract, lang.Sealed):
     pass
 
 
@@ -4,9 +4,6 @@ import typing as ta
 from .... import check
 from .... import collections as col
 from .... import lang
-from . import core  # noqa
-from . import metadata  # noqa
-from . import validation  # noqa
 from .base import BooleanKeyword
 from .base import Keyword
 from .base import Keywords
@@ -15,6 +12,9 @@ from .base import NumberKeyword
 from .base import StrKeyword
 from .base import StrOrStrsKeyword
 from .base import StrToKeywordsKeyword
+from .core import CoreKeyword
+from .metadata import MetadataKeyword
+from .validation import ValidationKeyword
 
 
 KeywordT = ta.TypeVar('KeywordT', bound=Keyword)
@@ -23,46 +23,79 @@ KeywordT = ta.TypeVar('KeywordT', bound=Keyword)
 ##
 
 
-KEYWORD_TYPES_BY_TAG: ta.Mapping[str, type[Keyword]] = col.make_map_by(  # noqa
-    operator.attrgetter('tag'),
-    (cls for cls in lang.deep_subclasses(Keyword) if not lang.is_abstract_class(cls)),
-    strict=True,
-)
+def build_keyword_types_by_tag(keyword_types: ta.Iterable[type[Keyword]]) -> ta.Mapping[str, type[Keyword]]:
+    return col.make_map_by(operator.attrgetter('tag'), keyword_types, strict=True)
 
 
-def parse_keyword(cls: type[KeywordT], v: ta.Any) -> KeywordT:
-    if issubclass(cls, BooleanKeyword):
-        return cls(check.isinstance(v, bool))  # type: ignore
+DEFAULT_KEYWORD_SUPERTYPES: ta.AbstractSet = frozenset([
+    CoreKeyword,
+    MetadataKeyword,
+    ValidationKeyword,
+])
 
-    elif issubclass(cls, NumberKeyword):
-        return cls(check.isinstance(v, (int, float)))  # type: ignore
+DEFAULT_KEYWORD_TYPES: ta.AbstractSet = frozenset(lang.flatten(
+    lang.deep_subclasses(st, concrete_only=True) for st in DEFAULT_KEYWORD_SUPERTYPES
+))
 
-    elif issubclass(cls, StrKeyword):
-        return cls(check.isinstance(v, str))  # type: ignore
+DEFAULT_KEYWORD_TYPES_BY_TAG: ta.Mapping[str, type[Keyword]] = build_keyword_types_by_tag(DEFAULT_KEYWORD_TYPES)
 
-    elif issubclass(cls, StrOrStrsKeyword):
-        ss: str | ta.Sequence[str]
-        if isinstance(v, str):
-            ss = v
-        elif isinstance(v, ta.Iterable):
-            ss = col.seq_of(check.of_isinstance(str))(v)
+
+##
+
+
+class Parser:
+    def __init__(
+            self,
+            keyword_types: ta.Iterable[type[Keyword]] | ta.Mapping[str, type[Keyword]] = DEFAULT_KEYWORD_TYPES_BY_TAG,
+    ) -> None:
+        super().__init__()
+
+        if isinstance(keyword_types, ta.Mapping):
+            self._keyword_types_by_tag = keyword_types
         else:
-            raise TypeError(v)
-        return cls(ss)  # type: ignore
+            self._keyword_types_by_tag = build_keyword_types_by_tag(keyword_types)
+
+    def parse_keyword(self, cls: type[KeywordT], v: ta.Any) -> KeywordT:
+        if issubclass(cls, BooleanKeyword):
+            return cls(check.isinstance(v, bool))  # type: ignore
+
+        elif issubclass(cls, NumberKeyword):
+            return cls(check.isinstance(v, (int, float)))  # type: ignore
+
+        elif issubclass(cls, StrKeyword):
+            return cls(check.isinstance(v, str))  # type: ignore
 
-    elif issubclass(cls, KeywordsKeyword):
-        return cls(parse_keywords(v))  # type: ignore
+        elif issubclass(cls, StrOrStrsKeyword):
+            ss: str | ta.Sequence[str]
+            if isinstance(v, str):
+                ss = v
+            elif isinstance(v, ta.Iterable):
+                ss = col.seq_of(check.of_isinstance(str))(v)
+            else:
+                raise TypeError(v)
+            return cls(ss)  # type: ignore
 
-    elif issubclass(cls, StrToKeywordsKeyword):
-        return cls({k: parse_keywords(mv) for k, mv in v.items()})  # type: ignore
+        elif issubclass(cls, KeywordsKeyword):
+            return cls(parse_keywords(v))  # type: ignore
+
+        elif issubclass(cls, StrToKeywordsKeyword):
+            return cls({k: parse_keywords(mv) for k, mv in v.items()})  # type: ignore
+
+        else:
+            raise TypeError(cls)
+
+    def parse_keywords(self, dct: ta.Mapping[str, ta.Any]) -> Keywords:
+        lst: list[Keyword] = []
+        for k, v in dct.items():
+            cls = self._keyword_types_by_tag[k]
+            lst.append(self.parse_keyword(cls, v))
+        return Keywords(lst)
+
+
+##
 
-    else:
-        raise TypeError(cls)
 
+DEFAULT_PARSER = Parser()
 
-def parse_keywords(dct: ta.Mapping[str, ta.Any]) -> Keywords:
-    lst: list[Keyword] = []
-    for k, v in dct.items():
-        cls = KEYWORD_TYPES_BY_TAG[k]
-        lst.append(parse_keyword(cls, v))
-    return Keywords(lst)
+parse_keyword = DEFAULT_PARSER.parse_keyword
+parse_keywords = DEFAULT_PARSER.parse_keywords
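
Note: the tag-to-type registry is no longer built eagerly from every Keyword subclass at import time; each Parser instance derives its own mapping, and the old module-level parse_keyword/parse_keywords survive as bound methods of DEFAULT_PARSER. A sketch using only names introduced in this diff (assumed to be importable together from the parse module):

    # Equivalent to the default behavior:
    p = Parser(DEFAULT_KEYWORD_TYPES)
    assert isinstance(p.parse_keywords({}), Keywords)

    # Narrower parsers are now possible, e.g. validation keywords only:
    validation_only = Parser(lang.deep_subclasses(ValidationKeyword, concrete_only=True))
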
@@ -10,7 +10,7 @@ from .base import StrToKeywordsKeyword
 ##
 
 
-class ValidationKeyword(Keyword, lang.Abstract):
+class ValidationKeyword(Keyword, lang.Abstract, lang.Sealed):
     pass
 
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: omlish
-Version: 0.0.0.dev47
+Version: 0.0.0.dev49
 Summary: omlish
 Author: wrmsr
 License: BSD-3-Clause