omlish 0.0.0.dev57__py3-none-any.whl → 0.0.0.dev59__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. omlish/__about__.py +2 -2
  2. omlish/antlr/__init__.py +0 -0
  3. omlish/antlr/_runtime/BufferedTokenStream.py +305 -0
  4. omlish/antlr/_runtime/CommonTokenFactory.py +64 -0
  5. omlish/antlr/_runtime/CommonTokenStream.py +90 -0
  6. omlish/antlr/_runtime/FileStream.py +30 -0
  7. omlish/antlr/_runtime/InputStream.py +90 -0
  8. omlish/antlr/_runtime/IntervalSet.py +183 -0
  9. omlish/antlr/_runtime/LL1Analyzer.py +176 -0
  10. omlish/antlr/_runtime/Lexer.py +332 -0
  11. omlish/antlr/_runtime/ListTokenSource.py +147 -0
  12. omlish/antlr/_runtime/Parser.py +583 -0
  13. omlish/antlr/_runtime/ParserInterpreter.py +173 -0
  14. omlish/antlr/_runtime/ParserRuleContext.py +189 -0
  15. omlish/antlr/_runtime/PredictionContext.py +632 -0
  16. omlish/antlr/_runtime/Recognizer.py +150 -0
  17. omlish/antlr/_runtime/RuleContext.py +230 -0
  18. omlish/antlr/_runtime/StdinStream.py +14 -0
  19. omlish/antlr/_runtime/Token.py +158 -0
  20. omlish/antlr/_runtime/TokenStreamRewriter.py +258 -0
  21. omlish/antlr/_runtime/Utils.py +36 -0
  22. omlish/antlr/_runtime/__init__.py +24 -0
  23. omlish/antlr/_runtime/_pygrun.py +174 -0
  24. omlish/antlr/_runtime/atn/ATN.py +135 -0
  25. omlish/antlr/_runtime/atn/ATNConfig.py +162 -0
  26. omlish/antlr/_runtime/atn/ATNConfigSet.py +215 -0
  27. omlish/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
  28. omlish/antlr/_runtime/atn/ATNDeserializer.py +449 -0
  29. omlish/antlr/_runtime/atn/ATNSimulator.py +50 -0
  30. omlish/antlr/_runtime/atn/ATNState.py +267 -0
  31. omlish/antlr/_runtime/atn/ATNType.py +20 -0
  32. omlish/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
  33. omlish/antlr/_runtime/atn/LexerAction.py +301 -0
  34. omlish/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
  35. omlish/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
  36. omlish/antlr/_runtime/atn/PredictionMode.py +502 -0
  37. omlish/antlr/_runtime/atn/SemanticContext.py +333 -0
  38. omlish/antlr/_runtime/atn/Transition.py +271 -0
  39. omlish/antlr/_runtime/atn/__init__.py +4 -0
  40. omlish/antlr/_runtime/dfa/DFA.py +136 -0
  41. omlish/antlr/_runtime/dfa/DFASerializer.py +76 -0
  42. omlish/antlr/_runtime/dfa/DFAState.py +129 -0
  43. omlish/antlr/_runtime/dfa/__init__.py +4 -0
  44. omlish/antlr/_runtime/error/DiagnosticErrorListener.py +110 -0
  45. omlish/antlr/_runtime/error/ErrorListener.py +75 -0
  46. omlish/antlr/_runtime/error/ErrorStrategy.py +712 -0
  47. omlish/antlr/_runtime/error/Errors.py +176 -0
  48. omlish/antlr/_runtime/error/__init__.py +4 -0
  49. omlish/antlr/_runtime/tree/Chunk.py +33 -0
  50. omlish/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
  51. omlish/antlr/_runtime/tree/ParseTreePattern.py +75 -0
  52. omlish/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
  53. omlish/antlr/_runtime/tree/RuleTagToken.py +53 -0
  54. omlish/antlr/_runtime/tree/TokenTagToken.py +50 -0
  55. omlish/antlr/_runtime/tree/Tree.py +194 -0
  56. omlish/antlr/_runtime/tree/Trees.py +114 -0
  57. omlish/antlr/_runtime/tree/__init__.py +2 -0
  58. omlish/antlr/_runtime/xpath/XPath.py +272 -0
  59. omlish/antlr/_runtime/xpath/XPathLexer.py +98 -0
  60. omlish/antlr/_runtime/xpath/__init__.py +4 -0
  61. omlish/marshal/__init__.py +10 -5
  62. omlish/marshal/nop.py +18 -0
  63. omlish/marshal/primitives.py +16 -6
  64. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/METADATA +1 -1
  65. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/RECORD +69 -9
  66. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/LICENSE +0 -0
  67. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/WHEEL +0 -0
  68. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/entry_points.txt +0 -0
  69. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,114 @@
1
+ # type: ignore
2
+ # ruff: noqa
3
+ # flake8: noqa
4
+ #
5
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
6
+ # Use of this file is governed by the BSD 3-clause license that
7
+ # can be found in the LICENSE.txt file in the project root.
8
+ #
9
+
10
+
11
+ # A set of utility routines useful for all kinds of ANTLR trees.#
12
+ from io import StringIO
13
+ from ..Token import Token
14
+ from ..Utils import escapeWhitespace
15
+ from .Tree import RuleNode, ErrorNode, TerminalNode, Tree, ParseTree
16
+
17
+ # need forward declaration
18
+ Parser = None
19
+
20
class Trees(object):
    """A set of utility routines useful for all kinds of ANTLR trees."""

    # Print out a whole tree in LISP form. {@link #getNodeText} is used on the
    # node payloads to get the text for the nodes. Detect
    # parse trees and extract data appropriately.
    @classmethod
    def toStringTree(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
        # A supplied recognizer's rule names take precedence over ruleNames.
        if recog is not None:
            ruleNames = recog.ruleNames
        s = escapeWhitespace(cls.getNodeText(t, ruleNames), False)
        if t.getChildCount()==0:
            return s
        # Recursively render "(node child1 child2 ...)".
        with StringIO() as buf:
            buf.write("(")
            buf.write(s)
            buf.write(' ')
            for i in range(0, t.getChildCount()):
                if i > 0:
                    buf.write(' ')
                buf.write(cls.toStringTree(t.getChild(i), ruleNames))
            buf.write(")")
            return buf.getvalue()

    @classmethod
    def getNodeText(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
        """Return display text for a node: the rule name (suffixed with the
        alt number when not the default), the error node's string form, or
        the token text; falls back to the payload's string form."""
        if recog is not None:
            ruleNames = recog.ruleNames
        if ruleNames is not None:
            if isinstance(t, RuleNode):
                if t.getAltNumber()!=0: # should use ATN.INVALID_ALT_NUMBER but won't compile
                    return ruleNames[t.getRuleIndex()]+":"+str(t.getAltNumber())
                return ruleNames[t.getRuleIndex()]
            elif isinstance( t, ErrorNode):
                return str(t)
            elif isinstance(t, TerminalNode):
                if t.symbol is not None:
                    return t.symbol.text
        # no recog for rule names
        payload = t.getPayload()
        if isinstance(payload, Token ):
            return payload.text
        return str(t.getPayload())

    # Return ordered list of all children of this node
    @classmethod
    def getChildren(cls, t:Tree):
        return [ t.getChild(i) for i in range(0, t.getChildCount()) ]

    # Return a list of all ancestors of this node. The first node of
    # list is the root and the last is the parent of this node.
    #
    @classmethod
    def getAncestors(cls, t:Tree):
        ancestors = []
        t = t.getParent()
        while t is not None:
            ancestors.insert(0, t) # insert at start
            t = t.getParent()
        return ancestors

    @classmethod
    def findAllTokenNodes(cls, t:ParseTree, ttype:int):
        # Convenience wrapper: collect terminal nodes whose token type matches.
        return cls.findAllNodes(t, ttype, True)

    @classmethod
    def findAllRuleNodes(cls, t:ParseTree, ruleIndex:int):
        # Convenience wrapper: collect rule contexts whose rule index matches.
        return cls.findAllNodes(t, ruleIndex, False)

    @classmethod
    def findAllNodes(cls, t:ParseTree, index:int, findTokens:bool):
        """Collect all nodes under t (t included) matching index; findTokens
        selects between token-type and rule-index matching."""
        nodes = []
        cls._findAllNodes(t, index, findTokens, nodes)
        return nodes

    @classmethod
    def _findAllNodes(cls, t:ParseTree, index:int, findTokens:bool, nodes:list):
        # Imported here rather than at module level to avoid a circular import.
        from ..ParserRuleContext import ParserRuleContext
        # check this node (the root) first
        if findTokens and isinstance(t, TerminalNode):
            if t.symbol.type==index:
                nodes.append(t)
        elif not findTokens and isinstance(t, ParserRuleContext):
            if t.ruleIndex == index:
                nodes.append(t)
        # check children
        for i in range(0, t.getChildCount()):
            cls._findAllNodes(t.getChild(i), index, findTokens, nodes)

    @classmethod
    def descendants(cls, t:ParseTree):
        # Pre-order list of t and every node below it (t itself is included).
        nodes = [t]
        for i in range(0, t.getChildCount()):
            nodes.extend(cls.descendants(t.getChild(i)))
        return nodes
@@ -0,0 +1,2 @@
1
+ # ruff: noqa
2
+ # flake8: noqa
@@ -0,0 +1,272 @@
1
+ # type: ignore
2
+ # ruff: noqa
3
+ # flake8: noqa
4
+ #
5
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
6
+ # Use of this file is governed by the BSD 3-clause license that
7
+ # can be found in the LICENSE.txt file in the project root.
8
+ #
9
+
10
+ #
11
+ # Represent a subset of XPath XML path syntax for use in identifying nodes in
12
+ # parse trees.
13
+ #
14
+ # <p>
15
+ # Split path into words and separators {@code /} and {@code //} via ANTLR
16
+ # itself then walk path elements from left to right. At each separator-word
17
+ # pair, find set of nodes. Next stage uses those as work list.</p>
18
+ #
19
+ # <p>
20
+ # The basic interface is
21
+ # {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}.
22
+ # But that is just shorthand for:</p>
23
+ #
24
+ # <pre>
25
+ # {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
26
+ # return p.{@link #evaluate evaluate}(tree);
27
+ # </pre>
28
+ #
29
+ # <p>
30
+ # See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
31
+ # allows operators:</p>
32
+ #
33
+ # <dl>
34
+ # <dt>/</dt> <dd>root</dd>
35
+ # <dt>//</dt> <dd>anywhere</dd>
36
+ # <dt>!</dt> <dd>invert; this must appear directly after root or anywhere
37
+ # operator</dd>
38
+ # </dl>
39
+ #
40
+ # <p>
41
+ # and path elements:</p>
42
+ #
43
+ # <dl>
44
+ # <dt>ID</dt> <dd>token name</dd>
45
+ # <dt>'string'</dt> <dd>any string literal token from the grammar</dd>
46
+ # <dt>expr</dt> <dd>rule name</dd>
47
+ # <dt>*</dt> <dd>wildcard matching any node</dd>
48
+ # </dl>
49
+ #
50
+ # <p>
51
+ # Whitespace is not allowed.</p>
52
+ #
53
+ from .. import CommonTokenStream, DFA, PredictionContextCache, Lexer, LexerATNSimulator, ParserRuleContext, TerminalNode
54
+ from ..InputStream import InputStream
55
+ from ..Parser import Parser
56
+ from ..RuleContext import RuleContext
57
+ from ..Token import Token
58
+ from ..atn.ATNDeserializer import ATNDeserializer
59
+ from ..error.ErrorListener import ErrorListener
60
+ from ..error.Errors import LexerNoViableAltException
61
+ from ..tree.Tree import ParseTree
62
+ from ..tree.Trees import Trees
63
+ from io import StringIO
64
+ from .XPathLexer import XPathLexer
65
+
66
+
67
class XPath(object):
    """Compiles an XPath-like path string against a parser's grammar and
    evaluates it over parse trees (see the module header for the syntax)."""

    WILDCARD = "*" # word not operator/separator
    NOT = "!" # word for invert operator

    def __init__(self, parser:Parser, path:str):
        self.parser = parser
        self.path = path
        self.elements = self.split(path)

    def split(self, path:str):
        """Tokenize *path* with XPathLexer and convert the token stream into
        a list of XPathElement objects; raises on malformed paths."""
        input = InputStream(path)
        lexer = XPathLexer(input)
        def recover(self, e):
            raise e
        # NOTE(review): assigned as a plain instance attribute, so this
        # function is NOT bound to the lexer — verify the runtime invokes it
        # with the arity this signature expects.
        lexer.recover = recover
        lexer.removeErrorListeners()
        lexer.addErrorListener(ErrorListener()) # XPathErrorListener does no more
        tokenStream = CommonTokenStream(lexer)
        try:
            tokenStream.fill()
        except LexerNoViableAltException as e:
            pos = lexer.column
            msg = "Invalid tokens or characters at index %d in path '%s'" % (pos, path)
            raise Exception(msg, e)

        tokens = iter(tokenStream.tokens)
        elements = list()
        for el in tokens:
            invert = False
            anywhere = False
            # Check for path separators, if none assume root
            if el.type in [XPathLexer.ROOT, XPathLexer.ANYWHERE]:
                anywhere = el.type == XPathLexer.ANYWHERE
                next_el = next(tokens, None)
                if not next_el:
                    raise Exception('Missing element after %s' % el.getText())
                else:
                    el = next_el
            # Check for bangs
            if el.type == XPathLexer.BANG:
                invert = True
                next_el = next(tokens, None)
                if not next_el:
                    raise Exception('Missing element after %s' % el.getText())
                else:
                    el = next_el
            # Add searched element
            if el.type in [XPathLexer.TOKEN_REF, XPathLexer.RULE_REF, XPathLexer.WILDCARD, XPathLexer.STRING]:
                element = self.getXPathElement(el, anywhere)
                element.invert = invert
                elements.append(element)
            elif el.type==Token.EOF:
                break
            else:
                raise Exception("Unknown path element %s" % lexer.symbolicNames[el.type])
        return elements

    #
    # Convert word like {@code#} or {@code ID} or {@code expr} to a path
    # element. {@code anywhere} is {@code true} if {@code //} precedes the
    # word.
    #
    def getXPathElement(self, wordToken:Token, anywhere:bool):
        if wordToken.type==Token.EOF:
            raise Exception("Missing path element at end of path")

        word = wordToken.text
        if wordToken.type==XPathLexer.WILDCARD :
            return XPathWildcardAnywhereElement() if anywhere else XPathWildcardElement()

        elif wordToken.type in [XPathLexer.TOKEN_REF, XPathLexer.STRING]:
            tsource = self.parser.getTokenStream().tokenSource

            ttype = Token.INVALID_TYPE
            if wordToken.type == XPathLexer.TOKEN_REF:
                # Token names resolve via the token source's rule names;
                # token types are 1-based, hence the +1.
                if word in tsource.ruleNames:
                    ttype = tsource.ruleNames.index(word) + 1
            else:
                # String literals resolve via the literal-names table.
                if word in tsource.literalNames:
                    ttype = tsource.literalNames.index(word)

            if ttype == Token.INVALID_TYPE:
                raise Exception("%s at index %d isn't a valid token name" % (word, wordToken.tokenIndex))
            return XPathTokenAnywhereElement(word, ttype) if anywhere else XPathTokenElement(word, ttype)

        else:
            ruleIndex = self.parser.ruleNames.index(word) if word in self.parser.ruleNames else -1

            if ruleIndex == -1:
                raise Exception("%s at index %d isn't a valid rule name" % (word, wordToken.tokenIndex))
            return XPathRuleAnywhereElement(word, ruleIndex) if anywhere else XPathRuleElement(word, ruleIndex)


    @staticmethod
    def findAll(tree:ParseTree, xpath:str, parser:Parser):
        """One-shot helper: compile *xpath* and evaluate it against *tree*."""
        p = XPath(parser, xpath)
        return p.evaluate(tree)

    #
    # Return a list of all nodes starting at {@code t} as root that satisfy the
    # path. The root {@code /} is relative to the node passed to
    # {@link #evaluate}.
    #
    def evaluate(self, t:ParseTree):
        # Wrap t in a dummy root so the leading "/" separator has a parent
        # to evaluate against.
        dummyRoot = ParserRuleContext()
        dummyRoot.children = [t] # don't set t's parent.

        work = [dummyRoot]
        for element in self.elements:
            work_next = list()
            for node in work:
                if not isinstance(node, TerminalNode) and node.children:
                    # only try to match next element if it has children
                    # e.g., //func/*/stat might have a token node for which
                    # we can't go looking for stat nodes.
                    matching = element.evaluate(node)

                    # See issue antlr#370 - Prevents XPath from returning the
                    # same node multiple times
                    matching = filter(lambda m: m not in work_next, matching)

                    work_next.extend(matching)
            work = work_next

        return work
193
+
194
+
195
class XPathElement(object):
    """Base class for compiled path elements; subclasses implement
    evaluate(). The `invert` flag marks a `!`-prefixed element."""

    def __init__(self, nodeName:str):
        self.nodeName = nodeName
        self.invert = False

    def __str__(self):
        # e.g. "XPathRuleElement[expr]" or "XPathRuleElement[!expr]".
        bang = "!" if self.invert else ""
        return "{}[{}{}]".format(type(self).__name__, bang, self.nodeName)
203
+
204
+
205
+
206
+ #
207
+ # Either {@code ID} at start of path or {@code ...//ID} in middle of path.
208
+ #
209
class XPathRuleAnywhereElement(XPathElement):
    """Either {@code ID} at start of path or {@code ...//ID} in middle of
    path: matches a rule at any depth below the node."""

    def __init__(self, ruleName:str, ruleIndex:int):
        super().__init__(ruleName)
        self.ruleIndex = ruleIndex

    def evaluate(self, t:ParseTree):
        # All ParserRuleContext descendants of t whose rule index matches
        # (or does not match, when inverted).
        def accept(node):
            return isinstance(node, ParserRuleContext) and (self.invert ^ (node.getRuleIndex() == self.ruleIndex))
        return filter(accept, Trees.descendants(t))
218
+
219
class XPathRuleElement(XPathElement):
    """Matches a rule among the immediate children of the node."""

    def __init__(self, ruleName:str, ruleIndex:int):
        super().__init__(ruleName)
        self.ruleIndex = ruleIndex

    def evaluate(self, t:ParseTree):
        # All ParserRuleContext children of t whose rule index matches
        # (or does not match, when inverted).
        def accept(node):
            return isinstance(node, ParserRuleContext) and (self.invert ^ (node.getRuleIndex() == self.ruleIndex))
        return filter(accept, Trees.getChildren(t))
228
+
229
class XPathTokenAnywhereElement(XPathElement):
    """Matches a token at any depth below the node."""

    def __init__(self, ruleName:str, tokenType:int):
        super().__init__(ruleName)
        self.tokenType = tokenType

    def evaluate(self, t:ParseTree):
        # All TerminalNode descendants of t whose token type matches
        # (or does not match, when inverted).
        def accept(node):
            return isinstance(node, TerminalNode) and (self.invert ^ (node.symbol.type == self.tokenType))
        return filter(accept, Trees.descendants(t))
238
+
239
class XPathTokenElement(XPathElement):
    """Matches a token among the immediate children of the node."""

    def __init__(self, ruleName:str, tokenType:int):
        super().__init__(ruleName)
        self.tokenType = tokenType

    def evaluate(self, t:ParseTree):
        # All TerminalNode children of t whose token type matches
        # (or does not match, when inverted).
        def accept(node):
            return isinstance(node, TerminalNode) and (self.invert ^ (node.symbol.type == self.tokenType))
        return filter(accept, Trees.getChildren(t))
248
+
249
+
250
class XPathWildcardAnywhereElement(XPathElement):
    """`//*`: matches every descendant of the node."""

    def __init__(self):
        super().__init__(XPath.WILDCARD)

    def evaluate(self, t:ParseTree):
        # "!*" is weird but valid: it matches nothing.
        return list() if self.invert else Trees.descendants(t)
260
+
261
+
262
class XPathWildcardElement(XPathElement):
    """`/*`: matches every immediate child of the node."""

    def __init__(self):
        super().__init__(XPath.WILDCARD)

    def evaluate(self, t:ParseTree):
        # "!*" is weird but valid: it matches nothing.
        return list() if self.invert else Trees.getChildren(t)
@@ -0,0 +1,98 @@
1
+ # type: ignore
2
+ # ruff: noqa
3
+ # flake8: noqa
4
+ # Generated from XPathLexer.g4 by ANTLR 4.11.2-SNAPSHOT
5
+ from .. import *
6
+ from io import StringIO
7
+ import sys
8
+ if sys.version_info[1] > 5:
9
+ from typing import TextIO
10
+ else:
11
+ from typing.io import TextIO
12
+
13
+
14
def serializedATN():
    # Serialized ATN for XPathLexer, emitted by the ANTLR 4.11.2-SNAPSHOT
    # code generator from XPathLexer.g4. Generated data — do not edit.
    return [
        4,0,8,50,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,
        6,7,6,2,7,7,7,1,0,1,0,1,0,1,1,1,1,1,2,1,2,1,3,1,3,1,4,1,4,5,4,29,
        8,4,10,4,12,4,32,9,4,1,4,1,4,1,5,1,5,3,5,38,8,5,1,6,1,6,1,7,1,7,
        5,7,44,8,7,10,7,12,7,47,9,7,1,7,1,7,1,45,0,8,1,3,3,4,5,5,7,6,9,7,
        11,0,13,0,15,8,1,0,2,5,0,48,57,95,95,183,183,768,879,8255,8256,13,
        0,65,90,97,122,192,214,216,246,248,767,880,893,895,8191,8204,8205,
        8304,8591,11264,12271,12289,55295,63744,64975,65008,65533,50,0,1,
        1,0,0,0,0,3,1,0,0,0,0,5,1,0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,15,1,0,
        0,0,1,17,1,0,0,0,3,20,1,0,0,0,5,22,1,0,0,0,7,24,1,0,0,0,9,26,1,0,
        0,0,11,37,1,0,0,0,13,39,1,0,0,0,15,41,1,0,0,0,17,18,5,47,0,0,18,
        19,5,47,0,0,19,2,1,0,0,0,20,21,5,47,0,0,21,4,1,0,0,0,22,23,5,42,
        0,0,23,6,1,0,0,0,24,25,5,33,0,0,25,8,1,0,0,0,26,30,3,13,6,0,27,29,
        3,11,5,0,28,27,1,0,0,0,29,32,1,0,0,0,30,28,1,0,0,0,30,31,1,0,0,0,
        31,33,1,0,0,0,32,30,1,0,0,0,33,34,6,4,0,0,34,10,1,0,0,0,35,38,3,
        13,6,0,36,38,7,0,0,0,37,35,1,0,0,0,37,36,1,0,0,0,38,12,1,0,0,0,39,
        40,7,1,0,0,40,14,1,0,0,0,41,45,5,39,0,0,42,44,9,0,0,0,43,42,1,0,
        0,0,44,47,1,0,0,0,45,46,1,0,0,0,45,43,1,0,0,0,46,48,1,0,0,0,47,45,
        1,0,0,0,48,49,5,39,0,0,49,16,1,0,0,0,4,0,30,37,45,1,1,4,0
    ]
35
+
36
class XPathLexer(Lexer):
    """Generated lexer for the XPath mini-language (from XPathLexer.g4).
    Generated by ANTLR 4.11.2-SNAPSHOT — do not edit by hand."""

    # Deserialized ATN shared by all instances.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants (match symbolicNames below).
    TOKEN_REF = 1
    RULE_REF = 2
    ANYWHERE = 3
    ROOT = 4
    WILDCARD = 5
    BANG = 6
    ID = 7
    STRING = 8

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    literalNames = [ "<INVALID>",
            "'//'", "'/'", "'*'", "'!'" ]

    symbolicNames = [ "<INVALID>",
            "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG",
            "ID", "STRING" ]

    ruleNames = [ "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar",
                  "NameStartChar", "STRING" ]

    grammarFileName = "XPathLexer.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.11.2-SNAPSHOT")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        # Lazily-built dispatch tables for embedded actions/predicates.
        self._actions = None
        self._predicates = None


    def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
        # Dispatch an embedded grammar action; only rule 4 (ID) has one.
        if self._actions is None:
            actions = dict()
            actions[4] = self.ID_action
            self._actions = actions
        action = self._actions.get(ruleIndex, None)
        if action is not None:
            action(localctx, actionIndex)
        else:
            raise Exception("No registered action for:" + str(ruleIndex))


    def ID_action(self, localctx:RuleContext , actionIndex:int):
        # Reclassify an ID token: leading uppercase means a token reference,
        # otherwise a rule reference.
        if actionIndex == 0:

            char = self.text[0]
            if char.isupper():
                self.type = XPathLexer.TOKEN_REF
            else:
                self.type = XPathLexer.RULE_REF
96
+
97
+
98
+
@@ -0,0 +1,4 @@
1
+ # type: ignore
2
+ # ruff: noqa
3
+ # flake8: noqa
4
+ __author__ = 'ericvergnaud'
@@ -39,11 +39,6 @@ from .forbidden import ( # noqa
39
39
  ForbiddenTypeUnmarshalerFactory,
40
40
  )
41
41
 
42
- from .naming import ( # noqa
43
- Naming,
44
- translate_name,
45
- )
46
-
47
42
  from .global_ import ( # noqa
48
43
  GLOBAL_REGISTRY,
49
44
 
@@ -57,6 +52,16 @@ from .helpers import ( # noqa
57
52
  update_object_metadata,
58
53
  )
59
54
 
55
+ from .naming import ( # noqa
56
+ Naming,
57
+ translate_name,
58
+ )
59
+
60
+ from .nop import ( # noqa
61
+ NOP_MARSHALER_UNMARSHALER,
62
+ NopMarshalerUnmarshaler,
63
+ )
64
+
60
65
  from .objects import ( # noqa
61
66
  FieldInfo,
62
67
  FieldInfos,
omlish/marshal/nop.py ADDED
@@ -0,0 +1,18 @@
1
+ import typing as ta
2
+
3
+ from .base import MarshalContext
4
+ from .base import Marshaler
5
+ from .base import UnmarshalContext
6
+ from .base import Unmarshaler
7
+ from .values import Value
8
+
9
+
10
class NopMarshalerUnmarshaler(Marshaler, Unmarshaler):
    """Pass-through marshaler/unmarshaler: returns values unchanged in both
    directions."""

    def marshal(self, ctx: MarshalContext, o: ta.Any) -> Value:
        return o  # noqa

    def unmarshal(self, ctx: UnmarshalContext, v: Value) -> ta.Any:
        return v


# Shared singleton instance; the class is stateless so one suffices.
NOP_MARSHALER_UNMARSHALER = NopMarshalerUnmarshaler()
@@ -1,5 +1,10 @@
1
+ """
2
+ TODO:
3
+ - field-configurable coercion
4
+ """
1
5
  import typing as ta
2
6
 
7
+ from .. import dataclasses as dc
3
8
  from .base import MarshalContext
4
9
  from .base import Marshaler
5
10
  from .base import TypeMapMarshalerFactory
@@ -25,24 +30,29 @@ PRIMITIVE_TYPES: tuple[type, ...] = (
25
30
  ##
26
31
 
27
32
 
33
@dc.dataclass(frozen=True)
class PrimitiveMarshalerUnmarshaler(Marshaler, Unmarshaler):
    """Marshals/unmarshals a single primitive type `ty`, coercing other
    primitive inputs via the `ty(...)` constructor."""

    # The target primitive type this instance handles.
    ty: type

    def marshal(self, ctx: MarshalContext, o: ta.Any) -> Value:
        if isinstance(o, self.ty):
            return o  # type: ignore
        # Coerce across primitives (e.g. int -> float); anything else is an error.
        if isinstance(o, PRIMITIVE_TYPES):
            return self.ty(o)
        raise TypeError(o)

    def unmarshal(self, ctx: UnmarshalContext, v: Value) -> ta.Any:
        if isinstance(v, self.ty):
            return v
        # Same coercion rule on the way back in.
        if isinstance(v, PRIMITIVE_TYPES):
            return self.ty(v)
        raise TypeError(v)
38
50
 
39
51
 
40
- PRIMITIVE_MARSHALER_UNMARSHALER = PrimitiveMarshalerUnmarshaler()
41
-
42
52
  PRIMITIVE_MARSHALER_FACTORY = TypeMapMarshalerFactory({ # noqa
43
- t: PRIMITIVE_MARSHALER_UNMARSHALER for t in PRIMITIVE_TYPES
53
+ t: PrimitiveMarshalerUnmarshaler(t) for t in PRIMITIVE_TYPES
44
54
  })
45
55
 
46
56
  PRIMITIVE_UNMARSHALER_FACTORY = TypeMapUnmarshalerFactory({ # noqa
47
- t: PRIMITIVE_MARSHALER_UNMARSHALER for t in PRIMITIVE_TYPES
57
+ t: PrimitiveMarshalerUnmarshaler(t) for t in PRIMITIVE_TYPES
48
58
  })
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: omlish
3
- Version: 0.0.0.dev57
3
+ Version: 0.0.0.dev59
4
4
  Summary: omlish
5
5
  Author: wrmsr
6
6
  License: BSD-3-Clause