omextra 0.0.0.dev437__py3-none-any.whl → 0.0.0.dev439__py3-none-any.whl

This diff shows the contents of publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (99)
  1. omextra/formats/json/_antlr/JsonLexer.py +1 -1
  2. omextra/formats/json/_antlr/JsonListener.py +1 -1
  3. omextra/formats/json/_antlr/JsonParser.py +1 -1
  4. omextra/formats/json/_antlr/JsonVisitor.py +1 -1
  5. omextra/formats/json5/Json5.g4 +168 -0
  6. omextra/formats/json5/__init__.py +0 -0
  7. omextra/formats/json5/_antlr/Json5Lexer.py +354 -0
  8. omextra/formats/json5/_antlr/Json5Listener.py +79 -0
  9. omextra/formats/json5/_antlr/Json5Parser.py +617 -0
  10. omextra/formats/json5/_antlr/Json5Visitor.py +52 -0
  11. omextra/formats/json5/_antlr/__init__.py +0 -0
  12. omextra/formats/json5/parsing.py +101 -0
  13. omextra/specs/proto/_antlr/Protobuf3Lexer.py +1 -1
  14. omextra/specs/proto/_antlr/Protobuf3Listener.py +1 -1
  15. omextra/specs/proto/_antlr/Protobuf3Parser.py +1 -1
  16. omextra/specs/proto/_antlr/Protobuf3Visitor.py +1 -1
  17. omextra/specs/proto/parsing.py +2 -2
  18. omextra/sql/parsing/_antlr/MinisqlLexer.py +1 -1
  19. omextra/sql/parsing/_antlr/MinisqlListener.py +1 -1
  20. omextra/sql/parsing/_antlr/MinisqlParser.py +1 -1
  21. omextra/sql/parsing/_antlr/MinisqlVisitor.py +1 -1
  22. omextra/sql/parsing/parsing.py +3 -3
  23. omextra/text/antlr/__init__.py +3 -0
  24. omextra/text/antlr/_runtime/BufferedTokenStream.py +305 -0
  25. omextra/text/antlr/_runtime/CommonTokenFactory.py +64 -0
  26. omextra/text/antlr/_runtime/CommonTokenStream.py +90 -0
  27. omextra/text/antlr/_runtime/FileStream.py +30 -0
  28. omextra/text/antlr/_runtime/InputStream.py +90 -0
  29. omextra/text/antlr/_runtime/IntervalSet.py +183 -0
  30. omextra/text/antlr/_runtime/LICENSE.txt +28 -0
  31. omextra/text/antlr/_runtime/LL1Analyzer.py +176 -0
  32. omextra/text/antlr/_runtime/Lexer.py +332 -0
  33. omextra/text/antlr/_runtime/ListTokenSource.py +147 -0
  34. omextra/text/antlr/_runtime/Parser.py +583 -0
  35. omextra/text/antlr/_runtime/ParserInterpreter.py +173 -0
  36. omextra/text/antlr/_runtime/ParserRuleContext.py +189 -0
  37. omextra/text/antlr/_runtime/PredictionContext.py +632 -0
  38. omextra/text/antlr/_runtime/Recognizer.py +150 -0
  39. omextra/text/antlr/_runtime/RuleContext.py +230 -0
  40. omextra/text/antlr/_runtime/StdinStream.py +14 -0
  41. omextra/text/antlr/_runtime/Token.py +158 -0
  42. omextra/text/antlr/_runtime/TokenStreamRewriter.py +258 -0
  43. omextra/text/antlr/_runtime/Utils.py +36 -0
  44. omextra/text/antlr/_runtime/__init__.py +2 -0
  45. omextra/text/antlr/_runtime/_all.py +24 -0
  46. omextra/text/antlr/_runtime/_pygrun.py +174 -0
  47. omextra/text/antlr/_runtime/atn/ATN.py +135 -0
  48. omextra/text/antlr/_runtime/atn/ATNConfig.py +162 -0
  49. omextra/text/antlr/_runtime/atn/ATNConfigSet.py +215 -0
  50. omextra/text/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
  51. omextra/text/antlr/_runtime/atn/ATNDeserializer.py +449 -0
  52. omextra/text/antlr/_runtime/atn/ATNSimulator.py +50 -0
  53. omextra/text/antlr/_runtime/atn/ATNState.py +267 -0
  54. omextra/text/antlr/_runtime/atn/ATNType.py +20 -0
  55. omextra/text/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
  56. omextra/text/antlr/_runtime/atn/LexerAction.py +301 -0
  57. omextra/text/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
  58. omextra/text/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
  59. omextra/text/antlr/_runtime/atn/PredictionMode.py +502 -0
  60. omextra/text/antlr/_runtime/atn/SemanticContext.py +333 -0
  61. omextra/text/antlr/_runtime/atn/Transition.py +271 -0
  62. omextra/text/antlr/_runtime/atn/__init__.py +4 -0
  63. omextra/text/antlr/_runtime/dfa/DFA.py +136 -0
  64. omextra/text/antlr/_runtime/dfa/DFASerializer.py +76 -0
  65. omextra/text/antlr/_runtime/dfa/DFAState.py +129 -0
  66. omextra/text/antlr/_runtime/dfa/__init__.py +4 -0
  67. omextra/text/antlr/_runtime/error/DiagnosticErrorListener.py +111 -0
  68. omextra/text/antlr/_runtime/error/ErrorListener.py +75 -0
  69. omextra/text/antlr/_runtime/error/ErrorStrategy.py +712 -0
  70. omextra/text/antlr/_runtime/error/Errors.py +176 -0
  71. omextra/text/antlr/_runtime/error/__init__.py +4 -0
  72. omextra/text/antlr/_runtime/tree/Chunk.py +33 -0
  73. omextra/text/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
  74. omextra/text/antlr/_runtime/tree/ParseTreePattern.py +75 -0
  75. omextra/text/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
  76. omextra/text/antlr/_runtime/tree/RuleTagToken.py +53 -0
  77. omextra/text/antlr/_runtime/tree/TokenTagToken.py +50 -0
  78. omextra/text/antlr/_runtime/tree/Tree.py +194 -0
  79. omextra/text/antlr/_runtime/tree/Trees.py +114 -0
  80. omextra/text/antlr/_runtime/tree/__init__.py +2 -0
  81. omextra/text/antlr/_runtime/xpath/XPath.py +278 -0
  82. omextra/text/antlr/_runtime/xpath/XPathLexer.py +98 -0
  83. omextra/text/antlr/_runtime/xpath/__init__.py +4 -0
  84. omextra/text/antlr/cli/consts.py +1 -1
  85. omextra/text/antlr/delimit.py +110 -0
  86. omextra/text/antlr/dot.py +42 -0
  87. omextra/text/antlr/errors.py +14 -0
  88. omextra/text/antlr/input.py +96 -0
  89. omextra/text/antlr/parsing.py +55 -0
  90. omextra/text/antlr/runtime.py +102 -0
  91. omextra/text/antlr/utils.py +38 -0
  92. omextra-0.0.0.dev439.dist-info/METADATA +28 -0
  93. omextra-0.0.0.dev439.dist-info/RECORD +144 -0
  94. omextra-0.0.0.dev437.dist-info/METADATA +0 -73
  95. omextra-0.0.0.dev437.dist-info/RECORD +0 -69
  96. {omextra-0.0.0.dev437.dist-info → omextra-0.0.0.dev439.dist-info}/WHEEL +0 -0
  97. {omextra-0.0.0.dev437.dist-info → omextra-0.0.0.dev439.dist-info}/entry_points.txt +0 -0
  98. {omextra-0.0.0.dev437.dist-info → omextra-0.0.0.dev439.dist-info}/licenses/LICENSE +0 -0
  99. {omextra-0.0.0.dev437.dist-info → omextra-0.0.0.dev439.dist-info}/top_level.txt +0 -0
omextra/text/antlr/_runtime/tree/Trees.py
@@ -0,0 +1,114 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+
+ # A set of utility routines useful for all kinds of ANTLR trees.#
+ from io import StringIO
+ from ..Token import Token
+ from ..Utils import escapeWhitespace
+ from .Tree import RuleNode, ErrorNode, TerminalNode, Tree, ParseTree
+
+ # need forward declaration
+ Parser = None
+
+ class Trees(object):
+
+     # Print out a whole tree in LISP form. {@link #getNodeText} is used on the
+     # node payloads to get the text for the nodes. Detect
+     # parse trees and extract data appropriately.
+     @classmethod
+     def toStringTree(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
+         if recog is not None:
+             ruleNames = recog.ruleNames
+         s = escapeWhitespace(cls.getNodeText(t, ruleNames), False)
+         if t.getChildCount()==0:
+             return s
+         with StringIO() as buf:
+             buf.write("(")
+             buf.write(s)
+             buf.write(' ')
+             for i in range(0, t.getChildCount()):
+                 if i > 0:
+                     buf.write(' ')
+                 buf.write(cls.toStringTree(t.getChild(i), ruleNames))
+             buf.write(")")
+             return buf.getvalue()
+
+     @classmethod
+     def getNodeText(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
+         if recog is not None:
+             ruleNames = recog.ruleNames
+         if ruleNames is not None:
+             if isinstance(t, RuleNode):
+                 if t.getAltNumber()!=0: # should use ATN.INVALID_ALT_NUMBER but won't compile
+                     return ruleNames[t.getRuleIndex()]+":"+str(t.getAltNumber())
+                 return ruleNames[t.getRuleIndex()]
+             elif isinstance( t, ErrorNode):
+                 return str(t)
+             elif isinstance(t, TerminalNode):
+                 if t.symbol is not None:
+                     return t.symbol.text
+         # no recog for rule names
+         payload = t.getPayload()
+         if isinstance(payload, Token ):
+             return payload.text
+         return str(t.getPayload())
+
+
+     # Return ordered list of all children of this node
+     @classmethod
+     def getChildren(cls, t:Tree):
+         return [ t.getChild(i) for i in range(0, t.getChildCount()) ]
+
+     # Return a list of all ancestors of this node. The first node of
+     # list is the root and the last is the parent of this node.
+     #
+     @classmethod
+     def getAncestors(cls, t:Tree):
+         ancestors = []
+         t = t.getParent()
+         while t is not None:
+             ancestors.insert(0, t) # insert at start
+             t = t.getParent()
+         return ancestors
+
+     @classmethod
+     def findAllTokenNodes(cls, t:ParseTree, ttype:int):
+         return cls.findAllNodes(t, ttype, True)
+
+     @classmethod
+     def findAllRuleNodes(cls, t:ParseTree, ruleIndex:int):
+         return cls.findAllNodes(t, ruleIndex, False)
+
+     @classmethod
+     def findAllNodes(cls, t:ParseTree, index:int, findTokens:bool):
+         nodes = []
+         cls._findAllNodes(t, index, findTokens, nodes)
+         return nodes
+
+     @classmethod
+     def _findAllNodes(cls, t:ParseTree, index:int, findTokens:bool, nodes:list):
+         from ..ParserRuleContext import ParserRuleContext
+         # check this node (the root) first
+         if findTokens and isinstance(t, TerminalNode):
+             if t.symbol.type==index:
+                 nodes.append(t)
+         elif not findTokens and isinstance(t, ParserRuleContext):
+             if t.ruleIndex == index:
+                 nodes.append(t)
+         # check children
+         for i in range(0, t.getChildCount()):
+             cls._findAllNodes(t.getChild(i), index, findTokens, nodes)
+
+     @classmethod
+     def descendants(cls, t:ParseTree):
+         nodes = [t]
+         for i in range(0, t.getChildCount()):
+             nodes.extend(cls.descendants(t.getChild(i)))
+         return nodes
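
Note: the vendored `Trees` helpers above are the entry point for dumping and searching parse trees. A minimal sketch of how they might be driven, assuming the generated Json5 lexer/parser added in this release (module paths follow the file list above; the `json5` entry-rule name is an assumption, not confirmed by the diff):

    from omextra.formats.json5._antlr.Json5Lexer import Json5Lexer
    from omextra.formats.json5._antlr.Json5Parser import Json5Parser
    from omextra.text.antlr._runtime.CommonTokenStream import CommonTokenStream
    from omextra.text.antlr._runtime.InputStream import InputStream
    from omextra.text.antlr._runtime.tree.Trees import Trees

    lexer = Json5Lexer(InputStream('{a: 1, b: [2, 3]}'))
    parser = Json5Parser(CommonTokenStream(lexer))
    tree = parser.json5()  # assumed entry rule

    print(Trees.toStringTree(tree, recog=parser))  # whole tree in LISP form
    print(len(Trees.descendants(tree)))            # every node, root included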
omextra/text/antlr/_runtime/tree/__init__.py
@@ -0,0 +1,2 @@
+ # ruff: noqa
+ # flake8: noqa
omextra/text/antlr/_runtime/xpath/XPath.py
@@ -0,0 +1,278 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+ #
+ # Represent a subset of XPath XML path syntax for use in identifying nodes in
+ # parse trees.
+ #
+ # <p>
+ # Split path into words and separators {@code /} and {@code //} via ANTLR
+ # itself then walk path elements from left to right. At each separator-word
+ # pair, find set of nodes. Next stage uses those as work list.</p>
+ #
+ # <p>
+ # The basic interface is
+ # {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}.
+ # But that is just shorthand for:</p>
+ #
+ # <pre>
+ # {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
+ # return p.{@link #evaluate evaluate}(tree);
+ # </pre>
+ #
+ # <p>
+ # See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
+ # allows operators:</p>
+ #
+ # <dl>
+ # <dt>/</dt> <dd>root</dd>
+ # <dt>//</dt> <dd>anywhere</dd>
+ # <dt>!</dt> <dd>invert; this must appear directly after root or anywhere
+ # operator</dd>
+ # </dl>
+ #
+ # <p>
+ # and path elements:</p>
+ #
+ # <dl>
+ # <dt>ID</dt> <dd>token name</dd>
+ # <dt>'string'</dt> <dd>any string literal token from the grammar</dd>
+ # <dt>expr</dt> <dd>rule name</dd>
+ # <dt>*</dt> <dd>wildcard matching any node</dd>
+ # </dl>
+ #
+ # <p>
+ # Whitespace is not allowed.</p>
+ #
+ from ..CommonTokenStream import CommonTokenStream
+ from ..dfa.DFA import DFA
+ from ..PredictionContext import PredictionContextCache
+ from ..Lexer import Lexer
+ from ..atn.LexerATNSimulator import LexerATNSimulator
+ from ..ParserRuleContext import ParserRuleContext
+ from ..tree.Tree import TerminalNode
+ from ..InputStream import InputStream
+ from ..Parser import Parser
+ from ..RuleContext import RuleContext
+ from ..Token import Token
+ from ..atn.ATNDeserializer import ATNDeserializer
+ from ..error.ErrorListener import ErrorListener
+ from ..error.Errors import LexerNoViableAltException
+ from ..tree.Tree import ParseTree
+ from ..tree.Trees import Trees
+ from io import StringIO
+ from .XPathLexer import XPathLexer
+
+
+ class XPath(object):
+
+     WILDCARD = "*" # word not operator/separator
+     NOT = "!" # word for invert operator
+
+     def __init__(self, parser:Parser, path:str):
+         self.parser = parser
+         self.path = path
+         self.elements = self.split(path)
+
+     def split(self, path:str):
+         input = InputStream(path)
+         lexer = XPathLexer(input)
+         def recover(self, e):
+             raise e
+         lexer.recover = recover
+         lexer.removeErrorListeners()
+         lexer.addErrorListener(ErrorListener()) # XPathErrorListener does no more
+         tokenStream = CommonTokenStream(lexer)
+         try:
+             tokenStream.fill()
+         except LexerNoViableAltException as e:
+             pos = lexer.column
+             msg = "Invalid tokens or characters at index %d in path '%s'" % (pos, path)
+             raise Exception(msg, e)
+
+         tokens = iter(tokenStream.tokens)
+         elements = list()
+         for el in tokens:
+             invert = False
+             anywhere = False
+             # Check for path separators, if none assume root
+             if el.type in [XPathLexer.ROOT, XPathLexer.ANYWHERE]:
+                 anywhere = el.type == XPathLexer.ANYWHERE
+                 next_el = next(tokens, None)
+                 if not next_el:
+                     raise Exception('Missing element after %s' % el.getText())
+                 else:
+                     el = next_el
+             # Check for bangs
+             if el.type == XPathLexer.BANG:
+                 invert = True
+                 next_el = next(tokens, None)
+                 if not next_el:
+                     raise Exception('Missing element after %s' % el.getText())
+                 else:
+                     el = next_el
+             # Add searched element
+             if el.type in [XPathLexer.TOKEN_REF, XPathLexer.RULE_REF, XPathLexer.WILDCARD, XPathLexer.STRING]:
+                 element = self.getXPathElement(el, anywhere)
+                 element.invert = invert
+                 elements.append(element)
+             elif el.type==Token.EOF:
+                 break
+             else:
+                 raise Exception("Unknown path element %s" % lexer.symbolicNames[el.type])
+         return elements
+
+     #
+     # Convert word like {@code#} or {@code ID} or {@code expr} to a path
+     # element. {@code anywhere} is {@code true} if {@code //} precedes the
+     # word.
+     #
+     def getXPathElement(self, wordToken:Token, anywhere:bool):
+         if wordToken.type==Token.EOF:
+             raise Exception("Missing path element at end of path")
+
+         word = wordToken.text
+         if wordToken.type==XPathLexer.WILDCARD :
+             return XPathWildcardAnywhereElement() if anywhere else XPathWildcardElement()
+
+         elif wordToken.type in [XPathLexer.TOKEN_REF, XPathLexer.STRING]:
+             tsource = self.parser.getTokenStream().tokenSource
+
+             ttype = Token.INVALID_TYPE
+             if wordToken.type == XPathLexer.TOKEN_REF:
+                 if word in tsource.ruleNames:
+                     ttype = tsource.ruleNames.index(word) + 1
+             else:
+                 if word in tsource.literalNames:
+                     ttype = tsource.literalNames.index(word)
+
+             if ttype == Token.INVALID_TYPE:
+                 raise Exception("%s at index %d isn't a valid token name" % (word, wordToken.tokenIndex))
+             return XPathTokenAnywhereElement(word, ttype) if anywhere else XPathTokenElement(word, ttype)
+
+         else:
+             ruleIndex = self.parser.ruleNames.index(word) if word in self.parser.ruleNames else -1
+
+             if ruleIndex == -1:
+                 raise Exception("%s at index %d isn't a valid rule name" % (word, wordToken.tokenIndex))
+             return XPathRuleAnywhereElement(word, ruleIndex) if anywhere else XPathRuleElement(word, ruleIndex)
+
+
+     @staticmethod
+     def findAll(tree:ParseTree, xpath:str, parser:Parser):
+         p = XPath(parser, xpath)
+         return p.evaluate(tree)
+
+     #
+     # Return a list of all nodes starting at {@code t} as root that satisfy the
+     # path. The root {@code /} is relative to the node passed to
+     # {@link #evaluate}.
+     #
+     def evaluate(self, t:ParseTree):
+         dummyRoot = ParserRuleContext()
+         dummyRoot.children = [t] # don't set t's parent.
+
+         work = [dummyRoot]
+         for element in self.elements:
+             work_next = list()
+             for node in work:
+                 if not isinstance(node, TerminalNode) and node.children:
+                     # only try to match next element if it has children
+                     # e.g., //func/*/stat might have a token node for which
+                     # we can't go looking for stat nodes.
+                     matching = element.evaluate(node)
+
+                     # See issue antlr#370 - Prevents XPath from returning the
+                     # same node multiple times
+                     matching = filter(lambda m: m not in work_next, matching)
+
+                     work_next.extend(matching)
+             work = work_next
+
+         return work
+
+
+ class XPathElement(object):
+
+     def __init__(self, nodeName:str):
+         self.nodeName = nodeName
+         self.invert = False
+
+     def __str__(self):
+         return type(self).__name__ + "[" + ("!" if self.invert else "") + self.nodeName + "]"
+
+
+
+ #
+ # Either {@code ID} at start of path or {@code ...//ID} in middle of path.
+ #
+ class XPathRuleAnywhereElement(XPathElement):
+
+     def __init__(self, ruleName:str, ruleIndex:int):
+         super().__init__(ruleName)
+         self.ruleIndex = ruleIndex
+
+     def evaluate(self, t:ParseTree):
+         # return all ParserRuleContext descendants of t that match ruleIndex (or do not match if inverted)
+         return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.descendants(t))
+
+ class XPathRuleElement(XPathElement):
+
+     def __init__(self, ruleName:str, ruleIndex:int):
+         super().__init__(ruleName)
+         self.ruleIndex = ruleIndex
+
+     def evaluate(self, t:ParseTree):
+         # return all ParserRuleContext children of t that match ruleIndex (or do not match if inverted)
+         return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.getChildren(t))
+
+ class XPathTokenAnywhereElement(XPathElement):
+
+     def __init__(self, ruleName:str, tokenType:int):
+         super().__init__(ruleName)
+         self.tokenType = tokenType
+
+     def evaluate(self, t:ParseTree):
+         # return all TerminalNode descendants of t that match tokenType (or do not match if inverted)
+         return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.descendants(t))
+
+ class XPathTokenElement(XPathElement):
+
+     def __init__(self, ruleName:str, tokenType:int):
+         super().__init__(ruleName)
+         self.tokenType = tokenType
+
+     def evaluate(self, t:ParseTree):
+         # return all TerminalNode children of t that match tokenType (or do not match if inverted)
+         return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.getChildren(t))
+
+
+ class XPathWildcardAnywhereElement(XPathElement):
+
+     def __init__(self):
+         super().__init__(XPath.WILDCARD)
+
+     def evaluate(self, t:ParseTree):
+         if self.invert:
+             return list() # !* is weird but valid (empty)
+         else:
+             return Trees.descendants(t)
+
+
+ class XPathWildcardElement(XPathElement):
+
+     def __init__(self):
+         super().__init__(XPath.WILDCARD)
+
+
+     def evaluate(self, t:ParseTree):
+         if self.invert:
+             return list() # !* is weird but valid (empty)
+         else:
+             return Trees.getChildren(t)
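
Note: `XPath.findAll(tree, path, parser)` is the shorthand described in the header comment above. A hedged sketch continuing the hypothetical Json5 example (the `value` rule and `STRING` token names are illustrative, not taken from the actual grammar):

    from omextra.text.antlr._runtime.tree.Trees import Trees
    from omextra.text.antlr._runtime.xpath.XPath import XPath

    # every `value` rule context anywhere under the root (rule name assumed)
    for node in XPath.findAll(tree, '//value', parser):
        print(Trees.toStringTree(node, recog=parser))

    # STRING tokens that are direct children of the root (token name assumed)
    for tok in XPath.findAll(tree, '/STRING', parser):
        print(tok.symbol.text)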
omextra/text/antlr/_runtime/xpath/XPathLexer.py
@@ -0,0 +1,98 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ # Generated from XPathLexer.g4 by ANTLR 4.11.2-SNAPSHOT
+ from .._all import *
+ from io import StringIO
+ import sys
+ if sys.version_info[1] > 5:
+     from typing import TextIO
+ else:
+     from typing.io import TextIO
+
+
+ def serializedATN():
+     return [
+         4,0,8,50,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,
+         6,7,6,2,7,7,7,1,0,1,0,1,0,1,1,1,1,1,2,1,2,1,3,1,3,1,4,1,4,5,4,29,
+         8,4,10,4,12,4,32,9,4,1,4,1,4,1,5,1,5,3,5,38,8,5,1,6,1,6,1,7,1,7,
+         5,7,44,8,7,10,7,12,7,47,9,7,1,7,1,7,1,45,0,8,1,3,3,4,5,5,7,6,9,7,
+         11,0,13,0,15,8,1,0,2,5,0,48,57,95,95,183,183,768,879,8255,8256,13,
+         0,65,90,97,122,192,214,216,246,248,767,880,893,895,8191,8204,8205,
+         8304,8591,11264,12271,12289,55295,63744,64975,65008,65533,50,0,1,
+         1,0,0,0,0,3,1,0,0,0,0,5,1,0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,15,1,0,
+         0,0,1,17,1,0,0,0,3,20,1,0,0,0,5,22,1,0,0,0,7,24,1,0,0,0,9,26,1,0,
+         0,0,11,37,1,0,0,0,13,39,1,0,0,0,15,41,1,0,0,0,17,18,5,47,0,0,18,
+         19,5,47,0,0,19,2,1,0,0,0,20,21,5,47,0,0,21,4,1,0,0,0,22,23,5,42,
+         0,0,23,6,1,0,0,0,24,25,5,33,0,0,25,8,1,0,0,0,26,30,3,13,6,0,27,29,
+         3,11,5,0,28,27,1,0,0,0,29,32,1,0,0,0,30,28,1,0,0,0,30,31,1,0,0,0,
+         31,33,1,0,0,0,32,30,1,0,0,0,33,34,6,4,0,0,34,10,1,0,0,0,35,38,3,
+         13,6,0,36,38,7,0,0,0,37,35,1,0,0,0,37,36,1,0,0,0,38,12,1,0,0,0,39,
+         40,7,1,0,0,40,14,1,0,0,0,41,45,5,39,0,0,42,44,9,0,0,0,43,42,1,0,
+         0,0,44,47,1,0,0,0,45,46,1,0,0,0,45,43,1,0,0,0,46,48,1,0,0,0,47,45,
+         1,0,0,0,48,49,5,39,0,0,49,16,1,0,0,0,4,0,30,37,45,1,1,4,0
+     ]
+
+ class XPathLexer(Lexer):
+
+     atn = ATNDeserializer().deserialize(serializedATN())
+
+     decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
+
+     TOKEN_REF = 1
+     RULE_REF = 2
+     ANYWHERE = 3
+     ROOT = 4
+     WILDCARD = 5
+     BANG = 6
+     ID = 7
+     STRING = 8
+
+     channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
+
+     modeNames = [ "DEFAULT_MODE" ]
+
+     literalNames = [ "<INVALID>",
+             "'//'", "'/'", "'*'", "'!'" ]
+
+     symbolicNames = [ "<INVALID>",
+             "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG",
+             "ID", "STRING" ]
+
+     ruleNames = [ "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar",
+                   "NameStartChar", "STRING" ]
+
+     grammarFileName = "XPathLexer.g4"
+
+     def __init__(self, input=None, output:TextIO = sys.stdout):
+         super().__init__(input, output)
+         self.checkVersion("4.11.2-SNAPSHOT")
+         self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
+         self._actions = None
+         self._predicates = None
+
+
+     def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
+         if self._actions is None:
+             actions = dict()
+             actions[4] = self.ID_action
+             self._actions = actions
+         action = self._actions.get(ruleIndex, None)
+         if action is not None:
+             action(localctx, actionIndex)
+         else:
+             raise Exception("No registered action for:" + str(ruleIndex))
+
+
+     def ID_action(self, localctx:RuleContext , actionIndex:int):
+         if actionIndex == 0:
+
+             char = self.text[0]
+             if char.isupper():
+                 self.type = XPathLexer.TOKEN_REF
+             else:
+                 self.type = XPathLexer.RULE_REF
+
+
+
+
omextra/text/antlr/_runtime/xpath/__init__.py
@@ -0,0 +1,4 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ __author__ = 'ericvergnaud'
omextra/text/antlr/cli/consts.py
@@ -4,4 +4,4 @@ ANTLR_JAR_URL = f'https://www.antlr.org/download/{ANTLR_JAR_NAME}'
  ANTLR_RUNTIME_PACKAGE = 'antlr4-python3-runtime'
  ANTLR_GITHUB_REPO = 'antlr/antlr4'

- ANTLR_RUNTIME_VENDOR = 'omlish.text.antlr._runtime._all'
+ ANTLR_RUNTIME_VENDOR = 'omextra.text.antlr._runtime._all'
omextra/text/antlr/delimit.py
@@ -0,0 +1,110 @@
+ # ruff: noqa: N802 N815
+ import io
+ import typing as ta
+
+ from omlish import check
+
+ from . import runtime as antlr4
+
+
+ ##
+
+
+ class DelimitingLexer(antlr4.Lexer):
+     def __init__(
+             self,
+             *args: ta.Any,
+             delimiter_token: ta.Any,
+             delimiters: ta.Iterable[str],
+             no_skip: bool = False,
+             **kwargs,
+     ) -> None:
+         super().__init__(*args, **kwargs)
+
+         self._delimiter_token = delimiter_token
+         self._delimiters = set(check.not_isinstance(delimiters, str))
+         self._no_skip = no_skip
+
+     _hitEOF: bool
+
+     def nextToken(self) -> antlr4.Token:
+         if self._input is None:
+             raise antlr4.IllegalStateException('nextToken requires a non-null input stream.')
+
+         token_start_marker = self._input.mark()
+         try:
+             while True:
+                 if self._hitEOF:
+                     self.emitEOF()
+                     return self._token
+
+                 self._token: antlr4.Token | None = None
+                 self._channel = antlr4.Token.DEFAULT_CHANNEL
+                 self._tokenStartCharIndex = self._input.index
+                 self._tokenStartColumn = self._interp.column
+                 self._tokenStartLine = self._interp.line
+                 self._text = None
+
+                 continue_outer = False
+                 while True:
+                     self._type = antlr4.Token.INVALID_TYPE
+                     ttype = self.SKIP
+
+                     for delimiter in self._delimiters:
+                         if self._match_delimiter(delimiter):
+                             ttype = self._delimiter_token
+                             break
+                     else:
+                         try:
+                             ttype = self._interp.match(self._input, self._mode)
+                         except antlr4.LexerNoViableAltException as e:
+                             self.notifyListeners(e)  # report error
+                             self.recover(e)
+
+                     if self._input.LA(1) == antlr4.Token.EOF:
+                         self._hitEOF = True
+
+                     if self._type == antlr4.Token.INVALID_TYPE:
+                         self._type = ttype
+
+                     if not self._no_skip and self._type == self.SKIP:
+                         continue_outer = True
+                         break
+
+                     if self._type != self.MORE:
+                         break
+
+                 if continue_outer:
+                     continue
+
+                 if self._token is None:
+                     self.emit()
+
+                 return self._token
+
+         finally:
+             self._input.release(token_start_marker)
+
+     def _match_delimiter(self, delimiter: str) -> bool:
+         for i, c in enumerate(delimiter):
+             if chr(self._input.LA(i + 1)) != c:
+                 return False
+         self._input.seek(self._input.index + len(delimiter))
+         return True
+
+     def split(self) -> tuple[list[tuple[str, str]], str]:
+         lst = []
+         sb = io.StringIO()
+         while True:
+             token = self.nextToken()
+             if token.type == antlr4.Token.EOF:
+                 break
+             if token.type == self._delimiter_token:
+                 statement = sb.getvalue().strip()
+                 if statement:
+                     lst.append((statement, token.text))
+                 sb = io.StringIO()
+             else:
+                 sb.write(token.text)
+         partial = sb.getvalue()
+         return lst, partial
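
Note: `DelimitingLexer` is written as a mixin over a generated lexer: `nextToken` checks the raw delimiter strings before falling back to the normal ATN match, and `split` drains the stream into (statement, delimiter) pairs plus any trailing partial statement. A hedged sketch of the intended wiring, pairing it with the Minisql lexer shipped in this package (the sentinel delimiter value and the exact re-exports of `omextra.text.antlr.runtime` are assumptions, not confirmed by this diff):

    from omextra.sql.parsing._antlr.MinisqlLexer import MinisqlLexer
    from omextra.text.antlr import runtime as antlr4  # InputStream re-export assumed
    from omextra.text.antlr.delimit import DelimitingLexer

    DELIM = object()  # sentinel; compared against token.type in split()

    class StatementLexer(DelimitingLexer, MinisqlLexer):
        pass

    lexer = StatementLexer(
        antlr4.InputStream('select 1; select 2; select'),
        delimiter_token=DELIM,
        delimiters=[';'],
        no_skip=True,  # keep otherwise-skipped tokens (e.g. whitespace) in the rebuilt text
    )
    statements, partial = lexer.split()
    # roughly: statements -> [('select 1', ';'), ('select 2', ';')], partial -> ' select'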
omextra/text/antlr/dot.py
@@ -0,0 +1,42 @@
+ import typing as ta
+
+ from omlish.graphs import dot
+
+ from . import runtime as antlr4
+ from .utils import yield_contexts
+
+
+ ##
+
+
+ def dot_ctx(
+         root: antlr4.ParserRuleContext,
+         *,
+         left_to_right: bool = False,
+ ) -> dot.Graph:
+     stmts: list[dot.Stmt] = []
+
+     if left_to_right:
+         stmts.append(dot.RawStmt('rankdir=LR;'))
+
+     for c in yield_contexts(root):
+         if isinstance(c, antlr4.TerminalNode):
+             continue
+
+         lbl = [
+             [type(c).__name__],
+             [str(id(c))],
+             [f'{c.start} {c.stop}'],
+         ]
+
+         stmts.append(dot.Node(f'_{id(c)}', {'label': lbl, 'shape': 'box'}))
+
+         for n in (c.children or []):
+             if not isinstance(n, antlr4.TerminalNode):
+                 stmts.append(dot.Edge(f'_{id(c)}', f'_{id(n)}'))
+
+     return dot.Graph(stmts)
+
+
+ def open_dot_ctx(root: antlr4.ParserRuleContext, **kwargs: ta.Any) -> None:
+     dot.open_dot(dot.render(dot_ctx(root)), **kwargs)
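
Note: `dot_ctx` walks every non-terminal context and emits one box node plus child edges, so the result is a plain Graphviz digraph of the parse. Illustrative usage, reusing `tree` from the hypothetical Json5 sketch above (`dot.render` and `dot.open_dot` are the same omlish helpers the module itself calls):

    from omlish.graphs import dot
    from omextra.text.antlr.dot import dot_ctx, open_dot_ctx

    g = dot_ctx(tree, left_to_right=True)
    print(dot.render(g))  # DOT source for Graphviz
    # open_dot_ctx(tree)  # or render and open in a viewer directly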
omextra/text/antlr/errors.py
@@ -0,0 +1,14 @@
+ # ruff: noqa: N802 N803
+ from . import runtime as antlr4
+
+
+ ##
+
+
+ class ParseError(Exception):
+     pass
+
+
+ class SilentRaisingErrorListener(antlr4.ErrorListener):
+     def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
+         raise ParseError(recognizer, offendingSymbol, line, column, msg, e)
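
Note: the standard ANTLR wiring for a listener like this is to swap out the default console listener so syntax errors raise instead of printing to stderr. A sketch, reusing the assumed Json5 parser and entry rule from the first example:

    from omextra.text.antlr.errors import ParseError, SilentRaisingErrorListener

    parser.removeErrorListeners()
    parser.addErrorListener(SilentRaisingErrorListener())
    try:
        tree = parser.json5()  # assumed entry rule
    except ParseError as e:
        print('parse failed:', e.args)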