omlish 0.0.0.dev56__py3-none-any.whl → 0.0.0.dev58__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (68)
  1. omlish/__about__.py +2 -2
  2. omlish/antlr/__init__.py +0 -0
  3. omlish/antlr/_runtime/BufferedTokenStream.py +305 -0
  4. omlish/antlr/_runtime/CommonTokenFactory.py +64 -0
  5. omlish/antlr/_runtime/CommonTokenStream.py +90 -0
  6. omlish/antlr/_runtime/FileStream.py +30 -0
  7. omlish/antlr/_runtime/InputStream.py +90 -0
  8. omlish/antlr/_runtime/IntervalSet.py +183 -0
  9. omlish/antlr/_runtime/LL1Analyzer.py +176 -0
  10. omlish/antlr/_runtime/Lexer.py +332 -0
  11. omlish/antlr/_runtime/ListTokenSource.py +147 -0
  12. omlish/antlr/_runtime/Parser.py +583 -0
  13. omlish/antlr/_runtime/ParserInterpreter.py +173 -0
  14. omlish/antlr/_runtime/ParserRuleContext.py +189 -0
  15. omlish/antlr/_runtime/PredictionContext.py +632 -0
  16. omlish/antlr/_runtime/Recognizer.py +150 -0
  17. omlish/antlr/_runtime/RuleContext.py +230 -0
  18. omlish/antlr/_runtime/StdinStream.py +14 -0
  19. omlish/antlr/_runtime/Token.py +158 -0
  20. omlish/antlr/_runtime/TokenStreamRewriter.py +258 -0
  21. omlish/antlr/_runtime/Utils.py +36 -0
  22. omlish/antlr/_runtime/__init__.py +24 -0
  23. omlish/antlr/_runtime/_pygrun.py +174 -0
  24. omlish/antlr/_runtime/atn/ATN.py +135 -0
  25. omlish/antlr/_runtime/atn/ATNConfig.py +162 -0
  26. omlish/antlr/_runtime/atn/ATNConfigSet.py +215 -0
  27. omlish/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
  28. omlish/antlr/_runtime/atn/ATNDeserializer.py +449 -0
  29. omlish/antlr/_runtime/atn/ATNSimulator.py +50 -0
  30. omlish/antlr/_runtime/atn/ATNState.py +267 -0
  31. omlish/antlr/_runtime/atn/ATNType.py +20 -0
  32. omlish/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
  33. omlish/antlr/_runtime/atn/LexerAction.py +301 -0
  34. omlish/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
  35. omlish/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
  36. omlish/antlr/_runtime/atn/PredictionMode.py +502 -0
  37. omlish/antlr/_runtime/atn/SemanticContext.py +333 -0
  38. omlish/antlr/_runtime/atn/Transition.py +271 -0
  39. omlish/antlr/_runtime/atn/__init__.py +4 -0
  40. omlish/antlr/_runtime/dfa/DFA.py +136 -0
  41. omlish/antlr/_runtime/dfa/DFASerializer.py +76 -0
  42. omlish/antlr/_runtime/dfa/DFAState.py +129 -0
  43. omlish/antlr/_runtime/dfa/__init__.py +4 -0
  44. omlish/antlr/_runtime/error/DiagnosticErrorListener.py +110 -0
  45. omlish/antlr/_runtime/error/ErrorListener.py +75 -0
  46. omlish/antlr/_runtime/error/ErrorStrategy.py +712 -0
  47. omlish/antlr/_runtime/error/Errors.py +176 -0
  48. omlish/antlr/_runtime/error/__init__.py +4 -0
  49. omlish/antlr/_runtime/tree/Chunk.py +33 -0
  50. omlish/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
  51. omlish/antlr/_runtime/tree/ParseTreePattern.py +75 -0
  52. omlish/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
  53. omlish/antlr/_runtime/tree/RuleTagToken.py +53 -0
  54. omlish/antlr/_runtime/tree/TokenTagToken.py +50 -0
  55. omlish/antlr/_runtime/tree/Tree.py +194 -0
  56. omlish/antlr/_runtime/tree/Trees.py +114 -0
  57. omlish/antlr/_runtime/tree/__init__.py +2 -0
  58. omlish/antlr/_runtime/xpath/XPath.py +272 -0
  59. omlish/antlr/_runtime/xpath/XPathLexer.py +98 -0
  60. omlish/antlr/_runtime/xpath/__init__.py +4 -0
  61. omlish/formats/json/cli.py +76 -7
  62. omlish/formats/props.py +6 -2
  63. {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/METADATA +1 -1
  64. {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/RECORD +68 -9
  65. {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/LICENSE +0 -0
  66. {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/WHEEL +0 -0
  67. {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/entry_points.txt +0 -0
  68. {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/top_level.txt +0 -0
omlish/antlr/_runtime/tree/Trees.py ADDED
@@ -0,0 +1,114 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+
+ # A set of utility routines useful for all kinds of ANTLR trees.#
+ from io import StringIO
+ from ..Token import Token
+ from ..Utils import escapeWhitespace
+ from .Tree import RuleNode, ErrorNode, TerminalNode, Tree, ParseTree
+
+ # need forward declaration
+ Parser = None
+
+ class Trees(object):
+
+     # Print out a whole tree in LISP form. {@link #getNodeText} is used on the
+     # node payloads to get the text for the nodes. Detect
+     # parse trees and extract data appropriately.
+     @classmethod
+     def toStringTree(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
+         if recog is not None:
+             ruleNames = recog.ruleNames
+         s = escapeWhitespace(cls.getNodeText(t, ruleNames), False)
+         if t.getChildCount()==0:
+             return s
+         with StringIO() as buf:
+             buf.write("(")
+             buf.write(s)
+             buf.write(' ')
+             for i in range(0, t.getChildCount()):
+                 if i > 0:
+                     buf.write(' ')
+                 buf.write(cls.toStringTree(t.getChild(i), ruleNames))
+             buf.write(")")
+             return buf.getvalue()
+
+     @classmethod
+     def getNodeText(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
+         if recog is not None:
+             ruleNames = recog.ruleNames
+         if ruleNames is not None:
+             if isinstance(t, RuleNode):
+                 if t.getAltNumber()!=0: # should use ATN.INVALID_ALT_NUMBER but won't compile
+                     return ruleNames[t.getRuleIndex()]+":"+str(t.getAltNumber())
+                 return ruleNames[t.getRuleIndex()]
+             elif isinstance( t, ErrorNode):
+                 return str(t)
+             elif isinstance(t, TerminalNode):
+                 if t.symbol is not None:
+                     return t.symbol.text
+         # no recog for rule names
+         payload = t.getPayload()
+         if isinstance(payload, Token ):
+             return payload.text
+         return str(t.getPayload())
+
+
+     # Return ordered list of all children of this node
+     @classmethod
+     def getChildren(cls, t:Tree):
+         return [ t.getChild(i) for i in range(0, t.getChildCount()) ]
+
+     # Return a list of all ancestors of this node. The first node of
+     # list is the root and the last is the parent of this node.
+     #
+     @classmethod
+     def getAncestors(cls, t:Tree):
+         ancestors = []
+         t = t.getParent()
+         while t is not None:
+             ancestors.insert(0, t) # insert at start
+             t = t.getParent()
+         return ancestors
+
+     @classmethod
+     def findAllTokenNodes(cls, t:ParseTree, ttype:int):
+         return cls.findAllNodes(t, ttype, True)
+
+     @classmethod
+     def findAllRuleNodes(cls, t:ParseTree, ruleIndex:int):
+         return cls.findAllNodes(t, ruleIndex, False)
+
+     @classmethod
+     def findAllNodes(cls, t:ParseTree, index:int, findTokens:bool):
+         nodes = []
+         cls._findAllNodes(t, index, findTokens, nodes)
+         return nodes
+
+     @classmethod
+     def _findAllNodes(cls, t:ParseTree, index:int, findTokens:bool, nodes:list):
+         from ..ParserRuleContext import ParserRuleContext
+         # check this node (the root) first
+         if findTokens and isinstance(t, TerminalNode):
+             if t.symbol.type==index:
+                 nodes.append(t)
+         elif not findTokens and isinstance(t, ParserRuleContext):
+             if t.ruleIndex == index:
+                 nodes.append(t)
+         # check children
+         for i in range(0, t.getChildCount()):
+             cls._findAllNodes(t.getChild(i), index, findTokens, nodes)
+
+     @classmethod
+     def descendants(cls, t:ParseTree):
+         nodes = [t]
+         for i in range(0, t.getChildCount()):
+             nodes.extend(cls.descendants(t.getChild(i)))
+         return nodes
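
For orientation (not part of the diff): a minimal sketch of how these tree utilities are typically driven, assuming `tree` and `parser` come from any generated ANTLR parser built against this vendored runtime:

    from omlish.antlr._runtime.tree.Trees import Trees

    # LISP-style dump of the whole tree, using the parser's rule names:
    print(Trees.toStringTree(tree, recog=parser))

    # collect the display text of every node, root first:
    texts = [Trees.getNodeText(n, parser.ruleNames) for n in Trees.descendants(tree)]
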
omlish/antlr/_runtime/tree/__init__.py ADDED
@@ -0,0 +1,2 @@
+ # ruff: noqa
+ # flake8: noqa
omlish/antlr/_runtime/xpath/XPath.py ADDED
@@ -0,0 +1,272 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+ #
+ # Represent a subset of XPath XML path syntax for use in identifying nodes in
+ # parse trees.
+ #
+ # <p>
+ # Split path into words and separators {@code /} and {@code //} via ANTLR
+ # itself then walk path elements from left to right. At each separator-word
+ # pair, find set of nodes. Next stage uses those as work list.</p>
+ #
+ # <p>
+ # The basic interface is
+ # {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}.
+ # But that is just shorthand for:</p>
+ #
+ # <pre>
+ # {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
+ # return p.{@link #evaluate evaluate}(tree);
+ # </pre>
+ #
+ # <p>
+ # See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
+ # allows operators:</p>
+ #
+ # <dl>
+ # <dt>/</dt> <dd>root</dd>
+ # <dt>//</dt> <dd>anywhere</dd>
+ # <dt>!</dt> <dd>invert; this must appear directly after root or anywhere
+ # operator</dd>
+ # </dl>
+ #
+ # <p>
+ # and path elements:</p>
+ #
+ # <dl>
+ # <dt>ID</dt> <dd>token name</dd>
+ # <dt>'string'</dt> <dd>any string literal token from the grammar</dd>
+ # <dt>expr</dt> <dd>rule name</dd>
+ # <dt>*</dt> <dd>wildcard matching any node</dd>
+ # </dl>
+ #
+ # <p>
+ # Whitespace is not allowed.</p>
+ #
+ from .. import CommonTokenStream, DFA, PredictionContextCache, Lexer, LexerATNSimulator, ParserRuleContext, TerminalNode
+ from ..InputStream import InputStream
+ from ..Parser import Parser
+ from ..RuleContext import RuleContext
+ from ..Token import Token
+ from ..atn.ATNDeserializer import ATNDeserializer
+ from ..error.ErrorListener import ErrorListener
+ from ..error.Errors import LexerNoViableAltException
+ from ..tree.Tree import ParseTree
+ from ..tree.Trees import Trees
+ from io import StringIO
+ from .XPathLexer import XPathLexer
+
+
+ class XPath(object):
+
+     WILDCARD = "*" # word not operator/separator
+     NOT = "!" # word for invert operator
+
+     def __init__(self, parser:Parser, path:str):
+         self.parser = parser
+         self.path = path
+         self.elements = self.split(path)
+
+     def split(self, path:str):
+         input = InputStream(path)
+         lexer = XPathLexer(input)
+         def recover(self, e):
+             raise e
+         lexer.recover = recover
+         lexer.removeErrorListeners()
+         lexer.addErrorListener(ErrorListener()) # XPathErrorListener does no more
+         tokenStream = CommonTokenStream(lexer)
+         try:
+             tokenStream.fill()
+         except LexerNoViableAltException as e:
+             pos = lexer.column
+             msg = "Invalid tokens or characters at index %d in path '%s'" % (pos, path)
+             raise Exception(msg, e)
+
+         tokens = iter(tokenStream.tokens)
+         elements = list()
+         for el in tokens:
+             invert = False
+             anywhere = False
+             # Check for path separators, if none assume root
+             if el.type in [XPathLexer.ROOT, XPathLexer.ANYWHERE]:
+                 anywhere = el.type == XPathLexer.ANYWHERE
+                 next_el = next(tokens, None)
+                 if not next_el:
+                     raise Exception('Missing element after %s' % el.getText())
+                 else:
+                     el = next_el
+             # Check for bangs
+             if el.type == XPathLexer.BANG:
+                 invert = True
+                 next_el = next(tokens, None)
+                 if not next_el:
+                     raise Exception('Missing element after %s' % el.getText())
+                 else:
+                     el = next_el
+             # Add searched element
+             if el.type in [XPathLexer.TOKEN_REF, XPathLexer.RULE_REF, XPathLexer.WILDCARD, XPathLexer.STRING]:
+                 element = self.getXPathElement(el, anywhere)
+                 element.invert = invert
+                 elements.append(element)
+             elif el.type==Token.EOF:
+                 break
+             else:
+                 raise Exception("Unknown path element %s" % lexer.symbolicNames[el.type])
+         return elements
+
+     #
+     # Convert word like {@code#} or {@code ID} or {@code expr} to a path
+     # element. {@code anywhere} is {@code true} if {@code //} precedes the
+     # word.
+     #
+     def getXPathElement(self, wordToken:Token, anywhere:bool):
+         if wordToken.type==Token.EOF:
+             raise Exception("Missing path element at end of path")
+
+         word = wordToken.text
+         if wordToken.type==XPathLexer.WILDCARD :
+             return XPathWildcardAnywhereElement() if anywhere else XPathWildcardElement()
+
+         elif wordToken.type in [XPathLexer.TOKEN_REF, XPathLexer.STRING]:
+             tsource = self.parser.getTokenStream().tokenSource
+
+             ttype = Token.INVALID_TYPE
+             if wordToken.type == XPathLexer.TOKEN_REF:
+                 if word in tsource.ruleNames:
+                     ttype = tsource.ruleNames.index(word) + 1
+             else:
+                 if word in tsource.literalNames:
+                     ttype = tsource.literalNames.index(word)
+
+             if ttype == Token.INVALID_TYPE:
+                 raise Exception("%s at index %d isn't a valid token name" % (word, wordToken.tokenIndex))
+             return XPathTokenAnywhereElement(word, ttype) if anywhere else XPathTokenElement(word, ttype)
+
+         else:
+             ruleIndex = self.parser.ruleNames.index(word) if word in self.parser.ruleNames else -1
+
+             if ruleIndex == -1:
+                 raise Exception("%s at index %d isn't a valid rule name" % (word, wordToken.tokenIndex))
+             return XPathRuleAnywhereElement(word, ruleIndex) if anywhere else XPathRuleElement(word, ruleIndex)
+
+
+     @staticmethod
+     def findAll(tree:ParseTree, xpath:str, parser:Parser):
+         p = XPath(parser, xpath)
+         return p.evaluate(tree)
+
+     #
+     # Return a list of all nodes starting at {@code t} as root that satisfy the
+     # path. The root {@code /} is relative to the node passed to
+     # {@link #evaluate}.
+     #
+     def evaluate(self, t:ParseTree):
+         dummyRoot = ParserRuleContext()
+         dummyRoot.children = [t] # don't set t's parent.
+
+         work = [dummyRoot]
+         for element in self.elements:
+             work_next = list()
+             for node in work:
+                 if not isinstance(node, TerminalNode) and node.children:
+                     # only try to match next element if it has children
+                     # e.g., //func/*/stat might have a token node for which
+                     # we can't go looking for stat nodes.
+                     matching = element.evaluate(node)
+
+                     # See issue antlr#370 - Prevents XPath from returning the
+                     # same node multiple times
+                     matching = filter(lambda m: m not in work_next, matching)
+
+                     work_next.extend(matching)
+             work = work_next
+
+         return work
+
+
+ class XPathElement(object):
+
+     def __init__(self, nodeName:str):
+         self.nodeName = nodeName
+         self.invert = False
+
+     def __str__(self):
+         return type(self).__name__ + "[" + ("!" if self.invert else "") + self.nodeName + "]"
+
+
+
+ #
+ # Either {@code ID} at start of path or {@code ...//ID} in middle of path.
+ #
+ class XPathRuleAnywhereElement(XPathElement):
+
+     def __init__(self, ruleName:str, ruleIndex:int):
+         super().__init__(ruleName)
+         self.ruleIndex = ruleIndex
+
+     def evaluate(self, t:ParseTree):
+         # return all ParserRuleContext descendants of t that match ruleIndex (or do not match if inverted)
+         return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.descendants(t))
+
+ class XPathRuleElement(XPathElement):
+
+     def __init__(self, ruleName:str, ruleIndex:int):
+         super().__init__(ruleName)
+         self.ruleIndex = ruleIndex
+
+     def evaluate(self, t:ParseTree):
+         # return all ParserRuleContext children of t that match ruleIndex (or do not match if inverted)
+         return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.getChildren(t))
+
+ class XPathTokenAnywhereElement(XPathElement):
+
+     def __init__(self, ruleName:str, tokenType:int):
+         super().__init__(ruleName)
+         self.tokenType = tokenType
+
+     def evaluate(self, t:ParseTree):
+         # return all TerminalNode descendants of t that match tokenType (or do not match if inverted)
+         return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.descendants(t))
+
+ class XPathTokenElement(XPathElement):
+
+     def __init__(self, ruleName:str, tokenType:int):
+         super().__init__(ruleName)
+         self.tokenType = tokenType
+
+     def evaluate(self, t:ParseTree):
+         # return all TerminalNode children of t that match tokenType (or do not match if inverted)
+         return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.getChildren(t))
+
+
+ class XPathWildcardAnywhereElement(XPathElement):
+
+     def __init__(self):
+         super().__init__(XPath.WILDCARD)
+
+     def evaluate(self, t:ParseTree):
+         if self.invert:
+             return list() # !* is weird but valid (empty)
+         else:
+             return Trees.descendants(t)
+
+
+ class XPathWildcardElement(XPathElement):
+
+     def __init__(self):
+         super().__init__(XPath.WILDCARD)
+
+
+     def evaluate(self, t:ParseTree):
+         if self.invert:
+             return list() # !* is weird but valid (empty)
+         else:
+             return Trees.getChildren(t)
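
A usage sketch (not part of the diff), again assuming `tree` and `parser` come from a generated ANTLR parser; `//expr/ID` is just an illustrative path for a grammar with an `expr` rule and an `ID` token:

    from omlish.antlr._runtime.xpath.XPath import XPath

    # find every ID token that is a direct child of an expr rule node,
    # anywhere in the tree:
    for node in XPath.findAll(tree, '//expr/ID', parser):
        print(node.getText())
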
omlish/antlr/_runtime/xpath/XPathLexer.py ADDED
@@ -0,0 +1,98 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ # Generated from XPathLexer.g4 by ANTLR 4.11.2-SNAPSHOT
+ from .. import *
+ from io import StringIO
+ import sys
+ if sys.version_info[1] > 5:
+     from typing import TextIO
+ else:
+     from typing.io import TextIO
+
+
+ def serializedATN():
+     return [
+         4,0,8,50,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,
+         6,7,6,2,7,7,7,1,0,1,0,1,0,1,1,1,1,1,2,1,2,1,3,1,3,1,4,1,4,5,4,29,
+         8,4,10,4,12,4,32,9,4,1,4,1,4,1,5,1,5,3,5,38,8,5,1,6,1,6,1,7,1,7,
+         5,7,44,8,7,10,7,12,7,47,9,7,1,7,1,7,1,45,0,8,1,3,3,4,5,5,7,6,9,7,
+         11,0,13,0,15,8,1,0,2,5,0,48,57,95,95,183,183,768,879,8255,8256,13,
+         0,65,90,97,122,192,214,216,246,248,767,880,893,895,8191,8204,8205,
+         8304,8591,11264,12271,12289,55295,63744,64975,65008,65533,50,0,1,
+         1,0,0,0,0,3,1,0,0,0,0,5,1,0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,15,1,0,
+         0,0,1,17,1,0,0,0,3,20,1,0,0,0,5,22,1,0,0,0,7,24,1,0,0,0,9,26,1,0,
+         0,0,11,37,1,0,0,0,13,39,1,0,0,0,15,41,1,0,0,0,17,18,5,47,0,0,18,
+         19,5,47,0,0,19,2,1,0,0,0,20,21,5,47,0,0,21,4,1,0,0,0,22,23,5,42,
+         0,0,23,6,1,0,0,0,24,25,5,33,0,0,25,8,1,0,0,0,26,30,3,13,6,0,27,29,
+         3,11,5,0,28,27,1,0,0,0,29,32,1,0,0,0,30,28,1,0,0,0,30,31,1,0,0,0,
+         31,33,1,0,0,0,32,30,1,0,0,0,33,34,6,4,0,0,34,10,1,0,0,0,35,38,3,
+         13,6,0,36,38,7,0,0,0,37,35,1,0,0,0,37,36,1,0,0,0,38,12,1,0,0,0,39,
+         40,7,1,0,0,40,14,1,0,0,0,41,45,5,39,0,0,42,44,9,0,0,0,43,42,1,0,
+         0,0,44,47,1,0,0,0,45,46,1,0,0,0,45,43,1,0,0,0,46,48,1,0,0,0,47,45,
+         1,0,0,0,48,49,5,39,0,0,49,16,1,0,0,0,4,0,30,37,45,1,1,4,0
+     ]
+
+ class XPathLexer(Lexer):
+
+     atn = ATNDeserializer().deserialize(serializedATN())
+
+     decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
+
+     TOKEN_REF = 1
+     RULE_REF = 2
+     ANYWHERE = 3
+     ROOT = 4
+     WILDCARD = 5
+     BANG = 6
+     ID = 7
+     STRING = 8
+
+     channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
+
+     modeNames = [ "DEFAULT_MODE" ]
+
+     literalNames = [ "<INVALID>",
+             "'//'", "'/'", "'*'", "'!'" ]
+
+     symbolicNames = [ "<INVALID>",
+             "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG",
+             "ID", "STRING" ]
+
+     ruleNames = [ "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar",
+                   "NameStartChar", "STRING" ]
+
+     grammarFileName = "XPathLexer.g4"
+
+     def __init__(self, input=None, output:TextIO = sys.stdout):
+         super().__init__(input, output)
+         self.checkVersion("4.11.2-SNAPSHOT")
+         self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
+         self._actions = None
+         self._predicates = None
+
+
+     def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
+         if self._actions is None:
+             actions = dict()
+             actions[4] = self.ID_action
+             self._actions = actions
+         action = self._actions.get(ruleIndex, None)
+         if action is not None:
+             action(localctx, actionIndex)
+         else:
+             raise Exception("No registered action for:" + str(ruleIndex))
+
+
+     def ID_action(self, localctx:RuleContext , actionIndex:int):
+         if actionIndex == 0:
+
+             char = self.text[0]
+             if char.isupper():
+                 self.type = XPathLexer.TOKEN_REF
+             else:
+                 self.type = XPathLexer.RULE_REF
+
+
+
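
A quick sketch (not part of the diff) of what this lexer produces for a sample path; the expected token types follow from the constants above, and the import paths assume the vendored module layout shown in the file list:

    from omlish.antlr._runtime.CommonTokenStream import CommonTokenStream
    from omlish.antlr._runtime.InputStream import InputStream
    from omlish.antlr._runtime.xpath.XPathLexer import XPathLexer

    stream = CommonTokenStream(XPathLexer(InputStream('//expr/ID')))
    stream.fill()
    for tok in stream.tokens:
        print(tok.type, repr(tok.text))
    # expected: ANYWHERE '//', RULE_REF 'expr' (lowercase first char),
    # ROOT '/', TOKEN_REF 'ID' (uppercase first char), then EOF
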
omlish/antlr/_runtime/xpath/__init__.py ADDED
@@ -0,0 +1,4 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ __author__ = 'ericvergnaud'
omlish/formats/json/cli.py CHANGED
@@ -1,13 +1,34 @@
  import argparse
  import contextlib
+ import dataclasses as dc
+ import enum
  import json
+ import subprocess
  import sys
  import typing as ta

+ from ... import lang
  from ... import term
  from .render import JsonRenderer


+ if ta.TYPE_CHECKING:
+     import tomllib
+
+     import yaml
+
+     from .. import dotenv
+     from .. import props
+
+ else:
+     tomllib = lang.proxy_import('tomllib')
+
+     yaml = lang.proxy_import('yaml')
+
+     dotenv = lang.proxy_import('..dotenv', __package__)
+     props = lang.proxy_import('..props', __package__)
+
+
  def term_color(o: ta.Any, state: JsonRenderer.State) -> tuple[str, str]:
      if state is JsonRenderer.State.KEY:
          return term.SGR(term.SGRs.FG.BRIGHT_BLUE), term.SGR(term.SGRs.RESET)
@@ -17,14 +38,39 @@ def term_color(o: ta.Any, state: JsonRenderer.State) -> tuple[str, str]:
      return '', ''


+ @dc.dataclass(frozen=True)
+ class Format:
+     names: ta.Sequence[str]
+     load: ta.Callable[[ta.TextIO], ta.Any]
+
+
+ class Formats(enum.Enum):
+     JSON = Format(['json'], json.load)
+     YAML = Format(['yaml', 'yml'], lambda f: yaml.safe_load(f))
+     TOML = Format(['toml'], lambda f: tomllib.loads(f.read()))
+     ENV = Format(['env', 'dotenv'], lambda f: dotenv.dotenv_values(stream=f))
+     PROPS = Format(['properties', 'props'], lambda f: dict(props.Properties().load(f.read())))
+
+
+ FORMATS_BY_NAME: ta.Mapping[str, Format] = {
+     n: f
+     for e in Formats
+     for f in [e.value]
+     for n in f.names
+ }
+
+
  def _main() -> None:
      parser = argparse.ArgumentParser()
+
      parser.add_argument('file', nargs='?')
+     parser.add_argument('-f', '--format')
      parser.add_argument('-z', '--compact', action='store_true')
      parser.add_argument('-p', '--pretty', action='store_true')
      parser.add_argument('-i', '--indent')
      parser.add_argument('-s', '--sort-keys', action='store_true')
      parser.add_argument('-c', '--color', action='store_true')
+     parser.add_argument('-l', '--less', action='store_true')
      args = parser.parse_args()

      separators = None
@@ -40,32 +86,55 @@ def _main() -> None:
          except ValueError:
              indent = args.indent

+     fmt_name = args.format
+     if fmt_name is None:
+         if args.file is not None:
+             ext = args.file.rpartition('.')[2]
+             if ext in FORMATS_BY_NAME:
+                 fmt_name = ext
+     if fmt_name is None:
+         fmt_name = 'json'
+     fmt = FORMATS_BY_NAME[fmt_name]
+
      with contextlib.ExitStack() as es:
          if args.file is None:
              in_file = sys.stdin
          else:
              in_file = es.enter_context(open(args.file))

-         data = json.load(in_file)
+         data = fmt.load(in_file)

          kw: dict[str, ta.Any] = dict(
              indent=indent,
              separators=separators,
+             sort_keys=args.sort_keys,
          )

          if args.color:
-             JsonRenderer(
-                 sys.stdout,
+             out = JsonRenderer.render_str(
+                 data,
                  **kw,
                  style=term_color,
-             ).render(data)
-             print()
+             )

          else:
-             print(json.dumps(
+             out = json.dumps(
                  data,
                  **kw,
-             ))
+             )
+
+         if args.less:
+             subprocess.run(
+                 [
+                     'less',
+                     *(['-R'] if args.color else []),
+                 ],
+                 input=out.encode(),
+                 check=True,
+             )
+
+         else:
+             print(out)


  if __name__ == '__main__':
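
Illustrative invocations of the updated CLI (not part of the diff; the module path follows from the file location, and the data files are hypothetical):

    # pretty-print a YAML file as JSON (format inferred from the extension):
    python -m omlish.formats.json.cli -p config.yml

    # force the properties parser and page colorized output through less -R:
    python -m omlish.formats.json.cli -f props -c -l app.properties
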
omlish/formats/props.py CHANGED
@@ -508,7 +508,7 @@ class Properties(collections.abc.MutableMapping):
              source_data,
              encoding: str | None = 'iso-8859-1',
              metadoc: bool = False,
-     ) -> None:
+     ) -> ta.Self:
          self.reset(metadoc)

          if isinstance(source_data, bytes):
@@ -522,6 +522,8 @@ class Properties(collections.abc.MutableMapping):

          self._parse()

+         return self
+
      def store(
              self,
              out_stream,
@@ -530,7 +532,7 @@ class Properties(collections.abc.MutableMapping):
              strict: bool = True,
              strip_meta: bool = True,
              timestamp: bool = True,
-     ) -> None:
+     ) -> ta.Self:
          out_codec_info = codecs.lookup(encoding)
          wrapped_out_stream = out_codec_info.streamwriter(out_stream, _jbackslash_replace_codec_name)
          properties_escape_nonprinting = strict and out_codec_info == codecs.lookup('latin_1')
@@ -597,6 +599,8 @@ class Properties(collections.abc.MutableMapping):
                  file=wrapped_out_stream,
              )

+         return self
+
      def list(self, out_stream=sys.stderr) -> None:
          print('-- listing properties --', file=out_stream)
          for key in self._properties:
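
The new `ta.Self` returns make `load()` and `store()` chainable. A rough sketch (the sample data is made up, and a byte stream is assumed for `store()` since it wraps the stream in a codec writer):

    import io

    from omlish.formats.props import Properties

    props = Properties().load('a=1\nb=2')   # load() now returns the instance
    print(dict(props))                      # expected: {'a': '1', 'b': '2'}

    buf = io.BytesIO()
    props.store(buf, timestamp=False)       # store() also returns the instance
    print(buf.getvalue())
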
{omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: omlish
- Version: 0.0.0.dev56
+ Version: 0.0.0.dev58
  Summary: omlish
  Author: wrmsr
  License: BSD-3-Clause