omlish 0.0.0.dev57__py3-none-any.whl → 0.0.0.dev59__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as published to their public registry. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in the registry.
Files changed (69)
  1. omlish/__about__.py +2 -2
  2. omlish/antlr/__init__.py +0 -0
  3. omlish/antlr/_runtime/BufferedTokenStream.py +305 -0
  4. omlish/antlr/_runtime/CommonTokenFactory.py +64 -0
  5. omlish/antlr/_runtime/CommonTokenStream.py +90 -0
  6. omlish/antlr/_runtime/FileStream.py +30 -0
  7. omlish/antlr/_runtime/InputStream.py +90 -0
  8. omlish/antlr/_runtime/IntervalSet.py +183 -0
  9. omlish/antlr/_runtime/LL1Analyzer.py +176 -0
  10. omlish/antlr/_runtime/Lexer.py +332 -0
  11. omlish/antlr/_runtime/ListTokenSource.py +147 -0
  12. omlish/antlr/_runtime/Parser.py +583 -0
  13. omlish/antlr/_runtime/ParserInterpreter.py +173 -0
  14. omlish/antlr/_runtime/ParserRuleContext.py +189 -0
  15. omlish/antlr/_runtime/PredictionContext.py +632 -0
  16. omlish/antlr/_runtime/Recognizer.py +150 -0
  17. omlish/antlr/_runtime/RuleContext.py +230 -0
  18. omlish/antlr/_runtime/StdinStream.py +14 -0
  19. omlish/antlr/_runtime/Token.py +158 -0
  20. omlish/antlr/_runtime/TokenStreamRewriter.py +258 -0
  21. omlish/antlr/_runtime/Utils.py +36 -0
  22. omlish/antlr/_runtime/__init__.py +24 -0
  23. omlish/antlr/_runtime/_pygrun.py +174 -0
  24. omlish/antlr/_runtime/atn/ATN.py +135 -0
  25. omlish/antlr/_runtime/atn/ATNConfig.py +162 -0
  26. omlish/antlr/_runtime/atn/ATNConfigSet.py +215 -0
  27. omlish/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
  28. omlish/antlr/_runtime/atn/ATNDeserializer.py +449 -0
  29. omlish/antlr/_runtime/atn/ATNSimulator.py +50 -0
  30. omlish/antlr/_runtime/atn/ATNState.py +267 -0
  31. omlish/antlr/_runtime/atn/ATNType.py +20 -0
  32. omlish/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
  33. omlish/antlr/_runtime/atn/LexerAction.py +301 -0
  34. omlish/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
  35. omlish/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
  36. omlish/antlr/_runtime/atn/PredictionMode.py +502 -0
  37. omlish/antlr/_runtime/atn/SemanticContext.py +333 -0
  38. omlish/antlr/_runtime/atn/Transition.py +271 -0
  39. omlish/antlr/_runtime/atn/__init__.py +4 -0
  40. omlish/antlr/_runtime/dfa/DFA.py +136 -0
  41. omlish/antlr/_runtime/dfa/DFASerializer.py +76 -0
  42. omlish/antlr/_runtime/dfa/DFAState.py +129 -0
  43. omlish/antlr/_runtime/dfa/__init__.py +4 -0
  44. omlish/antlr/_runtime/error/DiagnosticErrorListener.py +110 -0
  45. omlish/antlr/_runtime/error/ErrorListener.py +75 -0
  46. omlish/antlr/_runtime/error/ErrorStrategy.py +712 -0
  47. omlish/antlr/_runtime/error/Errors.py +176 -0
  48. omlish/antlr/_runtime/error/__init__.py +4 -0
  49. omlish/antlr/_runtime/tree/Chunk.py +33 -0
  50. omlish/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
  51. omlish/antlr/_runtime/tree/ParseTreePattern.py +75 -0
  52. omlish/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
  53. omlish/antlr/_runtime/tree/RuleTagToken.py +53 -0
  54. omlish/antlr/_runtime/tree/TokenTagToken.py +50 -0
  55. omlish/antlr/_runtime/tree/Tree.py +194 -0
  56. omlish/antlr/_runtime/tree/Trees.py +114 -0
  57. omlish/antlr/_runtime/tree/__init__.py +2 -0
  58. omlish/antlr/_runtime/xpath/XPath.py +272 -0
  59. omlish/antlr/_runtime/xpath/XPathLexer.py +98 -0
  60. omlish/antlr/_runtime/xpath/__init__.py +4 -0
  61. omlish/marshal/__init__.py +10 -5
  62. omlish/marshal/nop.py +18 -0
  63. omlish/marshal/primitives.py +16 -6
  64. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/METADATA +1 -1
  65. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/RECORD +69 -9
  66. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/LICENSE +0 -0
  67. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/WHEEL +0 -0
  68. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/entry_points.txt +0 -0
  69. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/top_level.txt +0 -0
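The bulk of this release is a vendored copy of the ANTLR 4 Python runtime under omlish/antlr/_runtime/ (entries 2-60 above), alongside small omlish.marshal changes (the nop.py and primitives.py entries). As a hypothetical sketch of what the vendored layout implies — not something shown in this diff — generated lexers and parsers would import the runtime classes from omlish.antlr._runtime rather than from the upstream antlr4 distribution:

    # Hypothetical usage sketch; module paths are taken from the file list above,
    # but how omlish wires these modules together internally is not shown here.
    from omlish.antlr._runtime.InputStream import InputStream
    from omlish.antlr._runtime.CommonTokenStream import CommonTokenStream

    stream = InputStream("some input text")
    print(stream.size)  # number of characters available to a lexer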
omlish/antlr/_runtime/Recognizer.py
@@ -0,0 +1,150 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+ from .RuleContext import RuleContext
+ from .Token import Token
+ from .error.ErrorListener import ProxyErrorListener, ConsoleErrorListener
+
+ # need forward delcaration
+ RecognitionException = None
+
+ class Recognizer(object):
+     __slots__ = ('_listeners', '_interp', '_stateNumber')
+
+     tokenTypeMapCache = dict()
+     ruleIndexMapCache = dict()
+
+     def __init__(self):
+         self._listeners = [ ConsoleErrorListener.INSTANCE ]
+         self._interp = None
+         self._stateNumber = -1
+
+     def extractVersion(self, version):
+         pos = version.find(".")
+         major = version[0:pos]
+         version = version[pos+1:]
+         pos = version.find(".")
+         if pos==-1:
+             pos = version.find("-")
+         if pos==-1:
+             pos = len(version)
+         minor = version[0:pos]
+         return major, minor
+
+     def checkVersion(self, toolVersion):
+         runtimeVersion = "4.13.1"
+         rvmajor, rvminor = self.extractVersion(runtimeVersion)
+         tvmajor, tvminor = self.extractVersion(toolVersion)
+         if rvmajor!=tvmajor or rvminor!=tvminor:
+             print("ANTLR runtime and generated code versions disagree: "+runtimeVersion+"!="+toolVersion)
+
+     def addErrorListener(self, listener):
+         self._listeners.append(listener)
+
+     def removeErrorListener(self, listener):
+         self._listeners.remove(listener)
+
+     def removeErrorListeners(self):
+         self._listeners = []
+
+     def getTokenTypeMap(self):
+         tokenNames = self.getTokenNames()
+         if tokenNames is None:
+             from .error.Errors import UnsupportedOperationException
+             raise UnsupportedOperationException("The current recognizer does not provide a list of token names.")
+         result = self.tokenTypeMapCache.get(tokenNames, None)
+         if result is None:
+             result = zip( tokenNames, range(0, len(tokenNames)))
+             result["EOF"] = Token.EOF
+             self.tokenTypeMapCache[tokenNames] = result
+         return result
+
+     # Get a map from rule names to rule indexes.
+     #
+     # <p>Used for XPath and tree pattern compilation.</p>
+     #
+     def getRuleIndexMap(self):
+         ruleNames = self.getRuleNames()
+         if ruleNames is None:
+             from .error.Errors import UnsupportedOperationException
+             raise UnsupportedOperationException("The current recognizer does not provide a list of rule names.")
+         result = self.ruleIndexMapCache.get(ruleNames, None)
+         if result is None:
+             result = zip( ruleNames, range(0, len(ruleNames)))
+             self.ruleIndexMapCache[ruleNames] = result
+         return result
+
+     def getTokenType(self, tokenName:str):
+         ttype = self.getTokenTypeMap().get(tokenName, None)
+         if ttype is not None:
+             return ttype
+         else:
+             return Token.INVALID_TYPE
+
+
+     # What is the error header, normally line/character position information?#
+     def getErrorHeader(self, e:RecognitionException):
+         line = e.getOffendingToken().line
+         column = e.getOffendingToken().column
+         return "line "+line+":"+column
+
+
+     # How should a token be displayed in an error message? The default
+     # is to display just the text, but during development you might
+     # want to have a lot of information spit out. Override in that case
+     # to use t.toString() (which, for CommonToken, dumps everything about
+     # the token). This is better than forcing you to override a method in
+     # your token objects because you don't have to go modify your lexer
+     # so that it creates a new Java type.
+     #
+     # @deprecated This method is not called by the ANTLR 4 Runtime. Specific
+     # implementations of {@link ANTLRErrorStrategy} may provide a similar
+     # feature when necessary. For example, see
+     # {@link DefaultErrorStrategy#getTokenErrorDisplay}.
+     #
+     def getTokenErrorDisplay(self, t:Token):
+         if t is None:
+             return "<no token>"
+         s = t.text
+         if s is None:
+             if t.type==Token.EOF:
+                 s = "<EOF>"
+             else:
+                 s = "<" + str(t.type) + ">"
+         s = s.replace("\n","\\n")
+         s = s.replace("\r","\\r")
+         s = s.replace("\t","\\t")
+         return "'" + s + "'"
+
+     def getErrorListenerDispatch(self):
+         return ProxyErrorListener(self._listeners)
+
+     # subclass needs to override these if there are sempreds or actions
+     # that the ATN interp needs to execute
+     def sempred(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
+         return True
+
+     def precpred(self, localctx:RuleContext , precedence:int):
+         return True
+
+     @property
+     def state(self):
+         return self._stateNumber
+
+     # Indicate that the recognizer has changed internal state that is
+     # consistent with the ATN state passed in. This way we always know
+     # where we are in the ATN as the parser goes along. The rule
+     # context objects form a stack that lets us see the stack of
+     # invoking rules. Combine this and we have complete ATN
+     # configuration information.
+
+     @state.setter
+     def state(self, atnState:int):
+         self._stateNumber = atnState
+
+ del RecognitionException
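The Recognizer added above is the common base for the vendored Lexer and Parser; the most self-contained pieces are the runtime/tool version check and the error-listener bookkeeping. A minimal sketch, assuming the vendored import path and nothing beyond the methods shown in this hunk:

    # Illustrative only; not part of the diff.
    from omlish.antlr._runtime.Recognizer import Recognizer

    r = Recognizer()
    print(r.extractVersion("4.13.1"))  # ('4', '13'): major/minor split on the first two separators
    r.checkVersion("4.13.1")           # silent: matches the embedded runtimeVersion
    r.checkVersion("4.11.1")           # prints a "versions disagree" warning to stdout
    r.removeErrorListeners()           # drops the default ConsoleErrorListener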
omlish/antlr/_runtime/RuleContext.py
@@ -0,0 +1,230 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #/
+
+
+ # A rule context is a record of a single rule invocation. It knows
+ # which context invoked it, if any. If there is no parent context, then
+ # naturally the invoking state is not valid. The parent link
+ # provides a chain upwards from the current rule invocation to the root
+ # of the invocation tree, forming a stack. We actually carry no
+ # information about the rule associated with this context (except
+ # when parsing). We keep only the state number of the invoking state from
+ # the ATN submachine that invoked this. Contrast this with the s
+ # pointer inside ParserRuleContext that tracks the current state
+ # being "executed" for the current rule.
+ #
+ # The parent contexts are useful for computing lookahead sets and
+ # getting error information.
+ #
+ # These objects are used during parsing and prediction.
+ # For the special case of parsers, we use the subclass
+ # ParserRuleContext.
+ #
+ # @see ParserRuleContext
+ #/
+ from io import StringIO
+ from .tree.Tree import RuleNode, INVALID_INTERVAL, ParseTreeVisitor
+ from .tree.Trees import Trees
+
+ # need forward declarations
+ RuleContext = None
+ Parser = None
+
+ class RuleContext(RuleNode):
+     __slots__ = ('parentCtx', 'invokingState')
+     EMPTY = None
+
+     def __init__(self, parent:RuleContext=None, invokingState:int=-1):
+         super().__init__()
+         # What context invoked this rule?
+         self.parentCtx = parent
+         # What state invoked the rule associated with this context?
+         # The "return address" is the followState of invokingState
+         # If parent is null, this should be -1.
+         self.invokingState = invokingState
+
+
+     def depth(self):
+         n = 0
+         p = self
+         while p is not None:
+             p = p.parentCtx
+             n += 1
+         return n
+
+     # A context is empty if there is no invoking state; meaning nobody call
+     # current context.
+     def isEmpty(self):
+         return self.invokingState == -1
+
+     # satisfy the ParseTree / SyntaxTree interface
+
+     def getSourceInterval(self):
+         return INVALID_INTERVAL
+
+     def getRuleContext(self):
+         return self
+
+     def getPayload(self):
+         return self
+
+     # Return the combined text of all child nodes. This method only considers
+     # tokens which have been added to the parse tree.
+     # <p>
+     # Since tokens on hidden channels (e.g. whitespace or comments) are not
+     # added to the parse trees, they will not appear in the output of this
+     # method.
+     #/
+     def getText(self):
+         if self.getChildCount() == 0:
+             return ""
+         with StringIO() as builder:
+             for child in self.getChildren():
+                 builder.write(child.getText())
+             return builder.getvalue()
+
+     def getRuleIndex(self):
+         return -1
+
+     # For rule associated with this parse tree internal node, return
+     # the outer alternative number used to match the input. Default
+     # implementation does not compute nor store this alt num. Create
+     # a subclass of ParserRuleContext with backing field and set
+     # option contextSuperClass.
+     # to set it.
+     def getAltNumber(self):
+         return 0 # should use ATN.INVALID_ALT_NUMBER but won't compile
+
+     # Set the outer alternative number for this context node. Default
+     # implementation does nothing to avoid backing field overhead for
+     # trees that don't need it. Create
+     # a subclass of ParserRuleContext with backing field and set
+     # option contextSuperClass.
+     def setAltNumber(self, altNumber:int):
+         pass
+
+     def getChild(self, i:int):
+         return None
+
+     def getChildCount(self):
+         return 0
+
+     def getChildren(self):
+         for c in []:
+             yield c
+
+     def accept(self, visitor:ParseTreeVisitor):
+         return visitor.visitChildren(self)
+
+     # # Call this method to view a parse tree in a dialog box visually.#/
+     # public Future<JDialog> inspect(@Nullable Parser parser) {
+     # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
+     # return inspect(ruleNames);
+     # }
+     #
+     # public Future<JDialog> inspect(@Nullable List<String> ruleNames) {
+     # TreeViewer viewer = new TreeViewer(ruleNames, this);
+     # return viewer.open();
+     # }
+     #
+     # # Save this tree in a postscript file#/
+     # public void save(@Nullable Parser parser, String fileName)
+     # throws IOException, PrintException
+     # {
+     # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
+     # save(ruleNames, fileName);
+     # }
+     #
+     # # Save this tree in a postscript file using a particular font name and size#/
+     # public void save(@Nullable Parser parser, String fileName,
+     # String fontName, int fontSize)
+     # throws IOException
+     # {
+     # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
+     # save(ruleNames, fileName, fontName, fontSize);
+     # }
+     #
+     # # Save this tree in a postscript file#/
+     # public void save(@Nullable List<String> ruleNames, String fileName)
+     # throws IOException, PrintException
+     # {
+     # Trees.writePS(this, ruleNames, fileName);
+     # }
+     #
+     # # Save this tree in a postscript file using a particular font name and size#/
+     # public void save(@Nullable List<String> ruleNames, String fileName,
+     # String fontName, int fontSize)
+     # throws IOException
+     # {
+     # Trees.writePS(this, ruleNames, fileName, fontName, fontSize);
+     # }
+     #
+     # # Print out a whole tree, not just a node, in LISP format
+     # # (root child1 .. childN). Print just a node if this is a leaf.
+     # # We have to know the recognizer so we can get rule names.
+     # #/
+     # @Override
+     # public String toStringTree(@Nullable Parser recog) {
+     # return Trees.toStringTree(this, recog);
+     # }
+     #
+     # Print out a whole tree, not just a node, in LISP format
+     # (root child1 .. childN). Print just a node if this is a leaf.
+     #
+     def toStringTree(self, ruleNames:list=None, recog:Parser=None):
+         return Trees.toStringTree(self, ruleNames=ruleNames, recog=recog)
+     # }
+     #
+     # @Override
+     # public String toStringTree() {
+     # return toStringTree((List<String>)null);
+     # }
+     #
+     def __str__(self):
+         return self.toString(None, None)
+
+     # @Override
+     # public String toString() {
+     # return toString((List<String>)null, (RuleContext)null);
+     # }
+     #
+     # public final String toString(@Nullable Recognizer<?,?> recog) {
+     # return toString(recog, ParserRuleContext.EMPTY);
+     # }
+     #
+     # public final String toString(@Nullable List<String> ruleNames) {
+     # return toString(ruleNames, null);
+     # }
+     #
+     # // recog null unless ParserRuleContext, in which case we use subclass toString(...)
+     # public String toString(@Nullable Recognizer<?,?> recog, @Nullable RuleContext stop) {
+     # String[] ruleNames = recog != null ? recog.getRuleNames() : null;
+     # List<String> ruleNamesList = ruleNames != null ? Arrays.asList(ruleNames) : null;
+     # return toString(ruleNamesList, stop);
+     # }
+
+     def toString(self, ruleNames:list, stop:RuleContext)->str:
+         with StringIO() as buf:
+             p = self
+             buf.write("[")
+             while p is not None and p is not stop:
+                 if ruleNames is None:
+                     if not p.isEmpty():
+                         buf.write(str(p.invokingState))
+                 else:
+                     ri = p.getRuleIndex()
+                     ruleName = ruleNames[ri] if ri >= 0 and ri < len(ruleNames) else str(ri)
+                     buf.write(ruleName)
+
+                 if p.parentCtx is not None and (ruleNames is not None or not p.parentCtx.isEmpty()):
+                     buf.write(" ")
+
+                 p = p.parentCtx
+
+             buf.write("]")
+             return buf.getvalue()
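RuleContext itself is mostly a chassis for the parentCtx chain; the concrete behavior added here is easiest to see by linking a few contexts by hand. A minimal sketch, assuming the vendored import path (generated parsers would normally build these objects for you):

    # Illustrative only; not part of the diff.
    from omlish.antlr._runtime.RuleContext import RuleContext

    root = RuleContext()                        # empty: invokingState == -1
    child = RuleContext(root, invokingState=5)
    leaf = RuleContext(child, invokingState=7)

    print(leaf.depth())    # 3 -- length of the parentCtx chain, root included
    print(leaf.isEmpty())  # False
    print(str(leaf))       # [7 5] -- invoking states from leaf up to (not including) the empty root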
omlish/antlr/_runtime/StdinStream.py
@@ -0,0 +1,14 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ import codecs
+ import sys
+
+ from .InputStream import InputStream
+
+
+ class StdinStream(InputStream):
+     def __init__(self, encoding:str='ascii', errors:str='strict') -> None:
+         bytes = sys.stdin.buffer.read()
+         data = codecs.decode(bytes, encoding, errors)
+         super().__init__(data)
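StdinStream is a thin convenience wrapper: it eagerly reads all of sys.stdin.buffer, decodes it, and hands the text to InputStream. A hedged sketch of how it might be driven (the generated-lexer name is a placeholder, and the script blocks until stdin is closed):

    # Illustrative only; not part of the diff. Run as: python lex_stdin.py < input.txt
    from omlish.antlr._runtime.StdinStream import StdinStream

    stream = StdinStream(encoding='utf-8')  # decode errors raise by default (errors='strict')
    # lexer = MyGeneratedLexer(stream)      # hypothetical grammar-generated lexer
    print(stream.size, "characters read from stdin")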
omlish/antlr/_runtime/Token.py
@@ -0,0 +1,158 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+ # A token has properties: text, type, line, character position in the line
+ # (so we can ignore tabs), token channel, index, and source from which
+ # we obtained this token.
+ from io import StringIO
+
+
+ class Token (object):
+     __slots__ = ('source', 'type', 'channel', 'start', 'stop', 'tokenIndex', 'line', 'column', '_text')
+
+     INVALID_TYPE = 0
+
+     # During lookahead operations, this "token" signifies we hit rule end ATN state
+     # and did not follow it despite needing to.
+     EPSILON = -2
+
+     MIN_USER_TOKEN_TYPE = 1
+
+     EOF = -1
+
+     # All tokens go to the parser (unless skip() is called in that rule)
+     # on a particular "channel". The parser tunes to a particular channel
+     # so that whitespace etc... can go to the parser on a "hidden" channel.
+
+     DEFAULT_CHANNEL = 0
+
+     # Anything on different channel than DEFAULT_CHANNEL is not parsed
+     # by parser.
+
+     HIDDEN_CHANNEL = 1
+
+     def __init__(self):
+         self.source = None
+         self.type = None # token type of the token
+         self.channel = None # The parser ignores everything not on DEFAULT_CHANNEL
+         self.start = None # optional; return -1 if not implemented.
+         self.stop = None # optional; return -1 if not implemented.
+         self.tokenIndex = None # from 0..n-1 of the token object in the input stream
+         self.line = None # line=1..n of the 1st character
+         self.column = None # beginning of the line at which it occurs, 0..n-1
+         self._text = None # text of the token.
+
+     @property
+     def text(self):
+         return self._text
+
+     # Explicitly set the text for this token. If {code text} is not
+     # {@code null}, then {@link #getText} will return this value rather than
+     # extracting the text from the input.
+     #
+     # @param text The explicit text of the token, or {@code null} if the text
+     # should be obtained from the input along with the start and stop indexes
+     # of the token.
+
+     @text.setter
+     def text(self, text:str):
+         self._text = text
+
+
+     def getTokenSource(self):
+         return self.source[0]
+
+     def getInputStream(self):
+         return self.source[1]
+
+ class CommonToken(Token):
+
+     # An empty {@link Pair} which is used as the default value of
+     # {@link #source} for tokens that do not have a source.
+     EMPTY_SOURCE = (None, None)
+
+     def __init__(self, source:tuple = EMPTY_SOURCE, type:int = None, channel:int=Token.DEFAULT_CHANNEL, start:int=-1, stop:int=-1):
+         super().__init__()
+         self.source = source
+         self.type = type
+         self.channel = channel
+         self.start = start
+         self.stop = stop
+         self.tokenIndex = -1
+         if source[0] is not None:
+             self.line = source[0].line
+             self.column = source[0].column
+         else:
+             self.column = -1
+
+     # Constructs a new {@link CommonToken} as a copy of another {@link Token}.
+     #
+     # <p>
+     # If {@code oldToken} is also a {@link CommonToken} instance, the newly
+     # constructed token will share a reference to the {@link #text} field and
+     # the {@link Pair} stored in {@link #source}. Otherwise, {@link #text} will
+     # be assigned the result of calling {@link #getText}, and {@link #source}
+     # will be constructed from the result of {@link Token#getTokenSource} and
+     # {@link Token#getInputStream}.</p>
+     #
+     # @param oldToken The token to copy.
+     #
+     def clone(self):
+         t = CommonToken(self.source, self.type, self.channel, self.start, self.stop)
+         t.tokenIndex = self.tokenIndex
+         t.line = self.line
+         t.column = self.column
+         t.text = self.text
+         return t
+
+     @property
+     def text(self):
+         if self._text is not None:
+             return self._text
+         input = self.getInputStream()
+         if input is None:
+             return None
+         n = input.size
+         if self.start < n and self.stop < n:
+             return input.getText(self.start, self.stop)
+         else:
+             return "<EOF>"
+
+     @text.setter
+     def text(self, text:str):
+         self._text = text
+
+     def __str__(self):
+         with StringIO() as buf:
+             buf.write("[@")
+             buf.write(str(self.tokenIndex))
+             buf.write(",")
+             buf.write(str(self.start))
+             buf.write(":")
+             buf.write(str(self.stop))
+             buf.write("='")
+             txt = self.text
+             if txt is not None:
+                 txt = txt.replace("\n","\\n")
+                 txt = txt.replace("\r","\\r")
+                 txt = txt.replace("\t","\\t")
+             else:
+                 txt = "<no text>"
+             buf.write(txt)
+             buf.write("',<")
+             buf.write(str(self.type))
+             buf.write(">")
+             if self.channel > 0:
+                 buf.write(",channel=")
+                 buf.write(str(self.channel))
+             buf.write(",")
+             buf.write(str(self.line))
+             buf.write(":")
+             buf.write(str(self.column))
+             buf.write("]")
+             return buf.getvalue()
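Token here is the abstract carrier and CommonToken the concrete implementation whose __str__ produces the familiar [@index,start:stop='text',<type>,line:col] form. A small sketch, assuming the vendored import path, of the explicit-text override and the string rendering:

    # Illustrative only; not part of the diff.
    from omlish.antlr._runtime.Token import CommonToken, Token

    tok = CommonToken(type=Token.MIN_USER_TOKEN_TYPE)  # no source: start/stop/tokenIndex default to -1
    tok.text = "hello\n"   # explicit text wins over lookup through the input stream
    tok.line = 1
    tok.column = 0
    print(str(tok))        # [@-1,-1:-1='hello\n',<1>,1:0]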