omextra-0.0.0.dev437-py3-none-any.whl → omextra-0.0.0.dev439-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- omextra/formats/json/_antlr/JsonLexer.py +1 -1
- omextra/formats/json/_antlr/JsonListener.py +1 -1
- omextra/formats/json/_antlr/JsonParser.py +1 -1
- omextra/formats/json/_antlr/JsonVisitor.py +1 -1
- omextra/formats/json5/Json5.g4 +168 -0
- omextra/formats/json5/__init__.py +0 -0
- omextra/formats/json5/_antlr/Json5Lexer.py +354 -0
- omextra/formats/json5/_antlr/Json5Listener.py +79 -0
- omextra/formats/json5/_antlr/Json5Parser.py +617 -0
- omextra/formats/json5/_antlr/Json5Visitor.py +52 -0
- omextra/formats/json5/_antlr/__init__.py +0 -0
- omextra/formats/json5/parsing.py +101 -0
- omextra/specs/proto/_antlr/Protobuf3Lexer.py +1 -1
- omextra/specs/proto/_antlr/Protobuf3Listener.py +1 -1
- omextra/specs/proto/_antlr/Protobuf3Parser.py +1 -1
- omextra/specs/proto/_antlr/Protobuf3Visitor.py +1 -1
- omextra/specs/proto/parsing.py +2 -2
- omextra/sql/parsing/_antlr/MinisqlLexer.py +1 -1
- omextra/sql/parsing/_antlr/MinisqlListener.py +1 -1
- omextra/sql/parsing/_antlr/MinisqlParser.py +1 -1
- omextra/sql/parsing/_antlr/MinisqlVisitor.py +1 -1
- omextra/sql/parsing/parsing.py +3 -3
- omextra/text/antlr/__init__.py +3 -0
- omextra/text/antlr/_runtime/BufferedTokenStream.py +305 -0
- omextra/text/antlr/_runtime/CommonTokenFactory.py +64 -0
- omextra/text/antlr/_runtime/CommonTokenStream.py +90 -0
- omextra/text/antlr/_runtime/FileStream.py +30 -0
- omextra/text/antlr/_runtime/InputStream.py +90 -0
- omextra/text/antlr/_runtime/IntervalSet.py +183 -0
- omextra/text/antlr/_runtime/LICENSE.txt +28 -0
- omextra/text/antlr/_runtime/LL1Analyzer.py +176 -0
- omextra/text/antlr/_runtime/Lexer.py +332 -0
- omextra/text/antlr/_runtime/ListTokenSource.py +147 -0
- omextra/text/antlr/_runtime/Parser.py +583 -0
- omextra/text/antlr/_runtime/ParserInterpreter.py +173 -0
- omextra/text/antlr/_runtime/ParserRuleContext.py +189 -0
- omextra/text/antlr/_runtime/PredictionContext.py +632 -0
- omextra/text/antlr/_runtime/Recognizer.py +150 -0
- omextra/text/antlr/_runtime/RuleContext.py +230 -0
- omextra/text/antlr/_runtime/StdinStream.py +14 -0
- omextra/text/antlr/_runtime/Token.py +158 -0
- omextra/text/antlr/_runtime/TokenStreamRewriter.py +258 -0
- omextra/text/antlr/_runtime/Utils.py +36 -0
- omextra/text/antlr/_runtime/__init__.py +2 -0
- omextra/text/antlr/_runtime/_all.py +24 -0
- omextra/text/antlr/_runtime/_pygrun.py +174 -0
- omextra/text/antlr/_runtime/atn/ATN.py +135 -0
- omextra/text/antlr/_runtime/atn/ATNConfig.py +162 -0
- omextra/text/antlr/_runtime/atn/ATNConfigSet.py +215 -0
- omextra/text/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
- omextra/text/antlr/_runtime/atn/ATNDeserializer.py +449 -0
- omextra/text/antlr/_runtime/atn/ATNSimulator.py +50 -0
- omextra/text/antlr/_runtime/atn/ATNState.py +267 -0
- omextra/text/antlr/_runtime/atn/ATNType.py +20 -0
- omextra/text/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
- omextra/text/antlr/_runtime/atn/LexerAction.py +301 -0
- omextra/text/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
- omextra/text/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
- omextra/text/antlr/_runtime/atn/PredictionMode.py +502 -0
- omextra/text/antlr/_runtime/atn/SemanticContext.py +333 -0
- omextra/text/antlr/_runtime/atn/Transition.py +271 -0
- omextra/text/antlr/_runtime/atn/__init__.py +4 -0
- omextra/text/antlr/_runtime/dfa/DFA.py +136 -0
- omextra/text/antlr/_runtime/dfa/DFASerializer.py +76 -0
- omextra/text/antlr/_runtime/dfa/DFAState.py +129 -0
- omextra/text/antlr/_runtime/dfa/__init__.py +4 -0
- omextra/text/antlr/_runtime/error/DiagnosticErrorListener.py +111 -0
- omextra/text/antlr/_runtime/error/ErrorListener.py +75 -0
- omextra/text/antlr/_runtime/error/ErrorStrategy.py +712 -0
- omextra/text/antlr/_runtime/error/Errors.py +176 -0
- omextra/text/antlr/_runtime/error/__init__.py +4 -0
- omextra/text/antlr/_runtime/tree/Chunk.py +33 -0
- omextra/text/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
- omextra/text/antlr/_runtime/tree/ParseTreePattern.py +75 -0
- omextra/text/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
- omextra/text/antlr/_runtime/tree/RuleTagToken.py +53 -0
- omextra/text/antlr/_runtime/tree/TokenTagToken.py +50 -0
- omextra/text/antlr/_runtime/tree/Tree.py +194 -0
- omextra/text/antlr/_runtime/tree/Trees.py +114 -0
- omextra/text/antlr/_runtime/tree/__init__.py +2 -0
- omextra/text/antlr/_runtime/xpath/XPath.py +278 -0
- omextra/text/antlr/_runtime/xpath/XPathLexer.py +98 -0
- omextra/text/antlr/_runtime/xpath/__init__.py +4 -0
- omextra/text/antlr/cli/consts.py +1 -1
- omextra/text/antlr/delimit.py +110 -0
- omextra/text/antlr/dot.py +42 -0
- omextra/text/antlr/errors.py +14 -0
- omextra/text/antlr/input.py +96 -0
- omextra/text/antlr/parsing.py +55 -0
- omextra/text/antlr/runtime.py +102 -0
- omextra/text/antlr/utils.py +38 -0
- omextra-0.0.0.dev439.dist-info/METADATA +28 -0
- omextra-0.0.0.dev439.dist-info/RECORD +144 -0
- omextra-0.0.0.dev437.dist-info/METADATA +0 -73
- omextra-0.0.0.dev437.dist-info/RECORD +0 -69
- {omextra-0.0.0.dev437.dist-info → omextra-0.0.0.dev439.dist-info}/WHEEL +0 -0
- {omextra-0.0.0.dev437.dist-info → omextra-0.0.0.dev439.dist-info}/entry_points.txt +0 -0
- {omextra-0.0.0.dev437.dist-info → omextra-0.0.0.dev439.dist-info}/licenses/LICENSE +0 -0
- {omextra-0.0.0.dev437.dist-info → omextra-0.0.0.dev439.dist-info}/top_level.txt +0 -0
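
The headline changes are a new JSON5 format package (`omextra/formats/json5`, with an ANTLR grammar, generated lexer/parser, and a `parsing.py` entry point) and a vendored copy of the ANTLR Python runtime under `omextra/text/antlr/_runtime`. For orientation, a minimal sketch of driving the generated JSON5 parser against the vendored runtime might look like the following; the class names come from the file list above, but the import paths and the `json5` entry rule are assumptions, not confirmed by this diff (check `Json5.g4` and `parsing.py` for the real API):

```python
# Hypothetical usage sketch; module paths mirror the file list above and
# the entry rule name is assumed, not confirmed by this diff.
from omextra.text.antlr._runtime.InputStream import InputStream
from omextra.text.antlr._runtime.CommonTokenStream import CommonTokenStream
from omextra.formats.json5._antlr.Json5Lexer import Json5Lexer
from omextra.formats.json5._antlr.Json5Parser import Json5Parser

source = "{ key: 'value', list: [1, 2, 3,], /* JSON5 allows comments */ }"
lexer = Json5Lexer(InputStream(source))
parser = Json5Parser(CommonTokenStream(lexer))
tree = parser.json5()  # assumed entry rule; see Json5.g4
print(tree.toStringTree(recog=parser))
```

Two of the larger vendored runtime files are shown below.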
omextra/text/antlr/_runtime/Lexer.py
@@ -0,0 +1,332 @@
+# type: ignore
+# ruff: noqa
+# flake8: noqa
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+#/
+
+# A lexer is recognizer that draws input symbols from a character stream.
+# lexer grammars result in a subclass of self object. A Lexer object
+# uses simplified match() and error recovery mechanisms in the interest
+# of speed.
+#/
+from io import StringIO
+
+import sys
+if sys.version_info[1] > 5:
+    from typing import TextIO
+else:
+    from typing.io import TextIO
+from .CommonTokenFactory import CommonTokenFactory
+from .atn.LexerATNSimulator import LexerATNSimulator
+from .InputStream import InputStream
+from .Recognizer import Recognizer
+from .Token import Token
+from .error.Errors import IllegalStateException, LexerNoViableAltException, RecognitionException
+
+class TokenSource(object):
+
+    pass
+
+
+class Lexer(Recognizer, TokenSource):
+    __slots__ = (
+        '_input', '_output', '_factory', '_tokenFactorySourcePair', '_token',
+        '_tokenStartCharIndex', '_tokenStartLine', '_tokenStartColumn',
+        '_hitEOF', '_channel', '_type', '_modeStack', '_mode', '_text'
+    )
+
+    DEFAULT_MODE = 0
+    MORE = -2
+    SKIP = -3
+
+    DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL
+    HIDDEN = Token.HIDDEN_CHANNEL
+    MIN_CHAR_VALUE = 0x0000
+    MAX_CHAR_VALUE = 0x10FFFF
+
+    def __init__(self, input:InputStream, output:TextIO = sys.stdout):
+        super().__init__()
+        self._input = input
+        self._output = output
+        self._factory = CommonTokenFactory.DEFAULT
+        self._tokenFactorySourcePair = (self, input)
+
+        self._interp = None # child classes must populate this
+
+        # The goal of all lexer rules/methods is to create a token object.
+        # self is an instance variable as multiple rules may collaborate to
+        # create a single token. nextToken will return self object after
+        # matching lexer rule(s). If you subclass to allow multiple token
+        # emissions, then set self to the last token to be matched or
+        # something nonnull so that the auto token emit mechanism will not
+        # emit another token.
+        self._token = None
+
+        # What character index in the stream did the current token start at?
+        # Needed, for example, to get the text for current token. Set at
+        # the start of nextToken.
+        self._tokenStartCharIndex = -1
+
+        # The line on which the first character of the token resides#/
+        self._tokenStartLine = -1
+
+        # The character position of first character within the line#/
+        self._tokenStartColumn = -1
+
+        # Once we see EOF on char stream, next token will be EOF.
+        # If you have DONE : EOF ; then you see DONE EOF.
+        self._hitEOF = False
+
+        # The channel number for the current token#/
+        self._channel = Token.DEFAULT_CHANNEL
+
+        # The token type for the current token#/
+        self._type = Token.INVALID_TYPE
+
+        self._modeStack = []
+        self._mode = self.DEFAULT_MODE
+
+        # You can set the text for the current token to override what is in
+        # the input char buffer. Use setText() or can set self instance var.
+        #/
+        self._text = None
+
+
+    def reset(self):
+        # wack Lexer state variables
+        if self._input is not None:
+            self._input.seek(0) # rewind the input
+        self._token = None
+        self._type = Token.INVALID_TYPE
+        self._channel = Token.DEFAULT_CHANNEL
+        self._tokenStartCharIndex = -1
+        self._tokenStartColumn = -1
+        self._tokenStartLine = -1
+        self._text = None
+
+        self._hitEOF = False
+        self._mode = Lexer.DEFAULT_MODE
+        self._modeStack = []
+
+        self._interp.reset()
+
+    # Return a token from self source; i.e., match a token on the char
+    # stream.
+    def nextToken(self):
+        if self._input is None:
+            raise IllegalStateException("nextToken requires a non-null input stream.")
+
+        # Mark start location in char stream so unbuffered streams are
+        # guaranteed at least have text of current token
+        tokenStartMarker = self._input.mark()
+        try:
+            while True:
+                if self._hitEOF:
+                    self.emitEOF()
+                    return self._token
+                self._token = None
+                self._channel = Token.DEFAULT_CHANNEL
+                self._tokenStartCharIndex = self._input.index
+                self._tokenStartColumn = self._interp.column
+                self._tokenStartLine = self._interp.line
+                self._text = None
+                continueOuter = False
+                while True:
+                    self._type = Token.INVALID_TYPE
+                    ttype = self.SKIP
+                    try:
+                        ttype = self._interp.match(self._input, self._mode)
+                    except LexerNoViableAltException as e:
+                        self.notifyListeners(e) # report error
+                        self.recover(e)
+                    if self._input.LA(1)==Token.EOF:
+                        self._hitEOF = True
+                    if self._type == Token.INVALID_TYPE:
+                        self._type = ttype
+                    if self._type == self.SKIP:
+                        continueOuter = True
+                        break
+                    if self._type!=self.MORE:
+                        break
+                if continueOuter:
+                    continue
+                if self._token is None:
+                    self.emit()
+                return self._token
+        finally:
+            # make sure we release marker after match or
+            # unbuffered char stream will keep buffering
+            self._input.release(tokenStartMarker)
+
+    # Instruct the lexer to skip creating a token for current lexer rule
+    # and look for another token. nextToken() knows to keep looking when
+    # a lexer rule finishes with token set to SKIP_TOKEN. Recall that
+    # if token==null at end of any token rule, it creates one for you
+    # and emits it.
+    #/
+    def skip(self):
+        self._type = self.SKIP
+
+    def more(self):
+        self._type = self.MORE
+
+    def mode(self, m:int):
+        self._mode = m
+
+    def pushMode(self, m:int):
+        if self._interp.debug:
+            print("pushMode " + str(m), file=self._output)
+        self._modeStack.append(self._mode)
+        self.mode(m)
+
+    def popMode(self):
+        if len(self._modeStack)==0:
+            raise Exception("Empty Stack")
+        if self._interp.debug:
+            print("popMode back to "+ self._modeStack[:-1], file=self._output)
+        self.mode( self._modeStack.pop() )
+        return self._mode
+
+    # Set the char stream and reset the lexer#/
+    @property
+    def inputStream(self):
+        return self._input
+
+    @inputStream.setter
+    def inputStream(self, input:InputStream):
+        self._input = None
+        self._tokenFactorySourcePair = (self, self._input)
+        self.reset()
+        self._input = input
+        self._tokenFactorySourcePair = (self, self._input)
+
+    @property
+    def sourceName(self):
+        return self._input.sourceName
+
+    # By default does not support multiple emits per nextToken invocation
+    # for efficiency reasons. Subclass and override self method, nextToken,
+    # and getToken (to push tokens into a list and pull from that list
+    # rather than a single variable as self implementation does).
+    #/
+    def emitToken(self, token:Token):
+        self._token = token
+
+    # The standard method called to automatically emit a token at the
+    # outermost lexical rule. The token object should point into the
+    # char buffer start..stop. If there is a text override in 'text',
+    # use that to set the token's text. Override self method to emit
+    # custom Token objects or provide a new factory.
+    #/
+    def emit(self):
+        t = self._factory.create(self._tokenFactorySourcePair, self._type, self._text, self._channel, self._tokenStartCharIndex,
+                                 self.getCharIndex()-1, self._tokenStartLine, self._tokenStartColumn)
+        self.emitToken(t)
+        return t
+
+    def emitEOF(self):
+        cpos = self.column
+        lpos = self.line
+        eof = self._factory.create(self._tokenFactorySourcePair, Token.EOF, None, Token.DEFAULT_CHANNEL, self._input.index,
+                                   self._input.index-1, lpos, cpos)
+        self.emitToken(eof)
+        return eof
+
+    @property
+    def type(self):
+        return self._type
+
+    @type.setter
+    def type(self, type:int):
+        self._type = type
+
+    @property
+    def line(self):
+        return self._interp.line
+
+    @line.setter
+    def line(self, line:int):
+        self._interp.line = line
+
+    @property
+    def column(self):
+        return self._interp.column
+
+    @column.setter
+    def column(self, column:int):
+        self._interp.column = column
+
+    # What is the index of the current character of lookahead?#/
+    def getCharIndex(self):
+        return self._input.index
+
+    # Return the text matched so far for the current token or any
+    # text override.
+    @property
+    def text(self):
+        if self._text is not None:
+            return self._text
+        else:
+            return self._interp.getText(self._input)
+
+    # Set the complete text of self token; it wipes any previous
+    # changes to the text.
+    @text.setter
+    def text(self, txt:str):
+        self._text = txt
+
+    # Return a list of all Token objects in input char stream.
+    # Forces load of all tokens. Does not include EOF token.
+    #/
+    def getAllTokens(self):
+        tokens = []
+        t = self.nextToken()
+        while t.type!=Token.EOF:
+            tokens.append(t)
+            t = self.nextToken()
+        return tokens
+
+    def notifyListeners(self, e:LexerNoViableAltException):
+        start = self._tokenStartCharIndex
+        stop = self._input.index
+        text = self._input.getText(start, stop)
+        msg = "token recognition error at: '" + self.getErrorDisplay(text) + "'"
+        listener = self.getErrorListenerDispatch()
+        listener.syntaxError(self, None, self._tokenStartLine, self._tokenStartColumn, msg, e)
+
+    def getErrorDisplay(self, s:str):
+        with StringIO() as buf:
+            for c in s:
+                buf.write(self.getErrorDisplayForChar(c))
+            return buf.getvalue()
+
+    def getErrorDisplayForChar(self, c:str):
+        if ord(c[0])==Token.EOF:
+            return "<EOF>"
+        elif c=='\n':
+            return "\\n"
+        elif c=='\t':
+            return "\\t"
+        elif c=='\r':
+            return "\\r"
+        else:
+            return c
+
+    def getCharErrorDisplay(self, c:str):
+        return "'" + self.getErrorDisplayForChar(c) + "'"
+
+    # Lexers can normally match any char in it's vocabulary after matching
+    # a token, so do the easy thing and just kill a character and hope
+    # it all works out. You can instead use the rule invocation stack
+    # to do sophisticated error recovery if you are in a fragment rule.
+    #/
+    def recover(self, re:RecognitionException):
+        if self._input.LA(1) != Token.EOF:
+            if isinstance(re, LexerNoViableAltException):
+                # skip a char and try again
+                self._interp.consume(self._input)
+            else:
+                # TODO: Do we lose character or line position information?
+                self._input.consume()
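
The `getAllTokens()` method above is the simplest way to exercise a lexer in isolation. A minimal sketch, assuming the vendored modules are importable at the paths implied by the file list:

```python
# Sketch only: tokenize a JSON5 snippet with the generated lexer.
# getAllTokens() loops nextToken() and excludes the trailing EOF token,
# as defined in Lexer.py above; the import paths are assumptions.
from omextra.text.antlr._runtime.InputStream import InputStream
from omextra.formats.json5._antlr.Json5Lexer import Json5Lexer

lexer = Json5Lexer(InputStream("{ a: 1 }"))
for tok in lexer.getAllTokens():
    print(tok.type, repr(tok.text), tok.line, tok.column)
```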
omextra/text/antlr/_runtime/ListTokenSource.py
@@ -0,0 +1,147 @@
+# type: ignore
+# ruff: noqa
+# flake8: noqa
+#
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+#
+
+#
+# Provides an implementation of {@link TokenSource} as a wrapper around a list
+# of {@link Token} objects.
+#
+# <p>If the final token in the list is an {@link Token#EOF} token, it will be used
+# as the EOF token for every call to {@link #nextToken} after the end of the
+# list is reached. Otherwise, an EOF token will be created.</p>
+#
+from .CommonTokenFactory import CommonTokenFactory
+from .Lexer import TokenSource
+from .Token import Token
+
+
+class ListTokenSource(TokenSource):
+    __slots__ = ('tokens', 'sourceName', 'pos', 'eofToken', '_factory')
+
+    # Constructs a new {@link ListTokenSource} instance from the specified
+    # collection of {@link Token} objects and source name.
+    #
+    # @param tokens The collection of {@link Token} objects to provide as a
+    # {@link TokenSource}.
+    # @param sourceName The name of the {@link TokenSource}. If this value is
+    # {@code null}, {@link #getSourceName} will attempt to infer the name from
+    # the next {@link Token} (or the previous token if the end of the input has
+    # been reached).
+    #
+    # @exception NullPointerException if {@code tokens} is {@code null}
+    #
+    def __init__(self, tokens:list, sourceName:str=None):
+        if tokens is None:
+            raise ReferenceError("tokens cannot be null")
+        self.tokens = tokens
+        self.sourceName = sourceName
+        # The index into {@link #tokens} of token to return by the next call to
+        # {@link #nextToken}. The end of the input is indicated by this value
+        # being greater than or equal to the number of items in {@link #tokens}.
+        self.pos = 0
+        # This field caches the EOF token for the token source.
+        self.eofToken = None
+        # This is the backing field for {@link #getTokenFactory} and
+        self._factory = CommonTokenFactory.DEFAULT
+
+
+    #
+    # {@inheritDoc}
+    #
+    @property
+    def column(self):
+        if self.pos < len(self.tokens):
+            return self.tokens[self.pos].column
+        elif self.eofToken is not None:
+            return self.eofToken.column
+        elif len(self.tokens) > 0:
+            # have to calculate the result from the line/column of the previous
+            # token, along with the text of the token.
+            lastToken = self.tokens[len(self.tokens) - 1]
+            tokenText = lastToken.text
+            if tokenText is not None:
+                lastNewLine = tokenText.rfind('\n')
+                if lastNewLine >= 0:
+                    return len(tokenText) - lastNewLine - 1
+            return lastToken.column + lastToken.stop - lastToken.start + 1
+
+        # only reach this if tokens is empty, meaning EOF occurs at the first
+        # position in the input
+        return 0
+
+    #
+    # {@inheritDoc}
+    #
+    def nextToken(self):
+        if self.pos >= len(self.tokens):
+            if self.eofToken is None:
+                start = -1
+                if len(self.tokens) > 0:
+                    previousStop = self.tokens[len(self.tokens) - 1].stop
+                    if previousStop != -1:
+                        start = previousStop + 1
+                stop = max(-1, start - 1)
+                self.eofToken = self._factory.create((self, self.getInputStream()),
+                        Token.EOF, "EOF", Token.DEFAULT_CHANNEL, start, stop, self.line, self.column)
+            return self.eofToken
+        t = self.tokens[self.pos]
+        if self.pos == len(self.tokens) - 1 and t.type == Token.EOF:
+            self.eofToken = t
+        self.pos += 1
+        return t
+
+    #
+    # {@inheritDoc}
+    #
+    @property
+    def line(self):
+        if self.pos < len(self.tokens):
+            return self.tokens[self.pos].line
+        elif self.eofToken is not None:
+            return self.eofToken.line
+        elif len(self.tokens) > 0:
+            # have to calculate the result from the line/column of the previous
+            # token, along with the text of the token.
+            lastToken = self.tokens[len(self.tokens) - 1]
+            line = lastToken.line
+            tokenText = lastToken.text
+            if tokenText is not None:
+                line += tokenText.count('\n')
+
+            # if no text is available, assume the token did not contain any newline characters.
+            return line
+
+        # only reach this if tokens is empty, meaning EOF occurs at the first
+        # position in the input
+        return 1
+
+    #
+    # {@inheritDoc}
+    #
+    def getInputStream(self):
+        if self.pos < len(self.tokens):
+            return self.tokens[self.pos].getInputStream()
+        elif self.eofToken is not None:
+            return self.eofToken.getInputStream()
+        elif len(self.tokens) > 0:
+            return self.tokens[len(self.tokens) - 1].getInputStream()
+        else:
+            # no input stream information is available
+            return None
+
+    #
+    # {@inheritDoc}
+    #
+    def getSourceName(self):
+        if self.sourceName is not None:
+            return self.sourceName
+        inputStream = self.getInputStream()
+        if inputStream is not None:
+            return inputStream.getSourceName()
+        else:
+            return "List"
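
`ListTokenSource` is typically used to replay an already-buffered token list, for example to re-parse tokens produced by an earlier lexing pass. A sketch under the same path assumptions as above; per `nextToken()`, the source synthesizes an EOF token once the list is exhausted:

```python
# Sketch only: wrap a buffered token list as a TokenSource and fill a
# token stream from it. An EOF token is synthesized when the list runs out.
from omextra.text.antlr._runtime.InputStream import InputStream
from omextra.text.antlr._runtime.CommonTokenStream import CommonTokenStream
from omextra.text.antlr._runtime.ListTokenSource import ListTokenSource
from omextra.formats.json5._antlr.Json5Lexer import Json5Lexer

tokens = Json5Lexer(InputStream("[1, 2, 3]")).getAllTokens()  # no EOF token
stream = CommonTokenStream(ListTokenSource(tokens))
stream.fill()  # drains the source, ending at the synthesized EOF
print([t.text for t in stream.tokens])
```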