omlish-0.0.0.dev57-py3-none-any.whl → omlish-0.0.0.dev58-py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- omlish/__about__.py +2 -2
- omlish/antlr/__init__.py +0 -0
- omlish/antlr/_runtime/BufferedTokenStream.py +305 -0
- omlish/antlr/_runtime/CommonTokenFactory.py +64 -0
- omlish/antlr/_runtime/CommonTokenStream.py +90 -0
- omlish/antlr/_runtime/FileStream.py +30 -0
- omlish/antlr/_runtime/InputStream.py +90 -0
- omlish/antlr/_runtime/IntervalSet.py +183 -0
- omlish/antlr/_runtime/LL1Analyzer.py +176 -0
- omlish/antlr/_runtime/Lexer.py +332 -0
- omlish/antlr/_runtime/ListTokenSource.py +147 -0
- omlish/antlr/_runtime/Parser.py +583 -0
- omlish/antlr/_runtime/ParserInterpreter.py +173 -0
- omlish/antlr/_runtime/ParserRuleContext.py +189 -0
- omlish/antlr/_runtime/PredictionContext.py +632 -0
- omlish/antlr/_runtime/Recognizer.py +150 -0
- omlish/antlr/_runtime/RuleContext.py +230 -0
- omlish/antlr/_runtime/StdinStream.py +14 -0
- omlish/antlr/_runtime/Token.py +158 -0
- omlish/antlr/_runtime/TokenStreamRewriter.py +258 -0
- omlish/antlr/_runtime/Utils.py +36 -0
- omlish/antlr/_runtime/__init__.py +24 -0
- omlish/antlr/_runtime/_pygrun.py +174 -0
- omlish/antlr/_runtime/atn/ATN.py +135 -0
- omlish/antlr/_runtime/atn/ATNConfig.py +162 -0
- omlish/antlr/_runtime/atn/ATNConfigSet.py +215 -0
- omlish/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
- omlish/antlr/_runtime/atn/ATNDeserializer.py +449 -0
- omlish/antlr/_runtime/atn/ATNSimulator.py +50 -0
- omlish/antlr/_runtime/atn/ATNState.py +267 -0
- omlish/antlr/_runtime/atn/ATNType.py +20 -0
- omlish/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
- omlish/antlr/_runtime/atn/LexerAction.py +301 -0
- omlish/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
- omlish/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
- omlish/antlr/_runtime/atn/PredictionMode.py +502 -0
- omlish/antlr/_runtime/atn/SemanticContext.py +333 -0
- omlish/antlr/_runtime/atn/Transition.py +271 -0
- omlish/antlr/_runtime/atn/__init__.py +4 -0
- omlish/antlr/_runtime/dfa/DFA.py +136 -0
- omlish/antlr/_runtime/dfa/DFASerializer.py +76 -0
- omlish/antlr/_runtime/dfa/DFAState.py +129 -0
- omlish/antlr/_runtime/dfa/__init__.py +4 -0
- omlish/antlr/_runtime/error/DiagnosticErrorListener.py +110 -0
- omlish/antlr/_runtime/error/ErrorListener.py +75 -0
- omlish/antlr/_runtime/error/ErrorStrategy.py +712 -0
- omlish/antlr/_runtime/error/Errors.py +176 -0
- omlish/antlr/_runtime/error/__init__.py +4 -0
- omlish/antlr/_runtime/tree/Chunk.py +33 -0
- omlish/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
- omlish/antlr/_runtime/tree/ParseTreePattern.py +75 -0
- omlish/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
- omlish/antlr/_runtime/tree/RuleTagToken.py +53 -0
- omlish/antlr/_runtime/tree/TokenTagToken.py +50 -0
- omlish/antlr/_runtime/tree/Tree.py +194 -0
- omlish/antlr/_runtime/tree/Trees.py +114 -0
- omlish/antlr/_runtime/tree/__init__.py +2 -0
- omlish/antlr/_runtime/xpath/XPath.py +272 -0
- omlish/antlr/_runtime/xpath/XPathLexer.py +98 -0
- omlish/antlr/_runtime/xpath/__init__.py +4 -0
- {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev58.dist-info}/METADATA +1 -1
- {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev58.dist-info}/RECORD +66 -7
- {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev58.dist-info}/LICENSE +0 -0
- {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev58.dist-info}/WHEEL +0 -0
- {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev58.dist-info}/entry_points.txt +0 -0
- {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev58.dist-info}/top_level.txt +0 -0
omlish/antlr/_runtime/atn/LexerATNSimulator.py
@@ -0,0 +1,573 @@
+# type: ignore
+# ruff: noqa
+# flake8: noqa
+#
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+#/
+
+# When we hit an accept state in either the DFA or the ATN, we
+# have to notify the character stream to start buffering characters
+# via {@link IntStream#mark} and record the current state. The current sim state
+# includes the current index into the input, the current line,
+# and current character position in that line. Note that the Lexer is
+# tracking the starting line and characterization of the token. These
+# variables track the "state" of the simulator when it hits an accept state.
+#
+# <p>We track these variables separately for the DFA and ATN simulation
+# because the DFA simulation often has to fail over to the ATN
+# simulation. If the ATN simulation fails, we need the DFA to fall
+# back to its previously accepted state, if any. If the ATN succeeds,
+# then the ATN does the accept and the DFA simulator that invoked it
+# can simply return the predicted token type.</p>
+#/
+
+from ..PredictionContext import PredictionContextCache, SingletonPredictionContext, PredictionContext
+from ..InputStream import InputStream
+from ..Token import Token
+from .ATN import ATN
+from .ATNConfig import LexerATNConfig
+from .ATNSimulator import ATNSimulator
+from .ATNConfigSet import ATNConfigSet, OrderedATNConfigSet
+from .ATNState import RuleStopState, ATNState
+from .LexerActionExecutor import LexerActionExecutor
+from .Transition import Transition
+from ..dfa.DFAState import DFAState
+from ..error.Errors import LexerNoViableAltException, UnsupportedOperationException
+
+class SimState(object):
+    __slots__ = ('index', 'line', 'column', 'dfaState')
+
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self.index = -1
+        self.line = 0
+        self.column = -1
+        self.dfaState = None
+
+# need forward declaration
+Lexer = None
+LexerATNSimulator = None
+
+class LexerATNSimulator(ATNSimulator):
+    __slots__ = (
+        'decisionToDFA', 'recog', 'startIndex', 'line', 'column', 'mode',
+        'DEFAULT_MODE', 'MAX_CHAR_VALUE', 'prevAccept'
+    )
+
+    debug = False
+    dfa_debug = False
+
+    MIN_DFA_EDGE = 0
+    MAX_DFA_EDGE = 127 # forces unicode to stay in ATN
+
+    ERROR = None
+
+    def __init__(self, recog:Lexer, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
+        super().__init__(atn, sharedContextCache)
+        self.decisionToDFA = decisionToDFA
+        self.recog = recog
+        # The current token's starting index into the character stream.
+        # Shared across DFA to ATN simulation in case the ATN fails and the
+        # DFA did not have a previous accept state. In this case, we use the
+        # ATN-generated exception object.
+        self.startIndex = -1
+        # line number 1..n within the input#/
+        self.line = 1
+        # The index of the character relative to the beginning of the line 0..n-1#/
+        self.column = 0
+        from ..Lexer import Lexer
+        self.mode = Lexer.DEFAULT_MODE
+        # Cache Lexer properties to avoid further imports
+        self.DEFAULT_MODE = Lexer.DEFAULT_MODE
+        self.MAX_CHAR_VALUE = Lexer.MAX_CHAR_VALUE
+        # Used during DFA/ATN exec to record the most recent accept configuration info
+        self.prevAccept = SimState()
+
+
+    def copyState(self, simulator:LexerATNSimulator ):
+        self.column = simulator.column
+        self.line = simulator.line
+        self.mode = simulator.mode
+        self.startIndex = simulator.startIndex
+
+    def match(self, input:InputStream , mode:int):
+        self.mode = mode
+        mark = input.mark()
+        try:
+            self.startIndex = input.index
+            self.prevAccept.reset()
+            dfa = self.decisionToDFA[mode]
+            if dfa.s0 is None:
+                return self.matchATN(input)
+            else:
+                return self.execATN(input, dfa.s0)
+        finally:
+            input.release(mark)
+
+    def reset(self):
+        self.prevAccept.reset()
+        self.startIndex = -1
+        self.line = 1
+        self.column = 0
+        self.mode = self.DEFAULT_MODE
+
+    def matchATN(self, input:InputStream):
+        startState = self.atn.modeToStartState[self.mode]
+
+        if LexerATNSimulator.debug:
+            print("matchATN mode " + str(self.mode) + " start: " + str(startState))
+
+        old_mode = self.mode
+        s0_closure = self.computeStartState(input, startState)
+        suppressEdge = s0_closure.hasSemanticContext
+        s0_closure.hasSemanticContext = False
+
+        next = self.addDFAState(s0_closure)
+        if not suppressEdge:
+            self.decisionToDFA[self.mode].s0 = next
+
+        predict = self.execATN(input, next)
+
+        if LexerATNSimulator.debug:
+            print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))
+
+        return predict
+
+    def execATN(self, input:InputStream, ds0:DFAState):
+        if LexerATNSimulator.debug:
+            print("start state closure=" + str(ds0.configs))
+
+        if ds0.isAcceptState:
+            # allow zero-length tokens
+            self.captureSimState(self.prevAccept, input, ds0)
+
+        t = input.LA(1)
+        s = ds0 # s is current/from DFA state
+
+        while True: # while more work
+            if LexerATNSimulator.debug:
+                print("execATN loop starting closure:", str(s.configs))
+
+            # As we move src->trg, src->trg, we keep track of the previous trg to
+            # avoid looking up the DFA state again, which is expensive.
+            # If the previous target was already part of the DFA, we might
+            # be able to avoid doing a reach operation upon t. If s!=null,
+            # it means that semantic predicates didn't prevent us from
+            # creating a DFA state. Once we know s!=null, we check to see if
+            # the DFA state has an edge already for t. If so, we can just reuse
+            # it's configuration set; there's no point in re-computing it.
+            # This is kind of like doing DFA simulation within the ATN
+            # simulation because DFA simulation is really just a way to avoid
+            # computing reach/closure sets. Technically, once we know that
+            # we have a previously added DFA state, we could jump over to
+            # the DFA simulator. But, that would mean popping back and forth
+            # a lot and making things more complicated algorithmically.
+            # This optimization makes a lot of sense for loops within DFA.
+            # A character will take us back to an existing DFA state
+            # that already has lots of edges out of it. e.g., .* in comments.
+            # print("Target for:" + str(s) + " and:" + str(t))
+            target = self.getExistingTargetState(s, t)
+            # print("Existing:" + str(target))
+            if target is None:
+                target = self.computeTargetState(input, s, t)
+                # print("Computed:" + str(target))
+
+            if target == self.ERROR:
+                break
+
+            # If this is a consumable input element, make sure to consume before
+            # capturing the accept state so the input index, line, and char
+            # position accurately reflect the state of the interpreter at the
+            # end of the token.
+            if t != Token.EOF:
+                self.consume(input)
+
+            if target.isAcceptState:
+                self.captureSimState(self.prevAccept, input, target)
+                if t == Token.EOF:
+                    break
+
+            t = input.LA(1)
+
+            s = target # flip; current DFA target becomes new src/from state
+
+        return self.failOrAccept(self.prevAccept, input, s.configs, t)
+
+    # Get an existing target state for an edge in the DFA. If the target state
+    # for the edge has not yet been computed or is otherwise not available,
+    # this method returns {@code null}.
+    #
+    # @param s The current DFA state
+    # @param t The next input symbol
+    # @return The existing target DFA state for the given input symbol
+    # {@code t}, or {@code null} if the target state for this edge is not
+    # already cached
+    def getExistingTargetState(self, s:DFAState, t:int):
+        if s.edges is None or t < self.MIN_DFA_EDGE or t > self.MAX_DFA_EDGE:
+            return None
+
+        target = s.edges[t - self.MIN_DFA_EDGE]
+        if LexerATNSimulator.debug and target is not None:
+            print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber))
+
+        return target
+
+    # Compute a target state for an edge in the DFA, and attempt to add the
+    # computed state and corresponding edge to the DFA.
+    #
+    # @param input The input stream
+    # @param s The current DFA state
+    # @param t The next input symbol
+    #
+    # @return The computed target DFA state for the given input symbol
+    # {@code t}. If {@code t} does not lead to a valid DFA state, this method
+    # returns {@link #ERROR}.
+    def computeTargetState(self, input:InputStream, s:DFAState, t:int):
+        reach = OrderedATNConfigSet()
+
+        # if we don't find an existing DFA state
+        # Fill reach starting from closure, following t transitions
+        self.getReachableConfigSet(input, s.configs, reach, t)
+
+        if len(reach)==0: # we got nowhere on t from s
+            if not reach.hasSemanticContext:
+                # we got nowhere on t, don't throw out this knowledge; it'd
+                # cause a failover from DFA later.
+                self. addDFAEdge(s, t, self.ERROR)
+
+            # stop when we can't match any more char
+            return self.ERROR
+
+        # Add an edge from s to target DFA found/created for reach
+        return self.addDFAEdge(s, t, cfgs=reach)
+
+    def failOrAccept(self, prevAccept:SimState , input:InputStream, reach:ATNConfigSet, t:int):
+        if self.prevAccept.dfaState is not None:
+            lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor
+            self.accept(input, lexerActionExecutor, self.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
+            return prevAccept.dfaState.prediction
+        else:
+            # if no accept and EOF is first char, return EOF
+            if t==Token.EOF and input.index==self.startIndex:
+                return Token.EOF
+            raise LexerNoViableAltException(self.recog, input, self.startIndex, reach)
+
+    # Given a starting configuration set, figure out all ATN configurations
+    # we can reach upon input {@code t}. Parameter {@code reach} is a return
+    # parameter.
+    def getReachableConfigSet(self, input:InputStream, closure:ATNConfigSet, reach:ATNConfigSet, t:int):
+        # this is used to skip processing for configs which have a lower priority
+        # than a config that already reached an accept state for the same rule
+        skipAlt = ATN.INVALID_ALT_NUMBER
+        for cfg in closure:
+            currentAltReachedAcceptState = ( cfg.alt == skipAlt )
+            if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
+                continue
+
+            if LexerATNSimulator.debug:
+                print("testing", self.getTokenName(t), "at", str(cfg))
+
+            for trans in cfg.state.transitions: # for each transition
+                target = self.getReachableTarget(trans, t)
+                if target is not None:
+                    lexerActionExecutor = cfg.lexerActionExecutor
+                    if lexerActionExecutor is not None:
+                        lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - self.startIndex)
+
+                    treatEofAsEpsilon = (t == Token.EOF)
+                    config = LexerATNConfig(state=target, lexerActionExecutor=lexerActionExecutor, config=cfg)
+                    if self.closure(input, config, reach, currentAltReachedAcceptState, True, treatEofAsEpsilon):
+                        # any remaining configs for this alt have a lower priority than
+                        # the one that just reached an accept state.
+                        skipAlt = cfg.alt
+
+    def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int):
+        if LexerATNSimulator.debug:
+            print("ACTION", lexerActionExecutor)
+
+        # seek to after last char in token
+        input.seek(index)
+        self.line = line
+        self.column = charPos
+
+        if lexerActionExecutor is not None and self.recog is not None:
+            lexerActionExecutor.execute(self.recog, input, startIndex)
+
+    def getReachableTarget(self, trans:Transition, t:int):
+        if trans.matches(t, 0, self.MAX_CHAR_VALUE):
+            return trans.target
+        else:
+            return None
+
+    def computeStartState(self, input:InputStream, p:ATNState):
+        initialContext = PredictionContext.EMPTY
+        configs = OrderedATNConfigSet()
+        for i in range(0,len(p.transitions)):
+            target = p.transitions[i].target
+            c = LexerATNConfig(state=target, alt=i+1, context=initialContext)
+            self.closure(input, c, configs, False, False, False)
+        return configs
+
+    # Since the alternatives within any lexer decision are ordered by
+    # preference, this method stops pursuing the closure as soon as an accept
+    # state is reached. After the first accept state is reached by depth-first
+    # search from {@code config}, all other (potentially reachable) states for
+    # this rule would have a lower priority.
+    #
+    # @return {@code true} if an accept state is reached, otherwise
+    # {@code false}.
+    def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool,
+                speculative:bool, treatEofAsEpsilon:bool):
+        if LexerATNSimulator.debug:
+            print("closure(" + str(config) + ")")
+
+        if isinstance( config.state, RuleStopState ):
+            if LexerATNSimulator.debug:
+                if self.recog is not None:
+                    print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config))
+                else:
+                    print("closure at rule stop", str(config))
+
+            if config.context is None or config.context.hasEmptyPath():
+                if config.context is None or config.context.isEmpty():
+                    configs.add(config)
+                    return True
+                else:
+                    configs.add(LexerATNConfig(state=config.state, config=config, context=PredictionContext.EMPTY))
+                    currentAltReachedAcceptState = True
+
+            if config.context is not None and not config.context.isEmpty():
+                for i in range(0,len(config.context)):
+                    if config.context.getReturnState(i) != PredictionContext.EMPTY_RETURN_STATE:
+                        newContext = config.context.getParent(i) # "pop" return state
+                        returnState = self.atn.states[config.context.getReturnState(i)]
+                        c = LexerATNConfig(state=returnState, config=config, context=newContext)
+                        currentAltReachedAcceptState = self.closure(input, c, configs,
+                            currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
+
+            return currentAltReachedAcceptState
+
+        # optimization
+        if not config.state.epsilonOnlyTransitions:
+            if not currentAltReachedAcceptState or not config.passedThroughNonGreedyDecision:
+                configs.add(config)
+
+        for t in config.state.transitions:
+            c = self.getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon)
+            if c is not None:
+                currentAltReachedAcceptState = self.closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
+
+        return currentAltReachedAcceptState
+
+    # side-effect: can alter configs.hasSemanticContext
+    def getEpsilonTarget(self, input:InputStream, config:LexerATNConfig, t:Transition, configs:ATNConfigSet,
+                         speculative:bool, treatEofAsEpsilon:bool):
+        c = None
+        if t.serializationType==Transition.RULE:
+            newContext = SingletonPredictionContext.create(config.context, t.followState.stateNumber)
+            c = LexerATNConfig(state=t.target, config=config, context=newContext)
+
+        elif t.serializationType==Transition.PRECEDENCE:
+            raise UnsupportedOperationException("Precedence predicates are not supported in lexers.")
+
+        elif t.serializationType==Transition.PREDICATE:
+            # Track traversing semantic predicates. If we traverse,
+            # we cannot add a DFA state for this "reach" computation
+            # because the DFA would not test the predicate again in the
+            # future. Rather than creating collections of semantic predicates
+            # like v3 and testing them on prediction, v4 will test them on the
+            # fly all the time using the ATN not the DFA. This is slower but
+            # semantically it's not used that often. One of the key elements to
+            # this predicate mechanism is not adding DFA states that see
+            # predicates immediately afterwards in the ATN. For example,
+
+            # a : ID {p1}? | ID {p2}? ;
+
+            # should create the start state for rule 'a' (to save start state
+            # competition), but should not create target of ID state. The
+            # collection of ATN states the following ID references includes
+            # states reached by traversing predicates. Since this is when we
+            # test them, we cannot cash the DFA state target of ID.
+
+            if LexerATNSimulator.debug:
+                print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
+            configs.hasSemanticContext = True
+            if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
+                c = LexerATNConfig(state=t.target, config=config)
+
+        elif t.serializationType==Transition.ACTION:
+            if config.context is None or config.context.hasEmptyPath():
+                # execute actions anywhere in the start rule for a token.
+                #
+                # TODO: if the entry rule is invoked recursively, some
+                # actions may be executed during the recursive call. The
+                # problem can appear when hasEmptyPath() is true but
+                # isEmpty() is false. In this case, the config needs to be
+                # split into two contexts - one with just the empty path
+                # and another with everything but the empty path.
+                # Unfortunately, the current algorithm does not allow
+                # getEpsilonTarget to return two configurations, so
+                # additional modifications are needed before we can support
+                # the split operation.
+                lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor,
+                    self.atn.lexerActions[t.actionIndex])
+                c = LexerATNConfig(state=t.target, config=config, lexerActionExecutor=lexerActionExecutor)
+
+            else:
+                # ignore actions in referenced rules
+                c = LexerATNConfig(state=t.target, config=config)
+
+        elif t.serializationType==Transition.EPSILON:
+            c = LexerATNConfig(state=t.target, config=config)
+
+        elif t.serializationType in [ Transition.ATOM, Transition.RANGE, Transition.SET ]:
+            if treatEofAsEpsilon:
+                if t.matches(Token.EOF, 0, self.MAX_CHAR_VALUE):
+                    c = LexerATNConfig(state=t.target, config=config)
+
+        return c
+
+    # Evaluate a predicate specified in the lexer.
+    #
+    # <p>If {@code speculative} is {@code true}, this method was called before
+    # {@link #consume} for the matched character. This method should call
+    # {@link #consume} before evaluating the predicate to ensure position
+    # sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine},
+    # and {@link Lexer#getcolumn}, properly reflect the current
+    # lexer state. This method should restore {@code input} and the simulator
+    # to the original state before returning (i.e. undo the actions made by the
+    # call to {@link #consume}.</p>
+    #
+    # @param input The input stream.
+    # @param ruleIndex The rule containing the predicate.
+    # @param predIndex The index of the predicate within the rule.
+    # @param speculative {@code true} if the current index in {@code input} is
+    # one character before the predicate's location.
+    #
+    # @return {@code true} if the specified predicate evaluates to
+    # {@code true}.
+    #/
+    def evaluatePredicate(self, input:InputStream, ruleIndex:int, predIndex:int, speculative:bool):
+        # assume true if no recognizer was provided
+        if self.recog is None:
+            return True
+
+        if not speculative:
+            return self.recog.sempred(None, ruleIndex, predIndex)
+
+        savedcolumn = self.column
+        savedLine = self.line
+        index = input.index
+        marker = input.mark()
+        try:
+            self.consume(input)
+            return self.recog.sempred(None, ruleIndex, predIndex)
+        finally:
+            self.column = savedcolumn
+            self.line = savedLine
+            input.seek(index)
+            input.release(marker)
+
+    def captureSimState(self, settings:SimState, input:InputStream, dfaState:DFAState):
+        settings.index = input.index
+        settings.line = self.line
+        settings.column = self.column
+        settings.dfaState = dfaState
+
+    def addDFAEdge(self, from_:DFAState, tk:int, to:DFAState=None, cfgs:ATNConfigSet=None) -> DFAState:
+
+        if to is None and cfgs is not None:
+            # leading to this call, ATNConfigSet.hasSemanticContext is used as a
+            # marker indicating dynamic predicate evaluation makes this edge
+            # dependent on the specific input sequence, so the static edge in the
+            # DFA should be omitted. The target DFAState is still created since
+            # execATN has the ability to resynchronize with the DFA state cache
+            # following the predicate evaluation step.
+            #
+            # TJP notes: next time through the DFA, we see a pred again and eval.
+            # If that gets us to a previously created (but dangling) DFA
+            # state, we can continue in pure DFA mode from there.
+            #/
+            suppressEdge = cfgs.hasSemanticContext
+            cfgs.hasSemanticContext = False
+
+            to = self.addDFAState(cfgs)
+
+            if suppressEdge:
+                return to
+
+        # add the edge
+        if tk < self.MIN_DFA_EDGE or tk > self.MAX_DFA_EDGE:
+            # Only track edges within the DFA bounds
+            return to
+
+        if LexerATNSimulator.debug:
+            print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))
+
+        if from_.edges is None:
+            # make room for tokens 1..n and -1 masquerading as index 0
+            from_.edges = [ None ] * (self.MAX_DFA_EDGE - self.MIN_DFA_EDGE + 1)
+
+        from_.edges[tk - self.MIN_DFA_EDGE] = to # connect
+
+        return to
+
+
+    # Add a new DFA state if there isn't one with this set of
+    # configurations already. This method also detects the first
+    # configuration containing an ATN rule stop state. Later, when
+    # traversing the DFA, we will know which rule to accept.
+    def addDFAState(self, configs:ATNConfigSet) -> DFAState:
+
+        proposed = DFAState(configs=configs)
+        firstConfigWithRuleStopState = next((cfg for cfg in configs if isinstance(cfg.state, RuleStopState)), None)
+
+        if firstConfigWithRuleStopState is not None:
+            proposed.isAcceptState = True
+            proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
+            proposed.prediction = self.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]
+
+        dfa = self.decisionToDFA[self.mode]
+        existing = dfa.states.get(proposed, None)
+        if existing is not None:
+            return existing
+
+        newState = proposed
+
+        newState.stateNumber = len(dfa.states)
+        configs.setReadonly(True)
+        newState.configs = configs
+        dfa.states[newState] = newState
+        return newState
+
+    def getDFA(self, mode:int):
+        return self.decisionToDFA[mode]
+
+    # Get the text matched so far for the current token.
+    def getText(self, input:InputStream):
+        # index is first lookahead char, don't include.
+        return input.getText(self.startIndex, input.index-1)
+
+    def consume(self, input:InputStream):
+        curChar = input.LA(1)
+        if curChar==ord('\n'):
+            self.line += 1
+            self.column = 0
+        else:
+            self.column += 1
+        input.consume()
+
+    def getTokenName(self, t:int):
+        if t==-1:
+            return "EOF"
+        else:
+            return "'" + chr(t) + "'"
+
+
+LexerATNSimulator.ERROR = DFAState(0x7FFFFFFF, ATNConfigSet())
+
+del Lexer
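The module added above is not used directly; it is driven by an ANTLR-generated lexer built against the vendored omlish.antlr._runtime package. A minimal usage sketch follows, assuming a hypothetical generated lexer named MyGrammarLexer (not part of this release); InputStream and CommonTokenStream are the vendored runtime modules listed in this diff.

# Illustrative sketch only; not part of the packaged diff above.
from omlish.antlr._runtime.InputStream import InputStream
from omlish.antlr._runtime.CommonTokenStream import CommonTokenStream

from mygrammar.MyGrammarLexer import MyGrammarLexer  # hypothetical generated lexer

stream = InputStream("1 + 2 * 3")
lexer = MyGrammarLexer(stream)   # the generated lexer drives LexerATNSimulator.match() per token
tokens = CommonTokenStream(lexer)
tokens.fill()                    # lex the entire input
for tok in tokens.tokens:
    print(tok)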