omlish 0.0.0.dev56__py3-none-any.whl → 0.0.0.dev58__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omlish/__about__.py +2 -2
- omlish/antlr/__init__.py +0 -0
- omlish/antlr/_runtime/BufferedTokenStream.py +305 -0
- omlish/antlr/_runtime/CommonTokenFactory.py +64 -0
- omlish/antlr/_runtime/CommonTokenStream.py +90 -0
- omlish/antlr/_runtime/FileStream.py +30 -0
- omlish/antlr/_runtime/InputStream.py +90 -0
- omlish/antlr/_runtime/IntervalSet.py +183 -0
- omlish/antlr/_runtime/LL1Analyzer.py +176 -0
- omlish/antlr/_runtime/Lexer.py +332 -0
- omlish/antlr/_runtime/ListTokenSource.py +147 -0
- omlish/antlr/_runtime/Parser.py +583 -0
- omlish/antlr/_runtime/ParserInterpreter.py +173 -0
- omlish/antlr/_runtime/ParserRuleContext.py +189 -0
- omlish/antlr/_runtime/PredictionContext.py +632 -0
- omlish/antlr/_runtime/Recognizer.py +150 -0
- omlish/antlr/_runtime/RuleContext.py +230 -0
- omlish/antlr/_runtime/StdinStream.py +14 -0
- omlish/antlr/_runtime/Token.py +158 -0
- omlish/antlr/_runtime/TokenStreamRewriter.py +258 -0
- omlish/antlr/_runtime/Utils.py +36 -0
- omlish/antlr/_runtime/__init__.py +24 -0
- omlish/antlr/_runtime/_pygrun.py +174 -0
- omlish/antlr/_runtime/atn/ATN.py +135 -0
- omlish/antlr/_runtime/atn/ATNConfig.py +162 -0
- omlish/antlr/_runtime/atn/ATNConfigSet.py +215 -0
- omlish/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
- omlish/antlr/_runtime/atn/ATNDeserializer.py +449 -0
- omlish/antlr/_runtime/atn/ATNSimulator.py +50 -0
- omlish/antlr/_runtime/atn/ATNState.py +267 -0
- omlish/antlr/_runtime/atn/ATNType.py +20 -0
- omlish/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
- omlish/antlr/_runtime/atn/LexerAction.py +301 -0
- omlish/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
- omlish/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
- omlish/antlr/_runtime/atn/PredictionMode.py +502 -0
- omlish/antlr/_runtime/atn/SemanticContext.py +333 -0
- omlish/antlr/_runtime/atn/Transition.py +271 -0
- omlish/antlr/_runtime/atn/__init__.py +4 -0
- omlish/antlr/_runtime/dfa/DFA.py +136 -0
- omlish/antlr/_runtime/dfa/DFASerializer.py +76 -0
- omlish/antlr/_runtime/dfa/DFAState.py +129 -0
- omlish/antlr/_runtime/dfa/__init__.py +4 -0
- omlish/antlr/_runtime/error/DiagnosticErrorListener.py +110 -0
- omlish/antlr/_runtime/error/ErrorListener.py +75 -0
- omlish/antlr/_runtime/error/ErrorStrategy.py +712 -0
- omlish/antlr/_runtime/error/Errors.py +176 -0
- omlish/antlr/_runtime/error/__init__.py +4 -0
- omlish/antlr/_runtime/tree/Chunk.py +33 -0
- omlish/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
- omlish/antlr/_runtime/tree/ParseTreePattern.py +75 -0
- omlish/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
- omlish/antlr/_runtime/tree/RuleTagToken.py +53 -0
- omlish/antlr/_runtime/tree/TokenTagToken.py +50 -0
- omlish/antlr/_runtime/tree/Tree.py +194 -0
- omlish/antlr/_runtime/tree/Trees.py +114 -0
- omlish/antlr/_runtime/tree/__init__.py +2 -0
- omlish/antlr/_runtime/xpath/XPath.py +272 -0
- omlish/antlr/_runtime/xpath/XPathLexer.py +98 -0
- omlish/antlr/_runtime/xpath/__init__.py +4 -0
- omlish/formats/json/cli.py +76 -7
- omlish/formats/props.py +6 -2
- {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/METADATA +1 -1
- {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/RECORD +68 -9
- {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/LICENSE +0 -0
- {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/WHEEL +0 -0
- {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/entry_points.txt +0 -0
- {omlish-0.0.0.dev56.dist-info → omlish-0.0.0.dev58.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,632 @@
|
|
1
|
+
# type: ignore
|
2
|
+
# ruff: noqa
|
3
|
+
# flake8: noqa
|
4
|
+
#
|
5
|
+
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
6
|
+
# Use of this file is governed by the BSD 3-clause license that
|
7
|
+
# can be found in the LICENSE.txt file in the project root.
|
8
|
+
#/
|
9
|
+
from .RuleContext import RuleContext
|
10
|
+
from .atn.ATN import ATN
|
11
|
+
from .error.Errors import IllegalStateException
|
12
|
+
from io import StringIO
|
13
|
+
|
14
|
+
# dup ParserATNSimulator class var here to avoid circular import; no idea why this can't be in PredictionContext
|
15
|
+
_trace_atn_sim = False
|
16
|
+
|
17
|
+
class PredictionContext(object):
    """Base node of the linked prediction-context graph.

    Each concrete node records one or more (parent, returnState) pairs; this
    base type also serves as the behavior of the shared empty context.
    """

    # Represents {@code $} in local-context prediction, which means wildcard:
    # {@code # + x = #}. Rebound to an EmptyPredictionContext instance once
    # that class is defined later in this module.
    EMPTY = None

    # Represents {@code $} in an array in full-context mode, when {@code $}
    # is NOT a wildcard: {@code $ + x = [$, x]}. Here {@code $} pairs with
    # this sentinel return state and a None parent.
    EMPTY_RETURN_STATE = 0x7FFFFFFF

    globalNodeCount = 1
    id = globalNodeCount

    def __init__(self, cachedHashCode: int):
        # Subclasses compute their hash once up front; it is memoised here so
        # __hash__ is O(1) on a graph of arbitrary depth.
        self.cachedHashCode = cachedHashCode

    def __len__(self):
        # The base/empty context carries no (parent, returnState) pairs.
        return 0

    def isEmpty(self):
        """Return True only for the shared {@link #EMPTY} singleton."""
        return self is self.EMPTY

    def hasEmptyPath(self):
        # EMPTY_RETURN_STATE may only occur in the final slot, so checking
        # the last return state suffices.
        return self.getReturnState(len(self) - 1) == self.EMPTY_RETURN_STATE

    def getReturnState(self, index: int):
        # Abstract in spirit: concrete subclasses must override.
        raise IllegalStateException("illegal!")

    def __hash__(self):
        return self.cachedHashCode
|
72
|
+
|
73
|
+
def calculateHashCode(parent:PredictionContext, returnState:int):
    """Hash one (parent, returnState) pair.

    A missing parent hashes like the empty string so that the root case is
    stable across runs of the same process.
    """
    if parent is None:
        return hash("")
    return hash((hash(parent), returnState))
|
75
|
+
|
76
|
+
def calculateListsHashCode(parents: list, returnStates: list):
    """Fold the per-pair hashes of parallel parent/returnState lists.

    Pairs are combined left-to-right so the result is order-sensitive,
    matching the reference algorithm described on PredictionContext.
    (Annotations fixed: the originals used the meaningless literal ``[]``
    as a type annotation; ``list`` states the intent.)
    """
    h = 0
    for parent, returnState in zip(parents, returnStates):
        h = hash((h, calculateHashCode(parent, returnState)))
    return h
|
81
|
+
|
82
|
+
# Used to cache {@link PredictionContext} objects. Its used for the shared
|
83
|
+
# context cash associated with contexts in DFA states. This cache
|
84
|
+
# can be used for both lexers and parsers.
|
85
|
+
|
86
|
+
class PredictionContextCache(object):
    """Interning cache for {@link PredictionContext} nodes.

    Used for the shared context cache associated with contexts in DFA
    states; serves both lexers and parsers.
    """

    def __init__(self):
        # Maps each context to its canonical (first-seen) instance.
        self.cache = dict()

    def add(self, ctx: PredictionContext):
        """Intern ctx: return the cached equal instance when one exists,
        otherwise store ctx and return it. EMPTY is never stored — the
        shared singleton is returned directly."""
        if ctx == PredictionContext.EMPTY:
            return PredictionContext.EMPTY
        interned = self.cache.get(ctx, None)
        if interned is not None:
            return interned
        self.cache[ctx] = ctx
        return ctx

    def get(self, ctx: PredictionContext):
        """Return the cached instance equal to ctx, or None."""
        return self.cache.get(ctx, None)

    def __len__(self):
        return len(self.cache)
|
109
|
+
|
110
|
+
|
111
|
+
class SingletonPredictionContext(PredictionContext):
    """A prediction-context node holding exactly one (parent, returnState) pair."""

    @staticmethod
    def create(parent:PredictionContext , returnState:int ):
        # The (None, EMPTY_RETURN_STATE) combination is the bit pattern of
        # {@code $}; normalise it to the shared EMPTY singleton.
        if returnState == PredictionContext.EMPTY_RETURN_STATE and parent is None:
            return SingletonPredictionContext.EMPTY
        return SingletonPredictionContext(parent, returnState)

    def __init__(self, parent:PredictionContext, returnState:int):
        super().__init__(calculateHashCode(parent, returnState))
        self.parentCtx = parent
        self.returnState = returnState

    def __len__(self):
        return 1

    def getParent(self, index:int):
        # Only one slot, so the index is ignored.
        return self.parentCtx

    def getReturnState(self, index:int):
        return self.returnState

    def __eq__(self, other):
        if self is other:
            return True
        # isinstance(None, ...) is False, covering the explicit None check too.
        if not isinstance(other, SingletonPredictionContext):
            return False
        return self.returnState == other.returnState and self.parentCtx == other.parentCtx

    def __hash__(self):
        return self.cachedHashCode

    def __str__(self):
        up = "" if self.parentCtx is None else str(self.parentCtx)
        if not up:
            return "$" if self.returnState == self.EMPTY_RETURN_STATE else str(self.returnState)
        return str(self.returnState) + " " + up
|
158
|
+
|
159
|
+
|
160
|
+
class EmptyPredictionContext(SingletonPredictionContext):
    """The `$` context: no parent, EMPTY_RETURN_STATE payload.

    Exactly one instance exists per process (PredictionContext.EMPTY), so
    equality is identity.
    """

    def __init__(self):
        super().__init__(None, PredictionContext.EMPTY_RETURN_STATE)

    def isEmpty(self):
        return True

    def __eq__(self, other):
        # Identity semantics: only the shared singleton compares equal.
        return self is other

    def __hash__(self):
        return self.cachedHashCode

    def __str__(self):
        return "$"
|
176
|
+
|
177
|
+
|
178
|
+
# Patch the sentinel declared on the base class now that the concrete empty
# context type exists (resolves the forward reference declared as None above).
PredictionContext.EMPTY = EmptyPredictionContext()
|
179
|
+
|
180
|
+
class ArrayPredictionContext(PredictionContext):
    # A context node with parallel parents/returnStates lists.
    # Parent can be null only in full-ctx mode when we make an array from
    # {@link #EMPTY} and non-empty: {@link #EMPTY} is represented by a null
    # parent paired with returnState == {@link #EMPTY_RETURN_STATE}.

    def __init__(self, parents:list, returnStates:list):
        super().__init__(calculateListsHashCode(parents, returnStates))
        self.parents = parents
        self.returnStates = returnStates

    def isEmpty(self):
        # EMPTY_RETURN_STATE can only appear in the last position, so a
        # single check of slot 0 suffices (no need to verify size == 1).
        return self.returnStates[0] == PredictionContext.EMPTY_RETURN_STATE

    def __len__(self):
        return len(self.returnStates)

    def getParent(self, index:int):
        return self.parents[index]

    def getReturnState(self, index:int):
        return self.returnStates[index]

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, ArrayPredictionContext):
            return False
        if hash(self) != hash(other):
            return False  # different hashes cannot be equal
        return self.returnStates == other.returnStates and self.parents == other.parents

    def __str__(self):
        if self.isEmpty():
            return "[]"
        pieces = []
        for returnState, parent in zip(self.returnStates, self.parents):
            if returnState == PredictionContext.EMPTY_RETURN_STATE:
                pieces.append("$")
            elif parent is not None:
                pieces.append(str(returnState) + " " + str(parent))
            else:
                pieces.append(str(returnState) + "null")
        return "[" + ", ".join(pieces) + "]"

    def __hash__(self):
        return self.cachedHashCode
|
236
|
+
|
237
|
+
|
238
|
+
|
239
|
+
# Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.
|
240
|
+
# Return {@link #EMPTY} if {@code outerContext} is empty or null.
|
241
|
+
#/
|
242
|
+
def PredictionContextFromRuleContext(atn:ATN, outerContext:RuleContext=None):
    """Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.

    Returns {@link PredictionContext#EMPTY} when outerContext is empty/None.
    """
    if outerContext is None:
        outerContext = RuleContext.EMPTY

    # In the RuleContext of the start rule nobody called us: an empty
    # invocation stack maps to the EMPTY prediction context.
    if outerContext.parentCtx is None or outerContext is RuleContext.EMPTY:
        return PredictionContext.EMPTY

    # Recursively convert the caller chain, then append this stack frame.
    parent = PredictionContextFromRuleContext(atn, outerContext.parentCtx)
    invoking_state = atn.states[outerContext.invokingState]
    call_transition = invoking_state.transitions[0]
    return SingletonPredictionContext.create(parent, call_transition.followState.stateNumber)
|
256
|
+
|
257
|
+
|
258
|
+
def merge(a:PredictionContext, b:PredictionContext, rootIsWildcard:bool, mergeCache:dict):
    """Merge two prediction-context graphs, dispatching on node kinds."""

    # Equal graphs share the same node.
    if a == b:
        return a

    # Two singletons merge directly.
    if isinstance(a, SingletonPredictionContext) and isinstance(b, SingletonPredictionContext):
        return mergeSingletons(a, b, rootIsWildcard, mergeCache)

    # At least one side is an array. In local-context mode {@code $} is a
    # wildcard and absorbs anything it merges with.
    if rootIsWildcard:
        if isinstance(a, EmptyPredictionContext):
            return a
        if isinstance(b, EmptyPredictionContext):
            return b

    # Normalise: promote any remaining singleton to a one-element array so
    # mergeArrays only deals with one shape.
    if isinstance(a, SingletonPredictionContext):
        a = ArrayPredictionContext([a.parentCtx], [a.returnState])
    if isinstance(b, SingletonPredictionContext):
        b = ArrayPredictionContext([b.parentCtx], [b.returnState])
    return mergeArrays(a, b, rootIsWildcard, mergeCache)
|
281
|
+
|
282
|
+
|
283
|
+
#
|
284
|
+
# Merge two {@link SingletonPredictionContext} instances.
|
285
|
+
#
|
286
|
+
# <p>Stack tops equal, parents merge is same; return left graph.<br>
|
287
|
+
# <embed src="images/SingletonMerge_SameRootSamePar.svg" type="image/svg+xml"/></p>
|
288
|
+
#
|
289
|
+
# <p>Same stack top, parents differ; merge parents giving array node, then
|
290
|
+
# remainders of those graphs. A new root node is created to point to the
|
291
|
+
# merged parents.<br>
|
292
|
+
# <embed src="images/SingletonMerge_SameRootDiffPar.svg" type="image/svg+xml"/></p>
|
293
|
+
#
|
294
|
+
# <p>Different stack tops pointing to same parent. Make array node for the
|
295
|
+
# root where both element in the root point to the same (original)
|
296
|
+
# parent.<br>
|
297
|
+
# <embed src="images/SingletonMerge_DiffRootSamePar.svg" type="image/svg+xml"/></p>
|
298
|
+
#
|
299
|
+
# <p>Different stack tops pointing to different parents. Make array node for
|
300
|
+
# the root where each element points to the corresponding original
|
301
|
+
# parent.<br>
|
302
|
+
# <embed src="images/SingletonMerge_DiffRootDiffPar.svg" type="image/svg+xml"/></p>
|
303
|
+
#
|
304
|
+
# @param a the first {@link SingletonPredictionContext}
|
305
|
+
# @param b the second {@link SingletonPredictionContext}
|
306
|
+
# @param rootIsWildcard {@code true} if this is a local-context merge,
|
307
|
+
# otherwise false to indicate a full-context merge
|
308
|
+
# @param mergeCache
|
309
|
+
#/
|
310
|
+
def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool, mergeCache:dict):
    # Merge two {@link SingletonPredictionContext} instances; see the block
    # comment above for the four geometric cases. Returns an existing node
    # whenever the merge collapses to one of the inputs.
    if mergeCache is not None:
        # merge is commutative, so probe the cache under both orderings
        previous = mergeCache.get((a,b), None)
        if previous is not None:
            return previous
        previous = mergeCache.get((b,a), None)
        if previous is not None:
            return previous

    # handle the $-root special cases first; None means "no special case"
    merged = mergeRoot(a, b, rootIsWildcard)
    if merged is not None:
        if mergeCache is not None:
            mergeCache[(a, b)] = merged
        return merged

    if a.returnState==b.returnState:
        # same stack top: merge the parents and re-wrap
        parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
        # if parent is same as existing a or b parent or reduced to a parent, return it
        if parent == a.parentCtx:
            return a # ax + bx = ax, if a=b
        if parent == b.parentCtx:
            return b # ax + bx = bx, if a=b
        # else: ax + ay = a'[x,y]
        # merge parents x and y, giving array node with x,y then remainders
        # of those graphs. dup a, a' points at merged array
        # new joined parent so create new singleton pointing to it, a'
        merged = SingletonPredictionContext.create(parent, a.returnState)
        if mergeCache is not None:
            mergeCache[(a, b)] = merged
        return merged
    else: # a != b payloads differ
        # see if we can collapse parents due to $+x parents if local ctx
        singleParent = None
        if a is b or (a.parentCtx is not None and a.parentCtx==b.parentCtx): # ax + bx = [a,b]x
            singleParent = a.parentCtx
        if singleParent is not None: # parents are same
            # sort payloads and use same parent
            payloads = [ a.returnState, b.returnState ]
            if a.returnState > b.returnState:
                payloads = [ b.returnState, a.returnState ]
            parents = [singleParent, singleParent]
            merged = ArrayPredictionContext(parents, payloads)
            if mergeCache is not None:
                mergeCache[(a, b)] = merged
            return merged
        # parents differ and can't merge them. Just pack together
        # into array; can't merge.
        # ax + by = [ax,by]
        payloads = [ a.returnState, b.returnState ]
        parents = [ a.parentCtx, b.parentCtx ]
        if a.returnState > b.returnState: # sort by payload
            payloads = [ b.returnState, a.returnState ]
            parents = [ b.parentCtx, a.parentCtx ]
        merged = ArrayPredictionContext(parents, payloads)
        if mergeCache is not None:
            mergeCache[(a, b)] = merged
        return merged
|
367
|
+
|
368
|
+
|
369
|
+
#
|
370
|
+
# Handle case where at least one of {@code a} or {@code b} is
|
371
|
+
# {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used
|
372
|
+
# to represent {@link #EMPTY}.
|
373
|
+
#
|
374
|
+
# <h2>Local-Context Merges</h2>
|
375
|
+
#
|
376
|
+
# <p>These local-context merge operations are used when {@code rootIsWildcard}
|
377
|
+
# is true.</p>
|
378
|
+
#
|
379
|
+
# <p>{@link #EMPTY} is superset of any graph; return {@link #EMPTY}.<br>
|
380
|
+
# <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
|
381
|
+
#
|
382
|
+
# <p>{@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is
|
383
|
+
# {@code #EMPTY}; return left graph.<br>
|
384
|
+
# <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
|
385
|
+
#
|
386
|
+
# <p>Special case of last merge if local context.<br>
|
387
|
+
# <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
|
388
|
+
#
|
389
|
+
# <h2>Full-Context Merges</h2>
|
390
|
+
#
|
391
|
+
# <p>These full-context merge operations are used when {@code rootIsWildcard}
|
392
|
+
# is false.</p>
|
393
|
+
#
|
394
|
+
# <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
|
395
|
+
#
|
396
|
+
# <p>Must keep all contexts; {@link #EMPTY} in array is a special value (and
|
397
|
+
# null parent).<br>
|
398
|
+
# <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
|
399
|
+
#
|
400
|
+
# <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
|
401
|
+
#
|
402
|
+
# @param a the first {@link SingletonPredictionContext}
|
403
|
+
# @param b the second {@link SingletonPredictionContext}
|
404
|
+
# @param rootIsWildcard {@code true} if this is a local-context merge,
|
405
|
+
# otherwise false to indicate a full-context merge
|
406
|
+
#/
|
407
|
+
def mergeRoot(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool):
    """Handle merges where at least one side is {@link #EMPTY} ({@code $}).

    Returns the merged context for those special cases, or None when
    neither side is EMPTY and the caller must do a normal merge.
    """
    if rootIsWildcard:
        # Local-context mode: $ is a wildcard and absorbs either side.
        if a == PredictionContext.EMPTY or b == PredictionContext.EMPTY:
            return PredictionContext.EMPTY
    else:
        # Full-context mode: $ must be kept as a real array element
        # (None parent + EMPTY_RETURN_STATE), and $ always sorts first.
        if a == PredictionContext.EMPTY and b == PredictionContext.EMPTY:
            return PredictionContext.EMPTY  # $ + $ = $
        if a == PredictionContext.EMPTY:  # $ + x = [$,x]
            payloads = [b.returnState, PredictionContext.EMPTY_RETURN_STATE]
            parents = [b.parentCtx, None]
            return ArrayPredictionContext(parents, payloads)
        if b == PredictionContext.EMPTY:  # x + $ = [$,x]
            payloads = [a.returnState, PredictionContext.EMPTY_RETURN_STATE]
            parents = [a.parentCtx, None]
            return ArrayPredictionContext(parents, payloads)
    return None
|
425
|
+
|
426
|
+
|
427
|
+
#
|
428
|
+
# Merge two {@link ArrayPredictionContext} instances.
|
429
|
+
#
|
430
|
+
# <p>Different tops, different parents.<br>
|
431
|
+
# <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
|
432
|
+
#
|
433
|
+
# <p>Shared top, same parents.<br>
|
434
|
+
# <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
|
435
|
+
#
|
436
|
+
# <p>Shared top, different parents.<br>
|
437
|
+
# <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
|
438
|
+
#
|
439
|
+
# <p>Shared top, all shared parents.<br>
|
440
|
+
# <embed src="images/ArrayMerge_ShareTopSharePar.svg" type="image/svg+xml"/></p>
|
441
|
+
#
|
442
|
+
# <p>Equal tops, merge parents and reduce top to
|
443
|
+
# {@link SingletonPredictionContext}.<br>
|
444
|
+
# <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
|
445
|
+
#/
|
446
|
+
def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildcard:bool, mergeCache:dict):
    """Merge two {@link ArrayPredictionContext} instances.

    Merges the two sorted (returnState, parent) lists like a merge sort,
    recursively merging parents when stack tops are equal, then collapses
    the result back to a singleton or one of the inputs when possible.

    Fix: the final trace statement referenced an undefined name ``M``
    (a leftover from the Java reference code), which raised NameError
    whenever ``_trace_atn_sim`` was enabled; it now prints ``merged``.
    """
    if mergeCache is not None:
        # merge is commutative, so probe the cache under both orderings
        previous = mergeCache.get((a,b), None)
        if previous is not None:
            if _trace_atn_sim: print("mergeArrays a="+str(a)+",b="+str(b)+" -> previous")
            return previous
        previous = mergeCache.get((b,a), None)
        if previous is not None:
            if _trace_atn_sim: print("mergeArrays a="+str(a)+",b="+str(b)+" -> previous")
            return previous

    # merge sorted payloads a + b => M
    i = 0  # walks a
    j = 0  # walks b
    k = 0  # walks target M array

    mergedReturnStates = [None] * (len(a.returnStates) + len(b.returnStates))
    mergedParents = [None] * len(mergedReturnStates)
    # walk and merge to yield mergedParents, mergedReturnStates
    while i < len(a.returnStates) and j < len(b.returnStates):
        a_parent = a.parents[i]
        b_parent = b.parents[j]
        if a.returnStates[i] == b.returnStates[j]:
            # same payload (stack tops are equal), must yield merged singleton
            payload = a.returnStates[i]
            # $+$ = $
            bothDollars = payload == PredictionContext.EMPTY_RETURN_STATE and \
                            a_parent is None and b_parent is None
            ax_ax = (a_parent is not None and b_parent is not None) and a_parent == b_parent  # ax+ax -> ax
            if bothDollars or ax_ax:
                mergedParents[k] = a_parent  # choose left
                mergedReturnStates[k] = payload
            else:  # ax+ay -> a'[x,y]
                mergedParent = merge(a_parent, b_parent, rootIsWildcard, mergeCache)
                mergedParents[k] = mergedParent
                mergedReturnStates[k] = payload
            i += 1  # hop over left one as usual
            j += 1  # but also skip one in right side since we merge
        elif a.returnStates[i] < b.returnStates[j]:  # copy a[i] to M
            mergedParents[k] = a_parent
            mergedReturnStates[k] = a.returnStates[i]
            i += 1
        else:  # b > a, copy b[j] to M
            mergedParents[k] = b_parent
            mergedReturnStates[k] = b.returnStates[j]
            j += 1
        k += 1

    # copy over any payloads remaining in either array
    if i < len(a.returnStates):
        for p in range(i, len(a.returnStates)):
            mergedParents[k] = a.parents[p]
            mergedReturnStates[k] = a.returnStates[p]
            k += 1
    else:
        for p in range(j, len(b.returnStates)):
            mergedParents[k] = b.parents[p]
            mergedReturnStates[k] = b.returnStates[p]
            k += 1

    # trim merged if we combined a few that had same stack tops
    if k < len(mergedParents):  # write index < last position; trim
        if k == 1:  # for just one merged element, return singleton top
            merged = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
            if mergeCache is not None:
                mergeCache[(a,b)] = merged
            return merged
        mergedParents = mergedParents[0:k]
        mergedReturnStates = mergedReturnStates[0:k]

    merged = ArrayPredictionContext(mergedParents, mergedReturnStates)

    # if we created same array as a or b, return that instead
    # TODO: track whether this is possible above during merge sort for speed
    if merged == a:
        if mergeCache is not None:
            mergeCache[(a,b)] = a
        if _trace_atn_sim: print("mergeArrays a="+str(a)+",b="+str(b)+" -> a")
        return a
    if merged == b:
        if mergeCache is not None:
            mergeCache[(a,b)] = b
        if _trace_atn_sim: print("mergeArrays a="+str(a)+",b="+str(b)+" -> b")
        return b
    # canonicalise equal parents in place (mergedParents is merged.parents;
    # equality/hash are unchanged, only object identity is deduplicated)
    combineCommonParents(mergedParents)

    if mergeCache is not None:
        mergeCache[(a,b)] = merged

    # BUG FIX: was str(M), an undefined name, which raised NameError here.
    if _trace_atn_sim: print("mergeArrays a="+str(a)+",b="+str(b)+" -> "+str(merged))

    return merged
|
538
|
+
|
539
|
+
|
540
|
+
#
|
541
|
+
# Make pass over all <em>M</em> {@code parents}; merge any {@code equals()}
|
542
|
+
# ones.
|
543
|
+
#/
|
544
|
+
def combineCommonParents(parents:list):
    """Make a pass over all parents, replacing each entry with the first
    instance that compares equal to it, so equal parents share one object.
    Mutates ``parents`` in place."""
    first_seen = dict()
    for index, parent in enumerate(parents):
        # setdefault interns the first equal instance and returns it thereafter
        parents[index] = first_seen.setdefault(parent, parent)
|
554
|
+
|
555
|
+
def getCachedPredictionContext(context:PredictionContext, contextCache:PredictionContextCache, visited:dict):
    # Recursively rebuild `context` using only nodes interned in
    # `contextCache`, memoising results per original node in `visited` so
    # shared subgraphs are processed once.
    if context.isEmpty():
        return context
    # already rewritten during this walk?
    existing = visited.get(context)
    if existing is not None:
        return existing
    # already interned in the shared cache?
    existing = contextCache.get(context)
    if existing is not None:
        visited[context] = existing
        return existing
    changed = False
    parents = [None] * len(context)
    for i in range(0, len(parents)):
        parent = getCachedPredictionContext(context.getParent(i), contextCache, visited)
        if changed or parent is not context.getParent(i):
            if not changed:
                # first difference: lazily copy the original parent list
                parents = [context.getParent(j) for j in range(len(context))]
                changed = True
            parents[i] = parent
    # no parent changed: intern the node itself and keep it as-is
    if not changed:
        contextCache.add(context)
        visited[context] = context
        return context

    # rebuild with the cached parents, choosing the narrowest node shape
    updated = None
    if len(parents) == 0:
        updated = PredictionContext.EMPTY
    elif len(parents) == 1:
        updated = SingletonPredictionContext.create(parents[0], context.getReturnState(0))
    else:
        updated = ArrayPredictionContext(parents, context.returnStates)

    contextCache.add(updated)
    visited[updated] = updated
    visited[context] = updated

    return updated
|
592
|
+
|
593
|
+
|
594
|
+
# # extra structures, but cut/paste/morphed works, so leave it.
|
595
|
+
# # seems to do a breadth-first walk
|
596
|
+
# public static List<PredictionContext> getAllNodes(PredictionContext context) {
|
597
|
+
# Map<PredictionContext, PredictionContext> visited =
|
598
|
+
# new IdentityHashMap<PredictionContext, PredictionContext>();
|
599
|
+
# Deque<PredictionContext> workList = new ArrayDeque<PredictionContext>();
|
600
|
+
# workList.add(context);
|
601
|
+
# visited.put(context, context);
|
602
|
+
# List<PredictionContext> nodes = new ArrayList<PredictionContext>();
|
603
|
+
# while (!workList.isEmpty()) {
|
604
|
+
# PredictionContext current = workList.pop();
|
605
|
+
# nodes.add(current);
|
606
|
+
# for (int i = 0; i < current.size(); i++) {
|
607
|
+
# PredictionContext parent = current.getParent(i);
|
608
|
+
# if ( parent!=null && visited.put(parent, parent) == null) {
|
609
|
+
# workList.push(parent);
|
610
|
+
# }
|
611
|
+
# }
|
612
|
+
# }
|
613
|
+
# return nodes;
|
614
|
+
# }
|
615
|
+
|
616
|
+
# ter's recursive version of Sam's getAllNodes()
|
617
|
+
def getAllContextNodes(context:PredictionContext, nodes:list=None, visited:dict=None):
    """Collect every node reachable from ``context`` via parent links
    (ter's recursive version of Sam's getAllNodes()).

    Returns the ``nodes`` list; ``visited`` de-duplicates shared subgraphs.

    Fix: the original used the Java-isms ``visited.put(...)`` and
    ``nodes.add(...)`` — Python dicts have no ``put`` and lists have no
    ``add``, so any call reaching the traversal branch raised
    AttributeError. Replaced with item assignment and ``append``.
    """
    if nodes is None:
        return getAllContextNodes(context, list(), visited)
    elif visited is None:
        return getAllContextNodes(context, nodes, dict())
    else:
        # stop at missing parents and at nodes we have already recorded
        if context is None or visited.get(context, None) is not None:
            return nodes
        visited[context] = context  # was visited.put(context, context): AttributeError
        nodes.append(context)       # was nodes.add(context): AttributeError
        for i in range(0, len(context)):
            getAllContextNodes(context.getParent(i), nodes, visited)
        return nodes
|
632
|
+
|