antlr4 0.9.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +27 -0
- data/README.md +46 -0
- data/lib/antlr4.rb +262 -0
- data/lib/antlr4/BufferedTokenStream.rb +306 -0
- data/lib/antlr4/CommonTokenFactory.rb +53 -0
- data/lib/antlr4/CommonTokenStream.rb +56 -0
- data/lib/antlr4/FileStream.rb +14 -0
- data/lib/antlr4/InputStream.rb +82 -0
- data/lib/antlr4/IntervalSet.rb +341 -0
- data/lib/antlr4/LL1Analyzer.rb +177 -0
- data/lib/antlr4/Lexer.rb +335 -0
- data/lib/antlr4/ListTokenSource.rb +140 -0
- data/lib/antlr4/Parser.rb +562 -0
- data/lib/antlr4/ParserInterpreter.rb +149 -0
- data/lib/antlr4/ParserRuleContext.rb +162 -0
- data/lib/antlr4/PredictionContext.rb +690 -0
- data/lib/antlr4/Recognizer.rb +162 -0
- data/lib/antlr4/RuleContext.rb +226 -0
- data/lib/antlr4/Token.rb +124 -0
- data/lib/antlr4/TokenFactory.rb +3 -0
- data/lib/antlr4/TokenSource.rb +4 -0
- data/lib/antlr4/TokenStream.rb +3 -0
- data/lib/antlr4/TraceListener.rb +23 -0
- data/lib/antlr4/atn/ATN.rb +133 -0
- data/lib/antlr4/atn/ATNConfig.rb +146 -0
- data/lib/antlr4/atn/ATNConfigSet.rb +215 -0
- data/lib/antlr4/atn/ATNDeserializationOptions.rb +62 -0
- data/lib/antlr4/atn/ATNDeserializer.rb +604 -0
- data/lib/antlr4/atn/ATNSimulator.rb +43 -0
- data/lib/antlr4/atn/ATNState.rb +253 -0
- data/lib/antlr4/atn/ATNType.rb +22 -0
- data/lib/antlr4/atn/LexerATNSimulator.rb +612 -0
- data/lib/antlr4/atn/LexerAction.rb +311 -0
- data/lib/antlr4/atn/LexerActionExecutor.rb +134 -0
- data/lib/antlr4/atn/ParserATNSimulator.rb +1622 -0
- data/lib/antlr4/atn/PredictionMode.rb +525 -0
- data/lib/antlr4/atn/SemanticContext.rb +355 -0
- data/lib/antlr4/atn/Transition.rb +297 -0
- data/lib/antlr4/base.rb +60 -0
- data/lib/antlr4/dfa/DFA.rb +128 -0
- data/lib/antlr4/dfa/DFASerializer.rb +77 -0
- data/lib/antlr4/dfa/DFAState.rb +133 -0
- data/lib/antlr4/error.rb +151 -0
- data/lib/antlr4/error/DiagnosticErrorListener.rb +136 -0
- data/lib/antlr4/error/ErrorListener.rb +109 -0
- data/lib/antlr4/error/ErrorStrategy.rb +742 -0
- data/lib/antlr4/tree/Chunk.rb +31 -0
- data/lib/antlr4/tree/ParseTreeMatch.rb +105 -0
- data/lib/antlr4/tree/ParseTreePattern.rb +70 -0
- data/lib/antlr4/tree/ParseTreePatternMatcher.rb +334 -0
- data/lib/antlr4/tree/RuleTagToken.rb +39 -0
- data/lib/antlr4/tree/TokenTagToken.rb +38 -0
- data/lib/antlr4/tree/Tree.rb +204 -0
- data/lib/antlr4/tree/Trees.rb +111 -0
- data/lib/antlr4/version.rb +5 -0
- data/lib/antlr4/xpath/XPath.rb +354 -0
- data/lib/double_key_map.rb +78 -0
- data/lib/java_symbols.rb +24 -0
- data/lib/uuid.rb +87 -0
- data/test/test_intervalset.rb +664 -0
- data/test/test_tree.rb +140 -0
- data/test/test_uuid.rb +122 -0
- metadata +109 -0
data/lib/antlr4/ParserInterpreter.rb
@@ -0,0 +1,149 @@
+
+# A parser simulator that mimics what ANTLR's generated
+# parser code does. A ParserATNSimulator is used to make
+# predictions via adaptivePredict but this class moves a pointer through the
+# ATN to simulate parsing. ParserATNSimulator just
+# makes us efficient rather than having to backtrack, for example.
+#
+# This properly creates parse trees even for left recursive rules.
+#
+# We rely on the left recursive rule invocation and special predicate
+# transitions to make left recursive rules work.
+#
+# See TestParserInterpreter for examples.
+
+class ParserInterpreter < Parser
+
+  attr_accessor :parentContextStack, :atn, :grammarFileName
+  attr_accessor :tokenNames, :ruleNames, :decisionToDFA, :sharedContextCache
+  attr_accessor :parentContextStack, :pushRecursionContextStates, :interp
+  def initialize(grammarFileName, tokenNames, ruleNames, atn, input)
+    super(input)
+    self.grammarFileName = grammarFileName
+    self.atn = atn
+    self.tokenNames = tokenNames
+    self.ruleNames = ruleNames
+    self.decisionToDFA = atn.decisionToState.map {|state| DFA.new(state) }
+    self.sharedContextCache = PredictionContextCache.new()
+    self.parentContextStack = Array.new
+    # identify the ATN states where pushNewRecursionContext must be called
+    self.pushRecursionContextStates = Set.new()
+    atn.states.each do |state|
+      next if not state.kind_of? StarLoopEntryState
+      if state.precedenceRuleDecision
+        self.pushRecursionContextStates.add(state.stateNumber)
+      end
+    end
+    # get atn simulator that knows how to do predictions
+    self.interp = ParserATNSimulator.new(self, atn, self.decisionToDFA, self.sharedContextCache)
+  end
+  # Begin parsing at startRuleIndex
+  def parse(startRuleIndex)
+    startRuleStartState = self.atn.ruleToStartState[startRuleIndex]
+    rootContext = InterpreterRuleContext.new(nil, ATNState::INVALID_STATE_NUMBER, startRuleIndex)
+    if startRuleStartState.isPrecedenceRule
+      self.enterRecursionRule(rootContext, startRuleStartState.stateNumber, startRuleIndex, 0)
+    else
+      self.enterRule(rootContext, startRuleStartState.stateNumber, startRuleIndex)
+    end
+    while true
+      p = self.getATNState()
+      if p.stateType==ATNState::RULE_STOP
+        # pop; return from rule
+        if self.ctx.length==0
+          if startRuleStartState.isPrecedenceRule
+            result = self.ctx
+            parentContext = self.parentContextStack.pop()
+            self.unrollRecursionContexts(parentContext[0])
+            return result
+          else
+            self.exitRule()
+            return rootContext
+          end
+        end
+        self.visitRuleStopState(p)
+      else
+        begin
+          self.visitState(p)
+        rescue RecognitionException => e
+          self.state = self.atn.ruleToStopState[p.ruleIndex].stateNumber
+          self.ctx.exception = e
+          self.errHandler.reportError(self, e)
+          self.errHandler.recover(self, e)
+        end
+      end
+    end
+  end
+  def enterRecursionRule(localctx, state, ruleIndex, precedence)
+    self.parentContextStack.push([self.ctx, localctx.invokingState])
+    super(localctx, state, ruleIndex, precedence)
+  end
+  def getATNState
+    return self.atn.states[self.state]
+  end
+
+  def visitState(p)
+    edge = 0
+    if p.transitions.length() > 1
+      self.errHandler.sync(self)
+      edge = self.interp.adaptivePredict(self.input, p.decision, self.ctx)
+    else
+      edge = 1
+    end
+
+    transition = p.transitions[edge - 1]
+    tt = transition.serializationType
+    if tt==Transition::EPSILON then
+
+      if self.pushRecursionContextStates[p.stateNumber] and not transition.target.kind_of? LoopEndState
+        t = self.parentContextStack[-1]
+        ctx = InterpreterRuleContext.new(t[0], t[1], self.ctx.ruleIndex)
+        self.pushNewRecursionContext(ctx, self.atn.ruleToStartState[p.ruleIndex].stateNumber, self.ctx.ruleIndex)
+      end
+    elsif tt==Transition::ATOM
+      self.match(transition.label)
+    elsif [Transition::RANGE, Transition::SET, Transition::NOT_SET].member? tt
+      if not transition.matches(self.input.LA(1), Token::MIN_USER_TOKEN_TYPE, 0xFFFF)
+        self.errHandler.recoverInline(self)
+      end
+      self.matchWildcard()
+    elsif tt==Transition::WILDCARD
+      self.matchWildcard()
+    elsif tt==Transition::RULE
+      ruleStartState = transition.target
+      ruleIndex = ruleStartState.ruleIndex
+      ctx = InterpreterRuleContext.new(self.ctx, p.stateNumber, ruleIndex)
+      if ruleStartState.isPrecedenceRule
+        self.enterRecursionRule(ctx, ruleStartState.stateNumber, ruleIndex, transition.precedence)
+      else
+        self.enterRule(ctx, transition.target.stateNumber, ruleIndex)
+      end
+    elsif tt==Transition::PREDICATE
+      if not self.sempred(self.ctx, transition.ruleIndex, transition.predIndex)
+        raise FailedPredicateException.new(self)
+      end
+    elsif tt==Transition::ACTION
+      self.action(self.ctx, transition.ruleIndex, transition.actionIndex)
+    elsif tt==Transition::PRECEDENCE
+      if not self.precpred(self.ctx, transition.precedence)
+        msg = "precpred(ctx, #{transition.precedence})"
+        raise FailedPredicateException.new(self, msg)
+      end
+    else
+      raise UnsupportedOperationException.new("Unrecognized ATN transition type.")
+    end
+    self.state = transition.target.stateNumber
+  end
+  def visitRuleStopState(p) # p:ATNState
+    ruleStartState = self.atn.ruleToStartState[p.ruleIndex]
+    if ruleStartState.isPrecedenceRule then
+      parentContext = self.parentContextStack.pop()
+      self.unrollRecursionContexts(parentContext[0])
+      self.state = parentContext[1]
+    else
+      self.exitRule()
+    end
+    ruleTransition = self.atn.states[self.state].transitions[0]
+    self.state = ruleTransition.followState.stateNumber
+  end
+end
data/lib/antlr4/ParserRuleContext.rb
@@ -0,0 +1,162 @@
+#* A rule invocation record for parsing.
+#
+# Contains all of the information about the current rule not stored in the
+# RuleContext. It handles parse tree children list, Any ATN state
+# tracing, and the default values available for rule indications:
+# start, stop, rule index, current alt number, current
+# ATN state.
+#
+# Subclasses made for each rule and grammar track the parameters,
+# return values, locals, and labels specific to that rule. These
+# are the objects that are returned from rules.
+#
+# Note text is not an actual field of a rule return value; it is computed
+# from start and stop using the input stream's toString() method. I
+# could add a ctor to this so that we can pass in and store the input
+# stream, but I'm not sure we want to do that. It would seem to be undefined
+# to get the .text property anyway if the rule matches tokens from multiple
+# input streams.
+#
+# I do not use getters for fields of objects that are used simply to
+# group values such as this aggregate. The getters/setters are there to
+# satisfy the superclass interface.
+
+
+class ParserRuleContext < RuleContext
+
+  attr_accessor :children, :start, :stop, :exception
+  attr_accessor :parser
+  def initialize(parent=nil, invoking_state_number=nil)
+    super(parent, invoking_state_number)
+    #* If we are debugging or building a parse tree for a visitor,
+    # we need to track all of the tokens and rule invocations associated
+    # with this rule's context. This is empty for parsing w/o tree constr.
+    # operation because we don't need to track the details about
+    # how we parse this rule.
+    #/
+    @children = Array.new
+    @start = nil
+    @stop = nil
+    # The exception that forced this rule to return. If the rule successfully
+    # completed, this is {@code null}.
+    @exception = nil
+  end
+
+  #* COPY a ctx (I'm deliberately not using copy constructor) #/
+  def copyFrom(ctx)
+    # from RuleContext
+    self.parentCtx = ctx.parentCtx
+    self.invokingState = ctx.invokingState
+    self.children = Array.new
+    self.start = ctx.start
+    self.stop = ctx.stop
+  end
+
+  # Double dispatch methods for listeners
+  def enterRule(listener)
+  end
+  def exitRule(listener)
+  end
+
+  #* Does not set parent link; other add methods do that #/
+  def addChild(child)
+    self.children.push(child)
+    return child
+  end
+  #* Used by enterOuterAlt to toss out a RuleContext previously added as
+  # we entered a rule. If we have # label, we will need to remove
+  # generic ruleContext object.
+  #/
+  def removeLastChild()
+    self.children.delete_at(-1)
+  end
+  def addTokenNode(token)
+    node = TerminalNodeImpl.new(token) #XXX
+    self.addChild(node)
+    node.parentCtx = self
+    return node
+  end
+
+  def addErrorNode(badToken)
+    node = ErrorNodeImpl.new(badToken)
+    self.addChild(node)
+    node.parentCtx = self
+    return node
+  end
+
+  def getChild(i, type=nil)
+    if type.nil?
+      if self.children.length >= i then
+        return self.children[i]
+      end
+    else
+      for child in self.getChildren() do
+        next if not child.kind_of? type
+        return child if i==0
+        i = i - 1
+      end
+    end
+    return nil
+  end
+  def getChildren
+    @children
+  end
+  def getToken(ttype, i)
+    self.getChildren().each do |child|
+      next if not child.kind_of? TerminalNode
+      next if child.symbol.type != ttype
+      return child if i==0
+      i -= 1
+      i = i - 1
+    end
+    return nil
+  end
+  def getTokens(ttype)
+    return Array.new if self.getChildren().empty?
+    self.getChildren().map do |child|
+      next if not child.kind_of? TerminalNode
+      next if child.symbol.type != ttype
+      child
+    end.compact
+  end
+
+  def getTypedRuleContext(ctxType, i)
+    return self.getChild(i, ctxType)
+  end
+  def getTypedRuleContexts(ctxType)
+    return Array.new if self.getChildren().empty?
+    self.getChildren.map do |child|
+      next if not child.kind_of? ctxType
+      child
+    end.compact
+  end
+  def getChildCount
+    return self.children.length
+  end
+
+  def getSourceInterval
+    if self.start.nil? or self.stop.nil? then
+      return Antlr4::INVALID_INTERVAL
+    else
+      return [self.start.tokenIndex, self.stop.tokenIndex]
+    end
+  end
+  def to_s
+    p = nil
+    if parentCtx then
+      p = parentCtx.to_s
+    end
+    "#{self.class}:[#{invokingState}]#{p}"
+  end
+end
+
+# RuleContext.set_empty(ParserRuleContext.new())
+
+class InterpreterRuleContext < ParserRuleContext
+
+  attr_accessor :ruleIndex
+  def initialize(parent, invokingStateNumber, rule_index)
+    super(parent, invokingStateNumber)
+    @ruleIndex = rule_index
+  end
+end
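Once a parse produces a ParserRuleContext tree, the accessors above are how listeners and visitors navigate it. A small sketch, assuming `tree` is a context returned by a parse; MyLexer::PLUS and ExprContext are hypothetical names standing in for a generated token type and context class.

    first = tree.getChild(0)                         # child by position
    plus  = tree.getToken(MyLexer::PLUS, 0)          # first '+' terminal under this rule, if any
    exprs = tree.getTypedRuleContexts(ExprContext)   # all children of one context class
    range = tree.getSourceInterval                   # [startTokenIndex, stopTokenIndex]
    count = tree.getChildCount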
data/lib/antlr4/PredictionContext.rb
@@ -0,0 +1,690 @@
+
+class PredictionContext
+
+  # Represents {@code $} in local context prediction, which means wildcard.
+  # {@code#+x =#}.
+  @@EMPTY = nil
+  def self.EMPTY
+    @@EMPTY = EmptyPredictionContext.new if @@EMPTY.nil?
+    @@EMPTY
+  end
+  # Represents {@code $} in an array in full context mode, when {@code $}
+  # doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
+  # {@code $} = {@link #EMPTY_RETURN_STATE}.
+  EMPTY_RETURN_STATE = 0x7FFFFFFF
+  def self.EMPTY_RETURN_STATE
+    PredictionContext::EMPTY_RETURN_STATE
+  end
+
+  @@globalNodeCount = 1
+  @@id = @@globalNodeCount
+
+  # Stores the computed hash code of this {@link PredictionContext}. The hash
+  # code is computed in parts to match the following reference algorithm.
+  #
+  # <pre>
+  # private int referenceHashCode() {
+  #   int hash = {@link MurmurHash#initialize MurmurHash.initialize}({@link #INITIAL_HASH});
+  #
+  #   for (int i = 0; i < {@link #size()}; i++) {
+  #     hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getParent getParent}(i));
+  #   }
+  #
+  #   for (int i = 0; i < {@link #size()}; i++) {
+  #     hash = {@link MurmurHash#update MurmurHash.update}(hash, {@link #getReturnState getReturnState}(i));
+  #   }
+  #
+  #   hash = {@link MurmurHash#finish MurmurHash.finish}(hash, 2# {@link #size()});
+  #   return hash;
+  # }
+  # </pre>
+  #/
+  attr_reader :cachedHashCode
+  def initialize(_cachedHashCode)
+    @cachedHashCode = _cachedHashCode
+  end
+
+  # This means only the {@link #EMPTY} context is in set.
+  def isEmpty
+    self.equal? PredictionContext.EMPTY
+  end
+  def hasEmptyPath
+    return self.getReturnState(self.length - 1) == PredictionContext::EMPTY_RETURN_STATE
+  end
+  def hash
+    return self.cachedHashCode
+  end
+  def self.calculateEmptyHashCode
+    "".hash
+  end
+  def self.calculateHashCode(parent, returnState)
+    "#{parent}#{returnState}".hash
+  end
+end
+
+# Used to cache {@link PredictionContext} objects. It's used for the shared
+# context cache associated with contexts in DFA states. This cache
+# can be used for both lexers and parsers.
+class PredictionContextCache
+
+  attr_reader :cache
+  def initialize
+    @cache = Hash.new
+  end
+
+  # Add a context to the cache and return it. If the context already exists,
+  # return that one instead and do not add a new context to the cache.
+  # Protect shared cache from unsafe thread access.
+  #
+  def add(ctx)
+    if ctx.equal? PredictionContext.EMPTY then
+      return PredictionContext.EMPTY
+    end
+    existing = self.cache[ctx]
+    return existing if existing
+    self.cache[ctx] = ctx
+    return ctx
+  end
+  def get(ctx)
+    return self.cache[ctx]
+  end
+  def length
+    return self.cache.length
+  end
+end
+
+class SingletonPredictionContext < PredictionContext
+
+  def self.create(parent, returnState)
+    if returnState == PredictionContext::EMPTY_RETURN_STATE and parent.nil? then
+      # someone can pass in the bits of an array ctx that mean $
+      return PredictionContext.EMPTY
+    else
+      return SingletonPredictionContext.new(parent, returnState)
+    end
+  end
+
+  attr_reader :parentCtx, :returnState
+  attr_accessor :cache_string
+  def initialize(parent, returnState)
+    #assert returnState!=ATNState.INVALID_STATE_NUMBER
+    if parent.nil? then
+      hashCode = PredictionContext.calculateEmptyHashCode
+    else
+      hashCode = PredictionContext.calculateHashCode(parent, returnState)
+    end
+    super(hashCode)
+    @parentCtx = parent
+    @returnState = returnState
+  end
+
+  def length
+    return 1
+  end
+
+  def getParent(index)
+    # assert index == 0
+    return self.parentCtx
+  end
+
+  def getReturnState(index)
+    #assert index == 0
+    return self.returnState
+  end
+
+  def eql?(other)
+    self == other
+  end
+  def ==(other)
+    return true if self.equal?(other)
+    return false unless self.class == other.class
+    if self.hash != other.hash
+      false # can't be same if hash is different
+    else
+      self.returnState == other.returnState and \
+        (self.parentCtx.equal?(other.parentCtx) \
+         or self.parentCtx==other.parentCtx)
+    end
+  end
+  def hash
+    return self.cachedHashCode
+  end
+
+  def to_s
+    @cache_string = mk_string if @cache_string.nil?
+    return @cache_string
+  end
+  def mk_string
+    if @parentCtx.nil? then
+      if @returnState == PredictionContext::EMPTY_RETURN_STATE
+        return "$"
+      else
+        return @returnState.to_s
+      end
+    else
+      return "#{@returnState} #{@parentCtx}"
+    end
+  end
+end
+class EmptyPredictionContext < SingletonPredictionContext
+
+  def initialize(h=nil)
+    super(nil, PredictionContext::EMPTY_RETURN_STATE)
+    @cachedHashCode = "".hash
+  end
+
+  def isEmpty
+    true
+  end
+
+  def getParent(index)
+    nil
+  end
+
+  def getReturnState(index)
+    PredictionContext::EMPTY_RETURN_STATE # self.returnState
+  end
+
+  # def ==(other)
+  #   self.equal? other
+  # end
+
+  def to_s
+    "$"
+  end
+end
+
+class ArrayPredictionContext < PredictionContext
+  # Parent can be null only if full ctx mode and we make an array
+  # from {@link #EMPTY} and non-empty. We merge {@link #EMPTY} by using null parent and
+  # returnState == {@link #EMPTY_RETURN_STATE}.
+  attr_accessor :parents, :returnStates
+  def initialize(parents, returnStates)
+    super(PredictionContext.calculateHashCode(parents, returnStates))
+    # assert parents is not None and len(parents)>0
+    # assert returnStates is not None and len(returnStates)>0
+    self.parents = parents
+    self.returnStates = returnStates
+  end
+
+  def isEmpty
+    # since EMPTY_RETURN_STATE can only appear in the last position, we
+    # don't need to verify that size==1
+    return self.returnStates[0]==PredictionContext::EMPTY_RETURN_STATE
+  end
+
+  def length
+    return self.returnStates.length()
+  end
+
+  def getParent(index)
+    return self.parents[index]
+  end
+  def getReturnState(index)
+    return self.returnStates[index]
+  end
+  def eql?(other)
+    self == other
+  end
+  def ==(other)
+    return false if self.class != other.class
+    return true if self.equal?(other)
+    if self.hash != other.hash
+      false # can't be same if hash is different
+    else
+      self.returnStates==other.returnStates and self.parents==other.parents
+    end
+  end
+
+  def to_s
+    if self.isEmpty()
+      return "[]"
+    end
+    StringIO.open do |buf|
+      buf.write("[")
+      for i in 0..self.returnStates.length-1 do
+        buf.write(", ") if i>0
+        if self.returnStates[i]==PredictionContext::EMPTY_RETURN_STATE
+          buf.write("$")
+          next
+        end
+        buf.write(self.returnStates[i].to_s)
+        if not self.parents[i].nil?
+          buf.write(' ')
+          buf.write(self.parents[i].to_s())
+        end
+      end
+      buf.write("]")
+      return buf.string()
+    end
+  end
+end
+# Convert a {@link RuleContext} tree to a {@link PredictionContext} graph.
+# Return {@link #EMPTY} if {@code outerContext} is empty or null.
+#/
+
+module PredictionContextFunctions
+
+  def self.included(klass)
+    klass.send(:include, PredictionContextFunctions::Methods)
+    klass.send(:extend, PredictionContextFunctions::Methods)
+  end
+
+  module Methods
+    def PredictionContextFromRuleContext(atn, outerContext=nil)
+      outerContext = RuleContext.EMPTY if outerContext.nil?
+
+      # if we are in RuleContext of start rule, s, then PredictionContext
+      # is EMPTY. Nobody called us. (if we are empty, return empty)
+      if outerContext.parentCtx.nil? or RuleContext.EMPTY == outerContext
+        return PredictionContext.EMPTY
+      end
+
+      # If we have a parent, convert it to a PredictionContext graph
+      parent = PredictionContextFromRuleContext(atn, outerContext.parentCtx)
+      state = atn.states[outerContext.invokingState]
+      transition = state.transitions[0]
+      return SingletonPredictionContext.create(parent, transition.followState.stateNumber)
+    end
+
+
+    def calculateListsHashCode(parents, returnStates)
+      str_parents = parents.map{|parent| parent.to_s }
+      str_rs = returnStates.map{|r| r.to_s }
+      return [str_parents, str_rs].flatten.join('').hash
+    end
+    #def merge(a:PredictionContext, b:PredictionContext, rootIsWildcard:bool, mergeCache:dict):
+    def merge(a, b, rootIsWildcard, mergeCache)
+      #assert a is not None and b is not None # must be empty context, never null
+
+      # share same graph if both same
+      return a if a==b
+
+      if a.kind_of? SingletonPredictionContext and b.kind_of? SingletonPredictionContext
+        return mergeSingletons(a, b, rootIsWildcard, mergeCache)
+      end
+
+      # At least one of a or b is array
+      # If one is $ and rootIsWildcard, return $ as# wildcard
+      if rootIsWildcard then
+        return a if a.kind_of? EmptyPredictionContext
+        return b if b.kind_of? EmptyPredictionContext
+      end
+      # convert singleton so both are arrays to normalize
+      if a.kind_of? SingletonPredictionContext
+        a = ArrayPredictionContext.new(a)
+      end
+      if b.kind_of? SingletonPredictionContext
+        b = ArrayPredictionContext.new(b)
+      end
+      return mergeArrays(a, b, rootIsWildcard, mergeCache)
+    end
+
+    #
+    # Merge two {@link SingletonPredictionContext} instances.
+    #
+    # <p>Stack tops equal, parents merge is same; return left graph.<br>
+    # <embed src="images/SingletonMerge_SameRootSamePar.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Same stack top, parents differ; merge parents giving array node, then
+    # remainders of those graphs. A new root node is created to point to the
+    # merged parents.<br>
+    # <embed src="images/SingletonMerge_SameRootDiffPar.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Different stack tops pointing to same parent. Make array node for the
+    # root where both element in the root point to the same (original)
+    # parent.<br>
+    # <embed src="images/SingletonMerge_DiffRootSamePar.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Different stack tops pointing to different parents. Make array node for
+    # the root where each element points to the corresponding original
+    # parent.<br>
+    # <embed src="images/SingletonMerge_DiffRootDiffPar.svg" type="image/svg+xml"/></p>
+    #
+    # @param a the first {@link SingletonPredictionContext}
+    # @param b the second {@link SingletonPredictionContext}
+    # @param rootIsWildcard {@code true} if this is a local-context merge,
+    # otherwise false to indicate a full-context merge
+    # @param mergeCache
+    #/
+    #def mergeSingletons(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool, mergeCache:dict):
+    def mergeSingletons(a, b, rootIsWildcard, mergeCache)
+      if mergeCache then
+        previous = mergeCache.get(a,b)
+        if not previous.nil?
+          return previous
+        end
+        previous = mergeCache.get(b,a)
+        if not previous.nil?
+          return previous
+        end
+      end
+      rootMerge = mergeRoot(a, b, rootIsWildcard)
+      if rootMerge then
+        if mergeCache then
+          mergeCache.put(a, b, rootMerge)
+        end
+        return rootMerge
+      end
+
+      if a.returnState==b.returnState then
+        parent = merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+        # if parent is same as existing a or b parent or reduced to a parent, return it
+        return a if parent == a.parentCtx # ax + bx = ax, if a=b
+        return b if parent == b.parentCtx # ax + bx = bx, if a=b
+        # else: ax + ay = a'[x,y]
+        # merge parents x and y, giving array node with x,y then remainders
+        # of those graphs. dup a, a' points at merged array
+        # new joined parent so create new singleton pointing to it, a'
+        a_ = SingletonPredictionContext.create(parent, a.returnState)
+        mergeCache.put(a, b, a_) if mergeCache
+        return a_
+      else # a != b payloads differ
+        # see if we can collapse parents due to $+x parents if local ctx
+        singleParent = nil
+        if a.equal?(b) or (a.parentCtx and a.parentCtx==b.parentCtx) # ax + bx = [a,b]x
+          singleParent = a.parentCtx
+        end
+        if not singleParent.nil? # parents are same
+          # sort payloads and use same parent
+          payloads = [ a.returnState, b.returnState ]
+          if a.returnState > b.returnState then
+            payloads[0] = b.returnState
+            payloads[1] = a.returnState
+          end
+          parents = [singleParent, singleParent]
+          a_ = ArrayPredictionContext.new(parents, payloads)
+          mergeCache.put(a, b, a_) if mergeCache
+          return a_
+        end
+        # parents differ and can't merge them. Just pack together
+        # into array; can't merge.
+        # ax + by = [ax,by]
+        payloads = [ a.returnState, b.returnState ]
+        parents = [ a.parentCtx, b.parentCtx ]
+        if a.returnState > b.returnState # sort by payload
+          payloads[0] = b.returnState
+          payloads[1] = a.returnState
+          parents = [ b.parentCtx, a.parentCtx ]
+        end
+        a_ = ArrayPredictionContext.new(parents, payloads)
+        mergeCache.put(a, b, a_) if mergeCache
+        return a_
+      end
+    end
+
+
+    #
+    # Handle case where at least one of {@code a} or {@code b} is
+    # {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used
+    # to represent {@link #EMPTY}.
+    #
+    # <h2>Local-Context Merges</h2>
+    #
+    # <p>These local-context merge operations are used when {@code rootIsWildcard}
+    # is true.</p>
+    #
+    # <p>{@link #EMPTY} is superset of any graph; return {@link #EMPTY}.<br>
+    # <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
+    #
+    # <p>{@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is
+    # {@code #EMPTY}; return left graph.<br>
+    # <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Special case of last merge if local context.<br>
+    # <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
+    #
+    # <h2>Full-Context Merges</h2>
+    #
+    # <p>These full-context merge operations are used when {@code rootIsWildcard}
+    # is false.</p>
+    #
+    # <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Must keep all contexts; {@link #EMPTY} in array is a special value (and
+    # null parent).<br>
+    # <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
+    #
+    # <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
+    #
+    # @param a the first {@link SingletonPredictionContext}
+    # @param b the second {@link SingletonPredictionContext}
+    # @param rootIsWildcard {@code true} if this is a local-context merge,
+    # otherwise false to indicate a full-context merge
+    #/
+    #def mergeRoot(a:SingletonPredictionContext, b:SingletonPredictionContext, rootIsWildcard:bool):
+    def mergeRoot(a, b, rootIsWildcard)
+      if rootIsWildcard
+        return PredictionContext.EMPTY if PredictionContext.EMPTY == a ## + b =#
+        return PredictionContext.EMPTY if PredictionContext.EMPTY == b # a +# =#
+      else
+        if PredictionContext.EMPTY == a and PredictionContext.EMPTY == b then
+          return PredictionContext.EMPTY # $ + $ = $
+        elsif PredictionContext.EMPTY == a # $ + x = [$,x]
+          payloads = [ b.returnState, PredictionContext::EMPTY_RETURN_STATE ]
+          parents = [ b.parentCtx, nil ]
+          return ArrayPredictionContext.new(parents, payloads)
+        elsif PredictionContext.EMPTY == b # x + $ = [$,x] ($ is always first if present)
+          payloads = [ a.returnState, PredictionContext::EMPTY_RETURN_STATE ]
+          parents = [ a.parentCtx, nil ]
+          return ArrayPredictionContext.new(parents, payloads)
+        end
+      end
+      return nil
+    end
+
+    #
+    # Merge two {@link ArrayPredictionContext} instances.
+    #
+    # <p>Different tops, different parents.<br>
+    # <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Shared top, same parents.<br>
+    # <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Shared top, different parents.<br>
+    # <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Shared top, all shared parents.<br>
+    # <embed src="images/ArrayMerge_ShareTopSharePar.svg" type="image/svg+xml"/></p>
+    #
+    # <p>Equal tops, merge parents and reduce top to
+    # {@link SingletonPredictionContext}.<br>
+    # <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
+    #/
+    #def mergeArrays(a:ArrayPredictionContext, b:ArrayPredictionContext, rootIsWildcard:bool, mergeCache:dict):
+    def mergeArrays(a, b, rootIsWildcard, mergeCache)
+      if mergeCache
+        previous = mergeCache.get(a,b)
+        return previous unless previous.nil?
+        previous = mergeCache.get(b,a)
+        return previous unless previous.nil?
+      end
+      # merge sorted payloads a + b => M
+      i = 0 # walks a
+      j = 0 # walks b
+      k = 0 # walks target M array
+
+      mergedReturnStates = Array.new(a.returnStates.length + b.returnStates.length)
+      mergedParents = Array.new(mergedReturnStates.length)
+      # walk and merge to yield mergedParents, mergedReturnStates
+      while i<a.returnStates.length and j<b.returnStates.length do
+        a_parent = a.parents[i]
+        b_parent = b.parents[j]
+        if a.returnStates[i]==b.returnStates[j] then
+          # same payload (stack tops are equal), must yield merged singleton
+          payload = a.returnStates[i]
+          # $+$ = $
+          bothDollars = (payload == PredictionContext::EMPTY_RETURN_STATE and \
+                         a_parent.nil? and b_parent.nil?)
+          ax_ax = ( ! a_parent.nil? and ! b_parent.nil?) and a_parent==b_parent # ax+ax -> ax
+          if bothDollars or ax_ax
+            mergedParents[k] = a_parent # choose left
+            mergedReturnStates[k] = payload
+          else # ax+ay -> a'[x,y]
+            mergedParent = merge(a_parent, b_parent, rootIsWildcard, mergeCache)
+            mergedParents[k] = mergedParent
+            mergedReturnStates[k] = payload
+          end
+          i = i + 1 # hop over left one as usual
+          j = j + 1 # but also skip one in right side since we merge
+        elsif a.returnStates[i]<b.returnStates[j] # copy a[i] to M
+          mergedParents[k] = a_parent
+          mergedReturnStates[k] = a.returnStates[i]
+          i = i + 1
+        else # b > a, copy b[j] to M
+          mergedParents[k] = b_parent
+          mergedReturnStates[k] = b.returnStates[j]
+          j = j + 1
+        end
+        k = k + 1
+      end
+      # copy over any payloads remaining in either array
+      if i < a.returnStates.length then
+        for p in i..a.returnStates.length()-1 do
+          mergedParents[k] = a.parents[p]
+          mergedReturnStates[k] = a.returnStates[p]
+          k = k + 1
+        end
+      else
+        for p in j..b.returnStates.length()-1 do
+          mergedParents[k] = b.parents[p]
+          mergedReturnStates[k] = b.returnStates[p]
+          k = k + 1
+        end
+      end
+
+      # trim merged if we combined a few that had same stack tops
+      if k < mergedParents.length() # write index < last position; trim
+        if k == 1 # for just one merged element, return singleton top
+          a_ = SingletonPredictionContext.create(mergedParents[0], mergedReturnStates[0])
+          mergeCache.put(a,b,a_) if mergeCache
+          return a_
+        end
+        mergedParents = mergedParents[0..k-1]
+        mergedReturnStates = mergedReturnStates[0..k-1]
+      end
+      capM = ArrayPredictionContext.new(mergedParents, mergedReturnStates)
+
+      # if we created same array as a or b, return that instead
+      # TODO: track whether this is possible above during merge sort for speed
+      if capM==a
+        mergeCache.put(a,b,a) if mergeCache
+        return a
+      end
+      if capM==b
+        mergeCache.put(a,b,b) if mergeCache
+        return b
+      end
+      combineCommonParents(mergedParents)
+
+      mergeCache.put(a,b,capM) if mergeCache
+      return capM
+    end
+
+    #
+    # Make pass over all <em>M</em> {@code parents}; merge any {@code equals()}
+    # ones.
+    #/
+    def combineCommonParents(parents)
+      uniqueParents = Hash.new
+
+      parents.each{|parent|
+        if not uniqueParents.has_key? parent
+          uniqueParents[parent] = parent
+        end
+      }
+      parents.each_index {|p|
+        parents[p] = uniqueParents[parents[p]]
+      }
+    end
+    def getCachedPredictionContext(context, contextCache, visited)
+      if context.isEmpty()
+        return context
+      end
+      existing = visited[context]
+      return existing unless existing.nil?
+      existing = contextCache.get(context)
+      if not existing.nil? then
+        visited[context] = existing
+        return existing
+      end
+      changed = false
+      parents = Array.new context.length()
+      parents.each_index do |i|
+        parent = getCachedPredictionContext(context.getParent(i), contextCache, visited)
+        if changed or parent != context.getParent(i)
+          if not changed then
+            parents = Array.new context.length()
+            context.each_index {|j| #for j in range(0, len(context)):
+              parents[j] = context.getParent(j)
+            }
+            changed = true
+          end
+          parents[i] = parent
+        end
+      end
+      if not changed
+        contextCache.add(context)
+        visited[context] = context
+        return context
+      end
+      updated = nil
+      if parents.length == 0
+        updated = PredictionContext.EMPTY
+      elsif parents.length == 1
+        updated = SingletonPredictionContext.create(parents[0], context.getReturnState(0))
+      else
+        updated = ArrayPredictionContext.new(parents, context.returnStates)
+      end
+
+      contextCache.add(updated)
+      visited[updated] = updated
+      visited[context] = updated
+
+      return updated
+    end
+
+    # # extra structures, but cut/paste/morphed works, so leave it.
+    # # seems to do a breadth-first walk
+    # public static List<PredictionContext> getAllNodes(PredictionContext context) {
+    #   Map<PredictionContext, PredictionContext> visited =
+    #     new IdentityHashMap<PredictionContext, PredictionContext>();
+    #   Deque<PredictionContext> workList = new ArrayDeque<PredictionContext>();
+    #   workList.add(context);
+    #   visited.put(context, context);
+    #   List<PredictionContext> nodes = new ArrayList<PredictionContext>();
+    #   while (!workList.isEmpty()) {
+    #     PredictionContext current = workList.pop();
+    #     nodes.add(current);
+    #     for (int i = 0; i < current.size(); i++) {
+    #       PredictionContext parent = current.getParent(i);
+    #       if ( parent!=null && visited.put(parent, parent) == null) {
+    #         workList.push(parent);
+    #       }
+    #     }
+    #   }
+    #   return nodes;
+    # }
+    # ter's recursive version of Sam's getAllNodes()
+    def getAllContextNodes(context, nodes=nil, visited=nil)
+      if nodes.nil?
+        nodes = Array.new
+        return getAllContextNodes(context, nodes, visited)
+      elsif visited.nil?
+        visited = Hash.new
+        return getAllContextNodes(context, nodes, visited)
+      else
+        if context.nil? or visited.has_key? context
+          return nodes
+        end
+        visited[context] = context
+        nodes.add(context)
+        for i in 0..context.length do
+          getAllContextNodes(context.getParent(i), nodes, visited)
+        end
+        return nodes
+      end
+    end
+  end
+end
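The merge rules documented above are easiest to see on a tiny case. The sketch below mixes PredictionContextFunctions into a throwaway class (in this gem the module is normally included by the ATN simulators); ContextMerger is an illustrative name, and the merge cache argument is left nil so only the classes shown in this diff are exercised. In local-context mode, $ (EMPTY) acts as a wildcard, so merging $ with any context collapses back to $.

    class ContextMerger
      include PredictionContextFunctions
    end

    m   = ContextMerger.new
    ctx = SingletonPredictionContext.create(PredictionContext.EMPTY, 10)

    merged = m.merge(PredictionContext.EMPTY, ctx, true, nil)  # rootIsWildcard = true
    merged.isEmpty   # => true: $ + x = $ in a local-context merge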