omextra 0.0.0.dev436__py3-none-any.whl → 0.0.0.dev438__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Files changed (99)
  1. omextra/formats/json/_antlr/JsonLexer.py +1 -1
  2. omextra/formats/json/_antlr/JsonListener.py +1 -1
  3. omextra/formats/json/_antlr/JsonParser.py +1 -1
  4. omextra/formats/json/_antlr/JsonVisitor.py +1 -1
  5. omextra/formats/json5/Json5.g4 +168 -0
  6. omextra/formats/json5/__init__.py +0 -0
  7. omextra/formats/json5/_antlr/Json5Lexer.py +354 -0
  8. omextra/formats/json5/_antlr/Json5Listener.py +79 -0
  9. omextra/formats/json5/_antlr/Json5Parser.py +617 -0
  10. omextra/formats/json5/_antlr/Json5Visitor.py +52 -0
  11. omextra/formats/json5/_antlr/__init__.py +0 -0
  12. omextra/formats/json5/parsing.py +101 -0
  13. omextra/specs/proto/_antlr/Protobuf3Lexer.py +1 -1
  14. omextra/specs/proto/_antlr/Protobuf3Listener.py +1 -1
  15. omextra/specs/proto/_antlr/Protobuf3Parser.py +1 -1
  16. omextra/specs/proto/_antlr/Protobuf3Visitor.py +1 -1
  17. omextra/specs/proto/parsing.py +2 -2
  18. omextra/sql/parsing/_antlr/MinisqlLexer.py +1 -1
  19. omextra/sql/parsing/_antlr/MinisqlListener.py +1 -1
  20. omextra/sql/parsing/_antlr/MinisqlParser.py +1 -1
  21. omextra/sql/parsing/_antlr/MinisqlVisitor.py +1 -1
  22. omextra/sql/parsing/parsing.py +3 -3
  23. omextra/text/antlr/__init__.py +3 -0
  24. omextra/text/antlr/_runtime/BufferedTokenStream.py +305 -0
  25. omextra/text/antlr/_runtime/CommonTokenFactory.py +64 -0
  26. omextra/text/antlr/_runtime/CommonTokenStream.py +90 -0
  27. omextra/text/antlr/_runtime/FileStream.py +30 -0
  28. omextra/text/antlr/_runtime/InputStream.py +90 -0
  29. omextra/text/antlr/_runtime/IntervalSet.py +183 -0
  30. omextra/text/antlr/_runtime/LICENSE.txt +28 -0
  31. omextra/text/antlr/_runtime/LL1Analyzer.py +176 -0
  32. omextra/text/antlr/_runtime/Lexer.py +332 -0
  33. omextra/text/antlr/_runtime/ListTokenSource.py +147 -0
  34. omextra/text/antlr/_runtime/Parser.py +583 -0
  35. omextra/text/antlr/_runtime/ParserInterpreter.py +173 -0
  36. omextra/text/antlr/_runtime/ParserRuleContext.py +189 -0
  37. omextra/text/antlr/_runtime/PredictionContext.py +632 -0
  38. omextra/text/antlr/_runtime/Recognizer.py +150 -0
  39. omextra/text/antlr/_runtime/RuleContext.py +230 -0
  40. omextra/text/antlr/_runtime/StdinStream.py +14 -0
  41. omextra/text/antlr/_runtime/Token.py +158 -0
  42. omextra/text/antlr/_runtime/TokenStreamRewriter.py +258 -0
  43. omextra/text/antlr/_runtime/Utils.py +36 -0
  44. omextra/text/antlr/_runtime/__init__.py +2 -0
  45. omextra/text/antlr/_runtime/_all.py +24 -0
  46. omextra/text/antlr/_runtime/_pygrun.py +174 -0
  47. omextra/text/antlr/_runtime/atn/ATN.py +135 -0
  48. omextra/text/antlr/_runtime/atn/ATNConfig.py +162 -0
  49. omextra/text/antlr/_runtime/atn/ATNConfigSet.py +215 -0
  50. omextra/text/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
  51. omextra/text/antlr/_runtime/atn/ATNDeserializer.py +449 -0
  52. omextra/text/antlr/_runtime/atn/ATNSimulator.py +50 -0
  53. omextra/text/antlr/_runtime/atn/ATNState.py +267 -0
  54. omextra/text/antlr/_runtime/atn/ATNType.py +20 -0
  55. omextra/text/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
  56. omextra/text/antlr/_runtime/atn/LexerAction.py +301 -0
  57. omextra/text/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
  58. omextra/text/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
  59. omextra/text/antlr/_runtime/atn/PredictionMode.py +502 -0
  60. omextra/text/antlr/_runtime/atn/SemanticContext.py +333 -0
  61. omextra/text/antlr/_runtime/atn/Transition.py +271 -0
  62. omextra/text/antlr/_runtime/atn/__init__.py +4 -0
  63. omextra/text/antlr/_runtime/dfa/DFA.py +136 -0
  64. omextra/text/antlr/_runtime/dfa/DFASerializer.py +76 -0
  65. omextra/text/antlr/_runtime/dfa/DFAState.py +129 -0
  66. omextra/text/antlr/_runtime/dfa/__init__.py +4 -0
  67. omextra/text/antlr/_runtime/error/DiagnosticErrorListener.py +111 -0
  68. omextra/text/antlr/_runtime/error/ErrorListener.py +75 -0
  69. omextra/text/antlr/_runtime/error/ErrorStrategy.py +712 -0
  70. omextra/text/antlr/_runtime/error/Errors.py +176 -0
  71. omextra/text/antlr/_runtime/error/__init__.py +4 -0
  72. omextra/text/antlr/_runtime/tree/Chunk.py +33 -0
  73. omextra/text/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
  74. omextra/text/antlr/_runtime/tree/ParseTreePattern.py +75 -0
  75. omextra/text/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
  76. omextra/text/antlr/_runtime/tree/RuleTagToken.py +53 -0
  77. omextra/text/antlr/_runtime/tree/TokenTagToken.py +50 -0
  78. omextra/text/antlr/_runtime/tree/Tree.py +194 -0
  79. omextra/text/antlr/_runtime/tree/Trees.py +114 -0
  80. omextra/text/antlr/_runtime/tree/__init__.py +2 -0
  81. omextra/text/antlr/_runtime/xpath/XPath.py +278 -0
  82. omextra/text/antlr/_runtime/xpath/XPathLexer.py +98 -0
  83. omextra/text/antlr/_runtime/xpath/__init__.py +4 -0
  84. omextra/text/antlr/cli/consts.py +1 -1
  85. omextra/text/antlr/delimit.py +110 -0
  86. omextra/text/antlr/dot.py +42 -0
  87. omextra/text/antlr/errors.py +14 -0
  88. omextra/text/antlr/input.py +96 -0
  89. omextra/text/antlr/parsing.py +55 -0
  90. omextra/text/antlr/runtime.py +102 -0
  91. omextra/text/antlr/utils.py +38 -0
  92. omextra-0.0.0.dev438.dist-info/METADATA +28 -0
  93. omextra-0.0.0.dev438.dist-info/RECORD +144 -0
  94. omextra-0.0.0.dev436.dist-info/METADATA +0 -73
  95. omextra-0.0.0.dev436.dist-info/RECORD +0 -69
  96. {omextra-0.0.0.dev436.dist-info → omextra-0.0.0.dev438.dist-info}/WHEEL +0 -0
  97. {omextra-0.0.0.dev436.dist-info → omextra-0.0.0.dev438.dist-info}/entry_points.txt +0 -0
  98. {omextra-0.0.0.dev436.dist-info → omextra-0.0.0.dev438.dist-info}/licenses/LICENSE +0 -0
  99. {omextra-0.0.0.dev436.dist-info → omextra-0.0.0.dev438.dist-info}/top_level.txt +0 -0
omextra/text/antlr/_runtime/FileStream.py
@@ -0,0 +1,30 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+ #
+ # This is an InputStream that is loaded from a file all at once
+ # when you construct the object.
+ #
+
+ import codecs
+ from .InputStream import InputStream
+
+
+ class FileStream(InputStream):
+     __slots__ = 'fileName'
+
+     def __init__(self, fileName:str, encoding:str='ascii', errors:str='strict'):
+         super().__init__(self.readDataFrom(fileName, encoding, errors))
+         self.fileName = fileName
+
+     def readDataFrom(self, fileName:str, encoding:str, errors:str='strict'):
+         # read binary to avoid line ending conversion
+         with open(fileName, 'rb') as file:
+             bytes = file.read()
+         return codecs.decode(bytes, encoding, errors)
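For orientation: FileStream just decodes the whole file up front and hands the text to InputStream, so everything downstream operates on an in-memory buffer. A minimal usage sketch, assuming the vendored import path matches the file list above:

    import tempfile

    from omextra.text.antlr._runtime.FileStream import FileStream

    # write a throwaway file so the sketch is self-contained
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('{"a": 1}')
        path = f.name

    stream = FileStream(path, encoding='utf-8')  # override the 'ascii' default
    print(stream.fileName, stream.size)          # original path, number of code points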
omextra/text/antlr/_runtime/InputStream.py
@@ -0,0 +1,90 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+
+ #
+ # Vacuum all input from a string and then treat it like a buffer.
+ #
+ from .Token import Token
+
+
+ class InputStream (object):
+     __slots__ = ('name', 'strdata', '_index', 'data', '_size')
+
+     def __init__(self, data: str):
+         self.name = "<empty>"
+         self.strdata = data
+         self._loadString()
+
+     def _loadString(self):
+         self._index = 0
+         self.data = [ord(c) for c in self.strdata]
+         self._size = len(self.data)
+
+     @property
+     def index(self):
+         return self._index
+
+     @property
+     def size(self):
+         return self._size
+
+     # Reset the stream so that it's in the same state it was
+     # when the object was created *except* the data array is not
+     # touched.
+     #
+     def reset(self):
+         self._index = 0
+
+     def consume(self):
+         if self._index >= self._size:
+             assert self.LA(1) == Token.EOF
+             raise Exception("cannot consume EOF")
+         self._index += 1
+
+     def LA(self, offset: int):
+         if offset==0:
+             return 0 # undefined
+         if offset<0:
+             offset += 1 # e.g., translate LA(-1) to use offset=0
+         pos = self._index + offset - 1
+         if pos < 0 or pos >= self._size: # invalid
+             return Token.EOF
+         return self.data[pos]
+
+     def LT(self, offset: int):
+         return self.LA(offset)
+
+     # mark/release do nothing; we have entire buffer
+     def mark(self):
+         return -1
+
+     def release(self, marker: int):
+         pass
+
+     # consume() ahead until p==_index; can't just set p=_index as we must
+     # update line and column. If we seek backwards, just set p
+     #
+     def seek(self, _index: int):
+         if _index<=self._index:
+             self._index = _index # just jump; don't update stream state (line, ...)
+             return
+         # seek forward
+         self._index = min(_index, self._size)
+
+     def getText(self, start :int, stop: int):
+         if stop >= self._size:
+             stop = self._size-1
+         if start >= self._size:
+             return ""
+         else:
+             return self.strdata[start:stop+1]
+
+     def __str__(self):
+         return self.strdata
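The stream contract here is small: LA(k) is 1-based lookahead over code points (returning Token.EOF past either end), consume() advances one position, and getText(start, stop) treats stop as inclusive. A short sketch of that behavior, assuming the vendored import paths:

    from omextra.text.antlr._runtime.InputStream import InputStream
    from omextra.text.antlr._runtime.Token import Token

    s = InputStream("ab")
    assert s.LA(1) == ord('a')      # 1-based lookahead, returns a code point
    s.consume()
    assert s.LA(1) == ord('b')
    assert s.LA(-1) == ord('a')     # negative offsets look backwards
    assert s.getText(0, 1) == "ab"  # the stop index is inclusive
    s.consume()
    assert s.LA(1) == Token.EOF     # past the end: the EOF sentinel, not an error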
omextra/text/antlr/_runtime/IntervalSet.py
@@ -0,0 +1,183 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+ from io import StringIO
+ from .Token import Token
+
+ # need forward declarations
+ IntervalSet = None
+
+ class IntervalSet(object):
+     __slots__ = ('intervals', 'readonly')
+
+     def __init__(self):
+         self.intervals = None
+         self.readonly = False
+
+     def __iter__(self):
+         if self.intervals is not None:
+             for i in self.intervals:
+                 for c in i:
+                     yield c
+
+     def __getitem__(self, item):
+         i = 0
+         for k in self:
+             if i==item:
+                 return k
+             else:
+                 i += 1
+         return Token.INVALID_TYPE
+
+     def addOne(self, v:int):
+         self.addRange(range(v, v+1))
+
+     def addRange(self, v:range):
+         if self.intervals is None:
+             self.intervals = list()
+             self.intervals.append(v)
+         else:
+             # find insert pos
+             k = 0
+             for i in self.intervals:
+                 # distinct range -> insert
+                 if v.stop<i.start:
+                     self.intervals.insert(k, v)
+                     return
+                 # contiguous range -> adjust
+                 elif v.stop==i.start:
+                     self.intervals[k] = range(v.start, i.stop)
+                     return
+                 # overlapping range -> adjust and reduce
+                 elif v.start<=i.stop:
+                     self.intervals[k] = range(min(i.start,v.start), max(i.stop,v.stop))
+                     self.reduce(k)
+                     return
+                 k += 1
+             # greater than any existing
+             self.intervals.append(v)
+
+     def addSet(self, other:IntervalSet):
+         if other.intervals is not None:
+             for i in other.intervals:
+                 self.addRange(i)
+         return self
+
+     def reduce(self, k:int):
+         # only need to reduce if k is not the last
+         if k<len(self.intervals)-1:
+             l = self.intervals[k]
+             r = self.intervals[k+1]
+             # if r contained in l
+             if l.stop >= r.stop:
+                 self.intervals.pop(k+1)
+                 self.reduce(k)
+             elif l.stop >= r.start:
+                 self.intervals[k] = range(l.start, r.stop)
+                 self.intervals.pop(k+1)
+
+     def complement(self, start, stop):
+         result = IntervalSet()
+         result.addRange(range(start,stop+1))
+         for i in self.intervals:
+             result.removeRange(i)
+         return result
+
+     def __contains__(self, item):
+         if self.intervals is None:
+             return False
+         else:
+             return any(item in i for i in self.intervals)
+
+     def __len__(self):
+         return sum(len(i) for i in self.intervals)
+
+     def removeRange(self, v):
+         if v.start==v.stop-1:
+             self.removeOne(v.start)
+         elif self.intervals is not None:
+             k = 0
+             for i in self.intervals:
+                 # intervals are ordered
+                 if v.stop<=i.start:
+                     return
+                 # check for including range, split it
+                 elif v.start>i.start and v.stop<i.stop:
+                     self.intervals[k] = range(i.start, v.start)
+                     x = range(v.stop, i.stop)
+                     self.intervals.insert(k, x)
+                     return
+                 # check for included range, remove it
+                 elif v.start<=i.start and v.stop>=i.stop:
+                     self.intervals.pop(k)
+                     k -= 1 # need another pass
+                 # check for lower boundary
+                 elif v.start<i.stop:
+                     self.intervals[k] = range(i.start, v.start)
+                 # check for upper boundary
+                 elif v.stop<i.stop:
+                     self.intervals[k] = range(v.stop, i.stop)
+                 k += 1
+
+     def removeOne(self, v):
+         if self.intervals is not None:
+             k = 0
+             for i in self.intervals:
+                 # intervals is ordered
+                 if v<i.start:
+                     return
+                 # check for single value range
+                 elif v==i.start and v==i.stop-1:
+                     self.intervals.pop(k)
+                     return
+                 # check for lower boundary
+                 elif v==i.start:
+                     self.intervals[k] = range(i.start+1, i.stop)
+                     return
+                 # check for upper boundary
+                 elif v==i.stop-1:
+                     self.intervals[k] = range(i.start, i.stop-1)
+                     return
+                 # split existing range
+                 elif v<i.stop-1:
+                     x = range(i.start, v)
+                     self.intervals[k] = range(v + 1, i.stop)
+                     self.intervals.insert(k, x)
+                     return
+                 k += 1
+
+
+     def toString(self, literalNames:list, symbolicNames:list):
+         if self.intervals is None:
+             return "{}"
+         with StringIO() as buf:
+             if len(self)>1:
+                 buf.write("{")
+             first = True
+             for i in self.intervals:
+                 for j in i:
+                     if not first:
+                         buf.write(", ")
+                     buf.write(self.elementName(literalNames, symbolicNames, j))
+                     first = False
+             if len(self)>1:
+                 buf.write("}")
+             return buf.getvalue()
+
+     def elementName(self, literalNames:list, symbolicNames:list, a:int):
+         if a==Token.EOF:
+             return "<EOF>"
+         elif a==Token.EPSILON:
+             return "<EPSILON>"
+         else:
+             if a<len(literalNames) and literalNames[a] != "<INVALID>":
+                 return literalNames[a]
+             if a<len(symbolicNames):
+                 return symbolicNames[a]
+             return "<UNKNOWN>"
omextra/text/antlr/_runtime/LICENSE.txt
@@ -0,0 +1,28 @@
+ Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither name of copyright holders nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
omextra/text/antlr/_runtime/LL1Analyzer.py
@@ -0,0 +1,176 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #/
+ from .IntervalSet import IntervalSet
+ from .Token import Token
+ from .PredictionContext import PredictionContext, SingletonPredictionContext, PredictionContextFromRuleContext
+ from .RuleContext import RuleContext
+ from .atn.ATN import ATN
+ from .atn.ATNConfig import ATNConfig
+ from .atn.ATNState import ATNState, RuleStopState
+ from .atn.Transition import WildcardTransition, NotSetTransition, AbstractPredicateTransition, RuleTransition
+
+
+ class LL1Analyzer (object):
+     __slots__ = 'atn'
+
+     #* Special value added to the lookahead sets to indicate that we hit
+     # a predicate during analysis if {@code seeThruPreds==false}.
+     #/
+     HIT_PRED = Token.INVALID_TYPE
+
+     def __init__(self, atn:ATN):
+         self.atn = atn
+
+     #*
+     # Calculates the SLL(1) expected lookahead set for each outgoing transition
+     # of an {@link ATNState}. The returned array has one element for each
+     # outgoing transition in {@code s}. If the closure from transition
+     # <em>i</em> leads to a semantic predicate before matching a symbol, the
+     # element at index <em>i</em> of the result will be {@code null}.
+     #
+     # @param s the ATN state
+     # @return the expected symbols for each outgoing transition of {@code s}.
+     #/
+     def getDecisionLookahead(self, s:ATNState):
+         if s is None:
+             return None
+
+         count = len(s.transitions)
+         look = [] * count
+         for alt in range(0, count):
+             look[alt] = set()
+             lookBusy = set()
+             seeThruPreds = False # fail to get lookahead upon pred
+             self._LOOK(s.transition(alt).target, None, PredictionContext.EMPTY,
+                   look[alt], lookBusy, set(), seeThruPreds, False)
+             # Wipe out lookahead for this alternative if we found nothing
+             # or we had a predicate when we !seeThruPreds
+             if len(look[alt])==0 or self.HIT_PRED in look[alt]:
+                 look[alt] = None
+         return look
+
+     #*
+     # Compute set of tokens that can follow {@code s} in the ATN in the
+     # specified {@code ctx}.
+     #
+     # <p>If {@code ctx} is {@code null} and the end of the rule containing
+     # {@code s} is reached, {@link Token#EPSILON} is added to the result set.
+     # If {@code ctx} is not {@code null} and the end of the outermost rule is
+     # reached, {@link Token#EOF} is added to the result set.</p>
+     #
+     # @param s the ATN state
+     # @param stopState the ATN state to stop at. This can be a
+     # {@link BlockEndState} to detect epsilon paths through a closure.
+     # @param ctx the complete parser context, or {@code null} if the context
+     # should be ignored
+     #
+     # @return The set of tokens that can follow {@code s} in the ATN in the
+     # specified {@code ctx}.
+     #/
+     def LOOK(self, s:ATNState, stopState:ATNState=None, ctx:RuleContext=None):
+         r = IntervalSet()
+         seeThruPreds = True # ignore preds; get all lookahead
+         lookContext = PredictionContextFromRuleContext(s.atn, ctx) if ctx is not None else None
+         self._LOOK(s, stopState, lookContext, r, set(), set(), seeThruPreds, True)
+         return r
+
+     #*
+     # Compute set of tokens that can follow {@code s} in the ATN in the
+     # specified {@code ctx}.
+     #
+     # <p>If {@code ctx} is {@code null} and {@code stopState} or the end of the
+     # rule containing {@code s} is reached, {@link Token#EPSILON} is added to
+     # the result set. If {@code ctx} is not {@code null} and {@code addEOF} is
+     # {@code true} and {@code stopState} or the end of the outermost rule is
+     # reached, {@link Token#EOF} is added to the result set.</p>
+     #
+     # @param s the ATN state.
+     # @param stopState the ATN state to stop at. This can be a
+     # {@link BlockEndState} to detect epsilon paths through a closure.
+     # @param ctx The outer context, or {@code null} if the outer context should
+     # not be used.
+     # @param look The result lookahead set.
+     # @param lookBusy A set used for preventing epsilon closures in the ATN
+     # from causing a stack overflow. Outside code should pass
+     # {@code new HashSet<ATNConfig>} for this argument.
+     # @param calledRuleStack A set used for preventing left recursion in the
+     # ATN from causing a stack overflow. Outside code should pass
+     # {@code new BitSet()} for this argument.
+     # @param seeThruPreds {@code true} to true semantic predicates as
+     # implicitly {@code true} and "see through them", otherwise {@code false}
+     # to treat semantic predicates as opaque and add {@link #HIT_PRED} to the
+     # result if one is encountered.
+     # @param addEOF Add {@link Token#EOF} to the result if the end of the
+     # outermost context is reached. This parameter has no effect if {@code ctx}
+     # is {@code null}.
+     #/
+     def _LOOK(self, s:ATNState, stopState:ATNState , ctx:PredictionContext, look:IntervalSet, lookBusy:set,
+                      calledRuleStack:set, seeThruPreds:bool, addEOF:bool):
+         c = ATNConfig(s, 0, ctx)
+
+         if c in lookBusy:
+             return
+         lookBusy.add(c)
+
+         if s == stopState:
+             if ctx is None:
+                 look.addOne(Token.EPSILON)
+                 return
+             elif ctx.isEmpty() and addEOF:
+                 look.addOne(Token.EOF)
+                 return
+
+         if isinstance(s, RuleStopState ):
+             if ctx is None:
+                 look.addOne(Token.EPSILON)
+                 return
+             elif ctx.isEmpty() and addEOF:
+                 look.addOne(Token.EOF)
+                 return
+
+             if ctx != PredictionContext.EMPTY:
+                 removed = s.ruleIndex in calledRuleStack
+                 try:
+                     calledRuleStack.discard(s.ruleIndex)
+                     # run thru all possible stack tops in ctx
+                     for i in range(0, len(ctx)):
+                         returnState = self.atn.states[ctx.getReturnState(i)]
+                         self._LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+                 finally:
+                     if removed:
+                         calledRuleStack.add(s.ruleIndex)
+                 return
+
+         for t in s.transitions:
+             if type(t) == RuleTransition:
+                 if t.target.ruleIndex in calledRuleStack:
+                     continue
+
+                 newContext = SingletonPredictionContext.create(ctx, t.followState.stateNumber)
+
+                 try:
+                     calledRuleStack.add(t.target.ruleIndex)
+                     self._LOOK(t.target, stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+                 finally:
+                     calledRuleStack.remove(t.target.ruleIndex)
+             elif isinstance(t, AbstractPredicateTransition ):
+                 if seeThruPreds:
+                     self._LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+                 else:
+                     look.addOne(self.HIT_PRED)
+             elif t.isEpsilon:
+                 self._LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+             elif type(t) == WildcardTransition:
+                 look.addRange( range(Token.MIN_USER_TOKEN_TYPE, self.atn.maxTokenType + 1) )
+             else:
+                 set_ = t.label
+                 if set_ is not None:
+                     if isinstance(t, NotSetTransition):
+                         set_ = set_.complement(Token.MIN_USER_TOKEN_TYPE, self.atn.maxTokenType)
+                     look.addSet(set_)
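LOOK is the entry point the runtime itself uses (via ATN.nextTokens) to compute expected-token sets for error reporting. A hedged sketch of calling it directly, assuming the generated Json5Parser added in this release imports cleanly and exposes the usual class-level atn and name tables:

    from omextra.formats.json5._antlr.Json5Parser import Json5Parser
    from omextra.text.antlr._runtime.LL1Analyzer import LL1Analyzer

    atn = Json5Parser.atn                   # generated recognizers carry their ATN
    analyzer = LL1Analyzer(atn)

    # token types that can begin the parser's first rule, ignoring outer context
    first = analyzer.LOOK(atn.ruleToStartState[0])
    print(first.toString(Json5Parser.literalNames, Json5Parser.symbolicNames))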