omlish-0.0.0.dev57-py3-none-any.whl → omlish-0.0.0.dev59-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. omlish/__about__.py +2 -2
  2. omlish/antlr/__init__.py +0 -0
  3. omlish/antlr/_runtime/BufferedTokenStream.py +305 -0
  4. omlish/antlr/_runtime/CommonTokenFactory.py +64 -0
  5. omlish/antlr/_runtime/CommonTokenStream.py +90 -0
  6. omlish/antlr/_runtime/FileStream.py +30 -0
  7. omlish/antlr/_runtime/InputStream.py +90 -0
  8. omlish/antlr/_runtime/IntervalSet.py +183 -0
  9. omlish/antlr/_runtime/LL1Analyzer.py +176 -0
  10. omlish/antlr/_runtime/Lexer.py +332 -0
  11. omlish/antlr/_runtime/ListTokenSource.py +147 -0
  12. omlish/antlr/_runtime/Parser.py +583 -0
  13. omlish/antlr/_runtime/ParserInterpreter.py +173 -0
  14. omlish/antlr/_runtime/ParserRuleContext.py +189 -0
  15. omlish/antlr/_runtime/PredictionContext.py +632 -0
  16. omlish/antlr/_runtime/Recognizer.py +150 -0
  17. omlish/antlr/_runtime/RuleContext.py +230 -0
  18. omlish/antlr/_runtime/StdinStream.py +14 -0
  19. omlish/antlr/_runtime/Token.py +158 -0
  20. omlish/antlr/_runtime/TokenStreamRewriter.py +258 -0
  21. omlish/antlr/_runtime/Utils.py +36 -0
  22. omlish/antlr/_runtime/__init__.py +24 -0
  23. omlish/antlr/_runtime/_pygrun.py +174 -0
  24. omlish/antlr/_runtime/atn/ATN.py +135 -0
  25. omlish/antlr/_runtime/atn/ATNConfig.py +162 -0
  26. omlish/antlr/_runtime/atn/ATNConfigSet.py +215 -0
  27. omlish/antlr/_runtime/atn/ATNDeserializationOptions.py +27 -0
  28. omlish/antlr/_runtime/atn/ATNDeserializer.py +449 -0
  29. omlish/antlr/_runtime/atn/ATNSimulator.py +50 -0
  30. omlish/antlr/_runtime/atn/ATNState.py +267 -0
  31. omlish/antlr/_runtime/atn/ATNType.py +20 -0
  32. omlish/antlr/_runtime/atn/LexerATNSimulator.py +573 -0
  33. omlish/antlr/_runtime/atn/LexerAction.py +301 -0
  34. omlish/antlr/_runtime/atn/LexerActionExecutor.py +146 -0
  35. omlish/antlr/_runtime/atn/ParserATNSimulator.py +1664 -0
  36. omlish/antlr/_runtime/atn/PredictionMode.py +502 -0
  37. omlish/antlr/_runtime/atn/SemanticContext.py +333 -0
  38. omlish/antlr/_runtime/atn/Transition.py +271 -0
  39. omlish/antlr/_runtime/atn/__init__.py +4 -0
  40. omlish/antlr/_runtime/dfa/DFA.py +136 -0
  41. omlish/antlr/_runtime/dfa/DFASerializer.py +76 -0
  42. omlish/antlr/_runtime/dfa/DFAState.py +129 -0
  43. omlish/antlr/_runtime/dfa/__init__.py +4 -0
  44. omlish/antlr/_runtime/error/DiagnosticErrorListener.py +110 -0
  45. omlish/antlr/_runtime/error/ErrorListener.py +75 -0
  46. omlish/antlr/_runtime/error/ErrorStrategy.py +712 -0
  47. omlish/antlr/_runtime/error/Errors.py +176 -0
  48. omlish/antlr/_runtime/error/__init__.py +4 -0
  49. omlish/antlr/_runtime/tree/Chunk.py +33 -0
  50. omlish/antlr/_runtime/tree/ParseTreeMatch.py +121 -0
  51. omlish/antlr/_runtime/tree/ParseTreePattern.py +75 -0
  52. omlish/antlr/_runtime/tree/ParseTreePatternMatcher.py +377 -0
  53. omlish/antlr/_runtime/tree/RuleTagToken.py +53 -0
  54. omlish/antlr/_runtime/tree/TokenTagToken.py +50 -0
  55. omlish/antlr/_runtime/tree/Tree.py +194 -0
  56. omlish/antlr/_runtime/tree/Trees.py +114 -0
  57. omlish/antlr/_runtime/tree/__init__.py +2 -0
  58. omlish/antlr/_runtime/xpath/XPath.py +272 -0
  59. omlish/antlr/_runtime/xpath/XPathLexer.py +98 -0
  60. omlish/antlr/_runtime/xpath/__init__.py +4 -0
  61. omlish/marshal/__init__.py +10 -5
  62. omlish/marshal/nop.py +18 -0
  63. omlish/marshal/primitives.py +16 -6
  64. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/METADATA +1 -1
  65. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/RECORD +69 -9
  66. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/LICENSE +0 -0
  67. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/WHEEL +0 -0
  68. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/entry_points.txt +0 -0
  69. {omlish-0.0.0.dev57.dist-info → omlish-0.0.0.dev59.dist-info}/top_level.txt +0 -0
omlish/antlr/_runtime/TokenStreamRewriter.py
@@ -0,0 +1,258 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ #
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+ from io import StringIO
+ from .Token import Token
+
+ from .CommonTokenStream import CommonTokenStream
+
+
+ class TokenStreamRewriter(object):
+     __slots__ = ('tokens', 'programs', 'lastRewriteTokenIndexes')
+
+     DEFAULT_PROGRAM_NAME = "default"
+     PROGRAM_INIT_SIZE = 100
+     MIN_TOKEN_INDEX = 0
+
+     def __init__(self, tokens):
+         """
+         :type tokens: antlr4.BufferedTokenStream.BufferedTokenStream
+         :param tokens:
+         :return:
+         """
+         super(TokenStreamRewriter, self).__init__()
+         self.tokens = tokens
+         self.programs = {self.DEFAULT_PROGRAM_NAME: []}
+         self.lastRewriteTokenIndexes = {}
+
+     def getTokenStream(self):
+         return self.tokens
+
+     def rollback(self, instruction_index, program_name):
+         ins = self.programs.get(program_name, None)
+         if ins:
+             self.programs[program_name] = ins[self.MIN_TOKEN_INDEX: instruction_index]
+
+     def deleteProgram(self, program_name=DEFAULT_PROGRAM_NAME):
+         self.rollback(self.MIN_TOKEN_INDEX, program_name)
+
+     def insertAfterToken(self, token, text, program_name=DEFAULT_PROGRAM_NAME):
+         self.insertAfter(token.tokenIndex, text, program_name)
+
+     def insertAfter(self, index, text, program_name=DEFAULT_PROGRAM_NAME):
+         op = self.InsertAfterOp(self.tokens, index + 1, text)
+         rewrites = self.getProgram(program_name)
+         op.instructionIndex = len(rewrites)
+         rewrites.append(op)
+
+     def insertBeforeIndex(self, index, text):
+         self.insertBefore(self.DEFAULT_PROGRAM_NAME, index, text)
+
+     def insertBeforeToken(self, token, text, program_name=DEFAULT_PROGRAM_NAME):
+         self.insertBefore(program_name, token.tokenIndex, text)
+
+     def insertBefore(self, program_name, index, text):
+         op = self.InsertBeforeOp(self.tokens, index, text)
+         rewrites = self.getProgram(program_name)
+         op.instructionIndex = len(rewrites)
+         rewrites.append(op)
+
+     def replaceIndex(self, index, text):
+         self.replace(self.DEFAULT_PROGRAM_NAME, index, index, text)
+
+     def replaceRange(self, from_idx, to_idx, text):
+         self.replace(self.DEFAULT_PROGRAM_NAME, from_idx, to_idx, text)
+
+     def replaceSingleToken(self, token, text):
+         self.replace(self.DEFAULT_PROGRAM_NAME, token.tokenIndex, token.tokenIndex, text)
+
+     def replaceRangeTokens(self, from_token, to_token, text, program_name=DEFAULT_PROGRAM_NAME):
+         self.replace(program_name, from_token.tokenIndex, to_token.tokenIndex, text)
+
+     def replace(self, program_name, from_idx, to_idx, text):
+         if any((from_idx > to_idx, from_idx < 0, to_idx < 0, to_idx >= len(self.tokens.tokens))):
+             raise ValueError(
+                 'replace: range invalid: {}..{}(size={})'.format(from_idx, to_idx, len(self.tokens.tokens)))
+         op = self.ReplaceOp(from_idx, to_idx, self.tokens, text)
+         rewrites = self.getProgram(program_name)
+         op.instructionIndex = len(rewrites)
+         rewrites.append(op)
+
+     def deleteToken(self, token):
+         self.delete(self.DEFAULT_PROGRAM_NAME, token, token)
+
+     def deleteIndex(self, index):
+         self.delete(self.DEFAULT_PROGRAM_NAME, index, index)
+
+     def delete(self, program_name, from_idx, to_idx):
+         if isinstance(from_idx, Token):
+             self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, "")
+         else:
+             self.replace(program_name, from_idx, to_idx, "")
+
+     def lastRewriteTokenIndex(self, program_name=DEFAULT_PROGRAM_NAME):
+         return self.lastRewriteTokenIndexes.get(program_name, -1)
+
+     def setLastRewriteTokenIndex(self, program_name, i):
+         self.lastRewriteTokenIndexes[program_name] = i
+
+     def getProgram(self, program_name):
+         return self.programs.setdefault(program_name, [])
+
+     def getDefaultText(self):
+         return self.getText(self.DEFAULT_PROGRAM_NAME, 0, len(self.tokens.tokens) - 1)
+
+     def getText(self, program_name, start:int, stop:int):
+         """
+         :return: the text in tokens[start, stop](closed interval)
+         """
+         rewrites = self.programs.get(program_name)
+
+         # ensure start/end are in range
+         if stop > len(self.tokens.tokens) - 1:
+             stop = len(self.tokens.tokens) - 1
+         if start < 0:
+             start = 0
+
+         # if no instructions to execute
+         if not rewrites: return self.tokens.getText(start, stop)
+         buf = StringIO()
+         indexToOp = self._reduceToSingleOperationPerIndex(rewrites)
+         i = start
+         while all((i <= stop, i < len(self.tokens.tokens))):
+             op = indexToOp.pop(i, None)
+             token = self.tokens.get(i)
+             if op is None:
+                 if token.type != Token.EOF: buf.write(token.text)
+                 i += 1
+             else:
+                 i = op.execute(buf)
+
+         if stop == len(self.tokens.tokens)-1:
+             for op in indexToOp.values():
+                 if op.index >= len(self.tokens.tokens)-1: buf.write(op.text)
+
+         return buf.getvalue()
+
+     def _reduceToSingleOperationPerIndex(self, rewrites):
+         # Walk replaces
+         for i, rop in enumerate(rewrites):
+             if any((rop is None, not isinstance(rop, TokenStreamRewriter.ReplaceOp))):
+                 continue
+             # Wipe prior inserts within range
+             inserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
+             for iop in inserts:
+                 if iop.index == rop.index:
+                     rewrites[iop.instructionIndex] = None
+                     rop.text = '{}{}'.format(iop.text, rop.text)
+                 elif all((iop.index > rop.index, iop.index <= rop.last_index)):
+                     rewrites[iop.instructionIndex] = None
+
+             # Drop any prior replaces contained within
+             prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
+             for prevRop in prevReplaces:
+                 if all((prevRop.index >= rop.index, prevRop.last_index <= rop.last_index)):
+                     rewrites[prevRop.instructionIndex] = None
+                     continue
+                 isDisjoint = any((prevRop.last_index<rop.index, prevRop.index>rop.last_index))
+                 if all((prevRop.text is None, rop.text is None, not isDisjoint)):
+                     rewrites[prevRop.instructionIndex] = None
+                     rop.index = min(prevRop.index, rop.index)
+                     rop.last_index = min(prevRop.last_index, rop.last_index)
+                     print('New rop {}'.format(rop))
+                 elif (not(isDisjoint)):
+                     raise ValueError("replace op boundaries of {} overlap with previous {}".format(rop, prevRop))
+
+         # Walk inserts
+         for i, iop in enumerate(rewrites):
+             if any((iop is None, not isinstance(iop, TokenStreamRewriter.InsertBeforeOp))):
+                 continue
+             prevInserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
+             for prev_index, prevIop in enumerate(prevInserts):
+                 if prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertBeforeOp:
+                     iop.text += prevIop.text
+                     rewrites[prev_index] = None
+                 elif prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertAfterOp:
+                     iop.text = prevIop.text + iop.text
+                     rewrites[prev_index] = None
+             # look for replaces where iop.index is in range; error
+             prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
+             for rop in prevReplaces:
+                 if iop.index == rop.index:
+                     rop.text = iop.text + rop.text
+                     rewrites[i] = None
+                     continue
+                 if all((iop.index >= rop.index, iop.index <= rop.last_index)):
+                     raise ValueError("insert op {} within boundaries of previous {}".format(iop, rop))
+
+         reduced = {}
+         for i, op in enumerate(rewrites):
+             if op is None: continue
+             if reduced.get(op.index): raise ValueError('should be only one op per index')
+             reduced[op.index] = op
+
+         return reduced
+
+     class RewriteOperation(object):
+         __slots__ = ('tokens', 'index', 'text', 'instructionIndex')
+
+         def __init__(self, tokens, index, text=""):
+             """
+             :type tokens: CommonTokenStream
+             :param tokens:
+             :param index:
+             :param text:
+             :return:
+             """
+             self.tokens = tokens
+             self.index = index
+             self.text = text
+             self.instructionIndex = 0
+
+         def execute(self, buf):
+             """
+             :type buf: StringIO.StringIO
+             :param buf:
+             :return:
+             """
+             return self.index
+
+         def __str__(self):
+             return '<{}@{}:"{}">'.format(self.__class__.__name__, self.tokens.get(self.index), self.text)
+
+     class InsertBeforeOp(RewriteOperation):
+
+         def __init__(self, tokens, index, text=""):
+             super(TokenStreamRewriter.InsertBeforeOp, self).__init__(tokens, index, text)
+
+         def execute(self, buf):
+             buf.write(self.text)
+             if self.tokens.get(self.index).type != Token.EOF:
+                 buf.write(self.tokens.get(self.index).text)
+             return self.index + 1
+
+     class InsertAfterOp(InsertBeforeOp):
+         pass
+
+     class ReplaceOp(RewriteOperation):
+         __slots__ = 'last_index'
+
+         def __init__(self, from_idx, to_idx, tokens, text):
+             super(TokenStreamRewriter.ReplaceOp, self).__init__(tokens, from_idx, text)
+             self.last_index = to_idx
+
+         def execute(self, buf):
+             if self.text:
+                 buf.write(self.text)
+             return self.last_index + 1
+
+         def __str__(self):
+             if self.text:
+                 return '<ReplaceOp@{}..{}:"{}">'.format(self.tokens.get(self.index), self.tokens.get(self.last_index),
+                                                         self.text)
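
For orientation, a minimal sketch of how this vendored rewriter is typically driven; MyLexer stands in for a hypothetical ANTLR-generated lexer and is not part of this wheel:

    from omlish.antlr._runtime import CommonTokenStream, InputStream
    from omlish.antlr._runtime.TokenStreamRewriter import TokenStreamRewriter

    lexer = MyLexer(InputStream('x = 1 ;'))        # hypothetical generated lexer
    stream = CommonTokenStream(lexer)
    stream.fill()                                  # the rewriter works over a fully buffered stream

    rewriter = TokenStreamRewriter(stream)
    rewriter.insertBeforeIndex(0, '# header\n')    # queue an insert before token 0
    rewriter.replaceIndex(2, '42')                 # queue a replacement of the token at index 2
    print(rewriter.getDefaultText())               # original token text with queued edits applied
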
omlish/antlr/_runtime/Utils.py
@@ -0,0 +1,36 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #
+
+ from io import StringIO
+
+ def str_list(val):
+     with StringIO() as buf:
+         buf.write('[')
+         first = True
+         for item in val:
+             if not first:
+                 buf.write(', ')
+             buf.write(str(item))
+             first = False
+         buf.write(']')
+         return buf.getvalue()
+
+ def escapeWhitespace(s:str, escapeSpaces:bool):
+     with StringIO() as buf:
+         for c in s:
+             if c==' ' and escapeSpaces:
+                 buf.write('\u00B7')
+             elif c=='\t':
+                 buf.write("\\t")
+             elif c=='\n':
+                 buf.write("\\n")
+             elif c=='\r':
+                 buf.write("\\r")
+             else:
+                 buf.write(c)
+         return buf.getvalue()
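
As a quick, runnable sketch of these two helpers via the module path added above:

    from omlish.antlr._runtime.Utils import str_list, escapeWhitespace

    print(str_list([1, 2, 3]))               # prints: [1, 2, 3]
    print(escapeWhitespace('a\tb c', True))  # prints: a\tb·c (tab escaped, space shown as U+00B7)
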
omlish/antlr/_runtime/__init__.py
@@ -0,0 +1,24 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ from .Token import Token
+ from .InputStream import InputStream
+ from .FileStream import FileStream
+ from .StdinStream import StdinStream
+ from .BufferedTokenStream import TokenStream
+ from .CommonTokenStream import CommonTokenStream
+ from .Lexer import Lexer
+ from .Parser import Parser
+ from .dfa.DFA import DFA
+ from .atn.ATN import ATN
+ from .atn.ATNDeserializer import ATNDeserializer
+ from .atn.LexerATNSimulator import LexerATNSimulator
+ from .atn.ParserATNSimulator import ParserATNSimulator
+ from .atn.PredictionMode import PredictionMode
+ from .PredictionContext import PredictionContextCache
+ from .ParserRuleContext import RuleContext, ParserRuleContext
+ from .tree.Tree import ParseTreeListener, ParseTreeVisitor, ParseTreeWalker, TerminalNode, ErrorNode, RuleNode
+ from .error.Errors import RecognitionException, IllegalStateException, NoViableAltException
+ from .error.ErrorStrategy import BailErrorStrategy
+ from .error.DiagnosticErrorListener import DiagnosticErrorListener
+ from .Utils import str_list
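
This module mirrors the top-level antlr4 namespace, so (as a sketch) code written against the upstream runtime can import the same names from the vendored copy instead:

    # upstream style: from antlr4 import CommonTokenStream, InputStream, ParseTreeWalker
    from omlish.antlr._runtime import (
        CommonTokenStream,
        InputStream,
        ParseTreeWalker,
    )
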
omlish/antlr/_runtime/_pygrun.py
@@ -0,0 +1,174 @@
+ #!python
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ __author__ = 'jszheng'
+ import optparse
+ import sys
+ import os
+ import importlib
+ from .. import *
+
+
+ # this is a python version of TestRig
+ def beautify_lisp_string(in_string):
+     indent_size = 3
+     add_indent = ' '*indent_size
+     out_string = in_string[0] # no indent for 1st (
+     indent = ''
+     for i in range(1, len(in_string)):
+         if in_string[i] == '(' and in_string[i+1] != ' ':
+             indent += add_indent
+             out_string += "\n" + indent + '('
+         elif in_string[i] == ')':
+             out_string += ')'
+             if len(indent) > 0:
+                 indent = indent.replace(add_indent, '', 1)
+         else:
+             out_string += in_string[i]
+     return out_string
+
+
+ def main():
+
+     #############################################################
+     # parse options
+     # not support -gui -encoding -ps
+     #############################################################
+     usage = "Usage: %prog [options] Grammar_Name Start_Rule"
+     parser = optparse.OptionParser(usage=usage)
+     # parser.add_option('-t', '--tree',
+     #                   dest="out_file",
+     #                   default="default.out",
+     #                   help='set output file name',
+     #                   )
+     parser.add_option('-t', '--tree',
+                       default=False,
+                       action='store_true',
+                       help='Print AST tree'
+                       )
+     parser.add_option('-k', '--tokens',
+                       dest="token",
+                       default=False,
+                       action='store_true',
+                       help='Show Tokens'
+                       )
+     parser.add_option('-s', '--sll',
+                       dest="sll",
+                       default=False,
+                       action='store_true',
+                       help='Show SLL'
+                       )
+     parser.add_option('-d', '--diagnostics',
+                       dest="diagnostics",
+                       default=False,
+                       action='store_true',
+                       help='Enable diagnostics error listener'
+                       )
+     parser.add_option('-a', '--trace',
+                       dest="trace",
+                       default=False,
+                       action='store_true',
+                       help='Enable Trace'
+                       )
+
+     options, remainder = parser.parse_args()
+     if len(remainder) < 2:
+         print('ERROR: You have to provide at least 2 arguments!')
+         parser.print_help()
+         exit(1)
+     else:
+         grammar = remainder.pop(0)
+         start_rule = remainder.pop(0)
+         file_list = remainder
+
+     #############################################################
+     # check and load antlr generated files
+     #############################################################
+     # dynamic load the module and class
+     lexerName = grammar + 'Lexer'
+     parserName = grammar + 'Parser'
+     # check if the generate file exist
+     lexer_file = lexerName + '.py'
+     parser_file = parserName + '.py'
+     if not os.path.exists(lexer_file):
+         print("[ERROR] Can't find lexer file {}!".format(lexer_file))
+         print(os.path.realpath('.'))
+         exit(1)
+     if not os.path.exists(parser_file):
+         print("[ERROR] Can't find parser file {}!".format(lexer_file))
+         print(os.path.realpath('.'))
+         exit(1)
+
+     # current directory is where the generated file loaded
+     # the script might be in different place.
+     sys.path.append('.')
+     # print(sys.path)
+
+     # add current directory to python global namespace in case of relative imports
+     globals().update({'__package__': os.path.basename(os.getcwd())})
+
+     # print("Load Lexer {}".format(lexerName))
+     module_lexer = __import__(lexerName, globals(), locals(), lexerName)
+     class_lexer = getattr(module_lexer, lexerName)
+     # print(class_lexer)
+
+     # print("Load Parser {}".format(parserName))
+     module_parser = __import__(parserName, globals(), locals(), parserName)
+     class_parser = getattr(module_parser, parserName)
+     # print(class_parser)
+
+     #############################################################
+     # main process steps.
+     #############################################################
+     def process(input_stream, class_lexer, class_parser):
+         lexer = class_lexer(input_stream)
+         token_stream = CommonTokenStream(lexer)
+         token_stream.fill()
+         if options.token: # need to show token
+             for tok in token_stream.tokens:
+                 print(tok)
+         if start_rule == 'tokens':
+             return
+
+         parser = class_parser(token_stream)
+
+         if options.diagnostics:
+             parser.addErrorListener(DiagnosticErrorListener())
+             parser._interp.predictionMode = PredictionMode.LL_EXACT_AMBIG_DETECTION
+         if options.tree:
+             parser.buildParseTrees = True
+         if options.sll:
+             parser._interp.predictionMode = PredictionMode.SLL
+         #parser.setTokenStream(token_stream)
+         parser.setTrace(options.trace)
+         if hasattr(parser, start_rule):
+             func_start_rule = getattr(parser, start_rule)
+             parser_ret = func_start_rule()
+             if options.tree:
+                 lisp_tree_str = parser_ret.toStringTree(recog=parser)
+                 print(beautify_lisp_string(lisp_tree_str))
+         else:
+             print("[ERROR] Can't find start rule '{}' in parser '{}'".format(start_rule, parserName))
+
+     #############################################################
+     # use stdin if not provide file as input stream
+     #############################################################
+     if len(file_list) == 0:
+         input_stream = InputStream(sys.stdin.read())
+         process(input_stream, class_lexer, class_parser)
+         exit(0)
+
+     #############################################################
+     # iterate all input file
+     #############################################################
+     for file_name in file_list:
+         if os.path.exists(file_name) and os.path.isfile(file_name):
+             input_stream = FileStream(file_name)
+             process(input_stream, class_lexer, class_parser)
+         else:
+             print("[ERROR] file {} not exist".format(os.path.normpath(file_name)))
+
+
+ if __name__ == '__main__':
+     main()
omlish/antlr/_runtime/atn/ATN.py
@@ -0,0 +1,135 @@
+ # type: ignore
+ # ruff: noqa
+ # flake8: noqa
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+ # Use of this file is governed by the BSD 3-clause license that
+ # can be found in the LICENSE.txt file in the project root.
+ #/
+ from ..IntervalSet import IntervalSet
+
+ from ..RuleContext import RuleContext
+
+ from ..Token import Token
+ from .ATNType import ATNType
+ from .ATNState import ATNState, DecisionState
+
+
+ class ATN(object):
+     __slots__ = (
+         'grammarType', 'maxTokenType', 'states', 'decisionToState',
+         'ruleToStartState', 'ruleToStopState', 'modeNameToStartState',
+         'ruleToTokenType', 'lexerActions', 'modeToStartState'
+     )
+
+     INVALID_ALT_NUMBER = 0
+
+     # Used for runtime deserialization of ATNs from strings#/
+     def __init__(self, grammarType:ATNType , maxTokenType:int ):
+         # The type of the ATN.
+         self.grammarType = grammarType
+         # The maximum value for any symbol recognized by a transition in the ATN.
+         self.maxTokenType = maxTokenType
+         self.states = []
+         # Each subrule/rule is a decision point and we must track them so we
+         # can go back later and build DFA predictors for them. This includes
+         # all the rules, subrules, optional blocks, ()+, ()* etc...
+         self.decisionToState = []
+         # Maps from rule index to starting state number.
+         self.ruleToStartState = []
+         # Maps from rule index to stop state number.
+         self.ruleToStopState = None
+         self.modeNameToStartState = dict()
+         # For lexer ATNs, this maps the rule index to the resulting token type.
+         # For parser ATNs, this maps the rule index to the generated bypass token
+         # type if the
+         # {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
+         # deserialization option was specified; otherwise, this is {@code null}.
+         self.ruleToTokenType = None
+         # For lexer ATNs, this is an array of {@link LexerAction} objects which may
+         # be referenced by action transitions in the ATN.
+         self.lexerActions = None
+         self.modeToStartState = []
+
+     # Compute the set of valid tokens that can occur starting in state {@code s}.
+     # If {@code ctx} is null, the set of tokens will not include what can follow
+     # the rule surrounding {@code s}. In other words, the set will be
+     # restricted to tokens reachable staying within {@code s}'s rule.
+     def nextTokensInContext(self, s:ATNState, ctx:RuleContext):
+         from ..LL1Analyzer import LL1Analyzer
+         anal = LL1Analyzer(self)
+         return anal.LOOK(s, ctx=ctx)
+
+     # Compute the set of valid tokens that can occur starting in {@code s} and
+     # staying in same rule. {@link Token#EPSILON} is in set if we reach end of
+     # rule.
+     def nextTokensNoContext(self, s:ATNState):
+         if s.nextTokenWithinRule is not None:
+             return s.nextTokenWithinRule
+         s.nextTokenWithinRule = self.nextTokensInContext(s, None)
+         s.nextTokenWithinRule.readonly = True
+         return s.nextTokenWithinRule
+
+     def nextTokens(self, s:ATNState, ctx:RuleContext = None):
+         if ctx==None:
+             return self.nextTokensNoContext(s)
+         else:
+             return self.nextTokensInContext(s, ctx)
+
+     def addState(self, state:ATNState):
+         if state is not None:
+             state.atn = self
+             state.stateNumber = len(self.states)
+         self.states.append(state)
+
+     def removeState(self, state:ATNState):
+         self.states[state.stateNumber] = None # just free mem, don't shift states in list
+
+     def defineDecisionState(self, s:DecisionState):
+         self.decisionToState.append(s)
+         s.decision = len(self.decisionToState)-1
+         return s.decision
+
+     def getDecisionState(self, decision:int):
+         if len(self.decisionToState)==0:
+             return None
+         else:
+             return self.decisionToState[decision]
+
+     # Computes the set of input symbols which could follow ATN state number
+     # {@code stateNumber} in the specified full {@code context}. This method
+     # considers the complete parser context, but does not evaluate semantic
+     # predicates (i.e. all predicates encountered during the calculation are
+     # assumed true). If a path in the ATN exists from the starting state to the
+     # {@link RuleStopState} of the outermost context without matching any
+     # symbols, {@link Token#EOF} is added to the returned set.
+     #
+     # <p>If {@code context} is {@code null}, it is treated as
+     # {@link ParserRuleContext#EMPTY}.</p>
+     #
+     # @param stateNumber the ATN state number
+     # @param context the full parse context
+     # @return The set of potentially valid input symbols which could follow the
+     # specified state in the specified context.
+     # @throws IllegalArgumentException if the ATN does not contain a state with
+     # number {@code stateNumber}
+     #/
+     def getExpectedTokens(self, stateNumber:int, ctx:RuleContext ):
+         if stateNumber < 0 or stateNumber >= len(self.states):
+             raise Exception("Invalid state number.")
+         s = self.states[stateNumber]
+         following = self.nextTokens(s)
+         if Token.EPSILON not in following:
+             return following
+         expected = IntervalSet()
+         expected.addSet(following)
+         expected.removeOne(Token.EPSILON)
+         while (ctx != None and ctx.invokingState >= 0 and Token.EPSILON in following):
+             invokingState = self.states[ctx.invokingState]
+             rt = invokingState.transitions[0]
+             following = self.nextTokens(rt.followState)
+             expected.addSet(following)
+             expected.removeOne(Token.EPSILON)
+             ctx = ctx.parentCtx
+         if Token.EPSILON in following:
+             expected.addOne(Token.EOF)
+         return expected