dhaka 2.1.0 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/lib/evaluator/evaluator.rb +18 -17
- data/lib/grammar/grammar.rb +4 -5
- data/lib/lexer/dfa.rb +63 -13
- data/lib/lexer/lexeme.rb +3 -4
- data/lib/lexer/lexer.rb +12 -3
- data/lib/lexer/lexer_run.rb +22 -10
- data/lib/lexer/regex_grammar.rb +88 -14
- data/lib/lexer/regex_parser.rb +1523 -1401
- data/lib/lexer/specification.rb +29 -3
- data/lib/lexer/state.rb +32 -9
- data/lib/lexer/state_machine.rb +2 -2
- data/lib/parser/channel.rb +4 -4
- data/lib/parser/parser.rb +17 -12
- data/lib/parser/parser_state.rb +3 -1
- data/test/chittagong/chittagong_lexer.rb +63 -63
- data/test/chittagong/chittagong_lexer.rb.rej +189 -0
- data/test/chittagong/chittagong_lexer_specification.rb +6 -8
- data/test/chittagong/chittagong_parser.rb +659 -659
- data/test/chittagong/chittagong_parser.rb.rej +1623 -0
- data/test/{another_lalr_but_not_slr_grammar.rb → core/another_lalr_but_not_slr_grammar.rb} +1 -1
- data/test/{compiled_parser_test.rb → core/compiled_parser_test.rb} +1 -1
- data/test/core/dfa_test.rb +170 -0
- data/test/{evaluator_test.rb → core/evaluator_test.rb} +3 -3
- data/test/{grammar_test.rb → core/grammar_test.rb} +3 -3
- data/test/{lalr_but_not_slr_grammar.rb → core/lalr_but_not_slr_grammar.rb} +0 -0
- data/test/core/lexer_test.rb +139 -0
- data/test/{malformed_grammar.rb → core/malformed_grammar.rb} +0 -0
- data/test/{malformed_grammar_test.rb → core/malformed_grammar_test.rb} +1 -1
- data/test/{nullable_grammar.rb → core/nullable_grammar.rb} +0 -0
- data/test/{parse_result_test.rb → core/parse_result_test.rb} +1 -1
- data/test/{parser_state_test.rb → core/parser_state_test.rb} +1 -1
- data/test/{parser_test.rb → core/parser_test.rb} +2 -2
- data/test/{precedence_grammar.rb → core/precedence_grammar.rb} +0 -0
- data/test/{precedence_grammar_test.rb → core/precedence_grammar_test.rb} +1 -1
- data/test/{rr_conflict_grammar.rb → core/rr_conflict_grammar.rb} +0 -0
- data/test/{simple_grammar.rb → core/simple_grammar.rb} +0 -0
- data/test/{sr_conflict_grammar.rb → core/sr_conflict_grammar.rb} +0 -0
- metadata +25 -22
- data/test/lexer_test.rb +0 -215
data/lib/lexer/specification.rb
CHANGED
@@ -30,7 +30,17 @@ module Dhaka
   # 'z's instead of a word token.
   #
   # The patterns are <i>not</i> Ruby regular expressions - a lot of operators featured in Ruby's regular expression engine are not yet supported.
-  # See http://dhaka.rubyforge.org/regex_grammar.html for the current syntax.
+  # See http://dhaka.rubyforge.org/regex_grammar.html for the current syntax. Patterns may be specified using Ruby regular expression literals
+  # as well as string literals.
+  #
+  # There are a few things to keep in mind with regard to the regular expression implementation:
+  # * The greediest matching expression always wins. Precedences are only used when the same set of characters matches
+  #   multiple expressions.
+  # * All quantifiers are greedy. There is as yet no support for non-greedy modifiers.
+  # * The lookahead operator "/" can behave in counter-intuitive ways in situations where the pre-lookahead-operator expression and the
+  #   post-lookahead-operator expression have characters in common. For example the expression "(ab)+/abcd", when applied to the input
+  #   "abababcd" will yield "ababab" as the match instead of "abab". A good thumb rule is that the pre-lookahead expression is greedy.
+

   class LexerSpecification
     class << self
@@ -38,10 +48,26 @@ module Dhaka
       # it creates a LexerRun object that provides the context for +blk+ to be evaluated in. Methods available in this block
       # are LexerRun#current_lexeme and LexerRun#create_token.
       def for_pattern(pattern, &blk)
-
+        source = case pattern
+          when String : pattern
+          when Regexp : pattern.source
+        end
+        items[source] = LexerSpecificationItem.new(source, priority, blk)
        self.priority += 1
      end
-
+
+      # Use this to automatically handle escaping for regular expression metacharacters. For example,
+      #   for_symbol('+') { ... }
+      # translates to:
+      #   for_pattern('\+') { ... }
+      def for_symbol(symbol, &blk)
+        if LexerSupport::OPERATOR_CHARACTERS.include?(symbol)
+          for_pattern("\\#{symbol}", &blk)
+        else
+          for_pattern(symbol, &blk)
+        end
+      end
+
      private
      def inherited(specification)
        class << specification
data/lib/lexer/state.rb
CHANGED
@@ -1,15 +1,22 @@
 module Dhaka
   module LexerSupport
+
     class State
-      attr_reader :transitions, :
-      def initialize
-        @state_machine
-        @
-        @
+      attr_reader :transitions, :checkpoint_actions, :action
+      def initialize(state_machine, action=nil)
+        @state_machine = state_machine
+        @transitions = {}
+        @checkpoint_actions = []
+        @action = action
       end
-
+
       def accepting?
-
+        @action
+      end
+
+      def process lexer_run
+        checkpoint_actions.each {|action| action.call(lexer_run)}
+        action.call(lexer_run) if accepting?
       end

       def for_characters *characters, &blk
@@ -19,13 +26,28 @@ module Dhaka
         end
       end

+      def add_checkpoint(pattern)
+        checkpoint_actions << LexerSupport::CheckpointAction.new(pattern)
+      end
+
+      def accept(pattern)
+        @action = AcceptAction.new(pattern)
+      end
+
+      def accept_with_lookahead(pattern)
+        @action = LookaheadAcceptAction.new(pattern)
+      end
+
       def recognize pattern
         @pattern = pattern
       end

       def compile_to_ruby_source
         result = "  at_state(#{object_id}) {\n"
-        result << "
+        result << "    #{action.compile_to_ruby_source}\n" if action
+        checkpoint_actions.each do |checkpoint_action|
+          result << "    #{checkpoint_action.compile_to_ruby_source}\n"
+        end
         transition_keys_by_destination_state = Hash.new {|hash, key| hash[key] = []}
         transitions.each do |key, dest_state|
           transition_keys_by_destination_state[dest_state.object_id] << key
@@ -40,6 +62,7 @@ module Dhaka
         result
       end
     end
+
   end
 end
-
+
data/lib/lexer/state_machine.rb
CHANGED
@@ -24,10 +24,10 @@ module Dhaka
       g.edge(start, @start_state)
       @states.values.each do |state|
         state_attributes = {}
-        state_attributes.merge!(:shape => :doublecircle, :label => state.
+        state_attributes.merge!(:shape => :doublecircle, :label => state.action.to_dot) if state.accepting?
         g.node(state, state_attributes)
         state.transitions.each do |transition_key, dest_state|
-          g.edge(state, dest_state, :label => transition_key)
+          g.edge(state, dest_state, :label => transition_key.inspect)
         end
       end
     end.to_dot
data/lib/parser/channel.rb
CHANGED
@@ -10,9 +10,9 @@ module Dhaka
     end

     def propagate cargo
-
-      end_item.lookaheadset.merge(
-
+      initial_size = end_item.lookaheadset.size
+      end_item.lookaheadset.merge(cargo)
+      (end_item.lookaheadset.size - initial_size) > 0
     end

     def to_s
@@ -20,7 +20,7 @@ module Dhaka
     end

     def eql? other
-      start_item.eql?(other.start_item)
+      start_item.eql?(other.start_item) && end_item.eql?(other.end_item)
     end

     def hash
data/lib/parser/parser.rb
CHANGED
@@ -19,17 +19,19 @@ module Dhaka
       @logger = logger || default_logger
       @transitions = Hash.new {|hash, state| hash[state] = {}}
       @grammar = grammar
-      @channels = []
+      @channels = Hash.new {|hash, start_item| hash[start_item] = []}
       @states = Hash.new do |hash, kernel|
-
-
+        closure, channels = grammar.closure(kernel)
+        channels.each do |start_item, channel_set|
+          @channels[start_item].concat channel_set.to_a
+        end
         new_state = ParserState.new(self, closure)
         hash[kernel] = new_state
         @logger.debug("Created #{new_state.unique_name}.")
         new_state.transition_items.each do |symbol, items|
           destination_kernel = ItemSet.new(items.collect{|item| item.next_item})
           destination_state = hash[destination_kernel]
-          items.each {
+          items.each {|item| @channels[item] << grammar.passive_channel(item, destination_state.items[item.next_item])}
           @transitions[new_state][symbol] = destination_state
         end
         new_state
@@ -87,8 +89,8 @@ module Dhaka
       start_items = ItemSet.new(start_productions.collect {|production| Item.new(production, 0)})
       start_items.each {|start_item| start_item.lookaheadset << grammar.end_symbol}
       @start_state = @states[start_items]
-      @logger.debug("Pumping #{@channels.size}
-      pump_channels
+      @logger.debug("Pumping #{@channels.keys.size} dirty items...")
+      pump_channels @channels.keys
       @logger.debug("Generating shift actions...")
       generate_shift_actions
       @logger.debug("Generating reduce actions...")
@@ -128,14 +130,17 @@ module Dhaka
       end
     end

-    def pump_channels
+    def pump_channels dirty_items
      loop do
-
-
-
+        new_dirty_items = Set.new
+        dirty_items.each do |dirty_item|
+          @channels[dirty_item].each do |channel|
+            new_dirty_items << channel.end_item if channel.pump
+          end
        end
-        break if
-        @logger.debug("#{
+        break if new_dirty_items.empty?
+        @logger.debug("#{new_dirty_items.size} dirty items...")
+        dirty_items = new_dirty_items
      end
    end
  end
data/test/chittagong/chittagong_lexer.rb
CHANGED
@@ -2,108 +2,108 @@ class ChittagongLexer < Dhaka::CompiledLexer

   self.specification = ChittagongLexerSpecification

-  start_with
+  start_with 21969380

-  at_state(
-
+  at_state(21968700) {
+    accept("\n")
   }

-  at_state(
-
+  at_state(21958430) {
+    accept("-")
   }

-  at_state(
-
+  at_state(21967500) {
+    accept("<")
   }

-  at_state(
-
+  at_state(21957330) {
+    accept("\\(")
   }

-  at_state(
-
-    for_characters("K", "V", "k", "v", "W", "A", "L", "w", "l", "a", "b", "M", "B", "X", "m", "x", "c", "Y", "C", "N", "y", "n", "O", "D", "Z", "o", "z", "d", "p", "e", "E", "P", "f", "Q", "F", "q", "G", "R", "r", "g", "S", "H", "s", "h", "I", "T", "i", "t", "U", "J", "u", "j") { switch_to 25910420 }
+  at_state(21957550) {
+    accept("\\/")
   }

-  at_state(
-
+  at_state(21967250) {
+    accept("\\*")
   }

-  at_state(
-
+  at_state(21968360) {
+    accept("=")
+    for_characters("=") { switch_to 21968040 }
   }

-  at_state(
-
-    for_characters("
-    for_characters("
+  at_state(21969380) {
+    accept("\\d*(\\.\\d+)?")
+    for_characters("<") { switch_to 21967500 }
+    for_characters(")") { switch_to 21961890 }
+    for_characters(" ") { switch_to 21967770 }
+    for_characters("\n") { switch_to 21968700 }
+    for_characters("=") { switch_to 21968360 }
+    for_characters("8", "9", "0", "1", "2", "3", "4", "5", "6", "7") { switch_to 21961100 }
+    for_characters("J", "o", "p", "K", "q", "L", "r", "M", "s", "N", "t", "O", "a", "u", "P", "b", "Q", "c", "v", "R", "d", "w", "S", "e", "x", "T", "f", "y", "A", "U", "g", "z", "B", "h", "C", "V", "i", "D", "W", "j", "E", "X", "F", "Y", "k", "G", "Z", "l", "H", "m", "I", "n") { switch_to 21966980 }
+    for_characters("*") { switch_to 21967250 }
+    for_characters(",") { switch_to 21961360 }
+    for_characters("!") { switch_to 21958210 }
+    for_characters("/") { switch_to 21957550 }
+    for_characters(".") { switch_to 21960630 }
+    for_characters("-") { switch_to 21958430 }
+    for_characters(">") { switch_to 21961620 }
+    for_characters("+") { switch_to 21957990 }
+    for_characters("(") { switch_to 21957330 }
+    for_characters("^") { switch_to 21957770 }
   }

-  at_state(
-
+  at_state(21961360) {
+    accept(",")
   }

-  at_state(
-
+  at_state(21961890) {
+    accept("\\)")
   }

-  at_state(
-
+  at_state(21966980) {
+    accept("\\w+")
+    for_characters("v", "K", "V", "k", "L", "W", "w", "l", "A", "a", "B", "X", "x", "b", "M", "m", "C", "c", "n", "y", "N", "Y", "d", "o", "z", "O", "Z", "D", "e", "p", "E", "P", "f", "q", "F", "Q", "R", "g", "r", "G", "h", "s", "H", "S", "t", "I", "T", "i", "U", "j", "u", "J") { switch_to 21966980 }
   }

-  at_state(
-
+  at_state(21957990) {
+    accept("\\+")
   }

-  at_state(
-
+  at_state(21960630) {
+    for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 21960320 }
   }

-  at_state(
-
-    for_characters("=") { switch_to 25911660 }
+  at_state(21961620) {
+    accept(">")
   }

-  at_state(
-
+  at_state(21967770) {
+    accept(" ")
   }

-  at_state(
-
+  at_state(21968040) {
+    accept("==")
   }

-  at_state(
-
+  at_state(21960320) {
+    accept("\\d*(\\.\\d+)?")
+    for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 21960320 }
   }

-  at_state(
-
+  at_state(21957770) {
+    accept("\\^")
   }

-  at_state(
-
-    for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 25915990 }
+  at_state(21958210) {
+    accept("!")
   }

-  at_state(
-
-    for_characters("
-    for_characters("
-    for_characters("=") { switch_to 25912040 }
-    for_characters("(") { switch_to 25917260 }
-    for_characters(")") { switch_to 25901470 }
-    for_characters("\n") { switch_to 25902510 }
-    for_characters(".") { switch_to 25916370 }
-    for_characters("!") { switch_to 25912350 }
-    for_characters("<") { switch_to 25911030 }
-    for_characters(",") { switch_to 25901730 }
-    for_characters("J", "o", "p", "K", "q", "L", "r", "M", "s", "N", "t", "O", "a", "u", "P", "b", "Q", "c", "v", "R", "d", "w", "S", "e", "x", "T", "f", "y", "A", "U", "g", "z", "B", "h", "C", "V", "i", "D", "W", "j", "E", "X", "F", "Y", "k", "G", "Z", "l", "H", "m", "I", "n") { switch_to 25910420 }
-    for_characters(" ") { switch_to 25902770 }
-    for_characters("-") { switch_to 25912660 }
-    for_characters(">") { switch_to 25911340 }
-    for_characters("+") { switch_to 25901990 }
-    for_characters("8", "9", "0", "1", "2", "3", "4", "5", "6", "7") { switch_to 25916950 }
-    for_characters("*") { switch_to 25910730 }
+  at_state(21961100) {
+    accept("\\d*(\\.\\d+)?")
+    for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 21961100 }
+    for_characters(".") { switch_to 21960630 }
   }

 end
data/test/chittagong/chittagong_lexer.rb.rej
ADDED
@@ -0,0 +1,189 @@
+***************
+*** 5
+- start_with 21824080
+--- 5 -----
++ start_with 22347760
+***************
+*** 7,8
+- at_state(21819770) {
+- for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 21819470 }
+--- 7,8 -----
++ at_state(22321370) {
++ accept("\\)")
+***************
+*** 11,12
+- at_state(21821860) {
+- recognize("\\+")
+--- 11,12 -----
++ at_state(22339000) {
++ for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 22337320 }
+***************
+*** 15,17
+- at_state(21817320) {
+- recognize("\\w+")
+- for_characters("k", "V", "K", "v", "L", "A", "a", "w", "l", "W", "M", "B", "X", "x", "m", "b", "y", "n", "c", "N", "Y", "C", "d", "Z", "O", "D", "z", "o", "E", "p", "e", "P", "Q", "F", "q", "f", "r", "g", "R", "G", "h", "S", "H", "s", "T", "I", "t", "i", "J", "u", "j", "U") { switch_to 21817320 }
+--- 15,16 -----
++ at_state(22342900) {
++ accept("==")
+***************
+*** 20,21
+- at_state(21820580) {
+- recognize("!")
+--- 19,37 -----
++ at_state(22347760) {
++ accept("\\d*(\\.\\d+)?")
++ for_characters(")") { switch_to 22321370 }
++ for_characters("^") { switch_to 22291450 }
++ for_characters("+") { switch_to 22322410 }
++ for_characters("(") { switch_to 22319550 }
++ for_characters(" ") { switch_to 22289950 }
++ for_characters(">") { switch_to 22293250 }
++ for_characters("8", "9", "0", "1", "2", "3", "4", "5", "6", "7") { switch_to 22341260 }
++ for_characters("\n") { switch_to 22294340 }
++ for_characters(".") { switch_to 22339000 }
++ for_characters(",") { switch_to 22320520 }
++ for_characters("!") { switch_to 22325520 }
++ for_characters("*") { switch_to 22292410 }
++ for_characters("J", "o", "p", "K", "q", "L", "r", "M", "s", "N", "t", "O", "a", "u", "P", "b", "Q", "c", "v", "R", "d", "w", "S", "e", "x", "T", "f", "y", "A", "U", "g", "z", "B", "h", "C", "V", "i", "D", "W", "j", "E", "X", "F", "Y", "k", "G", "Z", "l", "H", "m", "I", "n") { switch_to 22317930 }
++ for_characters("/") { switch_to 22288930 }
++ for_characters("-") { switch_to 22326990 }
++ for_characters("=") { switch_to 22344860 }
++ for_characters("<") { switch_to 22324070 }
+***************
+*** 24,25
+- at_state(21821040) {
+- recognize("\\^")
+--- 40,41 -----
++ at_state(22325520) {
++ accept("!")
+***************
+*** 28,29
+- at_state(21822840) {
+- recognize(",")
+--- 44,46 -----
++ at_state(22317930) {
++ accept("\\w+")
++ for_characters("K", "V", "k", "v", "a", "A", "L", "W", "l", "w", "b", "B", "M", "X", "m", "x", "C", "N", "Y", "c", "n", "y", "D", "O", "Z", "d", "o", "z", "E", "P", "e", "p", "F", "Q", "f", "q", "G", "R", "g", "r", "H", "S", "h", "s", "I", "T", "i", "t", "j", "u", "J", "U") { switch_to 22317930 }
+***************
+*** 32,33
+- at_state(21822110) {
+- recognize(">")
+--- 49,50 -----
++ at_state(22320520) {
++ accept(",")
+***************
+*** 36,54
+- at_state(21824080) {
+- recognize("\\d*(\\.\\d+)?")
+- for_characters(".") { switch_to 21819770 }
+- for_characters("+") { switch_to 21821860 }
+- for_characters("/") { switch_to 21820370 }
+- for_characters("*") { switch_to 21823340 }
+- for_characters(",") { switch_to 21822840 }
+- for_characters("!") { switch_to 21820580 }
+- for_characters("=") { switch_to 21821620 }
+- for_characters(" ") { switch_to 21822330 }
+- for_characters("\n") { switch_to 21823100 }
+- for_characters(">") { switch_to 21822110 }
+- for_characters("(") { switch_to 21820790 }
+- for_characters("<") { switch_to 21823590 }
+- for_characters("J", "o", "p", "K", "q", "L", "r", "M", "s", "N", "t", "O", "a", "u", "P", "b", "Q", "c", "v", "R", "d", "w", "S", "e", "x", "T", "f", "y", "A", "U", "g", "z", "B", "h", "C", "V", "i", "D", "W", "j", "E", "X", "F", "Y", "k", "G", "Z", "l", "H", "m", "I", "n") { switch_to 21817320 }
+- for_characters(")") { switch_to 21817530 }
+- for_characters("-") { switch_to 21822580 }
+- for_characters("^") { switch_to 21821040 }
+- for_characters("8", "9", "0", "1", "2", "3", "4", "5", "6", "7") { switch_to 21820160 }
+--- 53,54 -----
++ at_state(22322410) {
++ accept("\\+")
+***************
+*** 57,58
+- at_state(21817530) {
+- recognize("\\)")
+--- 57,59 -----
++ at_state(22344860) {
++ accept("=")
++ for_characters("=") { switch_to 22342900 }
+***************
+*** 61,62
+- at_state(21822580) {
+- recognize("-")
+--- 62,63 -----
++ at_state(22324070) {
++ accept("<")
+***************
+*** 65,66
+- at_state(21823340) {
+- recognize("\\*")
+--- 66,67 -----
++ at_state(22288930) {
++ accept("\\/")
+***************
+*** 69,70
+- at_state(21820370) {
+- recognize("\\/")
+--- 70,71 -----
++ at_state(22289950) {
++ accept(" ")
+***************
+*** 73,74
+- at_state(21823100) {
+- recognize("\n")
+--- 74,75 -----
++ at_state(22291450) {
++ accept("\\^")
+***************
+*** 77,80
+- at_state(21820160) {
+- recognize("\\d*(\\.\\d+)?")
+- for_characters(".") { switch_to 21819770 }
+- for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 21820160 }
+--- 78,79 -----
++ at_state(22292410) {
++ accept("\\*")
+***************
+*** 83,84
+- at_state(21820790) {
+- recognize("\\(")
+--- 82,83 -----
++ at_state(22294340) {
++ accept("\n")
+***************
+*** 87,88
+- at_state(21821310) {
+- recognize("==")
+--- 86,88 -----
++ at_state(22337320) {
++ accept("\\d*(\\.\\d+)?")
++ for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 22337320 }
+***************
+*** 91,92
+- at_state(21822330) {
+- recognize(" ")
+--- 91,94 -----
++ at_state(22341260) {
++ accept("\\d*(\\.\\d+)?")
++ for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 22341260 }
++ for_characters(".") { switch_to 22339000 }
+***************
+*** 95,96
+- at_state(21823590) {
+- recognize("<")
+--- 97,98 -----
++ at_state(22293250) {
++ accept(">")
+***************
+*** 99,101
+- at_state(21819470) {
+- recognize("\\d*(\\.\\d+)?")
+- for_characters("6", "7", "8", "9", "0", "1", "2", "3", "4", "5") { switch_to 21819470 }
+--- 101,102 -----
++ at_state(22319550) {
++ accept("\\(")
+***************
+*** 104,106
+- at_state(21821620) {
+- recognize("=")
+- for_characters("=") { switch_to 21821310 }
+--- 105,106 -----
++ at_state(22326990) {
++ accept("-")