rley 0.6.00 → 0.6.01

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +8 -1
  3. data/CHANGELOG.md +3 -0
  4. data/Gemfile +1 -1
  5. data/examples/NLP/benchmark_pico_en.rb +6 -10
  6. data/examples/NLP/nano_eng/nano_en_demo.rb +2 -2
  7. data/examples/NLP/nano_eng/nano_grammar.rb +1 -2
  8. data/examples/data_formats/JSON/json_ast_builder.rb +8 -8
  9. data/examples/general/SRL/lib/ast_builder.rb +74 -72
  10. data/examples/general/SRL/lib/grammar.rb +2 -2
  11. data/examples/general/SRL/lib/regex/abstract_method.rb +28 -28
  12. data/examples/general/SRL/lib/regex/alternation.rb +21 -25
  13. data/examples/general/SRL/lib/regex/anchor.rb +6 -9
  14. data/examples/general/SRL/lib/regex/atomic_expression.rb +10 -15
  15. data/examples/general/SRL/lib/regex/capturing_group.rb +15 -14
  16. data/examples/general/SRL/lib/regex/char_class.rb +10 -13
  17. data/examples/general/SRL/lib/regex/char_range.rb +45 -46
  18. data/examples/general/SRL/lib/regex/char_shorthand.rb +8 -9
  19. data/examples/general/SRL/lib/regex/character.rb +196 -191
  20. data/examples/general/SRL/lib/regex/compound_expression.rb +47 -50
  21. data/examples/general/SRL/lib/regex/concatenation.rb +23 -27
  22. data/examples/general/SRL/lib/regex/expression.rb +53 -56
  23. data/examples/general/SRL/lib/regex/lookaround.rb +23 -20
  24. data/examples/general/SRL/lib/regex/match_option.rb +26 -28
  25. data/examples/general/SRL/lib/regex/monadic_expression.rb +20 -23
  26. data/examples/general/SRL/lib/regex/multiplicity.rb +17 -20
  27. data/examples/general/SRL/lib/regex/non_capturing_group.rb +9 -12
  28. data/examples/general/SRL/lib/regex/polyadic_expression.rb +51 -55
  29. data/examples/general/SRL/lib/regex/quantifiable.rb +14 -20
  30. data/examples/general/SRL/lib/regex/repetition.rb +20 -23
  31. data/examples/general/SRL/lib/regex/wildcard.rb +15 -19
  32. data/examples/general/SRL/lib/regex_repr.rb +1 -1
  33. data/examples/general/SRL/lib/tokenizer.rb +2 -2
  34. data/examples/general/SRL/spec/integration_spec.rb +17 -12
  35. data/examples/general/SRL/spec/regex/character_spec.rb +160 -153
  36. data/examples/general/SRL/spec/regex/multiplicity_spec.rb +27 -31
  37. data/examples/general/SRL/spec/spec_helper.rb +1 -1
  38. data/examples/general/SRL/spec/tokenizer_spec.rb +25 -27
  39. data/examples/general/calc_iter1/calc_ast_builder.rb +10 -10
  40. data/examples/general/calc_iter2/calc_ast_builder.rb +7 -9
  41. data/examples/general/calc_iter2/calc_ast_nodes.rb +5 -6
  42. data/examples/general/calc_iter2/calc_lexer.rb +3 -5
  43. data/examples/general/calc_iter2/spec/calculator_spec.rb +16 -14
  44. data/examples/general/left.rb +8 -8
  45. data/examples/general/right.rb +8 -8
  46. data/lib/rley/constants.rb +1 -1
  47. data/lib/rley/engine.rb +16 -20
  48. data/lib/rley/formatter/json.rb +1 -1
  49. data/lib/rley/gfg/grm_flow_graph.rb +1 -1
  50. data/lib/rley/gfg/item_vertex.rb +6 -5
  51. data/lib/rley/gfg/vertex.rb +3 -3
  52. data/lib/rley/lexical/token.rb +4 -3
  53. data/lib/rley/parse_rep/ast_base_builder.rb +4 -3
  54. data/lib/rley/parse_rep/parse_rep_creator.rb +1 -1
  55. data/lib/rley/parse_rep/parse_tree_builder.rb +3 -2
  56. data/lib/rley/parser/error_reason.rb +1 -1
  57. data/lib/rley/parser/gfg_chart.rb +6 -6
  58. data/lib/rley/parser/gfg_parsing.rb +19 -19
  59. data/lib/rley/parser/parse_entry.rb +3 -3
  60. data/lib/rley/parser/parse_entry_set.rb +1 -1
  61. data/lib/rley/parser/parse_walker_factory.rb +15 -15
  62. data/lib/rley/syntax/grammar.rb +1 -1
  63. data/lib/rley/syntax/grammar_builder.rb +2 -2
  64. data/lib/rley/syntax/production.rb +4 -3
  65. data/lib/rley/syntax/symbol_seq.rb +2 -2
  66. data/spec/rley/base/grm_items_builder_spec.rb +1 -1
  67. data/spec/rley/engine_spec.rb +3 -6
  68. data/spec/rley/formatter/asciitree_spec.rb +0 -1
  69. data/spec/rley/formatter/bracket_notation_spec.rb +0 -1
  70. data/spec/rley/formatter/debug_spec.rb +2 -3
  71. data/spec/rley/gfg/grm_flow_graph_spec.rb +19 -19
  72. data/spec/rley/parse_rep/ast_builder_spec.rb +12 -12
  73. data/spec/rley/parser/gfg_earley_parser_spec.rb +1 -1
  74. data/spec/rley/parser/parse_entry_set_spec.rb +5 -5
  75. data/spec/rley/parser/parse_state_spec.rb +8 -3
  76. data/spec/rley/parser/parse_tracer_spec.rb +3 -1
  77. data/spec/rley/parser/parse_walker_factory_spec.rb +1 -1
  78. data/spec/rley/ptree/parse_tree_node_spec.rb +1 -1
  79. data/spec/rley/syntax/grammar_builder_spec.rb +1 -1
  80. data/spec/rley/syntax/grammar_spec.rb +1 -1
  81. metadata +2 -3
  82. data/spec/rley/support/ast_builder.rb +0 -403
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: a7fa26ec5c47c28dd6d0cc68f32e8f023f92ce75
-  data.tar.gz: 73717a9a1c2daa9886952c502020b2fcf5e4bb33
+  metadata.gz: e5431e3dddd23b1b29be7db8b2b5fe268301f16f
+  data.tar.gz: 929f4f15a4d996cf0e14ddf903b15a5c5325fb93
 SHA512:
-  metadata.gz: 055963ec1637d43b33703e87b454fd9a1590a3993609f2df89f0d3ca94a1b8eba7b39cf102d318e0c1da6fc3e29c51a84bd3929f3000b7c800ee4585260d9b78
-  data.tar.gz: 2b0bc71c159ecac03789644497af1ce789b7975efe2fb6fecc399e41fb346aec26ad67a9d1015ebed1d43a9e9fdd09321d7dfef5f86afa3ce5402b4f04b8f062
+  metadata.gz: 3cda86bffff68e3591e89cbd05a669a59a7ee111dc75cd29fa56b0968798a981210ba3d3565bf5ed8435c2070b6ac9cb1efd35290ea10b60eee1ad3bb7d3caa7
+  data.tar.gz: c49cbc2d61484807e23cee6801057cf7618e9a0f18e85a80082acb8c08bad9012067c895ad581b7b0cdc3d3ac02699f45db42d525ce8b62e4803023d9a5cbca0
data/.rubocop.yml CHANGED
@@ -78,6 +78,9 @@ Metrics/ModuleLength:
 Metrics/PerceivedComplexity:
   Enabled: true
   Max: 50
+
+Naming/MethodName:
+  Enabled: false
 
 NonNilCheck:
   Enabled: false
@@ -101,4 +104,8 @@ VariableName:
   Enabled: false
 
 VariableNumber:
-  Enabled: false
+  Enabled: false
+
+Style/CommentedKeyword:
+  Enabled: false
+
data/CHANGELOG.md CHANGED
@@ -1,3 +1,6 @@
+### 0.6.01 / 2018-02-25
+* [FIX] Code re-styling to remove most style offenses found by Rubocop 0.52.1
+
 ### 0.6.00 / 2018-02-25
 Version bump. Highlights: new programming interface through facade object, improved AST generation.
 * [NEW] Class `Rley::Engine`: Implementation of Facade design pattern to reach more convenient interface.
data/Gemfile CHANGED
@@ -2,8 +2,8 @@ source 'https://rubygems.org'
 # Prevent Bundler to load the dependencies from our .gemspec file
 
 group :development do
+  gem 'coveralls', '>= 0.7.0'
   gem 'rake', '>= 10.0.0'
   gem 'rspec', '>= 3.5.0'
   gem 'simplecov', '>= 0.1.0'
-  gem 'coveralls', '>= 0.7.0'
 end
data/examples/NLP/benchmark_pico_en.rb CHANGED
@@ -79,14 +79,10 @@ input_to_parse = 'John saw Mary with a telescope'
 # input_to_parse = 'the dog saw a man in the park' # This one is ambiguous
 # Convert input text into a sequence of token objects...
 tokens = tokenizer(input_to_parse)
-result = engine.parse(tokens)
 
-# Use Benchmark mix-in
-include Benchmark
-
-bm(6) do |meter|
-  meter.report("Parse 100 times") { 100.times { engine.parse(tokens) } }
-  meter.report("Parse 1000 times") { 1000.times { engine.parse(tokens) } }
-  meter.report("Parse 10000 times") { 10000.times { engine.parse(tokens) } }
-  meter.report("Parse 1000000 times") { 100000.times { engine.parse(tokens) } }
-end
+Benchmark.bm(6) do |meter|
+  meter.report('Parse 100 times') { 100.times { engine.parse(tokens) } }
+  meter.report('Parse 1000 times') { 1000.times { engine.parse(tokens) } }
+  meter.report('Parse 10000 times') { 10000.times { engine.parse(tokens) } }
+  meter.report('Parse 1000000 times') { 100000.times { engine.parse(tokens) } }
+end
data/examples/NLP/nano_eng/nano_en_demo.rb CHANGED
@@ -26,8 +26,8 @@ engine.build_grammar do
   rule 'NP' => 'Pronoun'
   rule 'NP' => 'Proper-Noun'
   rule 'NP' => %w[Determiner Nominal]
-  rule 'Nominal' => %[Noun]
-  rule 'Nominal' => %[Nominal Noun]
+  rule 'Nominal' => %w[Noun]
+  rule 'Nominal' => %w[Nominal Noun]
   rule 'VP' => 'Verb'
   rule 'VP' => %w[Verb NP]
   rule 'VP' => %w[Verb NP PP]
data/examples/NLP/nano_eng/nano_grammar.rb CHANGED
@@ -43,7 +43,7 @@ builder = Rley::Syntax::GrammarBuilder.new do
   rule 'Ord' => 'Ordinal_number'
   rule 'Ord' => []
   rule 'Nominal' => 'Noun'
-  rule 'Nominal' => %[Nominal Noun]
+  rule 'Nominal' => %w[Nominal Noun]
   rule 'Nominal' => %w[Nominal GerundVP]
   rule 'Nominal' => %w[Nominal RelClause]
   rule 'PP' => %w[Preposition NP]
@@ -52,7 +52,6 @@ builder = Rley::Syntax::GrammarBuilder.new do
   rule 'GerundVP' => %w[GerundV NP PP]
   rule 'GerundVP' => %w[GerundV PP]
   rule 'RelClause' => %w[Relative_pronoun VP]
-
 end
 
 # And now build the grammar...
data/examples/data_formats/JSON/json_ast_builder.rb CHANGED
@@ -9,12 +9,12 @@ require_relative 'json_ast_nodes'
 # nodes) and using a step by step approach.
 class JSONASTBuilder < Rley::ParseRep::ParseTreeBuilder
   Terminal2NodeClass = {
-    'false' => JSONBooleanNode,
-    'true' => JSONBooleanNode,
-    'null' => JSONNullNode,
-    'string' => JSONStringNode,
-    'number' => JSONNumberNode
-  }.freeze
+    'false' => JSONBooleanNode,
+    'true' => JSONBooleanNode,
+    'null' => JSONNullNode,
+    'string' => JSONStringNode,
+    'number' => JSONNumberNode
+  }.freeze
 
   protected
 
@@ -28,8 +28,8 @@ class JSONASTBuilder < Rley::ParseRep::ParseTreeBuilder
     JSONTerminalNode
   end
 
-  def reduce_JSON_text_0(_aProd, _range, _tokens, theChildren)
-    return_first_child(_range, _tokens, theChildren)
+  def reduce_JSON_text_0(_aProd, aRange, theTokens, theChildren)
+    return_first_child(aRange, theTokens, theChildren)
   end
 
   # rule 'object' => %w[begin-object member-list end-object]
data/examples/general/SRL/lib/ast_builder.rb CHANGED
@@ -9,8 +9,7 @@ require_relative 'regex_repr'
 # (say, a parse tree) from simpler objects (terminal and non-terminal
 # nodes) and using a step by step approach.
 class ASTBuilder < Rley::ParseRep::ASTBaseBuilder
-
-  Terminal2NodeClass = { }.freeze
+  Terminal2NodeClass = {}.freeze
 
   attr_reader :options
 
@@ -26,7 +25,7 @@ class ASTBuilder < Rley::ParseRep::ASTBaseBuilder
   # @param aTerminal [Terminal] Terminal symbol associated with the token
   # @param aTokenPosition [Integer] Position of token in the input stream
   # @param aToken [Token] The input token
-  def new_leaf_node(aProduction, aTerminal, aTokenPosition, aToken)
+  def new_leaf_node(_production, _terminal, aTokenPosition, aToken)
     node = Rley::PTree::TerminalNode.new(aToken, aTokenPosition)
 
     return node
@@ -46,13 +45,12 @@ class ASTBuilder < Rley::ParseRep::ASTBaseBuilder
       chars << Regex::Character.new(ch)
     end
     result = Regex::Concatenation.new(*chars)
+  elsif to_escape && Regex::Character::MetaChars.include?(aString)
+    backslash = Regex::Character.new("\\")
+    a_string = Regex::Character.new(aString)
+    result = Regex::Concatenation.new(backslash, a_string)
   else
-    if to_escape && Regex::Character::MetaChars.include?(aString)
-      result = Regex::Concatenation.new(Regex::Character.new("\\"),
-                                        Regex::Character.new(aString))
-    else
-      result = Regex::Character.new(aString)
-    end
+    result = Regex::Character.new(aString)
   end
 
   return result
@@ -61,7 +59,7 @@ class ASTBuilder < Rley::ParseRep::ASTBaseBuilder
  def char_range(lowerBound, upperBound)
    # TODO fix module nesting
    lower = Regex::Character.new(lowerBound)
-   upper = Regex::Character.new(upperBound)
+   upper = Regex::Character.new(upperBound)
    return Regex::CharRange.new(lower, upper)
  end
 
@@ -86,106 +84,106 @@ class ASTBuilder < Rley::ParseRep::ASTBaseBuilder
   end
 
   # rule('expression' => %w[pattern separator flags]).as 'flagged_expr'
-  def reduce_flagged_expr(aProduction, aRange, theTokens, theChildren)
+  def reduce_flagged_expr(_production, aRange, theTokens, theChildren)
     @options = theChildren[2] if theChildren[2]
     return_first_child(aRange, theTokens, theChildren)
   end
 
   # rule('pattern' => %w[pattern separator quantifiable]).as 'pattern_sequence'
-  def reduce_pattern_sequence(aProduction, aRange, theTokens, theChildren)
+  def reduce_pattern_sequence(_production, _range, _tokens, theChildren)
     return Regex::Concatenation.new(theChildren[0], theChildren[2])
   end
 
   # rule('flags' => %[flags separator single_flag]).as 'flag_sequence'
-  def reduce_flag_sequence(aProduction, aRange, theTokens, theChildren)
+  def reduce_flag_sequence(_production, _range, _tokens, theChildren)
     theChildren[0] << theChildren[2]
   end
 
   # rule('single_flag' => %w[CASE INSENSITIVE]).as 'case_insensitive'
-  def reduce_case_insensitive(aProduction, aRange, theTokens, theChildren)
-    return [ Regex::MatchOption.new(:IGNORECASE, true) ]
+  def reduce_case_insensitive(_production, _range, _tokens, _children)
+    return [Regex::MatchOption.new(:IGNORECASE, true)]
   end
 
   # rule('single_flag' => %w[MULTI LINE]).as 'multi_line'
-  def reduce_multi_line(aProduction, aRange, theTokens, theChildren)
-    return [ Regex::MatchOption.new(:MULTILINE, true) ]
+  def reduce_multi_line(_production, _range, _tokens, _children)
+    return [Regex::MatchOption.new(:MULTILINE, true)]
   end
 
   # rule('single_flag' => %w[ALL LAZY]).as 'all_lazy'
-  def reduce_all_lazy(aProduction, aRange, theTokens, theChildren)
-    return [ Regex::MatchOption.new(:ALL_LAZY, true) ]
+  def reduce_all_lazy(_production, _range, _tokens, _children)
+    return [Regex::MatchOption.new(:ALL_LAZY, true)]
   end
 
   # rule 'quantifiable' => %w[begin_anchor anchorable end_anchor]
-  def reduce_pinned_quantifiable(aProduction, aRange, theTokens, theChildren)
+  def reduce_pinned_quantifiable(_production, _range, _tokens, theChildren)
     theChildren[1].begin_anchor = theChildren[0]
     theChildren[1].end_anchor = theChildren[2]
     return theChildren[1]
   end
 
   # rule 'quantifiable' => %w[begin_anchor anchorable]
-  def reduce_begin_anchor_quantifiable(aProduction, aRange, theTokens, theChildren)
+  def reduce_begin_anchor_quantifiable(_production, _range, _tokens, theChildren)
     theChildren[1].begin_anchor = theChildren[0]
     return theChildren[1]
   end
 
   # rule 'quantifiable' => %w[anchorable end_anchor]
-  def reduce_end_anchor_quantifiable(aProduction, aRange, theTokens, theChildren)
+  def reduce_end_anchor_quantifiable(_production, _range, _tokens, theChildren)
     theChildren[0].end_anchor = theChildren[1]
     return theChildren[0]
   end
 
   # rule 'begin_anchor' => %w[STARTS WITH]
-  def reduce_starts_with(aProduction, aRange, theTokens, theChildren)
+  def reduce_starts_with(_production, _range, _tokens, _children)
     begin_anchor
   end
 
   # rule 'begin_anchor' => %w[BEGIN WITH]
-  def reduce_begin_with(aProduction, aRange, theTokens, theChildren)
+  def reduce_begin_with(_production, _range, _tokens, _children)
     begin_anchor
   end
 
   # rule 'end_anchor' => %w[MUST END].as 'end_anchor'
-  def reduce_end_anchor(aProduction, aRange, theTokens, theChildren)
+  def reduce_end_anchor(_production, _range, _tokens, _children)
     return Regex::Anchor.new('$')
   end
 
   # rule('anchorable' => %w[assertable assertion]).as 'asserted_anchorable'
-  def reduce_asserted_anchorable(aProduction, aRange, theTokens, theChildren)
+  def reduce_asserted_anchorable(_production, _range, _tokens, theChildren)
     assertion = theChildren.last
     assertion.children.unshift(theChildren[0])
     return assertion
   end
 
   # rule('assertion' => %w[IF FOLLOWED BY assertable]).as 'if_followed'
-  def reduce_if_followed(aProduction, aRange, theTokens, theChildren)
+  def reduce_if_followed(_production, _range, _tokens, theChildren)
     return Regex::Lookaround.new(theChildren.last, :ahead, :positive)
   end
 
   # rule('assertion' => %w[IF NOT FOLLOWED BY assertable]).as 'if_not_followed'
-  def reduce_if_not_followed(aProduction, aRange, theTokens, theChildren)
+  def reduce_if_not_followed(_production, _range, _tokens, theChildren)
     return Regex::Lookaround.new(theChildren.last, :ahead, :negative)
   end
 
   # rule('assertion' => %w[IF ALREADY HAD assertable]).as 'if_had'
-  def reduce_if_had(aProduction, aRange, theTokens, theChildren)
+  def reduce_if_had(_production, _range, _tokens, theChildren)
     return Regex::Lookaround.new(theChildren.last, :behind, :positive)
   end
 
   # rule('assertion' => %w[IF NOT ALREADY HAD assertable]).as 'if_not_had'
-  def reduce_if_not_had(aProduction, aRange, theTokens, theChildren)
+  def reduce_if_not_had(_production, _range, _tokens, theChildren)
     return Regex::Lookaround.new(theChildren.last, :behind, :negative)
   end
 
   # rule('assertable' => %w[term quantifier]).as 'quantified_assertable'
-  def reduce_quantified_assertable(aProduction, aRange, theTokens, theChildren)
+  def reduce_quantified_assertable(_production, _range, _tokens, theChildren)
     quantifier = theChildren[1]
     term = theChildren[0]
     repetition(term, quantifier)
   end
 
   # rule('letter_range' => %w[LETTER FROM LETTER_LIT TO LETTER_LIT]).as 'lowercase_from_to'
-  def reduce_lowercase_from_to(aProduction, aRange, theTokens, theChildren)
+  def reduce_lowercase_from_to(_production, _range, _tokens, theChildren)
     lower = theChildren[2].token.lexeme
     upper = theChildren[4].token.lexeme
     ch_range = char_range(lower, upper)
@@ -193,7 +191,7 @@ class ASTBuilder < Rley::ParseRep::ASTBaseBuilder
   end
 
   # rule('letter_range' => %w[UPPERCASE LETTER FROM LETTER_LIT TO LETTER_LIT]).as 'uppercase_from_to'
-  def reduce_uppercase_from_to(aProduction, aRange, theTokens, theChildren)
+  def reduce_uppercase_from_to(_production, _range, _tokens, theChildren)
     lower = theChildren[3].token.lexeme
     upper = theChildren[5].token.lexeme
     ch_range = char_range(lower.upcase, upper.upcase)
@@ -201,13 +199,13 @@ class ASTBuilder < Rley::ParseRep::ASTBaseBuilder
   end
 
   # rule('letter_range' => 'LETTER').as 'any_lowercase'
-  def reduce_any_lowercase(aProduction, aRange, theTokens, theChildren)
+  def reduce_any_lowercase(_production, _range, _tokens, _children)
     ch_range = char_range('a', 'z')
     char_class(false, ch_range)
   end
 
   # rule('letter_range' => %w[UPPERCASE LETTER]).as 'any_uppercase'
-  def reduce_any_uppercase(aProduction, aRange, theTokens, theChildren)
+  def reduce_any_uppercase(_production, _range, _tokens, _children)
     ch_range = char_range('A', 'Z')
     char_class(false, ch_range)
   end
@@ -218,163 +216,167 @@ class ASTBuilder < Rley::ParseRep::ASTBaseBuilder
   end
 
   # rule('digit_range' => 'digit_or_number').as 'simple_digit_range'
-  def reduce_simple_digit_range(aProduction, aRange, theTokens, theChildren)
+  def reduce_simple_digit_range(_production, _range, _tokens, _children)
     char_shorthand('d')
   end
 
   # rule('character_class' => %w[ANY CHARACTER]).as 'any_character'
-  def reduce_any_character(aProduction, aRange, theTokens, theChildren)
+  def reduce_any_character(_production, _range, _tokens, _children)
     char_shorthand('w')
   end
 
   # rule('character_class' => %w[NO CHARACTER]).as 'no_character'
-  def reduce_no_character(aProduction, aRange, theTokens, theChildren)
+  def reduce_no_character(_production, _range, _tokens, _children)
     char_shorthand('W')
   end
 
   # rule('character_class' => 'WHITESPACE').as 'whitespace'
-  def reduce_whitespace(aProduction, aRange, theTokens, theChildren)
+  def reduce_whitespace(_production, _range, _tokens, _children)
     char_shorthand('s')
   end
 
   # rule('character_class' => %w[NO WHITESPACE]).as 'no_whitespace'
-  def reduce_no_whitespace(aProduction, aRange, theTokens, theChildren)
+  def reduce_no_whitespace(_production, _range, _tokens, _children)
     char_shorthand('S')
   end
 
   # rule('character_class' => 'ANYTHING').as 'anything'
-  def reduce_anything(aProduction, aRange, theTokens, theChildren)
+  def reduce_anything(_production, _range, _tokens, _children)
     wildcard
   end
 
   # rule('alternation' => %w[ANY OF LPAREN alternatives RPAREN]).as 'any_of'
-  def reduce_one_of(aProduction, aRange, theTokens, theChildren)
+  def reduce_one_of(_production, _range, _tokens, theChildren)
     raw_literal = theChildren[-1].token.lexeme.dup
     alternatives = raw_literal.chars.map { |ch| Regex::Character.new(ch) }
-    return Regex::CharClass.new(false, *alternatives) # TODO check other implementations
+    # TODO check other implementations
+    return Regex::CharClass.new(false, *alternatives)
   end
 
   # rule('special_char' => 'TAB').as 'tab'
-  def reduce_tab(aProduction, aRange, theTokens, theChildren)
+  def reduce_tab(_production, _range, _tokens, _children)
     Regex::Character.new('\t')
   end
 
   # rule('special_char' => 'BACKSLASH').as 'backslash'
-  def reduce_backslash(aProduction, aRange, theTokens, theChildren)
+  def reduce_backslash(_production, _range, _tokens, _children)
     Regex::Character.new('\\')
   end
 
   # rule('special_char' => %w[NEW LINE]).as 'new_line'
-  def reduce_new_line(aProduction, aRange, theTokens, theChildren)
+  def reduce_new_line(_production, _range, _tokens, _children)
     # TODO: control portability
     Regex::Character.new('\n')
   end
 
   # rule('literal' => %w[LITERALLY STRING_LIT]).as 'literally'
-  def reduce_literally(aProduction, aRange, theTokens, theChildren)
+  def reduce_literally(_production, _range, _tokens, theChildren)
     # What if literal is empty?...
 
     raw_literal = theChildren[-1].token.lexeme.dup
     return string_literal(raw_literal)
   end
 
-  #rule('alternation' => %w[ANY OF LPAREN alternatives RPAREN]).as 'any_of'
-  def reduce_any_of(aProduction, aRange, theTokens, theChildren)
+  # rule('alternation' => %w[ANY OF LPAREN alternatives RPAREN]).as 'any_of'
+  def reduce_any_of(_production, _range, _tokens, theChildren)
     return Regex::Alternation.new(*theChildren[3])
   end
 
   # rule('alternatives' => %w[alternatives separator quantifiable]).as 'alternative_list'
-  def reduce_alternative_list(aProduction, aRange, theTokens, theChildren)
+  def reduce_alternative_list(_production, _range, _tokens, theChildren)
     return theChildren[0] << theChildren[-1]
   end
 
   # rule('alternatives' => 'quantifiable').as 'simple_alternative'
-  def reduce_simple_alternative(aProduction, aRange, theTokens, theChildren)
+  def reduce_simple_alternative(_production, _range, _tokens, theChildren)
     return [theChildren.last]
   end
 
   # rule('grouping' => %w[LPAREN pattern RPAREN]).as 'grouping_parenthenses'
-  def reduce_grouping_parenthenses(aProduction, aRange, theTokens, theChildren)
+  def reduce_grouping_parenthenses(_production, _range, _tokens, theChildren)
     return Regex::NonCapturingGroup.new(theChildren[1])
   end
 
   # rule('capturing_group' => %w[CAPTURE assertable]).as 'capture'
-  def reduce_capture(aProduction, aRange, theTokens, theChildren)
+  def reduce_capture(_production, _range, _tokens, theChildren)
     return Regex::CapturingGroup.new(theChildren[1])
   end
 
-  # rule('capturing_group' => %w[CAPTURE assertable UNTIL assertable]).as 'capture_until'
-  def reduce_capture_until(aProduction, aRange, theTokens, theChildren)
+  # rule('capturing_group' => %w[CAPTURE assertable UNTIL assertable]).as
+  # 'capture_until'
+  def reduce_capture_until(_production, _range, _tokens, theChildren)
     group = Regex::CapturingGroup.new(theChildren[1])
     return Regex::Concatenation.new(group, theChildren[3])
   end
 
-  # rule('capturing_group' => %w[CAPTURE assertable AS var_name]).as 'named_capture'
-  def reduce_named_capture(aProduction, aRange, theTokens, theChildren)
+  # rule('capturing_group' => %w[CAPTURE assertable AS var_name]).as
+  # 'named_capture'
+  def reduce_named_capture(_production, _range, _tokens, theChildren)
     name = theChildren[3].token.lexeme.dup
     return Regex::CapturingGroup.new(theChildren[1], name)
   end
 
-  # rule('capturing_group' => %w[CAPTURE assertable AS var_name UNTIL assertable]).as 'named_capture_until'
-  def reduce_named_capture_until(aProduction, aRange, theTokens, theChildren)
+  # rule('capturing_group' => %w[CAPTURE assertable AS var_name
+  # UNTIL assertable]).as 'named_capture_until'
+  def reduce_named_capture_until(_production, _range, _tokens, theChildren)
     name = theChildren[3].token.lexeme.dup
     group = Regex::CapturingGroup.new(theChildren[1], name)
     return Regex::Concatenation.new(group, theChildren[5])
   end
 
   # rule('quantifier' => 'ONCE').as 'once'
-  def reduce_once(aProduction, aRange, theTokens, theChildren)
+  def reduce_once(_production, _range, _tokens, _children)
     multiplicity(1, 1)
   end
 
   # rule('quantifier' => 'TWICE').as 'twice'
-  def reduce_twice(aProduction, aRange, theTokens, theChildren)
+  def reduce_twice(_production, _range, _tokens, _children)
     multiplicity(2, 2)
   end
 
   # rule('quantifier' => %w[EXACTLY count TIMES]).as 'exactly'
-  def reduce_exactly(aProduction, aRange, theTokens, theChildren)
+  def reduce_exactly(_production, _range, _tokens, theChildren)
     count = theChildren[1].token.lexeme.to_i
     multiplicity(count, count)
   end
 
-  # rule('quantifier' => %w[BETWEEN count AND count times_suffix]).as 'between_and'
-  def reduce_between_and(aProduction, aRange, theTokens, theChildren)
+  # rule('quantifier' => %w[BETWEEN count AND count times_suffix]).as
+  # 'between_and'
+  def reduce_between_and(_production, _range, _tokens, theChildren)
     lower = theChildren[1].token.lexeme.to_i
     upper = theChildren[3].token.lexeme.to_i
     multiplicity(lower, upper)
   end
 
   # rule('quantifier' => 'OPTIONAL').as 'optional'
-  def reduce_optional(aProduction, aRange, theTokens, theChildren)
+  def reduce_optional(_production, _range, _tokens, _children)
     multiplicity(0, 1)
   end
 
-  # rule('quantifier' => %w[ONCE OR MORE]).as 'once_or_more'
-  def reduce_once_or_more(aProduction, aRange, theTokens, theChildren)
+  # rule('quantifier' => %w[ONCE OR MORE]).as 'once_or_more'
+  def reduce_once_or_more(_production, _range, _tokens, _children)
     multiplicity(1, :more)
   end
 
   # rule('quantifier' => %w[NEVER OR MORE]).as 'never_or_more'
-  def reduce_never_or_more(aProduction, aRange, theTokens, theChildren)
+  def reduce_never_or_more(_production, _range, _tokens, _children)
     multiplicity(0, :more)
   end
 
   # rule('quantifier' => %w[AT LEAST count TIMES]).as 'at_least'
-  def reduce_at_least(aProduction, aRange, theTokens, theChildren)
+  def reduce_at_least(_production, _range, _tokens, theChildren)
     count = theChildren[2].token.lexeme.to_i
     multiplicity(count, :more)
   end
 
   # rule('times_suffix' => 'TIMES').as 'times_keyword'
-  def reduce_times_keyword(aProduction, aRange, theTokens, theChildren)
+  def reduce_times_keyword(_production, _range, _tokens, _children)
     return nil
   end
 
   # rule('times_suffix' => []).as 'times_dropped'
-  def reduce_times_dropped(aProduction, aRange, theTokens, theChildren)
+  def reduce_times_dropped(_production, _range, _tokens, _children)
     return nil
   end
-
 end # class
 # End of file