rley 0.7.06 → 0.8.01

Files changed (167)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +362 -62
  3. data/.travis.yml +6 -6
  4. data/CHANGELOG.md +20 -4
  5. data/LICENSE.txt +1 -1
  6. data/README.md +7 -7
  7. data/examples/NLP/engtagger.rb +193 -190
  8. data/examples/NLP/nano_eng/nano_en_demo.rb +7 -11
  9. data/examples/NLP/nano_eng/nano_grammar.rb +21 -21
  10. data/examples/NLP/pico_en_demo.rb +2 -2
  11. data/examples/data_formats/JSON/cli_options.rb +1 -1
  12. data/examples/data_formats/JSON/json_ast_builder.rb +21 -27
  13. data/examples/data_formats/JSON/json_ast_nodes.rb +12 -21
  14. data/examples/data_formats/JSON/json_demo.rb +1 -2
  15. data/examples/data_formats/JSON/json_grammar.rb +13 -13
  16. data/examples/data_formats/JSON/json_lexer.rb +8 -8
  17. data/examples/data_formats/JSON/json_minifier.rb +1 -1
  18. data/examples/general/calc_iter1/calc_ast_builder.rb +13 -10
  19. data/examples/general/calc_iter1/calc_ast_nodes.rb +23 -37
  20. data/examples/general/calc_iter1/calc_grammar.rb +7 -6
  21. data/examples/general/calc_iter1/calc_lexer.rb +6 -4
  22. data/examples/general/calc_iter1/spec/calculator_spec.rb +5 -5
  23. data/examples/general/calc_iter2/calc_ast_builder.rb +5 -3
  24. data/examples/general/calc_iter2/calc_ast_nodes.rb +27 -43
  25. data/examples/general/calc_iter2/calc_grammar.rb +12 -12
  26. data/examples/general/calc_iter2/calc_lexer.rb +11 -10
  27. data/examples/general/calc_iter2/spec/calculator_spec.rb +26 -26
  28. data/examples/general/left.rb +2 -2
  29. data/examples/general/right.rb +2 -2
  30. data/lib/rley.rb +1 -1
  31. data/lib/rley/base/dotted_item.rb +28 -31
  32. data/lib/rley/base/grm_items_builder.rb +6 -0
  33. data/lib/rley/constants.rb +2 -2
  34. data/lib/rley/engine.rb +22 -25
  35. data/lib/rley/formatter/asciitree.rb +3 -3
  36. data/lib/rley/formatter/bracket_notation.rb +1 -8
  37. data/lib/rley/formatter/debug.rb +6 -6
  38. data/lib/rley/formatter/json.rb +2 -2
  39. data/lib/rley/gfg/call_edge.rb +1 -1
  40. data/lib/rley/gfg/edge.rb +5 -5
  41. data/lib/rley/gfg/end_vertex.rb +2 -6
  42. data/lib/rley/gfg/epsilon_edge.rb +1 -5
  43. data/lib/rley/gfg/grm_flow_graph.rb +27 -23
  44. data/lib/rley/gfg/item_vertex.rb +10 -10
  45. data/lib/rley/gfg/non_terminal_vertex.rb +4 -4
  46. data/lib/rley/gfg/scan_edge.rb +1 -1
  47. data/lib/rley/gfg/shortcut_edge.rb +2 -2
  48. data/lib/rley/gfg/start_vertex.rb +4 -8
  49. data/lib/rley/gfg/vertex.rb +43 -39
  50. data/lib/rley/interface.rb +16 -0
  51. data/lib/rley/lexical/token_range.rb +6 -6
  52. data/lib/rley/notation/all_notation_nodes.rb +2 -0
  53. data/lib/rley/notation/ast_builder.rb +191 -0
  54. data/lib/rley/notation/ast_node.rb +44 -0
  55. data/lib/rley/notation/ast_visitor.rb +113 -0
  56. data/lib/rley/notation/grammar.rb +49 -0
  57. data/lib/rley/notation/grammar_builder.rb +504 -0
  58. data/lib/rley/notation/grouping_node.rb +23 -0
  59. data/lib/rley/notation/parser.rb +56 -0
  60. data/lib/rley/notation/sequence_node.rb +35 -0
  61. data/lib/rley/notation/symbol_node.rb +29 -0
  62. data/lib/rley/notation/tokenizer.rb +192 -0
  63. data/lib/rley/parse_forest_visitor.rb +5 -5
  64. data/lib/rley/parse_rep/ast_base_builder.rb +48 -11
  65. data/lib/rley/parse_rep/cst_builder.rb +5 -6
  66. data/lib/rley/parse_rep/parse_forest_builder.rb +22 -18
  67. data/lib/rley/parse_rep/parse_forest_factory.rb +3 -3
  68. data/lib/rley/parse_rep/parse_rep_creator.rb +14 -16
  69. data/lib/rley/parse_rep/parse_tree_builder.rb +4 -4
  70. data/lib/rley/parse_rep/parse_tree_factory.rb +27 -27
  71. data/lib/rley/parse_tree_visitor.rb +1 -1
  72. data/lib/rley/parser/error_reason.rb +4 -5
  73. data/lib/rley/parser/gfg_chart.rb +118 -26
  74. data/lib/rley/parser/gfg_parsing.rb +22 -33
  75. data/lib/rley/parser/parse_entry.rb +25 -31
  76. data/lib/rley/parser/parse_entry_set.rb +19 -16
  77. data/lib/rley/parser/parse_entry_tracker.rb +4 -4
  78. data/lib/rley/parser/parse_tracer.rb +13 -13
  79. data/lib/rley/parser/parse_walker_factory.rb +23 -28
  80. data/lib/rley/ptree/non_terminal_node.rb +7 -5
  81. data/lib/rley/ptree/parse_tree.rb +3 -3
  82. data/lib/rley/ptree/parse_tree_node.rb +5 -5
  83. data/lib/rley/ptree/terminal_node.rb +7 -7
  84. data/lib/rley/rley_error.rb +12 -12
  85. data/lib/rley/sppf/alternative_node.rb +6 -6
  86. data/lib/rley/sppf/composite_node.rb +7 -7
  87. data/lib/rley/sppf/epsilon_node.rb +3 -3
  88. data/lib/rley/sppf/leaf_node.rb +3 -3
  89. data/lib/rley/sppf/parse_forest.rb +16 -16
  90. data/lib/rley/sppf/sppf_node.rb +7 -8
  91. data/lib/rley/sppf/token_node.rb +3 -3
  92. data/lib/rley/syntax/{grammar_builder.rb → base_grammar_builder.rb} +61 -23
  93. data/lib/rley/syntax/grammar.rb +5 -5
  94. data/lib/rley/syntax/grm_symbol.rb +7 -7
  95. data/lib/rley/syntax/match_closest.rb +43 -0
  96. data/lib/rley/syntax/non_terminal.rb +9 -15
  97. data/lib/rley/syntax/production.rb +16 -10
  98. data/lib/rley/syntax/symbol_seq.rb +7 -9
  99. data/lib/rley/syntax/terminal.rb +4 -5
  100. data/lib/rley/syntax/verbatim_symbol.rb +3 -3
  101. data/lib/support/base_tokenizer.rb +19 -18
  102. data/spec/rley/base/dotted_item_spec.rb +2 -2
  103. data/spec/rley/engine_spec.rb +23 -21
  104. data/spec/rley/formatter/asciitree_spec.rb +7 -7
  105. data/spec/rley/formatter/bracket_notation_spec.rb +13 -13
  106. data/spec/rley/formatter/json_spec.rb +1 -1
  107. data/spec/rley/gfg/end_vertex_spec.rb +5 -5
  108. data/spec/rley/gfg/grm_flow_graph_spec.rb +2 -2
  109. data/spec/rley/gfg/item_vertex_spec.rb +10 -10
  110. data/spec/rley/gfg/non_terminal_vertex_spec.rb +3 -3
  111. data/spec/rley/gfg/shortcut_edge_spec.rb +1 -1
  112. data/spec/rley/gfg/start_vertex_spec.rb +5 -5
  113. data/spec/rley/gfg/vertex_spec.rb +3 -3
  114. data/spec/rley/lexical/token_range_spec.rb +16 -16
  115. data/spec/rley/lexical/token_spec.rb +2 -2
  116. data/spec/rley/notation/grammar_builder_spec.rb +302 -0
  117. data/spec/rley/notation/parser_spec.rb +184 -0
  118. data/spec/rley/notation/tokenizer_spec.rb +370 -0
  119. data/spec/rley/parse_forest_visitor_spec.rb +165 -163
  120. data/spec/rley/parse_rep/ambiguous_parse_spec.rb +44 -44
  121. data/spec/rley/parse_rep/ast_builder_spec.rb +6 -7
  122. data/spec/rley/parse_rep/cst_builder_spec.rb +5 -5
  123. data/spec/rley/parse_rep/groucho_spec.rb +24 -26
  124. data/spec/rley/parse_rep/parse_forest_builder_spec.rb +27 -27
  125. data/spec/rley/parse_rep/parse_forest_factory_spec.rb +8 -8
  126. data/spec/rley/parse_rep/parse_tree_factory_spec.rb +3 -3
  127. data/spec/rley/parse_tree_visitor_spec.rb +10 -8
  128. data/spec/rley/parser/dangling_else_spec.rb +445 -0
  129. data/spec/rley/parser/error_reason_spec.rb +6 -6
  130. data/spec/rley/parser/gfg_earley_parser_spec.rb +120 -12
  131. data/spec/rley/parser/gfg_parsing_spec.rb +6 -13
  132. data/spec/rley/parser/parse_entry_spec.rb +19 -19
  133. data/spec/rley/parser/parse_walker_factory_spec.rb +10 -10
  134. data/spec/rley/ptree/non_terminal_node_spec.rb +5 -3
  135. data/spec/rley/ptree/parse_tree_node_spec.rb +4 -4
  136. data/spec/rley/ptree/terminal_node_spec.rb +6 -6
  137. data/spec/rley/sppf/alternative_node_spec.rb +6 -6
  138. data/spec/rley/sppf/non_terminal_node_spec.rb +3 -3
  139. data/spec/rley/sppf/token_node_spec.rb +4 -4
  140. data/spec/rley/support/ambiguous_grammar_helper.rb +4 -5
  141. data/spec/rley/support/grammar_abc_helper.rb +3 -5
  142. data/spec/rley/support/grammar_ambig01_helper.rb +5 -6
  143. data/spec/rley/support/grammar_arr_int_helper.rb +5 -6
  144. data/spec/rley/support/grammar_b_expr_helper.rb +5 -6
  145. data/spec/rley/support/grammar_int_seq_helper.rb +51 -0
  146. data/spec/rley/support/grammar_l0_helper.rb +14 -17
  147. data/spec/rley/support/grammar_pb_helper.rb +8 -7
  148. data/spec/rley/support/grammar_sppf_helper.rb +3 -3
  149. data/spec/rley/syntax/{grammar_builder_spec.rb → base_grammar_builder_spec.rb} +35 -16
  150. data/spec/rley/syntax/grammar_spec.rb +6 -6
  151. data/spec/rley/syntax/grm_symbol_spec.rb +1 -1
  152. data/spec/rley/syntax/match_closest_spec.rb +46 -0
  153. data/spec/rley/syntax/non_terminal_spec.rb +8 -8
  154. data/spec/rley/syntax/production_spec.rb +17 -13
  155. data/spec/rley/syntax/symbol_seq_spec.rb +2 -2
  156. data/spec/rley/syntax/terminal_spec.rb +5 -5
  157. data/spec/rley/syntax/verbatim_symbol_spec.rb +1 -1
  158. data/spec/spec_helper.rb +0 -12
  159. data/spec/support/base_tokenizer_spec.rb +7 -2
  160. metadata +48 -74
  161. data/.simplecov +0 -7
  162. data/lib/rley/parser/parse_state.rb +0 -83
  163. data/lib/rley/parser/parse_state_tracker.rb +0 -59
  164. data/lib/rley/parser/state_set.rb +0 -101
  165. data/spec/rley/parser/parse_state_spec.rb +0 -125
  166. data/spec/rley/parser/parse_tracer_spec.rb +0 -200
  167. data/spec/rley/parser/state_set_spec.rb +0 -130
data/lib/rley/syntax/symbol_seq.rb
@@ -12,9 +12,9 @@ module Rley # This module is used as a namespace
  # @return [Array<GrmSymbol>] The sequence of symbols
  attr_reader(:members)
 
- # Create a sequence of grammar symbols (as in right-hand side of
+ # Create a sequence of grammar symbols (as in right-hand side of
  # a production rule).
- # @param theSymbols [Array<GrmSymbol>] An array of symbols.
+ # @param theSymbols [Array<GrmSymbol>] An array of symbols.
  def initialize(theSymbols)
  @members = theSymbols.dup
  end
@@ -33,20 +33,18 @@ module Rley # This module is used as a namespace
  raise StandardError, msg
  end
 
- return result
+ result
  end
-
- # Returns a string containing a human-readable representation of the
+
+ # Returns a string containing a human-readable representation of the
  # sequence of symbols.
  # @return [String]
- def inspect()
+ def inspect
  result = +"#<#{self.class.name}:#{object_id}"
  symbol_names = members.map(&:name)
  result << " @members=#{symbol_names}>"
- return result
+ result
  end
-
-
  end # class
  end # module
  end # module
data/lib/rley/syntax/terminal.rb
@@ -4,29 +4,28 @@ require_relative 'grm_symbol' # Load superclass
 
  module Rley # This module is used as a namespace
  module Syntax # This module is used as a namespace
- # A terminal symbol represents a class of words in the language
+ # A terminal symbol represents a class of words in the language
  # defined the grammar.
  class Terminal < GrmSymbol
-
  # Constructor.
  # @param aName [String] The name of the grammar symbol.
  def initialize(aName)
  super(aName)
  self.generative = true
  end
-
+
  # Return true iff the symbol is a terminal
  def terminal?
  return true
  end
-
+
  # @return [false] Return true if the symbol derives
  # the empty string. As terminal symbol corresponds to a input token
  # it is by definition non-nullable.
  def nullable?
  false
  end
-
+
  def to_s
  name
  end
data/lib/rley/syntax/verbatim_symbol.rb
@@ -14,11 +14,11 @@ module Rley # This module is used as a namespace
  super(aText) # Do we need to separate the text from the name?
  @text = aText.dup
  end
-
+
  # The String representation of the verbatim symbol
  # @return [String]
- def to_s()
- return "'#{text}'"
+ def to_s
+ "'#{text}'"
  end
  end # class
  end # module
data/lib/support/base_tokenizer.rb
@@ -3,21 +3,27 @@
  require 'strscan'
  require_relative '../rley/lexical/token'
 
+ # Simplistic tokenizer used mostly for testing purposes
  class BaseTokenizer
+ # @return [StringScanner]
  attr_reader(:scanner)
+
+ # @return [Integer] current line number
  attr_reader(:lineno)
+
+ # @return [Integer] position of start of current line in source text
  attr_reader(:line_start)
-
+
  class ScanError < StandardError; end
 
- # Constructor. Initialize a tokenizer for Skeem.
+ # Constructor. Initialize a tokenizer.
  # @param source [String] Skeem text to tokenize.
  def initialize(source)
  @scanner = StringScanner.new('')
  restart(source)
  end
 
- # @param source [String] Skeem text to tokenize.
+ # @param source [String] input text to tokenize.
  def restart(source)
  @scanner.string = source
  @lineno = 1
@@ -34,13 +40,13 @@ class BaseTokenizer
 
  return tok_sequence
  end
-
+
  protected
-
+
  # Patterns:
  # Unambiguous single character
  # Conditional single character:
- # (e.g. '+' operator, '+' prefix for positive numbers)
+ # (e.g. '+' operator, '+' prefix for positive numbers)
  def _next_token
  skip_whitespaces
  curr_ch = scanner.peek(1)
@@ -57,11 +63,11 @@ class BaseTokenizer
 
  return token
  end
-
+
  def recognize_token
  raise NotImplementedError
  end
-
+
  def build_token(aSymbolName, aLexeme, aFormat = :default)
  begin
  value = convert_to(aLexeme, aSymbolName, aFormat)
@@ -75,11 +81,11 @@ class BaseTokenizer
 
  return token
  end
-
+
  def convert_to(aLexeme, _symbol_name, _format)
  return aLexeme
  end
-
+
  def skip_whitespaces
  pre_pos = scanner.pos
 
@@ -93,21 +99,16 @@ class BaseTokenizer
  ws_found = true
  next_line
  end
- # next_ch = scanner.peek(1)
- # if next_ch == ';'
- # cmt_found = true
- # scanner.skip(/;[^\r\n]*(?:(?:\r\n)|\r|\n)?/)
- # next_line
- # end
+
  break unless ws_found || cmt_found
 
  curr_pos = scanner.pos
  return if curr_pos == pre_pos
  end
 
-
+
  def next_line
  @lineno += 1
  @line_start = scanner.pos
- end
+ end
  end # class
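
The refactored BaseTokenizer above is an abstract helper: a concrete tokenizer is expected to override recognize_token (which raises NotImplementedError in the base class) and hand each recognized lexeme to build_token(aSymbolName, aLexeme, aFormat = :default). A minimal sketch of such a subclass, under assumed names: the IntegerSeqTokenizer class, the load path, and the INTEGER/PLUS terminal names are all illustrative and would have to match terminals declared in the grammar.

# Hypothetical subclass of the BaseTokenizer shown above; names and patterns are illustrative.
require_relative 'lib/support/base_tokenizer' # load path assumed, relative to the gem's data directory

class IntegerSeqTokenizer < BaseTokenizer
  protected

  # Hook called while BaseTokenizer scans the input (see _next_token in the diff above).
  def recognize_token
    if (lexeme = scanner.scan(/\d+/))   # a run of digits
      build_token('INTEGER', lexeme)    # 'INTEGER' must match a terminal declared in the grammar
    elsif (lexeme = scanner.scan(/\+/)) # a lone '+' operator
      build_token('PLUS', lexeme)
    end
  end
end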
data/spec/rley/base/dotted_item_spec.rb
@@ -125,7 +125,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
 
  it 'should determine if it is a successor of another dotted item' do
  expect(subject).not_to be_successor_of(subject)
-
+
  # Case: different productions
  instance = DottedItem.new(empty_prod, 0)
  expect(subject).not_to be_successor_of(instance)
@@ -139,7 +139,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
  instance2 = DottedItem.new(sample_prod, 2)
  expect(instance).not_to be_successor_of(instance2)
  expect(subject).not_to be_successor_of(instance2)
- expect(instance2).to be_successor_of(subject)
+ expect(instance2).to be_successor_of(subject)
  end
 
 
data/spec/rley/engine_spec.rb
@@ -19,11 +19,11 @@ module Rley # Open this namespace to avoid module qualifier prefixes
  end
 
  it 'could be created with block argument' do
- expect do
- Engine.new do |config|
- config.parse_repr = :raw
- end
- end.not_to raise_error
+ expect do
+ Engine.new do |config|
+ config.parse_repr = :raw
+ end
+ end.not_to raise_error
  end
 
  it "shouldn't have a link to a grammar yet" do
@@ -35,9 +35,9 @@ module Rley # Open this namespace to avoid module qualifier prefixes
  it 'should build grammar' do
  subject.build_grammar do
  add_terminals('a', 'b', 'c')
- add_production('S' => ['A'])
- add_production('A' => %w[a A c])
- add_production('A' => ['b'])
+ add_production('S' => 'A')
+ add_production('A' => 'a A c')
+ add_production('A' => 'b')
  end
 
  expect(subject.grammar).to be_kind_of(Rley::Syntax::Grammar)
@@ -45,13 +45,14 @@ module Rley # Open this namespace to avoid module qualifier prefixes
  end
  end # context
 
+ # rubocop: disable Lint/ConstantDefinitionInBlock
  class ABCTokenizer
  # Constructor
  def initialize(someText)
  @input = someText.dup
  end
 
- def each()
+ def each
  pos = Rley::Lexical::Position.new(1, 1) # Dummy position
  lexemes = @input.scan(/\S/)
  lexemes.each do |ch|
@@ -63,15 +64,16 @@ module Rley # Open this namespace to avoid module qualifier prefixes
  end
  end
  end # class
+ # rubocop: enable Lint/ConstantDefinitionInBlock
 
  # Utility method. Ensure that the engine
  # has the defnition of a sample grammar
  def add_sample_grammar(anEngine)
  anEngine.build_grammar do
  add_terminals('a', 'b', 'c')
- add_production('S' => ['A'])
- add_production('A' => %w[a A c])
- add_production('A' => ['b'])
+ add_production('S' => 'A')
+ add_production('A' => 'a A c')
+ add_production('A' => 'b')
  end
  end
 
@@ -101,7 +103,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
 
  sample_text = 'a a b c c'
  ABCTokenizer.new(sample_text)
-
+
  it 'should build a parse tree even for a nullable production' do
  instance = Engine.new
  instance.build_grammar do
@@ -112,12 +114,12 @@ module Rley # Open this namespace to avoid module qualifier prefixes
  add_production 'B_opt' => 'b'
  add_production 'B_opt' => []
  add_production 'C_opt' => 'c'
- add_production 'C_opt' => []
+ add_production 'C_opt' => []
  end
  input = ABCTokenizer.new('a')
  raw_result = instance.parse(input)
  expect { instance.to_ptree(raw_result) }.not_to raise_error
- end
+ end
 
  it 'should build default parse trees' do
  raw_result = subject.parse(sample_tokenizer)
@@ -152,7 +154,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
  sample_text = 'a a b c c'
  ABCTokenizer.new(sample_text)
  end
-
+
  it 'should build a parse forest even for a nullable production' do
  instance = Engine.new
  instance.build_grammar do
@@ -163,19 +165,19 @@ module Rley # Open this namespace to avoid module qualifier prefixes
  add_production 'B_opt' => 'b'
  add_production 'B_opt' => []
  add_production 'C_opt' => 'c'
- add_production 'C_opt' => []
+ add_production 'C_opt' => []
  end
  input = ABCTokenizer.new('a')
  raw_result = instance.parse(input)
  expect { instance.to_pforest(raw_result) }.not_to raise_error
- end
-
+ end
+
  it 'should build parse forest' do
  raw_result = subject.parse(sample_tokenizer)
  pforest = subject.to_pforest(raw_result)
  expect(pforest).to be_kind_of(SPPF::ParseForest)
- end
-
+ end
+
  it 'should provide a parse visitor' do
  raw_result = subject.parse(sample_tokenizer)
  ptree = subject.to_pforest(raw_result)
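
The engine_spec changes above illustrate the main API shift in 0.8: add_production now accepts the right-hand side as a single string ('a A c') where 0.7 used an array (%w[a A c]). A hedged usage sketch, reusing the ABCTokenizer helper class defined in the spec above (it is a test helper, not a public class of the gem):

require 'rley'

engine = Rley::Engine.new
engine.build_grammar do
  add_terminals('a', 'b', 'c')
  add_production('S' => 'A')     # 0.8 string notation for the right-hand side
  add_production('A' => 'a A c') # was add_production('A' => %w[a A c]) in 0.7
  add_production('A' => 'b')
end

raw_result = engine.parse(ABCTokenizer.new('a a b c c'))
ptree   = engine.to_ptree(raw_result)    # parse tree, as exercised by the spec
pforest = engine.to_pforest(raw_result)  # shared packed parse forest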
data/spec/rley/formatter/asciitree_spec.rb
@@ -23,12 +23,12 @@ module Rley # Re-open the module to get rid of qualified names
  builder = sandbox.grammar_abc_builder
  builder.grammar
  end
-
+
  # Variables for the terminal symbols
  let(:a_) { grammar_abc.name2symbol['a'] }
  let(:b_) { grammar_abc.name2symbol['b'] }
  let(:c_) { grammar_abc.name2symbol['c'] }
-
+
  # Helper method that mimicks the output of a tokenizer
  # for the language specified by grammar_abc
  let(:grm_abc_tokens1) do
@@ -50,12 +50,12 @@ module Rley # Re-open the module to get rid of qualified names
  # Capital letters represent non-terminal nodes
  let(:grm_abc_ptree1) do
  engine = Rley::Engine.new
- engine.use_grammar(grammar_abc)
+ engine.use_grammar(grammar_abc)
  parse_result = engine.parse(grm_abc_tokens1)
  ptree = engine.convert(parse_result)
  ptree
  end
-
+
  let(:destination) { StringIO.new(+'', 'w') }
  subject { Asciitree.new(destination) }
 
@@ -63,14 +63,14 @@ module Rley # Re-open the module to get rid of qualified names
  it 'should be initialized with an IO argument' do
  expect { Asciitree.new(StringIO.new(+'', 'w')) }.not_to raise_error
  end
-
+
  it 'should know its output destination' do
  expect(subject.output).to eq(destination)
  end
  end # context
-
 
- context 'Rendering:' do
+
+ context 'Rendering:' do
  it 'should render a parse tree' do
  visitor = Rley::ParseTreeVisitor.new(grm_abc_ptree1)
  subject.render(visitor)
data/spec/rley/formatter/bracket_notation_spec.rb
@@ -23,12 +23,12 @@ module Rley # Re-open the module to get rid of qualified names
  builder = sandbox.grammar_abc_builder
  builder.grammar
  end
-
+
  # Variables for the terminal symbols
  let(:a_) { grammar_abc.name2symbol['a'] }
  let(:b_) { grammar_abc.name2symbol['b'] }
  let(:c_) { grammar_abc.name2symbol['c'] }
-
+
  # Helper method that mimicks the output of a tokenizer
  # for the language specified by grammar_abc
  let(:grm_abc_tokens1) do
@@ -50,45 +50,45 @@ module Rley # Re-open the module to get rid of qualified names
  # Capital letters represent non-terminal nodes
  let(:grm_abc_ptree1) do
  engine = Rley::Engine.new
- engine.use_grammar(grammar_abc)
+ engine.use_grammar(grammar_abc)
  parse_result = engine.parse(grm_abc_tokens1)
  ptree = engine.convert(parse_result)
- ptree
+ ptree
  end
-
+
  let(:destination) { StringIO.new(+'', 'w') }
  subject { BracketNotation.new(destination) }
 
  context 'Standard creation & initialization:' do
  it 'should be initialized with an IO argument' do
- expect do
- BracketNotation.new(StringIO.new(+'', 'w'))
+ expect do
+ BracketNotation.new(StringIO.new(+'', 'w'))
  end.not_to raise_error
  end
-
+
  it 'should know its output destination' do
  expect(subject.output).to eq(destination)
  end
  end # context
-
 
- context 'Formatting events:' do
+
+ context 'Formatting events:' do
  it 'should support visit events of a parse tree' do
  visitor = Rley::ParseTreeVisitor.new(grm_abc_ptree1)
  subject.render(visitor)
  expectations = '[S [A [a a][A [a a][A [b b]][c c]][c c]]]'
  expect(destination.string).to eq(expectations)
  end
-
+
  it 'should escape square brackets' do
  f_node = double('fake-node')
  f_token = double('fake-token')
  expect(f_node).to receive(:token).and_return(f_token)
  expect(f_token).to receive(:lexeme).and_return('[][]')
-
+
  subject.after_terminal(f_node)
  expectations = '\[\]\[\]]'
- expect(destination.string).to eq(expectations)
+ expect(destination.string).to eq(expectations)
  end
  end # context
  end # describe
data/spec/rley/formatter/json_spec.rb
@@ -49,7 +49,7 @@ module Rley # Re-open the module to get rid of qualified names
  # Capital letters represent non-terminal nodes
  let(:grm_abc_ptree1) do
  engine = Rley::Engine.new
- engine.use_grammar(grammar_abc)
+ engine.use_grammar(grammar_abc)
  parse_result = engine.parse(grm_abc_tokens1)
  ptree = engine.convert(parse_result)
  ptree
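
The formatter specs above (asciitree_spec, bracket_notation_spec, json_spec) all exercise the same rendering pattern. A short sketch of that pattern, assuming ptree is a parse tree obtained as in the engine example earlier and that the formatter classes live under Rley::Formatter (inferred from the lib/rley/formatter file layout):

require 'stringio'

destination = StringIO.new(+'', 'w')
formatter = Rley::Formatter::BracketNotation.new(destination) # Asciitree takes an IO argument the same way
visitor = Rley::ParseTreeVisitor.new(ptree)
formatter.render(visitor)
puts destination.string # e.g. [S [A [a a][A [a a][A [b b]][c c]][c c]]] for the abc grammar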