rley 0.6.09 → 0.7.00

Files changed (47)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +10 -0
  3. data/README.md +13 -2
  4. data/examples/NLP/benchmark_pico_en.rb +4 -1
  5. data/examples/NLP/engtagger.rb +4 -1
  6. data/examples/NLP/nano_eng/nano_en_demo.rb +15 -4
  7. data/examples/NLP/pico_en_demo.rb +2 -17
  8. data/examples/data_formats/JSON/json_ast_builder.rb +2 -2
  9. data/examples/data_formats/JSON/json_ast_nodes.rb +18 -2
  10. data/examples/data_formats/JSON/json_lexer.rb +10 -4
  11. data/examples/general/calc_iter1/calc_lexer.rb +5 -4
  12. data/examples/general/calc_iter2/calc_lexer.rb +2 -1
  13. data/examples/general/left.rb +4 -1
  14. data/examples/general/right.rb +4 -1
  15. data/lib/rley/constants.rb +1 -1
  16. data/lib/rley/lexical/token.rb +14 -2
  17. data/lib/rley/parser/error_reason.rb +1 -1
  18. data/lib/rley/parser/gfg_earley_parser.rb +4 -0
  19. data/lib/rley/syntax/terminal.rb +6 -2
  20. data/lib/support/base_tokenizer.rb +197 -0
  21. data/spec/rley/engine_spec.rb +2 -1
  22. data/spec/rley/formatter/asciitree_spec.rb +2 -1
  23. data/spec/rley/formatter/bracket_notation_spec.rb +2 -1
  24. data/spec/rley/formatter/debug_spec.rb +4 -2
  25. data/spec/rley/formatter/json_spec.rb +2 -1
  26. data/spec/rley/lexical/token_spec.rb +10 -5
  27. data/spec/rley/parse_rep/ambiguous_parse_spec.rb +1 -1
  28. data/spec/rley/parse_rep/ast_builder_spec.rb +1 -1
  29. data/spec/rley/parse_rep/cst_builder_spec.rb +2 -2
  30. data/spec/rley/parse_rep/groucho_spec.rb +2 -1
  31. data/spec/rley/parse_rep/parse_forest_builder_spec.rb +1 -1
  32. data/spec/rley/parse_tree_visitor_spec.rb +2 -1
  33. data/spec/rley/parser/error_reason_spec.rb +6 -4
  34. data/spec/rley/parser/gfg_earley_parser_spec.rb +59 -57
  35. data/spec/rley/parser/gfg_parsing_spec.rb +1 -1
  36. data/spec/rley/parser/parse_tracer_spec.rb +3 -2
  37. data/spec/rley/sppf/token_node_spec.rb +9 -6
  38. data/spec/rley/support/ambiguous_grammar_helper.rb +2 -1
  39. data/spec/rley/support/expectation_helper.rb +1 -0
  40. data/spec/rley/support/grammar_ambig01_helper.rb +15 -6
  41. data/spec/rley/support/grammar_arr_int_helper.rb +16 -15
  42. data/spec/rley/support/grammar_b_expr_helper.rb +16 -7
  43. data/spec/rley/support/grammar_helper.rb +6 -2
  44. data/spec/rley/support/grammar_l0_helper.rb +12 -4
  45. data/spec/rley/support/grammar_pb_helper.rb +46 -21
  46. data/spec/support/base_tokenizer_spec.rb +77 -0
  47. metadata +5 -2
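
A common thread runs through the hunks below: Rley 0.7.00 makes tokens position-aware. Rley::Lexical::Token.new now takes a third argument, a Rley::Lexical::Position value object carrying a line and column number; parse-error messages report "line L, column C" instead of a bare token index; and the spec helper tokenizers drop their now-redundant grammar parameter. A minimal sketch of the new calling convention, assuming only the API visible in the hunks (the terminal name 'T_A' is a placeholder, not part of the diff):

    require 'rley'

    a_terminal = Rley::Syntax::Terminal.new('T_A')          # placeholder terminal
    pos = Rley::Lexical::Position.new(1, 1)                 # line 1, column 1
    token = Rley::Lexical::Token.new('a', a_terminal, pos)  # 0.7.00: three arguments
    # Rley::Lexical::Token.new('a', a_terminal)             # 0.6.09: two arguments, no position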
@@ -49,10 +49,11 @@ module Rley # Open this namespace to avoid module qualifier prefixes
     end
 
     def each()
+      pos = Rley::Lexical::Position.new(1, 1) # Dummy position
       lexemes = @input.scan(/\S/)
       lexemes.each do |ch|
         if ch =~ /[abc]/
-          yield Rley::Lexical::Token.new(ch, ch)
+          yield Rley::Lexical::Token.new(ch, ch, pos)
         else
           raise StandardError, "Invalid character #{ch}"
         end
@@ -30,7 +30,8 @@ module Rley # Re-open the module to get rid of qualified names
     # Helper method that mimicks the output of a tokenizer
     # for the language specified by grammar_abc
     let(:grm_abc_tokens1) do
-      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch) }
+      pos = Lexical::Position.new(1, 2) # Dummy position
+      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch, pos) }
     end
 
     # Factory method that builds a sample parse tree.
@@ -30,7 +30,8 @@ module Rley # Re-open the module to get rid of qualified names
     # Helper method that mimicks the output of a tokenizer
     # for the language specified by grammar_abc
     let(:grm_abc_tokens1) do
-      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch) }
+      pos = Lexical::Position.new(1, 2) # Dummy position
+      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch, pos) }
     end
 
     # Factory method that builds a sample parse tree.
@@ -31,7 +31,8 @@ module Rley # Re-open the module to get rid of qualified names
     # Helper method that mimicks the output of a tokenizer
     # for the language specified by grammar_abc
     let(:grm_abc_tokens1) do
-      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch) }
+      pos = Lexical::Position.new(1, 2) # Dummy position
+      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch, pos) }
     end
 
     let(:grammar_sppf) do
@@ -40,7 +41,8 @@ module Rley # Re-open the module to get rid of qualified names
     end
 
     let(:sample_tokens) do
-      %w[a b b b].map { |ch| Lexical::Token.new(ch, ch) }
+      pos = Lexical::Position.new(1, 2) # Dummy position
+      %w[a b b b].map { |ch| Lexical::Token.new(ch, ch, pos) }
     end
 
     # Factory method that builds a sample parse tree.
@@ -29,7 +29,8 @@ module Rley # Re-open the module to get rid of qualified names
     # Helper method that mimicks the output of a tokenizer
     # for the language specified by grammar_abc
     let(:grm_abc_tokens1) do
-      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch) }
+      pos = Lexical::Position.new(1, 2) # Dummy position
+      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch, pos) }
     end
 
     # Factory method that builds a sample parse tree.
@@ -9,14 +9,15 @@ module Rley # Open this namespace to avoid module qualifier prefixes
   module Lexical # Open this namespace to avoid module qualifier prefixes
     describe Token do
       let(:lexeme) { '"some text"' }
-      let(:sample_terminal) { Syntax::Terminal.new('if') }
+      let(:a_terminal) { Syntax::Terminal.new('if') }
+      let(:a_pos) { Position.new(3, 4) }
 
       context 'Initialization:' do
         # Default instantiation rule
-        subject { Token.new(lexeme, sample_terminal) }
+        subject { Token.new(lexeme, a_terminal, a_pos) }
 
-        it 'should be created with a lexeme and a terminal argument' do
-          expect { Token.new(lexeme, sample_terminal) }.not_to raise_error
+        it 'should be created with a lexeme, a terminal and a position' do
+          expect { Token.new(lexeme, a_terminal, a_pos) }.not_to raise_error
         end
 
         it 'should know its lexeme' do
@@ -24,8 +25,12 @@ module Rley # Open this namespace to avoid module qualifier prefixes
         end
 
         it 'should know its terminal' do
-          expect(subject.terminal).to eq(sample_terminal)
+          expect(subject.terminal).to eq(a_terminal)
         end
+
+        it 'should know its position' do
+          expect(subject.position).to eq(a_pos)
+        end
       end # context
     end # describe
   end # module
@@ -61,7 +61,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
 
     let(:expr_tokens) do
       sentence = '2 + 3 * 4'
-      tokenizer_ambig01(sentence, grammar_ambig01)
+      tokenizer_ambig01(sentence)
     end
 
     let(:sentence_result) do
@@ -94,7 +94,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
     end
 
     let(:sample_tokens) do
-      arr_int_tokenizer('[2 , 3, 5 ]', sample_grammar)
+      arr_int_tokenizer('[2 , 3, 5 ]')
     end
 
     subject { ASTBuilder.new(sample_tokens) }
@@ -23,7 +23,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
     end
 
     let(:sample_tokens) do
-      expr_tokenizer('2 + 3 * 4', sample_grammar)
+      expr_tokenizer('2 + 3 * 4')
     end
 
     subject { CSTBuilder.new(sample_tokens) }
@@ -357,7 +357,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
     # Event: visit P => . arr | 0 0
     # Event: visit .P | 0 0
     it 'should build a tree for an empty array' do
-      empty_arr_tokens = arr_int_tokenizer('[ ]', array_grammar)
+      empty_arr_tokens = arr_int_tokenizer('[ ]')
       @instance = CSTBuilder.new(empty_arr_tokens)
       init_walker(@parser, empty_arr_tokens)
       stack = get_stack(@instance)
@@ -48,13 +48,14 @@ module Rley # Open this namespace to avoid module qualifier prefixes
 
   # Highly simplified tokenizer implementation.
   def tokenizer(aText, aGrammar)
+    pos = Rley::Lexical::Position.new(1, 2) # Dummy position
     tokens = aText.scan(/\S+/).map do |word|
       term_name = Groucho_lexicon[word]
       if term_name.nil?
         raise StandardError, "Word '#{word}' not found in lexicon"
       end
       terminal = aGrammar.name2symbol[term_name]
-      Rley::Lexical::Token.new(word, terminal)
+      Rley::Lexical::Token.new(word, terminal, pos)
     end
 
     return tokens
@@ -285,7 +285,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
 
     let(:sentence_tokens) do
       sentence = 'I prefer a morning flight'
-      tokenizer_l0(sentence, grammar_l0)
+      tokenizer_l0(sentence)
     end
 
     let(:sentence_result) do
@@ -25,7 +25,8 @@ module Rley # Open this namespace to avoid module qualifier prefixes
     # Helper method that mimicks the output of a tokenizer
     # for the language specified by grammar_abc
     let(:grm_abc_tokens1) do
-      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch) }
+      pos = Lexical::Position.new(1, 2) # Dummy position
+      %w[a a b c c].map { |ch| Lexical::Token.new(ch, ch, pos) }
     end
 
     # Factory method that builds a sample parse tree.
@@ -62,7 +62,8 @@ module Rley # Open this namespace to avoid module qualifier prefixes
   describe UnexpectedToken do
     let(:err_lexeme) { '-' }
     let(:err_terminal) { Syntax::Terminal.new('MINUS') }
-    let(:err_token) { Lexical::Token.new(err_lexeme, err_terminal) }
+    let(:pos) { Lexical::Position.new(3, 4) }
+    let(:err_token) { Lexical::Token.new(err_lexeme, err_terminal, pos) }
     let(:terminals) do
      %w[PLUS LPAREN].map { |name| Syntax::Terminal.new(name) }
    end
@@ -81,7 +82,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
   context 'Provided services:' do
     it 'should emit a message' do
       text = <<MESSAGE_END
-Syntax error at or near token 4 >>>-<<<
+Syntax error at or near token line 3, column 4 >>>-<<<
 Expected one of: ['PLUS', 'LPAREN'], found a 'MINUS' instead.
 MESSAGE_END
       expect(subject.to_s).to eq(text.chomp)
@@ -93,7 +94,8 @@ MESSAGE_END
   describe PrematureInputEnd do
     let(:err_lexeme) { '+' }
     let(:err_terminal) { Syntax::Terminal.new('PLUS') }
-    let(:err_token) { Lexical::Token.new(err_lexeme, err_terminal) }
+    let(:pos) { Lexical::Position.new(3, 4) }
+    let(:err_token) { Lexical::Token.new(err_lexeme, err_terminal, pos) }
     let(:terminals) do
       %w[INT LPAREN].map { |name| Syntax::Terminal.new(name) }
     end
@@ -112,7 +114,7 @@ MESSAGE_END
   context 'Provided services:' do
     it 'should emit a message' do
       text = <<MESSAGE_END
-Premature end of input after '+' at position 4
+Premature end of input after '+' at position line 3, column 4
 Expected one of: ['INT', 'LPAREN'].
 MESSAGE_END
       expect(subject.to_s).to eq(text.chomp)
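
Both failure reasons above (UnexpectedToken and PrematureInputEnd) now interpolate the offending token's Position into their message instead of a bare token ordinal. A sketch of the inputs behind the expected text, reusing only constructors shown in the specs:

    pos = Rley::Lexical::Position.new(3, 4)
    minus = Rley::Syntax::Terminal.new('MINUS')
    err_token = Rley::Lexical::Token.new('-', minus, pos)
    # An UnexpectedToken built from err_token renders (per the spec above):
    #   Syntax error at or near token line 3, column 4 >>>-<<<
    #   Expected one of: ['PLUS', 'LPAREN'], found a 'MINUS' instead.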
@@ -288,7 +288,8 @@ module Rley # Open this namespace to avoid module qualifier prefixes
       builder.add_terminals(t_x)
       builder.add_production('Ss' => %w[A A x])
       builder.add_production('A' => [])
-      tokens = [Lexical::Token.new('x', t_x)]
+      pos = Lexical::Position.new(1, 1)
+      tokens = [Lexical::Token.new('x', t_x, pos)]
 
       instance = GFGEarleyParser.new(builder.grammar)
       expect { instance.parse(tokens) }.not_to raise_error
@@ -299,8 +300,8 @@ module Rley # Open this namespace to avoid module qualifier prefixes
       expected = [
         '.Ss | 0',             # Initialization
         "Ss => . A A 'x' | 0", # start rule
-        '.A | 0', # call rule
-        'A => . | 0', # start rule
+        '.A | 0',              # call rule
+        'A => . | 0',          # start rule
         'A. | 0',              # exit rule
         "Ss => A . A 'x' | 0", # end rule
         "Ss => A A . 'x' | 0"  # end rule
@@ -559,7 +560,7 @@ module Rley # Open this namespace to avoid module qualifier prefixes
       parse_result = subject.parse(wrong)
       expect(parse_result.success?).to eq(false)
       err_msg = <<-MSG
-Syntax error at or near token 3 >>>c<<<
+Syntax error at or near token line 1, column 5 >>>c<<<
 Expected one of: ['a', 'b'], found a 'c' instead.
 MSG
       expect(parse_result.failure_reason.message).to eq(err_msg.chomp)
@@ -590,34 +591,35 @@ MSG
         'S => . E | 0',   # start rule
         '.E | 0',         # call rule
         'E => . int | 0', # start rule
-        "E => . '(' E '+' E ')' | 0", # start rule
-        "E => . E '+' E | 0" # start rule
+        "E => . ( E + E ) | 0", # start rule
+        "E => . E + E | 0"      # start rule
       ]
       compare_entry_texts(parse_result.chart[0], expected)
 
       ###################### S(1) == 1 . +
       # Expectation chart[1]:
       expected = [
-        'E => int . | 0', # scan '1'
-        'E. | 0', # exit rule
-        'S => E . | 0', # end rule
-        "E => E . '+' E | 0", # end rule
-        'S. | 0' # exit rule
+        'E => int . | 0',   # scan '1'
+        'E. | 0',           # exit rule
+        'S => E . | 0',     # end rule
+        'E => E . + E | 0', # end rule
+        'S. | 0'            # exit rule
       ]
       compare_entry_texts(parse_result.chart[1], expected)
 
       ###################### S(2) == 1 + .
       # Expectation chart[2]:
       expected = [
-        "E => E '+' . E | 0", # scan '+'
-        '.E | 2', # exit rule
-        'E => . int | 2', # start rule
-        "E => . '(' E '+' E ')' | 2", # start rule
-        "E => . E '+' E | 2" # start rule
+        'E => E + . E | 0',     # scan '+'
+        '.E | 2',               # exit rule
+        'E => . int | 2',       # start rule
+        'E => . ( E + E ) | 2', # start rule
+        'E => . E + E | 2'      # start rule
       ]
       compare_entry_texts(parse_result.chart[2], expected)
 
-      err_msg = "Premature end of input after '+' at position 2"
+      err_msg = "Premature end of input after '+' at position line 1, "
+      err_msg << "column 3"
       err_msg << "\nExpected one of: ['int', '(']."
       expect(parse_result.failure_reason.message).to eq(err_msg)
     end
@@ -636,76 +638,76 @@ MSG
       ###################### S(0) == . 7 + 8 + 9
       # Expectation chart[0]:
      expected = [
-        '.S | 0', # initialization
-        'S => . E | 0', # start rule
-        '.E | 0', # call rule
-        'E => . int | 0', # start rule
-        "E => . '(' E '+' E ')' | 0", # start rule
-        "E => . E '+' E | 0" # start rule
+        '.S | 0',               # initialization
+        'S => . E | 0',         # start rule
+        '.E | 0',               # call rule
+        'E => . int | 0',       # start rule
+        'E => . ( E + E ) | 0', # start rule
+        'E => . E + E | 0'      # start rule
       ]
       compare_entry_texts(parse_result.chart[0], expected)
 
       ###################### S(1) == 7 . + 8 + 9
       # Expectation chart[1]:
       expected = [
-        'E => int . | 0', # scan '7'
-        'E. | 0', # exit rule
-        'S => E . | 0', # end rule
-        "E => E . '+' E | 0", # end rule
-        'S. | 0' # exit rule
+        'E => int . | 0',   # scan '7'
+        'E. | 0',           # exit rule
+        'S => E . | 0',     # end rule
+        'E => E . + E | 0', # end rule
+        'S. | 0'            # exit rule
       ]
       compare_entry_texts(parse_result.chart[1], expected)
 
       ###################### S(2) == 7 + . 8 + 9
       # Expectation chart[2]:
       expected = [
-        "E => E '+' . E | 0", # scan '+'
-        '.E | 2', # exit rule
-        'E => . int | 2', # start rule
-        "E => . '(' E '+' E ')' | 2", # start rule
-        "E => . E '+' E | 2" # start rule
+        'E => E + . E | 0',     # scan '+'
+        '.E | 2',               # exit rule
+        'E => . int | 2',       # start rule
+        'E => . ( E + E ) | 2', # start rule
+        'E => . E + E | 2'      # start rule
       ]
       compare_entry_texts(parse_result.chart[2], expected)
 
       ###################### S(3) == 7 + 8 . + 9
       # Expectation chart[3]:
       expected = [
-        'E => int . | 2', # scan '8'
-        'E. | 2', # exit rule
-        "E => E '+' E . | 0", # end rule
-        "E => E . '+' E | 2", # end rule
-        'E. | 0', # exit rule
-        'S => E . | 0', # end rule
-        "E => E . '+' E | 0", # end rule
-        'S. | 0' # exit rule
+        'E => int . | 2',   # scan '8'
+        'E. | 2',           # exit rule
+        'E => E + E . | 0', # end rule
+        'E => E . + E | 2', # end rule
+        'E. | 0',           # exit rule
+        'S => E . | 0',     # end rule
+        'E => E . + E | 0', # end rule
+        'S. | 0'            # exit rule
       ]
       compare_entry_texts(parse_result.chart[3], expected)
 
       ###################### S(4) == 7 + 8 + . 9
       # Expectation chart[4]:
       expected = [
-        "E => E '+' . E | 2", # scan '+'
-        "E => E '+' . E | 0", # scan '+'
-        '.E | 4', # exit rule
-        'E => . int | 4', # start rule
-        "E => . '(' E '+' E ')' | 4", # start rule
-        "E => . E '+' E | 4" # start rule
+        'E => E + . E | 2',     # scan '+'
+        'E => E + . E | 0',     # scan '+'
+        '.E | 4',               # exit rule
+        'E => . int | 4',       # start rule
+        'E => . ( E + E ) | 4', # start rule
+        'E => . E + E | 4'      # start rule
       ]
       compare_entry_texts(parse_result.chart[4], expected)
 
       ###################### S(5) == 7 + 8 + 9 .
       # Expectation chart[5]:
       expected = [
-        'E => int . | 4', # scan '9'
-        'E. | 4', # exit rule
-        "E => E '+' E . | 2", # end rule
-        "E => E '+' E . | 0", # end rule
-        "E => E . '+' E | 4", # exit rule (not shown in paper)
-        'E. | 2', # exit rule
-        'E. | 0', # exit rule
-        "E => E . '+' E | 2", # end rule
-        'S => E . | 0', # end rule
-        "E => E . '+' E | 0", # end rule
+        'E => int . | 4',   # scan '9'
+        'E. | 4',           # exit rule
+        'E => E + E . | 2', # end rule
+        'E => E + E . | 0', # end rule
+        'E => E . + E | 4', # exit rule (not shown in paper)
+        'E. | 2',           # exit rule
+        'E. | 0',           # exit rule
+        'E => E . + E | 2', # end rule
+        'S => E . | 0',     # end rule
+        'E => E . + E | 0', # end rule
         'S. | 0'
       ]
       compare_entry_texts(parse_result.chart[5], expected)
@@ -305,7 +305,7 @@ SNIPPET
 
     subject do
       parser = GFGEarleyParser.new(b_expr_grammar)
-      tokens = expr_tokenizer('2 + 3 * 4', b_expr_grammar)
+      tokens = expr_tokenizer('2 + 3 * 4')
      parser.parse(tokens)
    end
 
@@ -15,11 +15,12 @@ module Rley # Open this namespace to avoid module qualifier prefixes
   module Parser # Open this namespace to avoid module qualifier prefixes
     describe ParseTracer do
       let(:output) { StringIO.new('', 'w') }
+      let(:tpos) { Lexical::Position.new(3, 4) }
 
       let(:token_seq) do
         literals = %w[I saw John with a dog]
-        literals.map do |lexeme|
-          Lexical::Token.new(lexeme, double('fake-terminal'))
+        literals.map do |lexeme|
+          Lexical::Token.new(lexeme, double('fake-terminal'), tpos)
         end
       end
 
@@ -12,10 +12,13 @@ module Rley # Open this namespace to avoid module qualifier prefixes
   module SPPF # Open this namespace to avoid module qualifier prefixes
     describe TokenNode do
       let(:sample_symbol) { Syntax::Terminal.new('Noun') }
-      let(:sample_token) { Lexical::Token.new('language', sample_symbol) }
-      let(:sample_position) { 3 }
+      let(:sample_position) { Lexical::Position.new(3, 4) }
+      let(:sample_token) do
+        Lexical::Token.new('language', sample_symbol, sample_position)
+      end
+      let(:sample_rank) { 3 }
 
-      subject { TokenNode.new(sample_token, sample_position) }
+      subject { TokenNode.new(sample_token, sample_rank) }
 
       context 'Initialization:' do
         it 'should know its token' do
@@ -23,9 +26,9 @@ module Rley # Open this namespace to avoid module qualifier prefixes
         end
 
         it 'should know its token range' do
-          expect(subject.origin).to eq(sample_position)
-          expect(subject.range.low).to eq(sample_position)
-          expect(subject.range.high).to eq(sample_position + 1)
+          expect(subject.origin).to eq(sample_rank)
+          expect(subject.range.low).to eq(sample_rank)
+          expect(subject.range.high).to eq(sample_rank + 1)
         end
       end # context
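
The TokenNode hunks draw a distinction that 0.6.09 blurred: a token's lexical Position (line and column in the source text) versus its rank (index in the token sequence, which determines the node's range). A short sketch of the difference, using only the objects exercised by the spec:

    pos = Rley::Lexical::Position.new(3, 4)                  # line 3, column 4 in the text
    noun = Rley::Syntax::Terminal.new('Noun')
    token = Rley::Lexical::Token.new('language', noun, pos)

    node = Rley::SPPF::TokenNode.new(token, 3)               # 3 is the rank, not a position
    node.range.low   # => 3
    node.range.high  # => 4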