lrama 0.5.10 → 0.5.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yaml +21 -1
  3. data/Steepfile +8 -12
  4. data/lib/lrama/grammar/code/rule_action.rb +1 -1
  5. data/lib/lrama/grammar/parameterizing_rules/builder/base.rb +10 -2
  6. data/lib/lrama/grammar/parameterizing_rules/builder/list.rb +12 -4
  7. data/lib/lrama/grammar/parameterizing_rules/builder/nonempty_list.rb +12 -4
  8. data/lib/lrama/grammar/parameterizing_rules/builder/option.rb +12 -4
  9. data/lib/lrama/grammar/parameterizing_rules/builder/separated_list.rb +17 -6
  10. data/lib/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rb +12 -5
  11. data/lib/lrama/grammar/parameterizing_rules/builder.rb +23 -6
  12. data/lib/lrama/grammar/rule.rb +2 -1
  13. data/lib/lrama/grammar/rule_builder.rb +17 -19
  14. data/lib/lrama/grammar/symbol.rb +16 -2
  15. data/lib/lrama/grammar/type.rb +6 -0
  16. data/lib/lrama/grammar.rb +8 -3
  17. data/lib/lrama/lexer/token/parameterizing.rb +1 -1
  18. data/lib/lrama/lexer/token.rb +16 -9
  19. data/lib/lrama/lexer.rb +1 -2
  20. data/lib/lrama/parser.rb +359 -346
  21. data/lib/lrama/version.rb +1 -1
  22. data/lib/lrama.rb +0 -1
  23. data/parser.y +17 -15
  24. data/rbs_collection.lock.yaml +2 -8
  25. data/sig/lrama/grammar/error_token.rbs +11 -0
  26. data/sig/lrama/grammar/parameterizing_rules/builder/base.rbs +26 -0
  27. data/sig/lrama/grammar/parameterizing_rules/builder/list.rbs +10 -0
  28. data/sig/lrama/grammar/parameterizing_rules/builder/nonempty_list.rbs +10 -0
  29. data/sig/lrama/grammar/parameterizing_rules/builder/option.rbs +10 -0
  30. data/sig/lrama/grammar/parameterizing_rules/builder/separated_list.rbs +11 -0
  31. data/sig/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rbs +11 -0
  32. data/sig/lrama/grammar/parameterizing_rules/builder.rbs +14 -1
  33. data/sig/lrama/grammar/reference.rbs +2 -2
  34. data/sig/lrama/grammar/rule.rbs +1 -1
  35. data/sig/lrama/grammar/rule_builder.rbs +1 -0
  36. data/sig/lrama/grammar/symbol.rbs +37 -0
  37. data/sig/lrama/lexer/token/parameterizing.rbs +2 -0
  38. data/sig/lrama/lexer/token.rbs +3 -3
  39. data/template/bison/yacc.c +0 -2
  40. metadata +11 -3
  41. data/lib/lrama/type.rb +0 -4
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: '0097beb1eb7af4947e417b356a7e3c81c3002916f4a1b2940b2df196ee58f144'
-  data.tar.gz: 982c957cdb8c42d8b77210756046e35f395875bb527f32a66b3a82ff236cbe78
+  metadata.gz: 634093bc73fd1504910364bffa6827a66fa7479f1f5b5238fdf577f0bb6d3d9d
+  data.tar.gz: 96db570d049f47f20bf619535ee91e7f0f846a492404a54f7fe41a6addcce484
 SHA512:
-  metadata.gz: a4bbdebc7f100a1b26c801c3c153519e3fb11d34f6dc5001391302665e779905d95041781917af93b2dac64854f174630044e384465bd5606df40f9f878707b1
-  data.tar.gz: b46f8eb2dffa4df04563bbc8997a9efd06f5a70dcbc26e19f6301624aadf5bc416721045ecf6650b83ae64b9889689c8aeef078e9637e0a152a9ac204ab766de
+  metadata.gz: 6b09e84cd16cd162f263e68abf7ada808418212ca449fab0f4a380c42c0193f479cad3d0d1afcc1d5f17d81b890e3754642d1a9433f7be5f6fc71fdcb2df3f97
+  data.tar.gz: 1994a670f1033e737f2193e2e71ccaa0818e4144ac7f4424ebf24046ece23efc3f67eeb0927c5336663a244f270b321247b39dbbdbe76cfacc13b94da6289a8b
data/.github/workflows/test.yaml CHANGED
@@ -35,10 +35,30 @@ jobs:
         with:
           ruby-version: ${{ matrix.ruby }}
           bundler-cache: true
-      - run: choco install winflexbison
+      - run: choco install winflexbison || choco install winflexbison
       - run: win_flex --help
       - run: bundle install
       - run: bundle exec rspec
+  test-memory:
+    runs-on: ubuntu-20.04
+    strategy:
+      fail-fast: false
+      matrix:
+        ruby: ['head']
+    steps:
+      - uses: actions/checkout@v4
+      - uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: ${{ matrix.ruby }}
+          bundler-cache: true
+      - run: |
+          sudo apt-get update -q
+          sudo apt-get install --no-install-recommends -q -y valgrind
+      - run: valgrind --version
+      - run: bundle install
+      - run: bundle exec rspec spec/lrama/integration_spec.rb
+        env:
+          ENABEL_VALGRIND: 'true'
   check-misc:
     runs-on: ubuntu-20.04
     strategy:
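The new test-memory job only installs valgrind and exports ENABEL_VALGRIND; whether and how valgrind wraps the generated parser is decided inside the spec. A hypothetical Ruby sketch of that wiring (the helper name, valgrind flags, and executable path are illustrative, not taken from lrama's spec code):

    # Hypothetical helper: prefix the command with valgrind only when the
    # workflow sets the flag. Flags and paths here are illustrative.
    def command_for(executable)
      if ENV["ENABEL_VALGRIND"] == "true"
        ["valgrind", "--leak-check=full", "--error-exitcode=1", executable]
      else
        [executable]
      end
    end

    system(*command_for("./test_parser"), exception: true)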
data/Steepfile CHANGED
@@ -4,24 +4,20 @@ target :lib do
   repo_path '.gem_rbs_collection/'
   signature "sig"

-  check "lib/lrama/bitmap.rb"
-  check "lib/lrama/digraph.rb"
-  check "lib/lrama/grammar/code.rb"
   check "lib/lrama/grammar/code/printer_code.rb"
+  check "lib/lrama/grammar/code.rb"
   check "lib/lrama/grammar/counter.rb"
+  check "lib/lrama/grammar/error_token.rb"
+  check "lib/lrama/grammar/parameterizing_rules"
   check "lib/lrama/grammar/percent_code.rb"
   check "lib/lrama/grammar/precedence.rb"
   check "lib/lrama/grammar/printer.rb"
   check "lib/lrama/grammar/reference.rb"
   check "lib/lrama/grammar/rule_builder.rb"
-  check "lib/lrama/lexer/token/char.rb"
-  check "lib/lrama/lexer/token/ident.rb"
-  check "lib/lrama/lexer/token/parameterizing.rb"
-  check "lib/lrama/lexer/token/tag.rb"
-  check "lib/lrama/lexer/token/user_code.rb"
-  check "lib/lrama/lexer/location.rb"
-  check "lib/lrama/lexer/token.rb"
-  check "lib/lrama/report/duration.rb"
-  check "lib/lrama/report/profile.rb"
+  check "lib/lrama/grammar/symbol.rb"
+  check "lib/lrama/lexer"
+  check "lib/lrama/report"
+  check "lib/lrama/bitmap.rb"
+  check "lib/lrama/digraph.rb"
   check "lib/lrama/warning.rb"
 end
data/lib/lrama/grammar/code/rule_action.rb CHANGED
@@ -50,7 +50,7 @@ module Lrama
        end

        def lhs
-         (@rule.original_rule || @rule).lhs
+         @rule.lhs
        end

        def raise_tag_not_found_error(ref)
data/lib/lrama/grammar/parameterizing_rules/builder/base.rb CHANGED
@@ -2,16 +2,24 @@ module Lrama
   class Grammar
     class ParameterizingRules
       class Builder
+        # Base class for parameterizing rules builder
         class Base
-          def initialize(token, rule_counter, lhs, user_code, precedence_sym, line)
+          attr_reader :build_token
+
+          def initialize(token, rule_counter, lhs_tag, user_code, precedence_sym, line)
             @args = token.args
             @token = @args.first
             @rule_counter = rule_counter
-            @lhs = lhs
+            @lhs_tag = lhs_tag
             @user_code = user_code
             @precedence_sym = precedence_sym
             @line = line
             @expected_argument_num = 1
+            @build_token = nil
+          end
+
+          def build
+            raise NotImplementedError
           end

           private
data/lib/lrama/grammar/parameterizing_rules/builder/list.rb CHANGED
@@ -2,15 +2,23 @@ module Lrama
   class Grammar
     class ParameterizingRules
       class Builder
+        # Builder for list of general parameterizing rules
         class List < Base
+
+          # program: list(number)
+          #
+          # =>
+          #
+          # program: list_number
+          # list_number: ε
+          # list_number: list_number number
           def build
             validate_argument_number!

             rules = []
-            list_token = Lrama::Lexer::Token::Ident.new(s_value: "list_#{@token.s_value}")
-            rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [list_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: list_token, _rhs: [], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: list_token, _rhs: [list_token, @token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            @build_token = Lrama::Lexer::Token::Ident.new(s_value: "list_#{@token.s_value}")
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [@build_token, @token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
             rules
           end
         end
data/lib/lrama/grammar/parameterizing_rules/builder/nonempty_list.rb CHANGED
@@ -2,15 +2,23 @@ module Lrama
   class Grammar
     class ParameterizingRules
       class Builder
+        # Builder for nonempty list of general parameterizing rules
         class NonemptyList < Base
+
+          # program: nonempty_list(number)
+          #
+          # =>
+          #
+          # program: nonempty_list_number
+          # nonempty_list_number: number
+          # nonempty_list_number: nonempty_list_number number
           def build
             validate_argument_number!

             rules = []
-            nonempty_list_token = Lrama::Lexer::Token::Ident.new(s_value: "nonempty_list_#{@token.s_value}")
-            rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [nonempty_list_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: nonempty_list_token, _rhs: [@token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: nonempty_list_token, _rhs: [nonempty_list_token, @token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            @build_token = Lrama::Lexer::Token::Ident.new(s_value: "nonempty_list_#{@token.s_value}")
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [@token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [@build_token, @token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
             rules
           end
         end
data/lib/lrama/grammar/parameterizing_rules/builder/option.rb CHANGED
@@ -2,15 +2,23 @@ module Lrama
   class Grammar
     class ParameterizingRules
       class Builder
+        # Builder for option of general parameterizing rules
         class Option < Base
+
+          # program: option(number)
+          #
+          # =>
+          #
+          # program: option_number
+          # option_number: ε
+          # option_number: number
           def build
             validate_argument_number!

             rules = []
-            option_token = Lrama::Lexer::Token::Ident.new(s_value: "option_#{@token.s_value}")
-            rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [option_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: option_token, _rhs: [], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: option_token, _rhs: [@token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            @build_token = Lrama::Lexer::Token::Ident.new(s_value: "option_#{@token.s_value}")
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [@token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
             rules
           end
         end
data/lib/lrama/grammar/parameterizing_rules/builder/separated_list.rb CHANGED
@@ -2,23 +2,34 @@ module Lrama
   class Grammar
     class ParameterizingRules
       class Builder
+        # Builder for separated list of general parameterizing rules
         class SeparatedList < Base
-          def initialize(token, rule_counter, lhs, user_code, precedence_sym, line)
+          def initialize(token, rule_counter, lhs_tag, user_code, precedence_sym, line)
             super
             @separator = @args[0]
             @token = @args[1]
             @expected_argument_num = 2
           end

+          # program: separated_list(',', number)
+          #
+          # =>
+          #
+          # program: separated_list_number
+          # separated_list_number: ε
+          # separated_list_number: separated_nonempty_list_number
+          # separated_nonempty_list_number: number
+          # separated_nonempty_list_number: separated_nonempty_list_number ',' number
           def build
             validate_argument_number!

             rules = []
-            separated_list_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_list_#{@token.s_value}")
-            rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [separated_list_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [@token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [separated_list_token, @separator, @token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            @build_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_list_#{@token.s_value}")
+            separated_nonempty_list_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_nonempty_list_#{@token.s_value}")
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [separated_nonempty_list_token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            rules << Rule.new(id: @rule_counter.increment, _lhs: separated_nonempty_list_token, _rhs: [@token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            rules << Rule.new(id: @rule_counter.increment, _lhs: separated_nonempty_list_token, _rhs: [separated_nonempty_list_token, @separator, @token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
             rules
           end
         end
data/lib/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rb CHANGED
@@ -2,22 +2,29 @@ module Lrama
   class Grammar
     class ParameterizingRules
       class Builder
+        # Builder for separated nonempty list of general parameterizing rules
         class SeparatedNonemptyList < Base
-          def initialize(token, rule_counter, lhs, user_code, precedence_sym, line)
+          def initialize(token, rule_counter, lhs_tag, user_code, precedence_sym, line)
             super
             @separator = @args[0]
             @token = @args[1]
             @expected_argument_num = 2
           end

+          # program: separated_nonempty_list(',', number)
+          #
+          # =>
+          #
+          # program: separated_nonempty_list_number
+          # separated_nonempty_list_number: number
+          # separated_nonempty_list_number: separated_nonempty_list_number ',' number
           def build
             validate_argument_number!

             rules = []
-            separated_list_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_nonempty_list_#{@token.s_value}")
-            rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [separated_list_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [@token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
-            rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [separated_list_token, @separator, @token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            @build_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_nonempty_list_#{@token.s_value}")
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [@token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            rules << Rule.new(id: @rule_counter.increment, _lhs: @build_token, _rhs: [@build_token, @separator, @token], lhs_tag: @lhs_tag, token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
             rules
           end
         end
data/lib/lrama/grammar/parameterizing_rules/builder.rb CHANGED
@@ -8,6 +8,7 @@ require 'lrama/grammar/parameterizing_rules/builder/separated_list'
 module Lrama
   class Grammar
     class ParameterizingRules
+      # Builder for parameterizing rules
       class Builder
         RULES = {
           option: Lrama::Grammar::ParameterizingRules::Builder::Option,
@@ -20,23 +21,39 @@ module Lrama
           separated_list: Lrama::Grammar::ParameterizingRules::Builder::SeparatedList,
         }

-        def initialize(token, rule_counter, lhs, user_code, precedence_sym, line)
+        def initialize(token, rule_counter, lhs_tag, user_code, precedence_sym, line)
           @token = token
           @key = token.s_value.to_sym
           @rule_counter = rule_counter
-          @lhs = lhs
+          @lhs_tag = lhs_tag
           @user_code = user_code
           @precedence_sym = precedence_sym
           @line = line
+          @builder = nil
         end

         def build
-          if RULES.key?(@key)
-            RULES[@key].new(@token, @rule_counter, @lhs, @user_code, @precedence_sym, @line).build
-          else
-            raise "Parameterizing rule does not exist. `#{@key}`"
+          create_builder
+          @builder.build
+        end
+
+        def build_token
+          create_builder
+          @builder.build_token
+        end
+
+        private
+
+        def create_builder
+          unless @builder
+            validate_key!
+            @builder = RULES[@key].new(@token, @rule_counter, @lhs_tag, @user_code, @precedence_sym, @line)
          end
        end
+
+        def validate_key!
+          raise "Parameterizing rule does not exist. `#{@key}`" unless RULES.key?(@key)
+        end
      end
    end
  end
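The refactored Builder memoizes one concrete builder and now exposes both the generated rules and the replacement RHS token, which is what RuleBuilder consumes further down. A minimal usage sketch, assuming `require "lrama"` loads these classes and using a stand-in counter plus nil placeholders for the tag, user code, and precedence (all assumptions, not lrama's real call site):

    require "lrama"

    # Stand-in object exposing the #increment interface the builders call;
    # in lrama itself Lrama::Grammar::Counter plays this role.
    rule_counter = Object.new
    def rule_counter.increment
      @n = (@n || 0) + 1
    end

    # A Parameterizing token for list(number), as the lexer would produce it.
    token = Lrama::Lexer::Token::Parameterizing.new(
      s_value: "list",
      args: [Lrama::Lexer::Token::Ident.new(s_value: "number")]
    )

    builder = Lrama::Grammar::ParameterizingRules::Builder.new(token, rule_counter, nil, nil, nil, 1)
    rules = builder.build        # the generated rules: list_number: ε | list_number number
    rhs   = builder.build_token  # Ident token to splice into the original RHS
    puts rhs.s_value             # => "list_number"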
data/lib/lrama/grammar/rule.rb CHANGED
@@ -1,12 +1,13 @@
 module Lrama
   class Grammar
     # _rhs holds original RHS element. Use rhs to refer to Symbol.
-    class Rule < Struct.new(:id, :_lhs, :lhs, :_rhs, :rhs, :token_code, :position_in_original_rule_rhs, :nullable, :precedence_sym, :lineno, keyword_init: true)
+    class Rule < Struct.new(:id, :_lhs, :lhs, :lhs_tag, :_rhs, :rhs, :token_code, :position_in_original_rule_rhs, :nullable, :precedence_sym, :lineno, keyword_init: true)
       attr_accessor :original_rule

       def ==(other)
         self.class == other.class &&
         self.lhs == other.lhs &&
+        self.lhs_tag == other.lhs_tag &&
         self.rhs == other.rhs &&
         self.token_code == other.token_code &&
         self.position_in_original_rule_rhs == other.position_in_original_rule_rhs &&
data/lib/lrama/grammar/rule_builder.rb CHANGED
@@ -3,7 +3,7 @@ require 'lrama/grammar/parameterizing_rules/builder'
 module Lrama
   class Grammar
     class RuleBuilder
-      attr_accessor :lhs, :line
+      attr_accessor :lhs, :lhs_tag, :line
       attr_reader :rhs, :user_code, :precedence_sym

       def initialize(rule_counter, midrule_action_counter, position_in_original_rule_rhs = nil, skip_preprocess_references: false)
@@ -14,6 +14,7 @@ module Lrama

         @lhs = nil
         @rhs = []
+        @lhs_tag = nil
         @user_code = nil
         @precedence_sym = nil
         @line = nil
@@ -81,22 +82,16 @@ module Lrama
       def build_rules
         tokens = @replaced_rhs

-        # Expand Parameterizing rules
-        if tokens.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) }
-          @rules = @parameterizing_rules
-          @midrule_action_rules = []
-        else
-          rule = Rule.new(
-            id: @rule_counter.increment, _lhs: lhs, _rhs: tokens, token_code: user_code,
-            position_in_original_rule_rhs: @position_in_original_rule_rhs, precedence_sym: precedence_sym, lineno: line
-          )
-          @rules = [rule]
-          @midrule_action_rules = @rule_builders_for_derived_rules.map do |rule_builder|
-            rule_builder.rules
-          end.flatten
-          @midrule_action_rules.each do |r|
-            r.original_rule = rule
-          end
+        rule = Rule.new(
+          id: @rule_counter.increment, _lhs: lhs, _rhs: tokens, token_code: user_code,
+          position_in_original_rule_rhs: @position_in_original_rule_rhs, precedence_sym: precedence_sym, lineno: line
+        )
+        @rules = [rule]
+        @midrule_action_rules = @rule_builders_for_derived_rules.map do |rule_builder|
+          rule_builder.rules
+        end.flatten
+        @midrule_action_rules.each do |r|
+          r.original_rule = rule
         end
       end

@@ -115,8 +110,11 @@ module Lrama
         when Lrama::Lexer::Token::Ident
           @replaced_rhs << token
         when Lrama::Lexer::Token::Parameterizing
-          @parameterizing_rules = ParameterizingRules::Builder.new(token, @rule_counter, lhs, user_code, precedence_sym, line).build
-          @replaced_rhs << token
+          parameterizing = ParameterizingRules::Builder.new(token, @rule_counter, @lhs_tag, user_code, precedence_sym, line)
+          parameterizing.build.each do |r|
+            @parameterizing_rules << r
+          end
+          @replaced_rhs << parameterizing.build_token
         when Lrama::Lexer::Token::UserCode
           prefix = token.referred ? "@" : "$@"
           new_token = Lrama::Lexer::Token::Ident.new(s_value: prefix + @midrule_action_counter.increment.to_s)
data/lib/lrama/grammar/symbol.rb CHANGED
@@ -6,10 +6,23 @@

 module Lrama
   class Grammar
-    class Symbol < Struct.new(:id, :alias_name, :number, :tag, :term, :token_id, :nullable, :precedence, :printer, :error_token, keyword_init: true)
-      attr_accessor :first_set, :first_set_bitmap
+    class Symbol
+      attr_accessor :id, :alias_name, :tag, :number, :token_id, :nullable, :precedence, :printer, :error_token, :first_set, :first_set_bitmap
+      attr_reader :term
       attr_writer :eof_symbol, :error_symbol, :undef_symbol, :accept_symbol

+      def initialize(id:, alias_name: nil, number: nil, tag: nil, term:, token_id: nil, nullable: nil, precedence: nil, printer: nil)
+        @id = id
+        @alias_name = alias_name
+        @number = number
+        @tag = tag
+        @term = term
+        @token_id = token_id
+        @nullable = nullable
+        @precedence = precedence
+        @printer = printer
+      end
+
       def term?
         term
       end
@@ -41,6 +54,7 @@ module Lrama
       # name for yysymbol_kind_t
       #
       # See: b4_symbol_kind_base
+      # @type var name: String
       def enum_name
         case
         when accept_symbol?
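Because Symbol is now a plain class, `id:` and `term:` are the only required keywords and everything else defaults to nil; a small construction sketch (values are illustrative):

    sym = Lrama::Grammar::Symbol.new(
      id: Lrama::Lexer::Token::Ident.new(s_value: "number"),
      term: true,
      token_id: 258
    )
    sym.term?      # => true
    sym.nullable   # => nil until the grammar computation fills it in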
data/lib/lrama/grammar/type.rb ADDED
@@ -0,0 +1,6 @@
+module Lrama
+  class Grammar
+    class Type < Struct.new(:id, :tag, keyword_init: true)
+    end
+  end
+end
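The extracted Type is a keyword-initialized Struct pairing a symbol identifier with its tag, so constructing one looks like this (values are illustrative):

    type = Lrama::Grammar::Type.new(
      id: Lrama::Lexer::Token::Ident.new(s_value: "expr"),
      tag: Lrama::Lexer::Token::Tag.new(s_value: "<val>")
    )
    type.tag.s_value # => "<val>"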
data/lib/lrama/grammar.rb CHANGED
@@ -9,9 +9,9 @@ require "lrama/grammar/reference"
 require "lrama/grammar/rule"
 require "lrama/grammar/rule_builder"
 require "lrama/grammar/symbol"
+require "lrama/grammar/type"
 require "lrama/grammar/union"
 require "lrama/lexer"
-require "lrama/type"

 module Lrama
   # Grammar is the result of parsing an input grammar file
@@ -148,7 +148,7 @@ module Lrama
     def prepare
       normalize_rules
       collect_symbols
-      replace_token_with_symbol
+      set_lhs_and_rhs
       fill_symbol_number
       fill_default_precedence
       fill_sym_to_rules
@@ -391,6 +391,11 @@ module Lrama
         @rules << rule
       end

+      builder.parameterizing_rules.each do |rule|
+        add_nterm(id: rule._lhs, tag: rule.lhs_tag)
+        @rules << rule
+      end
+
       builder.midrule_action_rules.each do |rule|
         add_nterm(id: rule._lhs)
       end
@@ -484,7 +489,7 @@ module Lrama
        end
      end

-    def replace_token_with_symbol
+    def set_lhs_and_rhs
       @rules.each do |rule|
         rule.lhs = token_to_symbol(rule._lhs) if rule._lhs

data/lib/lrama/lexer/token/parameterizing.rb CHANGED
@@ -4,7 +4,7 @@ module Lrama
       class Parameterizing < Token
         attr_accessor :args

-        def initialize(s_value: nil, alias_name: nil, location: nil, args: [])
+        def initialize(s_value:, alias_name: nil, location: nil, args: [])
           super s_value: s_value, alias_name: alias_name, location: location
           @args = args
         end
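Dropping the nil default makes `s_value:` a required keyword, so a Parameterizing token can no longer be built without a name; the behavior follows directly from Ruby's required keyword arguments:

    require "lrama"

    begin
      Lrama::Lexer::Token::Parameterizing.new(args: [])
    rescue ArgumentError => e
      puts e.message   # => "missing keyword: :s_value"
    end

    # With the keyword supplied, construction works as before.
    Lrama::Lexer::Token::Parameterizing.new(s_value: "option", args: [])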
data/lib/lrama/lexer/token.rb CHANGED
@@ -1,8 +1,21 @@
+require 'lrama/lexer/token/char'
+require 'lrama/lexer/token/ident'
+require 'lrama/lexer/token/parameterizing'
+require 'lrama/lexer/token/tag'
+require 'lrama/lexer/token/user_code'
+
 module Lrama
   class Lexer
-    class Token < Struct.new(:s_value, :alias_name, :location, keyword_init: true)
-
-      attr_accessor :referred
+    class Token
+      attr_reader :s_value, :location
+      attr_accessor :alias_name, :referred
+
+      def initialize(s_value:, alias_name: nil, location: nil)
+        s_value.freeze
+        @s_value = s_value
+        @alias_name = alias_name
+        @location = location
+      end

       def to_s
         "#{super} location: #{location}"
@@ -36,9 +49,3 @@ module Lrama
     end
   end
 end
-
-require 'lrama/lexer/token/char'
-require 'lrama/lexer/token/ident'
-require 'lrama/lexer/token/parameterizing'
-require 'lrama/lexer/token/tag'
-require 'lrama/lexer/token/user_code'
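With the Struct gone, `s_value` is frozen at construction time and only `alias_name` and `referred` remain writable; a quick sketch of the resulting behavior:

    require "lrama"

    token = Lrama::Lexer::Token::Ident.new(s_value: "number")
    token.s_value.frozen?        # => true, frozen inside Token#initialize
    token.alias_name = "n"       # still allowed via attr_accessor
    token.respond_to?(:s_value=) # => false, no setter is generated any more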
data/lib/lrama/lexer.rb CHANGED
@@ -157,8 +157,7 @@ module Lrama
       while !@scanner.eos? do
         case
         when @scanner.scan(/\n/)
-          @line += 1
-          @head = @scanner.pos + 1
+          newline
         when @scanner.scan(/\*\//)
           return
         else
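The two lines of bookkeeping move behind a `newline` helper; a sketch of what that method presumably does, inferred from the removed lines (its actual definition lives elsewhere in lexer.rb and is not part of this hunk):

    def newline
      @line += 1
      @head = @scanner.pos + 1
    end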