lrama 0.7.0 → 0.7.1

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (260)
  1. checksums.yaml +4 -4
  2. data/.gitattributes +2 -0
  3. data/.github/workflows/codespell.yaml +1 -1
  4. data/.github/workflows/gh-pages.yml +5 -6
  5. data/.github/workflows/test.yaml +25 -14
  6. data/Gemfile +4 -3
  7. data/NEWS.md +370 -35
  8. data/README.md +7 -88
  9. data/Rakefile +3 -2
  10. data/Steepfile +11 -5
  11. data/doc/Index.md +1 -1
  12. data/doc/development/compressed_state_table/parser.rb +2 -0
  13. data/doc/development/profiling.md +44 -0
  14. data/exe/lrama +1 -1
  15. data/lib/lrama/bitmap.rb +18 -5
  16. data/lib/lrama/command.rb +95 -43
  17. data/lib/lrama/context.rb +22 -24
  18. data/lib/lrama/counterexamples/derivation.rb +14 -4
  19. data/lib/lrama/counterexamples/example.rb +47 -22
  20. data/lib/lrama/counterexamples/node.rb +30 -0
  21. data/lib/lrama/counterexamples/path.rb +12 -14
  22. data/lib/lrama/counterexamples/state_item.rb +24 -1
  23. data/lib/lrama/counterexamples/triple.rb +27 -9
  24. data/lib/lrama/counterexamples.rb +216 -88
  25. data/lib/lrama/diagram.rb +77 -0
  26. data/lib/lrama/digraph.rb +28 -7
  27. data/lib/lrama/erb.rb +29 -0
  28. data/lib/lrama/grammar/auxiliary.rb +6 -1
  29. data/lib/lrama/grammar/binding.rb +37 -25
  30. data/lib/lrama/grammar/code/destructor_code.rb +11 -0
  31. data/lib/lrama/grammar/code/initial_action_code.rb +3 -0
  32. data/lib/lrama/grammar/code/no_reference_code.rb +3 -0
  33. data/lib/lrama/grammar/code/printer_code.rb +11 -0
  34. data/lib/lrama/grammar/code/rule_action.rb +17 -0
  35. data/lib/lrama/grammar/code.rb +16 -1
  36. data/lib/lrama/grammar/counter.rb +10 -0
  37. data/lib/lrama/grammar/destructor.rb +14 -1
  38. data/lib/lrama/grammar/error_token.rb +14 -1
  39. data/lib/lrama/grammar/inline/resolver.rb +80 -0
  40. data/lib/lrama/grammar/inline.rb +3 -0
  41. data/lib/lrama/grammar/{parameterizing_rule → parameterized}/resolver.rb +19 -8
  42. data/lib/lrama/grammar/{parameterizing_rule → parameterized}/rhs.rb +7 -2
  43. data/lib/lrama/grammar/parameterized/rule.rb +36 -0
  44. data/lib/lrama/grammar/parameterized.rb +5 -0
  45. data/lib/lrama/grammar/percent_code.rb +12 -1
  46. data/lib/lrama/grammar/precedence.rb +43 -1
  47. data/lib/lrama/grammar/printer.rb +9 -0
  48. data/lib/lrama/grammar/reference.rb +13 -0
  49. data/lib/lrama/grammar/rule.rb +61 -1
  50. data/lib/lrama/grammar/rule_builder.rb +84 -69
  51. data/lib/lrama/grammar/stdlib.y +68 -48
  52. data/lib/lrama/grammar/symbol.rb +63 -19
  53. data/lib/lrama/grammar/symbols/resolver.rb +64 -3
  54. data/lib/lrama/grammar/type.rb +13 -1
  55. data/lib/lrama/grammar/union.rb +12 -1
  56. data/lib/lrama/grammar.rb +231 -35
  57. data/lib/lrama/lexer/location.rb +25 -8
  58. data/lib/lrama/lexer/token/base.rb +73 -0
  59. data/lib/lrama/lexer/token/char.rb +15 -2
  60. data/lib/lrama/lexer/token/empty.rb +14 -0
  61. data/lib/lrama/lexer/token/ident.rb +2 -2
  62. data/lib/lrama/lexer/token/instantiate_rule.rb +4 -4
  63. data/lib/lrama/lexer/token/int.rb +14 -0
  64. data/lib/lrama/lexer/token/str.rb +11 -0
  65. data/lib/lrama/lexer/token/tag.rb +2 -2
  66. data/lib/lrama/lexer/token/token.rb +11 -0
  67. data/lib/lrama/lexer/token/user_code.rb +63 -37
  68. data/lib/lrama/lexer/token.rb +6 -56
  69. data/lib/lrama/lexer.rb +51 -23
  70. data/lib/lrama/logger.rb +12 -2
  71. data/lib/lrama/option_parser.rb +63 -9
  72. data/lib/lrama/options.rb +25 -7
  73. data/lib/lrama/output.rb +4 -11
  74. data/lib/lrama/parser.rb +854 -723
  75. data/lib/lrama/reporter/conflicts.rb +44 -0
  76. data/lib/lrama/reporter/grammar.rb +39 -0
  77. data/lib/lrama/reporter/precedences.rb +54 -0
  78. data/lib/lrama/reporter/profile/call_stack.rb +45 -0
  79. data/lib/lrama/reporter/profile/memory.rb +44 -0
  80. data/lib/lrama/reporter/profile.rb +4 -0
  81. data/lib/lrama/reporter/rules.rb +43 -0
  82. data/lib/lrama/reporter/states.rb +387 -0
  83. data/lib/lrama/reporter/terms.rb +44 -0
  84. data/lib/lrama/reporter.rb +39 -0
  85. data/lib/lrama/state/action/goto.rb +33 -0
  86. data/lib/lrama/state/action/reduce.rb +71 -0
  87. data/lib/lrama/state/action/shift.rb +39 -0
  88. data/lib/lrama/state/action.rb +5 -0
  89. data/lib/lrama/state/inadequacy_annotation.rb +140 -0
  90. data/lib/lrama/{states → state}/item.rb +33 -4
  91. data/lib/lrama/state/reduce_reduce_conflict.rb +14 -1
  92. data/lib/lrama/state/resolved_conflict.rb +38 -4
  93. data/lib/lrama/state/shift_reduce_conflict.rb +14 -1
  94. data/lib/lrama/state.rb +301 -200
  95. data/lib/lrama/states.rb +447 -175
  96. data/lib/lrama/tracer/actions.rb +22 -0
  97. data/lib/lrama/tracer/closure.rb +30 -0
  98. data/lib/lrama/tracer/duration.rb +38 -0
  99. data/lib/lrama/tracer/only_explicit_rules.rb +24 -0
  100. data/lib/lrama/tracer/rules.rb +23 -0
  101. data/lib/lrama/tracer/state.rb +33 -0
  102. data/lib/lrama/tracer.rb +51 -0
  103. data/lib/lrama/version.rb +2 -1
  104. data/lib/lrama/warnings/conflicts.rb +27 -0
  105. data/lib/lrama/warnings/implicit_empty.rb +29 -0
  106. data/lib/lrama/warnings/name_conflicts.rb +63 -0
  107. data/lib/lrama/warnings/redefined_rules.rb +23 -0
  108. data/lib/lrama/warnings/required.rb +23 -0
  109. data/lib/lrama/warnings/useless_precedence.rb +25 -0
  110. data/lib/lrama/warnings.rb +33 -0
  111. data/lib/lrama.rb +5 -5
  112. data/parser.y +495 -404
  113. data/rbs_collection.lock.yaml +27 -3
  114. data/rbs_collection.yaml +2 -0
  115. data/sig/generated/lrama/bitmap.rbs +12 -4
  116. data/sig/generated/lrama/counterexamples/derivation.rbs +36 -0
  117. data/sig/generated/lrama/counterexamples/example.rbs +58 -0
  118. data/sig/generated/lrama/counterexamples/node.rbs +18 -0
  119. data/sig/generated/lrama/counterexamples/path.rbs +23 -0
  120. data/sig/generated/lrama/counterexamples/state_item.rbs +19 -0
  121. data/sig/generated/lrama/counterexamples/triple.rbs +32 -0
  122. data/sig/generated/lrama/counterexamples.rbs +98 -0
  123. data/sig/generated/lrama/diagram.rbs +34 -0
  124. data/sig/generated/lrama/digraph.rbs +26 -6
  125. data/sig/generated/lrama/erb.rbs +14 -0
  126. data/sig/generated/lrama/grammar/auxiliary.rbs +16 -0
  127. data/sig/generated/lrama/grammar/binding.rbs +18 -12
  128. data/sig/generated/lrama/grammar/code/destructor_code.rbs +26 -0
  129. data/sig/{lrama → generated/lrama}/grammar/code/initial_action_code.rbs +6 -0
  130. data/sig/{lrama → generated/lrama}/grammar/code/no_reference_code.rbs +6 -0
  131. data/sig/generated/lrama/grammar/code/printer_code.rbs +26 -0
  132. data/sig/generated/lrama/grammar/code/rule_action.rbs +63 -0
  133. data/sig/generated/lrama/grammar/code.rbs +38 -0
  134. data/sig/{lrama → generated/lrama}/grammar/counter.rbs +4 -0
  135. data/sig/generated/lrama/grammar/destructor.rbs +19 -0
  136. data/sig/generated/lrama/grammar/error_token.rbs +19 -0
  137. data/sig/generated/lrama/grammar/inline/resolver.rbs +26 -0
  138. data/sig/generated/lrama/grammar/parameterized/resolver.rbs +42 -0
  139. data/sig/generated/lrama/grammar/parameterized/rhs.rbs +21 -0
  140. data/sig/generated/lrama/grammar/parameterized/rule.rbs +28 -0
  141. data/sig/{lrama → generated/lrama}/grammar/percent_code.rbs +8 -0
  142. data/sig/generated/lrama/grammar/precedence.rbs +45 -0
  143. data/sig/{lrama/grammar/error_token.rbs → generated/lrama/grammar/printer.rbs} +8 -3
  144. data/sig/generated/lrama/grammar/reference.rbs +31 -0
  145. data/sig/generated/lrama/grammar/rule.rbs +83 -0
  146. data/sig/generated/lrama/grammar/rule_builder.rbs +91 -0
  147. data/sig/generated/lrama/grammar/symbol.rbs +89 -0
  148. data/sig/generated/lrama/grammar/symbols/resolver.rbs +131 -0
  149. data/sig/generated/lrama/grammar/type.rbs +21 -0
  150. data/sig/generated/lrama/grammar/union.rbs +17 -0
  151. data/sig/generated/lrama/grammar.rbs +289 -0
  152. data/sig/generated/lrama/lexer/location.rbs +12 -3
  153. data/sig/generated/lrama/lexer/token/base.rbs +53 -0
  154. data/sig/generated/lrama/lexer/token/char.rbs +9 -2
  155. data/sig/generated/lrama/lexer/token/empty.rbs +11 -0
  156. data/sig/generated/lrama/lexer/token/ident.rbs +2 -2
  157. data/sig/generated/lrama/lexer/token/instantiate_rule.rbs +5 -5
  158. data/sig/generated/lrama/lexer/token/int.rbs +13 -0
  159. data/sig/generated/lrama/lexer/token/str.rbs +10 -0
  160. data/sig/generated/lrama/lexer/token/tag.rbs +2 -2
  161. data/sig/generated/lrama/lexer/token/token.rbs +10 -0
  162. data/sig/generated/lrama/lexer/token/user_code.rbs +2 -2
  163. data/sig/generated/lrama/lexer/token.rbs +1 -39
  164. data/sig/generated/lrama/lexer.rbs +54 -0
  165. data/sig/generated/lrama/logger.rbs +6 -0
  166. data/sig/generated/lrama/option_parser.rbs +52 -0
  167. data/sig/{lrama → generated/lrama}/options.rbs +27 -3
  168. data/sig/generated/lrama/reporter/conflicts.rbs +18 -0
  169. data/sig/generated/lrama/reporter/grammar.rbs +13 -0
  170. data/sig/generated/lrama/reporter/precedences.rbs +15 -0
  171. data/sig/generated/lrama/reporter/profile/call_stack.rbs +19 -0
  172. data/sig/generated/lrama/reporter/profile/memory.rbs +19 -0
  173. data/sig/generated/lrama/reporter/rules.rbs +13 -0
  174. data/sig/generated/lrama/reporter/states.rbs +69 -0
  175. data/sig/generated/lrama/reporter/terms.rbs +13 -0
  176. data/sig/generated/lrama/reporter.rbs +13 -0
  177. data/sig/generated/lrama/state/action/goto.rbs +28 -0
  178. data/sig/generated/lrama/state/action/reduce.rbs +49 -0
  179. data/sig/generated/lrama/state/action/shift.rbs +33 -0
  180. data/sig/generated/lrama/state/inadequacy_annotation.rbs +45 -0
  181. data/sig/generated/lrama/state/item.rbs +75 -0
  182. data/sig/generated/lrama/state/reduce_reduce_conflict.rbs +19 -0
  183. data/sig/generated/lrama/state/resolved_conflict.rbs +38 -0
  184. data/sig/generated/lrama/state/shift_reduce_conflict.rbs +19 -0
  185. data/sig/generated/lrama/state.rbs +231 -0
  186. data/sig/generated/lrama/states.rbs +215 -0
  187. data/sig/generated/lrama/tracer/actions.rbs +13 -0
  188. data/sig/generated/lrama/tracer/closure.rbs +13 -0
  189. data/sig/generated/lrama/tracer/duration.rbs +18 -0
  190. data/sig/generated/lrama/tracer/only_explicit_rules.rbs +13 -0
  191. data/sig/generated/lrama/tracer/rules.rbs +13 -0
  192. data/sig/generated/lrama/tracer/state.rbs +16 -0
  193. data/sig/generated/lrama/tracer.rbs +23 -0
  194. data/sig/generated/lrama/version.rbs +5 -0
  195. data/sig/generated/lrama/warnings/conflicts.rbs +13 -0
  196. data/sig/generated/lrama/warnings/implicit_empty.rbs +17 -0
  197. data/sig/generated/lrama/warnings/name_conflicts.rbs +31 -0
  198. data/sig/generated/lrama/warnings/redefined_rules.rbs +13 -0
  199. data/sig/generated/lrama/warnings/required.rbs +13 -0
  200. data/sig/generated/lrama/warnings/useless_precedence.rbs +13 -0
  201. data/sig/generated/lrama/warnings.rbs +11 -0
  202. data/sig/railroad_diagrams/railroad_diagrams.rbs +16 -0
  203. data/template/bison/_yacc.h +8 -0
  204. data/template/diagram/diagram.html +102 -0
  205. metadata +126 -66
  206. data/lib/lrama/counterexamples/production_path.rb +0 -19
  207. data/lib/lrama/counterexamples/start_path.rb +0 -23
  208. data/lib/lrama/counterexamples/transition_path.rb +0 -19
  209. data/lib/lrama/diagnostics.rb +0 -36
  210. data/lib/lrama/grammar/parameterizing_rule/rule.rb +0 -24
  211. data/lib/lrama/grammar/parameterizing_rule.rb +0 -5
  212. data/lib/lrama/grammar_validator.rb +0 -37
  213. data/lib/lrama/report/duration.rb +0 -27
  214. data/lib/lrama/report/profile.rb +0 -16
  215. data/lib/lrama/report.rb +0 -4
  216. data/lib/lrama/state/reduce.rb +0 -37
  217. data/lib/lrama/state/shift.rb +0 -15
  218. data/lib/lrama/states_reporter.rb +0 -362
  219. data/lib/lrama/trace_reporter.rb +0 -45
  220. data/sig/generated/lrama/trace_reporter.rbs +0 -25
  221. data/sig/lrama/counterexamples/derivation.rbs +0 -33
  222. data/sig/lrama/counterexamples/example.rbs +0 -45
  223. data/sig/lrama/counterexamples/path.rbs +0 -21
  224. data/sig/lrama/counterexamples/production_path.rbs +0 -11
  225. data/sig/lrama/counterexamples/start_path.rbs +0 -13
  226. data/sig/lrama/counterexamples/state_item.rbs +0 -10
  227. data/sig/lrama/counterexamples/transition_path.rbs +0 -11
  228. data/sig/lrama/counterexamples/triple.rbs +0 -20
  229. data/sig/lrama/counterexamples.rbs +0 -29
  230. data/sig/lrama/grammar/auxiliary.rbs +0 -10
  231. data/sig/lrama/grammar/code/destructor_code.rbs +0 -14
  232. data/sig/lrama/grammar/code/printer_code.rbs +0 -14
  233. data/sig/lrama/grammar/code/rule_action.rbs +0 -19
  234. data/sig/lrama/grammar/code.rbs +0 -24
  235. data/sig/lrama/grammar/destructor.rbs +0 -13
  236. data/sig/lrama/grammar/parameterizing_rule/resolver.rbs +0 -24
  237. data/sig/lrama/grammar/parameterizing_rule/rhs.rbs +0 -14
  238. data/sig/lrama/grammar/parameterizing_rule/rule.rbs +0 -16
  239. data/sig/lrama/grammar/parameterizing_rule.rbs +0 -6
  240. data/sig/lrama/grammar/precedence.rbs +0 -13
  241. data/sig/lrama/grammar/printer.rbs +0 -13
  242. data/sig/lrama/grammar/reference.rbs +0 -22
  243. data/sig/lrama/grammar/rule.rbs +0 -45
  244. data/sig/lrama/grammar/rule_builder.rbs +0 -47
  245. data/sig/lrama/grammar/symbol.rbs +0 -38
  246. data/sig/lrama/grammar/symbols/resolver.rbs +0 -60
  247. data/sig/lrama/grammar/type.rbs +0 -11
  248. data/sig/lrama/grammar/union.rbs +0 -12
  249. data/sig/lrama/grammar.rbs +0 -108
  250. data/sig/lrama/report/duration.rbs +0 -11
  251. data/sig/lrama/report/profile.rbs +0 -7
  252. data/sig/lrama/state/reduce.rbs +0 -20
  253. data/sig/lrama/state/reduce_reduce_conflict.rbs +0 -13
  254. data/sig/lrama/state/resolved_conflict.rbs +0 -14
  255. data/sig/lrama/state/shift.rbs +0 -14
  256. data/sig/lrama/state/shift_reduce_conflict.rbs +0 -13
  257. data/sig/lrama/state.rbs +0 -79
  258. data/sig/lrama/states/item.rbs +0 -30
  259. data/sig/lrama/states.rbs +0 -101
  260. data/sig/lrama/warning.rbs +0 -16
data/parser.y CHANGED
@@ -8,411 +8,487 @@ rule
8
8
 
9
9
  input: prologue_declaration* bison_declaration* "%%" rules_or_grammar_declaration+ epilogue_declaration?
10
10
 
11
- prologue_declaration: "%{"
12
- {
13
- begin_c_declaration("%}")
14
- @grammar.prologue_first_lineno = @lexer.line
15
- }
16
- C_DECLARATION
17
- {
18
- end_c_declaration
19
- }
20
- "%}"
21
- {
22
- @grammar.prologue = val[2].s_value
23
- }
24
- | "%require" STRING
25
-
26
- bison_declaration: grammar_declaration
27
- | "%expect" INTEGER { @grammar.expect = val[1] }
28
- | "%define" variable value { @grammar.define[val[1].s_value] = val[2]&.s_value }
29
- | "%param" param+
30
- | "%lex-param" param+
31
- {
32
- val[1].each {|token|
33
- @grammar.lex_param = Grammar::Code::NoReferenceCode.new(type: :lex_param, token_code: token).token_code.s_value
34
- }
35
- }
36
- | "%parse-param" param+
37
- {
38
- val[1].each {|token|
39
- @grammar.parse_param = Grammar::Code::NoReferenceCode.new(type: :parse_param, token_code: token).token_code.s_value
40
- }
41
- }
42
- | "%code" IDENTIFIER param
43
- {
44
- @grammar.add_percent_code(id: val[1], code: val[2])
45
- }
46
- | "%initial-action" param
47
- {
48
- @grammar.initial_action = Grammar::Code::InitialActionCode.new(type: :initial_action, token_code: val[1])
49
- }
50
- | "%no-stdlib" { @grammar.no_stdlib = true }
51
- | "%locations" { @grammar.locations = true }
52
- | bison_declaration ";"
53
-
54
- grammar_declaration: "%union" param
55
- {
56
- @grammar.set_union(
57
- Grammar::Code::NoReferenceCode.new(type: :union, token_code: val[1]),
58
- val[1].line
59
- )
60
- }
61
- | symbol_declaration
62
- | rule_declaration
63
- | inline_declaration
64
- | "%destructor" param (symbol | TAG)+
65
- {
66
- @grammar.add_destructor(
67
- ident_or_tags: val[2].flatten,
68
- token_code: val[1],
69
- lineno: val[1].line
70
- )
71
- }
72
- | "%printer" param (symbol | TAG)+
73
- {
74
- @grammar.add_printer(
75
- ident_or_tags: val[2].flatten,
76
- token_code: val[1],
77
- lineno: val[1].line
78
- )
79
- }
80
- | "%error-token" param (symbol | TAG)+
81
- {
82
- @grammar.add_error_token(
83
- ident_or_tags: val[2].flatten,
84
- token_code: val[1],
85
- lineno: val[1].line
86
- )
87
- }
88
- | "%after-shift" IDENTIFIER
89
- {
90
- @grammar.after_shift = val[1]
91
- }
92
- | "%before-reduce" IDENTIFIER
93
- {
94
- @grammar.before_reduce = val[1]
95
- }
96
- | "%after-reduce" IDENTIFIER
97
- {
98
- @grammar.after_reduce = val[1]
99
- }
100
- | "%after-shift-error-token" IDENTIFIER
101
- {
102
- @grammar.after_shift_error_token = val[1]
103
- }
104
- | "%after-pop-stack" IDENTIFIER
105
- {
106
- @grammar.after_pop_stack = val[1]
107
- }
108
-
109
- symbol_declaration: "%token" token_declarations
110
- | "%type" symbol_declarations
111
- {
112
- val[1].each {|hash|
113
- hash[:tokens].each {|id|
114
- @grammar.add_type(id: id, tag: hash[:tag])
115
- }
116
- }
117
- }
118
- | "%nterm" symbol_declarations
119
- {
120
- val[1].each {|hash|
121
- hash[:tokens].each {|id|
122
- if @grammar.find_term_by_s_value(id.s_value)
123
- on_action_error("symbol #{id.s_value} redeclared as a nonterminal", id)
124
- else
125
- @grammar.add_type(id: id, tag: hash[:tag])
126
- end
127
- }
128
- }
129
- }
130
- | "%left" token_declarations_for_precedence
131
- {
132
- val[1].each {|hash|
133
- hash[:tokens].each {|id|
134
- sym = @grammar.add_term(id: id)
135
- @grammar.add_left(sym, @precedence_number)
136
- }
137
- }
138
- @precedence_number += 1
139
- }
140
- | "%right" token_declarations_for_precedence
141
- {
142
- val[1].each {|hash|
143
- hash[:tokens].each {|id|
144
- sym = @grammar.add_term(id: id)
145
- @grammar.add_right(sym, @precedence_number)
146
- }
147
- }
148
- @precedence_number += 1
149
- }
150
- | "%precedence" token_declarations_for_precedence
151
- {
152
- val[1].each {|hash|
153
- hash[:tokens].each {|id|
154
- sym = @grammar.add_term(id: id)
155
- @grammar.add_precedence(sym, @precedence_number)
156
- }
157
- }
158
- @precedence_number += 1
159
- }
160
- | "%nonassoc" token_declarations_for_precedence
161
- {
162
- val[1].each {|hash|
163
- hash[:tokens].each {|id|
164
- sym = @grammar.add_term(id: id)
165
- @grammar.add_nonassoc(sym, @precedence_number)
166
- }
167
- }
168
- @precedence_number += 1
169
- }
170
-
171
- token_declarations: TAG? token_declaration+
172
- {
173
- val[1].each {|token_declaration|
174
- @grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1], tag: val[0], replace: true)
175
- }
176
- }
177
- | token_declarations TAG token_declaration+
178
- {
179
- val[2].each {|token_declaration|
180
- @grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1], tag: val[1], replace: true)
181
- }
182
- }
183
-
184
- token_declaration: id INTEGER? alias { result = val }
185
-
186
- rule_declaration: "%rule" IDENTIFIER "(" rule_args ")" TAG? ":" rule_rhs_list
187
- {
188
- rule = Grammar::ParameterizingRule::Rule.new(val[1].s_value, val[3], val[7], tag: val[5])
189
- @grammar.add_parameterizing_rule(rule)
190
- }
191
-
192
- inline_declaration: "%rule" "%inline" IDENT_COLON ":" rule_rhs_list
193
- {
194
- rule = Grammar::ParameterizingRule::Rule.new(val[2].s_value, [], val[4], is_inline: true)
195
- @grammar.add_parameterizing_rule(rule)
196
- }
197
- | "%rule" "%inline" IDENTIFIER "(" rule_args ")" ":" rule_rhs_list
198
- {
199
- rule = Grammar::ParameterizingRule::Rule.new(val[2].s_value, val[4], val[7], is_inline: true)
200
- @grammar.add_parameterizing_rule(rule)
201
- }
202
-
203
- rule_args: IDENTIFIER { result = [val[0]] }
204
- | rule_args "," IDENTIFIER { result = val[0].append(val[2]) }
205
-
206
- rule_rhs_list: rule_rhs
207
- {
208
- builder = val[0]
209
- result = [builder]
210
- }
211
- | rule_rhs_list "|" rule_rhs
212
- {
213
- builder = val[2]
214
- result = val[0].append(builder)
215
- }
216
-
217
- rule_rhs: "%empty"?
218
- {
219
- reset_precs
220
- result = Grammar::ParameterizingRule::Rhs.new
11
+ prologue_declaration:
12
+ "%{"
13
+ {
14
+ begin_c_declaration("%}")
15
+ }
16
+ C_DECLARATION
17
+ {
18
+ end_c_declaration
19
+ }
20
+ "%}"
21
+ {
22
+ @grammar.prologue_first_lineno = val[0].first_line
23
+ @grammar.prologue = val[2].s_value
24
+ }
25
+ | "%require" STRING
26
+ {
27
+ @grammar.required = true
28
+ }
29
+
30
+ bison_declaration:
31
+ parser_option ";"*
32
+ | grammar_declaration ";"*
33
+
34
+ parser_option:
35
+ "%expect" INTEGER
36
+ {
37
+ @grammar.expect = val[1].s_value
38
+ }
39
+ | "%define" variable value
40
+ {
41
+ @grammar.define[val[1].s_value] = val[2]&.s_value
42
+ }
43
+ | "%define" variable "{" value "}"
44
+ {
45
+ @grammar.define[val[1].s_value] = val[3]&.s_value
46
+ }
47
+ | "%param" param+
48
+ | "%lex-param" param+
49
+ {
50
+ val[1].each {|token|
51
+ @grammar.lex_param = Grammar::Code::NoReferenceCode.new(type: :lex_param, token_code: token).token_code.s_value
52
+ }
53
+ }
54
+ | "%parse-param" param+
55
+ {
56
+ val[1].each {|token|
57
+ @grammar.parse_param = Grammar::Code::NoReferenceCode.new(type: :parse_param, token_code: token).token_code.s_value
58
+ }
59
+ }
60
+ | "%code" IDENTIFIER param
61
+ {
62
+ @grammar.add_percent_code(id: val[1], code: val[2])
63
+ }
64
+ | "%initial-action" param
65
+ {
66
+ @grammar.initial_action = Grammar::Code::InitialActionCode.new(type: :initial_action, token_code: val[1])
67
+ }
68
+ | "%no-stdlib"
69
+ {
70
+ @grammar.no_stdlib = true
71
+ }
72
+ | "%locations"
73
+ {
74
+ @grammar.locations = true
75
+ }
76
+
77
+ grammar_declaration:
78
+ symbol_declaration
79
+ | rule_declaration
80
+ | inline_declaration
81
+ | "%union" param
82
+ {
83
+ @grammar.set_union(
84
+ Grammar::Code::NoReferenceCode.new(type: :union, token_code: val[1]),
85
+ val[1].line
86
+ )
87
+ }
88
+ | "%destructor" param (symbol | TAG)+
89
+ {
90
+ @grammar.add_destructor(
91
+ ident_or_tags: val[2].flatten,
92
+ token_code: val[1],
93
+ lineno: val[1].line
94
+ )
95
+ }
96
+ | "%printer" param (symbol | TAG)+
97
+ {
98
+ @grammar.add_printer(
99
+ ident_or_tags: val[2].flatten,
100
+ token_code: val[1],
101
+ lineno: val[1].line
102
+ )
103
+ }
104
+ | "%error-token" param (symbol | TAG)+
105
+ {
106
+ @grammar.add_error_token(
107
+ ident_or_tags: val[2].flatten,
108
+ token_code: val[1],
109
+ lineno: val[1].line
110
+ )
111
+ }
112
+ | "%after-shift" IDENTIFIER
113
+ {
114
+ @grammar.after_shift = val[1]
115
+ }
116
+ | "%before-reduce" IDENTIFIER
117
+ {
118
+ @grammar.before_reduce = val[1]
119
+ }
120
+ | "%after-reduce" IDENTIFIER
121
+ {
122
+ @grammar.after_reduce = val[1]
123
+ }
124
+ | "%after-shift-error-token" IDENTIFIER
125
+ {
126
+ @grammar.after_shift_error_token = val[1]
127
+ }
128
+ | "%after-pop-stack" IDENTIFIER
129
+ {
130
+ @grammar.after_pop_stack = val[1]
131
+ }
132
+
133
+ symbol_declaration:
134
+ "%token" token_declarations
135
+ | "%type" symbol_declarations
136
+ {
137
+ val[1].each {|hash|
138
+ hash[:tokens].each {|id|
139
+ @grammar.add_type(id: id, tag: hash[:tag])
221
140
  }
222
- | rule_rhs symbol named_ref?
223
- {
224
- token = val[1]
225
- token.alias_name = val[2]
226
- builder = val[0]
227
- builder.symbols << token
228
- result = builder
141
+ }
142
+ }
143
+ | "%nterm" symbol_declarations
144
+ {
145
+ val[1].each {|hash|
146
+ hash[:tokens].each {|id|
147
+ if @grammar.find_term_by_s_value(id.s_value)
148
+ on_action_error("symbol #{id.s_value} redeclared as a nonterminal", id)
149
+ else
150
+ @grammar.add_type(id: id, tag: hash[:tag])
151
+ end
229
152
  }
230
- | rule_rhs symbol parameterizing_suffix
231
- {
232
- builder = val[0]
233
- builder.symbols << Lrama::Lexer::Token::InstantiateRule.new(s_value: val[2], location: @lexer.location, args: [val[1]])
234
- result = builder
235
- }
236
- | rule_rhs IDENTIFIER "(" parameterizing_args ")" TAG?
237
- {
238
- builder = val[0]
239
- builder.symbols << Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, location: @lexer.location, args: val[3], lhs_tag: val[5])
240
- result = builder
241
- }
242
- | rule_rhs midrule_action named_ref?
243
- {
244
- user_code = val[1]
245
- user_code.alias_name = val[2]
246
- builder = val[0]
247
- builder.user_code = user_code
248
- result = builder
153
+ }
154
+ }
155
+ | "%left" token_declarations_for_precedence
156
+ {
157
+ val[1].each {|hash|
158
+ hash[:tokens].each {|id|
159
+ sym = @grammar.add_term(id: id, tag: hash[:tag])
160
+ @grammar.add_left(sym, @precedence_number, id.s_value, id.first_line)
249
161
  }
250
- | rule_rhs "%prec" symbol
251
- {
252
- sym = @grammar.find_symbol_by_id!(val[2])
253
- @prec_seen = true
254
- builder = val[0]
255
- builder.precedence_sym = sym
256
- result = builder
162
+ }
163
+ @precedence_number += 1
164
+ }
165
+ | "%right" token_declarations_for_precedence
166
+ {
167
+ val[1].each {|hash|
168
+ hash[:tokens].each {|id|
169
+ sym = @grammar.add_term(id: id, tag: hash[:tag])
170
+ @grammar.add_right(sym, @precedence_number, id.s_value, id.first_line)
257
171
  }
172
+ }
173
+ @precedence_number += 1
174
+ }
175
+ | "%precedence" token_declarations_for_precedence
176
+ {
177
+ val[1].each {|hash|
178
+ hash[:tokens].each {|id|
179
+ sym = @grammar.add_term(id: id, tag: hash[:tag])
180
+ @grammar.add_precedence(sym, @precedence_number, id.s_value, id.first_line)
181
+ }
182
+ }
183
+ @precedence_number += 1
184
+ }
185
+ | "%nonassoc" token_declarations_for_precedence
186
+ {
187
+ val[1].each {|hash|
188
+ hash[:tokens].each {|id|
189
+ sym = @grammar.add_term(id: id, tag: hash[:tag])
190
+ @grammar.add_nonassoc(sym, @precedence_number, id.s_value, id.first_line)
191
+ }
192
+ }
193
+ @precedence_number += 1
194
+ }
195
+ | "%start" IDENTIFIER
196
+ {
197
+ @grammar.set_start_nterm(val[1])
198
+ }
199
+
200
+ token_declarations:
201
+ TAG? token_declaration+
202
+ {
203
+ val[1].each {|token_declaration|
204
+ @grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1]&.s_value, tag: val[0], replace: true)
205
+ }
206
+ }
207
+ | token_declarations TAG token_declaration+
208
+ {
209
+ val[2].each {|token_declaration|
210
+ @grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1]&.s_value, tag: val[1], replace: true)
211
+ }
212
+ }
213
+
214
+ token_declaration: id INTEGER? alias { result = val }
215
+
216
+ rule_declaration:
217
+ "%rule" IDENTIFIER "(" rule_args ")" TAG? ":" rule_rhs_list
218
+ {
219
+ rule = Grammar::Parameterized::Rule.new(val[1].s_value, val[3], val[7], tag: val[5])
220
+ @grammar.add_parameterized_rule(rule)
221
+ }
222
+
223
+ inline_declaration:
224
+ "%rule" "%inline" IDENT_COLON ":" rule_rhs_list
225
+ {
226
+ rule = Grammar::Parameterized::Rule.new(val[2].s_value, [], val[4], is_inline: true)
227
+ @grammar.add_parameterized_rule(rule)
228
+ }
229
+ | "%rule" "%inline" IDENTIFIER "(" rule_args ")" ":" rule_rhs_list
230
+ {
231
+ rule = Grammar::Parameterized::Rule.new(val[2].s_value, val[4], val[7], is_inline: true)
232
+ @grammar.add_parameterized_rule(rule)
233
+ }
234
+
235
+ rule_args:
236
+ IDENTIFIER { result = [val[0]] }
237
+ | rule_args "," IDENTIFIER { result = val[0].append(val[2]) }
238
+
239
+ rule_rhs_list:
240
+ rule_rhs
241
+ {
242
+ builder = val[0]
243
+ result = [builder]
244
+ }
245
+ | rule_rhs_list "|" rule_rhs
246
+ {
247
+ builder = val[2]
248
+ result = val[0].append(builder)
249
+ }
250
+
251
+ rule_rhs:
252
+ "%empty"?
253
+ {
254
+ reset_precs
255
+ result = Grammar::Parameterized::Rhs.new
256
+ }
257
+ | rule_rhs symbol named_ref?
258
+ {
259
+ on_action_error("intermediate %prec in a rule", val[1]) if @trailing_prec_seen
260
+ token = val[1]
261
+ token.alias_name = val[2]
262
+ builder = val[0]
263
+ builder.symbols << token
264
+ result = builder
265
+ }
266
+ | rule_rhs symbol parameterized_suffix
267
+ {
268
+ on_action_error("intermediate %prec in a rule", val[1]) if @trailing_prec_seen
269
+ builder = val[0]
270
+ builder.symbols << Lrama::Lexer::Token::InstantiateRule.new(s_value: val[2], location: @lexer.location, args: [val[1]])
271
+ result = builder
272
+ }
273
+ | rule_rhs IDENTIFIER "(" parameterized_args ")" TAG?
274
+ {
275
+ on_action_error("intermediate %prec in a rule", val[1]) if @trailing_prec_seen
276
+ builder = val[0]
277
+ builder.symbols << Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, location: @lexer.location, args: val[3], lhs_tag: val[5])
278
+ result = builder
279
+ }
280
+ | rule_rhs action named_ref?
281
+ {
282
+ user_code = val[1]
283
+ user_code.alias_name = val[2]
284
+ builder = val[0]
285
+ builder.user_code = user_code
286
+ result = builder
287
+ }
288
+ | rule_rhs "%prec" symbol
289
+ {
290
+ on_action_error("multiple %prec in a rule", val[0]) if prec_seen?
291
+ sym = @grammar.find_symbol_by_id!(val[2])
292
+ if val[0].rhs.empty?
293
+ @opening_prec_seen = true
294
+ else
295
+ @trailing_prec_seen = true
296
+ end
297
+ builder = val[0]
298
+ builder.precedence_sym = sym
299
+ result = builder
300
+ }
258
301
 
259
302
  alias: string_as_id? { result = val[0].s_value if val[0] }
260
303
 
261
- symbol_declarations: TAG? symbol+
262
- {
263
- result = if val[0]
264
- [{tag: val[0], tokens: val[1]}]
265
- else
266
- [{tag: nil, tokens: val[1]}]
267
- end
268
- }
269
- | symbol_declarations TAG symbol+ { result = val[0].append({tag: val[1], tokens: val[2]}) }
270
-
271
- symbol: id
272
- | string_as_id
273
-
274
- param: "{" {
275
- begin_c_declaration("}")
276
- }
277
- C_DECLARATION
278
- {
279
- end_c_declaration
280
- }
281
- "}"
282
- {
283
- result = val[2]
284
- }
285
-
286
- token_declarations_for_precedence: id+ { result = [{tag: nil, tokens: val[0]}] }
287
- | TAG id+ { result = [{tag: val[0], tokens: val[1]}] }
288
- | id TAG id+ { result = val[0].append({tag: val[1], tokens: val[2]}) }
289
-
290
- id: IDENTIFIER { on_action_error("ident after %prec", val[0]) if @prec_seen }
291
- | CHARACTER { on_action_error("char after %prec", val[0]) if @prec_seen }
292
-
293
-
294
- rules_or_grammar_declaration: rules ";"?
295
- | grammar_declaration ";"
296
-
297
- rules: IDENT_COLON named_ref? ":" rhs_list
298
- {
299
- lhs = val[0]
300
- lhs.alias_name = val[1]
301
- val[3].each do |builder|
302
- builder.lhs = lhs
303
- builder.complete_input
304
- @grammar.add_rule_builder(builder)
305
- end
306
- }
307
-
308
- rhs_list: rhs
309
- {
310
- builder = val[0]
311
- if !builder.line
312
- builder.line = @lexer.line - 1
313
- end
314
- result = [builder]
315
- }
316
- | rhs_list "|" rhs
317
- {
318
- builder = val[2]
319
- if !builder.line
320
- builder.line = @lexer.line - 1
321
- end
322
- result = val[0].append(builder)
323
- }
324
-
325
- rhs: "%empty"?
326
- {
327
- reset_precs
328
- result = @grammar.create_rule_builder(@rule_counter, @midrule_action_counter)
329
- }
330
- | rhs symbol named_ref?
331
- {
332
- token = val[1]
333
- token.alias_name = val[2]
334
- builder = val[0]
335
- builder.add_rhs(token)
336
- result = builder
337
- }
338
- | rhs symbol parameterizing_suffix named_ref? TAG?
339
- {
340
- token = Lrama::Lexer::Token::InstantiateRule.new(s_value: val[2], alias_name: val[3], location: @lexer.location, args: [val[1]], lhs_tag: val[4])
341
- builder = val[0]
342
- builder.add_rhs(token)
343
- builder.line = val[1].first_line
344
- result = builder
345
- }
346
- | rhs IDENTIFIER "(" parameterizing_args ")" named_ref? TAG?
347
- {
348
- token = Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, alias_name: val[5], location: @lexer.location, args: val[3], lhs_tag: val[6])
349
- builder = val[0]
350
- builder.add_rhs(token)
351
- builder.line = val[1].first_line
352
- result = builder
353
- }
354
- | rhs midrule_action named_ref? TAG?
355
- {
356
- user_code = val[1]
357
- user_code.alias_name = val[2]
358
- user_code.tag = val[3]
359
- builder = val[0]
360
- builder.user_code = user_code
361
- result = builder
362
- }
363
- | rhs "%prec" symbol
364
- {
365
- sym = @grammar.find_symbol_by_id!(val[2])
366
- @prec_seen = true
367
- builder = val[0]
368
- builder.precedence_sym = sym
369
- result = builder
370
- }
371
-
372
- parameterizing_suffix: "?" { result = "option" }
373
- | "+" { result = "nonempty_list" }
374
- | "*" { result = "list" }
375
-
376
- parameterizing_args: symbol parameterizing_suffix?
377
- {
378
- result = if val[1]
379
- [Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, location: @lexer.location, args: val[0])]
380
- else
381
- [val[0]]
382
- end
383
- }
384
- | parameterizing_args ',' symbol { result = val[0].append(val[2]) }
385
- | IDENTIFIER "(" parameterizing_args ")" { result = [Lrama::Lexer::Token::InstantiateRule.new(s_value: val[0].s_value, location: @lexer.location, args: val[2])] }
386
-
387
- midrule_action: "{"
388
- {
389
- if @prec_seen
390
- on_action_error("multiple User_code after %prec", val[0]) if @code_after_prec
391
- @code_after_prec = true
392
- end
393
- begin_c_declaration("}")
394
- }
395
- C_DECLARATION
396
- {
397
- end_c_declaration
398
- }
399
- "}"
400
- {
401
- result = val[2]
402
- }
304
+ symbol_declarations:
305
+ TAG? symbol+
306
+ {
307
+ result = if val[0]
308
+ [{tag: val[0], tokens: val[1]}]
309
+ else
310
+ [{tag: nil, tokens: val[1]}]
311
+ end
312
+ }
313
+ | symbol_declarations TAG symbol+ { result = val[0].append({tag: val[1], tokens: val[2]}) }
314
+
315
+ symbol:
316
+ id
317
+ | string_as_id
318
+
319
+ param:
320
+ "{"
321
+ {
322
+ begin_c_declaration("}")
323
+ }
324
+ C_DECLARATION
325
+ {
326
+ end_c_declaration
327
+ }
328
+ "}"
329
+ {
330
+ result = val[2]
331
+ }
332
+
333
+ token_declarations_for_precedence:
334
+ id+ { result = [{tag: nil, tokens: val[0]}] }
335
+ | (TAG id+)+ { result = val[0].map {|tag, ids| {tag: tag, tokens: ids} } }
336
+ | id+ TAG id+ { result = [{tag: nil, tokens: val[0]}, {tag: val[1], tokens: val[2]}] }
337
+
338
+ id:
339
+ IDENTIFIER
340
+ | CHARACTER
341
+
342
+ rules_or_grammar_declaration:
343
+ rules ";"*
344
+ | grammar_declaration ";"+
345
+
346
+ rules:
347
+ IDENT_COLON named_ref? ":" rhs_list
348
+ {
349
+ lhs = val[0]
350
+ lhs.alias_name = val[1]
351
+ val[3].each do |builder|
352
+ builder.lhs = lhs
353
+ builder.complete_input
354
+ @grammar.add_rule_builder(builder)
355
+ end
356
+ }
357
+
358
+ rhs_list:
359
+ rhs
360
+ {
361
+ if val[0].rhs.count > 1
362
+ empties = val[0].rhs.select { |sym| sym.is_a?(Lrama::Lexer::Token::Empty) }
363
+ empties.each do |empty|
364
+ on_action_error("%empty on non-empty rule", empty)
365
+ end
366
+ end
367
+ builder = val[0]
368
+ if !builder.line
369
+ builder.line = @lexer.line - 1
370
+ end
371
+ result = [builder]
372
+ }
373
+ | rhs_list "|" rhs
374
+ {
375
+ builder = val[2]
376
+ if !builder.line
377
+ builder.line = @lexer.line - 1
378
+ end
379
+ result = val[0].append(builder)
380
+ }
381
+
382
+ rhs:
383
+ /* empty */
384
+ {
385
+ reset_precs
386
+ result = @grammar.create_rule_builder(@rule_counter, @midrule_action_counter)
387
+ }
388
+ | rhs "%empty"
389
+ {
390
+ builder = val[0]
391
+ builder.add_rhs(Lrama::Lexer::Token::Empty.new(location: @lexer.location))
392
+ result = builder
393
+ }
394
+ | rhs symbol named_ref?
395
+ {
396
+ on_action_error("intermediate %prec in a rule", val[1]) if @trailing_prec_seen
397
+ token = val[1]
398
+ token.alias_name = val[2]
399
+ builder = val[0]
400
+ builder.add_rhs(token)
401
+ result = builder
402
+ }
403
+ | rhs symbol parameterized_suffix named_ref? TAG?
404
+ {
405
+ on_action_error("intermediate %prec in a rule", val[1]) if @trailing_prec_seen
406
+ token = Lrama::Lexer::Token::InstantiateRule.new(s_value: val[2], alias_name: val[3], location: @lexer.location, args: [val[1]], lhs_tag: val[4])
407
+ builder = val[0]
408
+ builder.add_rhs(token)
409
+ builder.line = val[1].first_line
410
+ result = builder
411
+ }
412
+ | rhs IDENTIFIER "(" parameterized_args ")" named_ref? TAG?
413
+ {
414
+ on_action_error("intermediate %prec in a rule", val[1]) if @trailing_prec_seen
415
+ token = Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, alias_name: val[5], location: @lexer.location, args: val[3], lhs_tag: val[6])
416
+ builder = val[0]
417
+ builder.add_rhs(token)
418
+ builder.line = val[1].first_line
419
+ result = builder
420
+ }
421
+ | rhs action named_ref? TAG?
422
+ {
423
+ user_code = val[1]
424
+ user_code.alias_name = val[2]
425
+ user_code.tag = val[3]
426
+ builder = val[0]
427
+ builder.user_code = user_code
428
+ result = builder
429
+ }
430
+ | rhs "%prec" symbol
431
+ {
432
+ on_action_error("multiple %prec in a rule", val[0]) if prec_seen?
433
+ sym = @grammar.find_symbol_by_id!(val[2])
434
+ if val[0].rhs.empty?
435
+ @opening_prec_seen = true
436
+ else
437
+ @trailing_prec_seen = true
438
+ end
439
+ builder = val[0]
440
+ builder.precedence_sym = sym
441
+ result = builder
442
+ }
443
+
444
+ parameterized_suffix:
445
+ "?" { result = "option" }
446
+ | "+" { result = "nonempty_list" }
447
+ | "*" { result = "list" }
448
+
449
+ parameterized_args:
450
+ symbol parameterized_suffix?
451
+ {
452
+ result = if val[1]
453
+ [Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, location: @lexer.location, args: val[0])]
454
+ else
455
+ [val[0]]
456
+ end
457
+ }
458
+ | parameterized_args ',' symbol { result = val[0].append(val[2]) }
459
+ | IDENTIFIER "(" parameterized_args ")" { result = [Lrama::Lexer::Token::InstantiateRule.new(s_value: val[0].s_value, location: @lexer.location, args: val[2])] }
460
+
461
+ action:
462
+ "{"
463
+ {
464
+ if prec_seen?
465
+ on_action_error("multiple User_code after %prec", val[0]) if @code_after_prec
466
+ @code_after_prec = true
467
+ end
468
+ begin_c_declaration("}")
469
+ }
470
+ C_DECLARATION
471
+ {
472
+ end_c_declaration
473
+ }
474
+ "}"
475
+ {
476
+ result = val[2]
477
+ }
403
478
 
404
479
  named_ref: '[' IDENTIFIER ']' { result = val[1].s_value }
405
480
 
406
- epilogue_declaration: "%%"
407
- {
408
- begin_c_declaration('\Z')
409
- @grammar.epilogue_first_lineno = @lexer.line + 1
410
- }
411
- C_DECLARATION
412
- {
413
- end_c_declaration
414
- @grammar.epilogue = val[2].s_value
415
- }
481
+ epilogue_declaration:
482
+ "%%"
483
+ {
484
+ begin_c_declaration('\Z')
485
+ }
486
+ C_DECLARATION
487
+ {
488
+ end_c_declaration
489
+ @grammar.epilogue_first_lineno = val[0].first_line + 1
490
+ @grammar.epilogue = val[2].s_value
491
+ }
416
492
 
417
493
  variable: id
418
494
 
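The rewritten rhs production above treats %empty as an explicit Lrama::Lexer::Token::Empty entry, and the new rhs_list check reports "%empty on non-empty rule" whenever that token shares an alternative with other symbols. A minimal sketch of the two cases, written as Ruby heredocs purely for illustration (the rule and token names are made up, not taken from this release):

    # Accepted: %empty is the only entry on its alternative.
    ok = <<~GRAMMAR
      opt_semicolon: %empty
                   | ';'
                   ;
    GRAMMAR

    # Rejected: %empty shares an alternative with ';', so the rhs_list action
    # above reports "%empty on non-empty rule" for the Empty token.
    bad = <<~GRAMMAR
      opt_semicolon: %empty ';'
                   ;
    GRAMMAR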
@@ -421,25 +497,28 @@ rule
  | STRING
  | "{...}"

- string_as_id: STRING { result = Lrama::Lexer::Token::Ident.new(s_value: val[0]) }
+ string_as_id: STRING { result = Lrama::Lexer::Token::Ident.new(s_value: val[0].s_value) }
  end

  ---- inner

- include Lrama::Report::Duration
+ include Lrama::Tracer::Duration

- def initialize(text, path, debug = false, define = {})
+ def initialize(text, path, debug = false, locations = false, define = {})
+ @path = path
  @grammar_file = Lrama::Lexer::GrammarFile.new(path, text)
- @yydebug = debug
+ @yydebug = debug || define.key?('parse.trace')
  @rule_counter = Lrama::Grammar::Counter.new(0)
  @midrule_action_counter = Lrama::Grammar::Counter.new(1)
+ @locations = locations
  @define = define
  end

  def parse
- report_duration(:parse) do
+ message = "parse '#{File.basename(@path)}'"
+ report_duration(message) do
  @lexer = Lrama::Lexer.new(@grammar_file)
- @grammar = Lrama::Grammar.new(@rule_counter, @define)
+ @grammar = Lrama::Grammar.new(@rule_counter, @locations, @define)
  @precedence_number = 0
  reset_precs
  do_parse
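The widened initializer above now takes a locations flag before the define hash, records @path for the per-file duration message, and turns on @yydebug when the define hash contains parse.trace. A hedged usage sketch of that signature (the file name is a placeholder, and whether driving the parser directly like this is supported public API is not established by this diff):

    require "lrama"

    text = File.read("example.y")   # placeholder grammar file
    parser = Lrama::Parser.new(
      text,                         # grammar source
      "example.y",                  # path; used in the "parse 'example.y'" duration message
      false,                        # debug (@yydebug is also enabled when define has "parse.trace")
      true,                         # locations, the positional argument added in this diff
      {}                            # define hash
    )
    grammar = parser.parse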
@@ -452,7 +531,14 @@ def next_token
  end

  def on_error(error_token_id, error_value, value_stack)
- if error_value.is_a?(Lrama::Lexer::Token)
+ case error_value
+ when Lrama::Lexer::Token::Int
+ location = error_value.location
+ value = "#{error_value.s_value}"
+ when Lrama::Lexer::Token::Token
+ location = error_value.location
+ value = "\"#{error_value.s_value}\""
+ when Lrama::Lexer::Token::Base
  location = error_value.location
  value = "'#{error_value.s_value}'"
  else
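The case expression above chooses a quoting style per token class when reporting a syntax error in the grammar file: Token::Int values are printed bare, Token::Token values in double quotes, and other Token::Base values in single quotes. A standalone sketch of the same dispatch with a hypothetical helper name (the else branch is a placeholder; the real method's fallback is not shown in this hunk):

    def format_error_value(error_value)
      case error_value
      when Lrama::Lexer::Token::Int   then error_value.s_value.to_s      # printed bare
      when Lrama::Lexer::Token::Token then "\"#{error_value.s_value}\""  # double-quoted
      when Lrama::Lexer::Token::Base  then "'#{error_value.s_value}'"    # single-quoted
      else error_value.inspect                                           # placeholder fallback
      end
    end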
@@ -466,7 +552,7 @@ def on_error(error_token_id, error_value, value_stack)
  end

  def on_action_error(error_message, error_value)
- if error_value.is_a?(Lrama::Lexer::Token)
+ if error_value.is_a?(Lrama::Lexer::Token::Base)
  location = error_value.location
  else
  location = @lexer.location
@@ -478,10 +564,15 @@ end
  private

  def reset_precs
- @prec_seen = false
+ @opening_prec_seen = false
+ @trailing_prec_seen = false
  @code_after_prec = false
  end

+ def prec_seen?
+ @opening_prec_seen || @trailing_prec_seen
+ end
+
  def begin_c_declaration(end_symbol)
  @lexer.status = :c_declaration
  @lexer.end_symbol = end_symbol
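reset_precs now tracks where %prec appeared on a right-hand side: @opening_prec_seen for a %prec written before any symbol and @trailing_prec_seen for one written after, with prec_seen? covering both so the rhs actions earlier in this diff can reject a second %prec ("multiple %prec in a rule") or a symbol that follows a trailing one ("intermediate %prec in a rule"). A hedged illustration of the three placements, again as Ruby heredocs with made-up rule and token names:

    # Accepted: a single trailing %prec.
    ok = <<~GRAMMAR
      expr: '-' expr %prec UMINUS
          ;
    GRAMMAR

    # Rejected with "multiple %prec in a rule": prec_seen? is already true
    # when the second %prec is reduced.
    two_precs = <<~GRAMMAR
      expr: '-' expr %prec UMINUS %prec UMINUS
          ;
    GRAMMAR

    # Rejected with "intermediate %prec in a rule": `expr` follows a %prec
    # that already set @trailing_prec_seen.
    intermediate = <<~GRAMMAR
      expr: '-' %prec UMINUS expr
          ;
    GRAMMAR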