lrama 0.5.9 → 0.5.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yaml +5 -0
  3. data/.gitignore +7 -4
  4. data/Gemfile +9 -5
  5. data/Rakefile +13 -0
  6. data/Steepfile +9 -3
  7. data/lib/lrama/context.rb +1 -3
  8. data/lib/lrama/counterexamples/path.rb +0 -46
  9. data/lib/lrama/counterexamples/production_path.rb +17 -0
  10. data/lib/lrama/counterexamples/start_path.rb +21 -0
  11. data/lib/lrama/counterexamples/transition_path.rb +17 -0
  12. data/lib/lrama/counterexamples.rb +3 -0
  13. data/lib/lrama/grammar/code/initial_action_code.rb +28 -0
  14. data/lib/lrama/grammar/code/no_reference_code.rb +24 -0
  15. data/lib/lrama/grammar/code/printer_code.rb +34 -0
  16. data/lib/lrama/grammar/code/rule_action.rb +62 -0
  17. data/lib/lrama/grammar/code.rb +9 -93
  18. data/lib/lrama/grammar/counter.rb +15 -0
  19. data/lib/lrama/grammar/error_token.rb +3 -3
  20. data/lib/lrama/grammar/parameterizing_rules/builder/base.rb +28 -0
  21. data/lib/lrama/grammar/parameterizing_rules/builder/list.rb +20 -0
  22. data/lib/lrama/grammar/parameterizing_rules/builder/nonempty_list.rb +20 -0
  23. data/lib/lrama/grammar/parameterizing_rules/builder/option.rb +20 -0
  24. data/lib/lrama/grammar/parameterizing_rules/builder/separated_list.rb +28 -0
  25. data/lib/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rb +27 -0
  26. data/lib/lrama/grammar/parameterizing_rules/builder.rb +43 -0
  27. data/lib/lrama/grammar/printer.rb +3 -3
  28. data/lib/lrama/grammar/reference.rb +7 -16
  29. data/lib/lrama/grammar/rule.rb +18 -2
  30. data/lib/lrama/grammar/rule_builder.rb +179 -0
  31. data/lib/lrama/grammar.rb +109 -324
  32. data/lib/lrama/lexer/location.rb +22 -0
  33. data/lib/lrama/lexer/token/parameterizing.rb +18 -3
  34. data/lib/lrama/lexer/token/tag.rb +4 -0
  35. data/lib/lrama/lexer/token/user_code.rb +54 -4
  36. data/lib/lrama/lexer/token.rb +22 -4
  37. data/lib/lrama/lexer.rb +31 -29
  38. data/lib/lrama/options.rb +1 -2
  39. data/lib/lrama/output.rb +2 -2
  40. data/lib/lrama/parser.rb +420 -343
  41. data/lib/lrama/report/profile.rb +1 -12
  42. data/lib/lrama/version.rb +1 -1
  43. data/parser.y +106 -49
  44. data/rbs_collection.lock.yaml +5 -1
  45. data/rbs_collection.yaml +1 -0
  46. data/sig/lrama/grammar/code/printer_code.rbs +15 -0
  47. data/sig/lrama/grammar/code.rbs +24 -0
  48. data/sig/lrama/grammar/counter.rbs +11 -0
  49. data/sig/lrama/grammar/parameterizing_rules/builder.rbs +10 -0
  50. data/sig/lrama/grammar/precedence.rbs +11 -0
  51. data/sig/lrama/grammar/printer.rbs +11 -0
  52. data/sig/lrama/grammar/reference.rbs +5 -5
  53. data/sig/lrama/grammar/rule.rbs +13 -0
  54. data/sig/lrama/grammar/rule_builder.rbs +41 -0
  55. data/sig/lrama/lexer/location.rbs +14 -0
  56. data/sig/lrama/lexer/token/parameterizing.rbs +7 -0
  57. data/sig/lrama/lexer/token/tag.rbs +1 -0
  58. data/sig/lrama/lexer/token/user_code.rbs +8 -1
  59. data/sig/lrama/lexer/token.rbs +8 -3
  60. data/sig/stdlib/strscan/string_scanner.rbs +5 -0
  61. data/template/bison/yacc.c +5 -0
  62. metadata +29 -2
data/lib/lrama/report/profile.rb CHANGED
@@ -1,18 +1,7 @@
 module Lrama
   class Report
     module Profile
-      # 1. Wrap target method with Profile.report_profile like below:
-      #
-      #   Lrama::Report::Profile.report_profile { method }
-      #
-      # 2. Run lrama command, for example
-      #
-      #   $ ./exe/lrama --trace=time spec/fixtures/integration/ruby_3_2_0/parse.tmp.y
-      #
-      # 3. Generate html file
-      #
-      #   $ stackprof --d3-flamegraph tmp/stackprof-cpu-myapp.dump > tmp/flamegraph.html
-      #
+      # See "Profiling Lrama" in README.md for how to use.
       def self.report_profile
         require "stackprof"

data/lib/lrama/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Lrama
-  VERSION = "0.5.9".freeze
+  VERSION = "0.5.10".freeze
 end
data/parser.y CHANGED
@@ -1,5 +1,5 @@
 class Lrama::Parser
-  expect 7
+  expect 1

   token C_DECLARATION CHARACTER IDENT_COLON IDENTIFIER INTEGER STRING TAG

@@ -36,15 +36,13 @@ rule
     | "%lex-param" params
         {
           val[1].each {|token|
-            token.references = []
-            @grammar.lex_param = @grammar.build_code(:lex_param, token).token_code.s_value
+            @grammar.lex_param = Grammar::Code::NoReferenceCode.new(type: :lex_param, token_code: token).token_code.s_value
           }
         }
     | "%parse-param" params
         {
           val[1].each {|token|
-            token.references = []
-            @grammar.parse_param = @grammar.build_code(:parse_param, token).token_code.s_value
+            @grammar.parse_param = Grammar::Code::NoReferenceCode.new(type: :parse_param, token_code: token).token_code.s_value
           }
         }
     | "%code" IDENTIFIER "{"
@@ -69,7 +67,7 @@ rule
         }
       "}"
         {
-          @grammar.initial_action = @grammar.build_code(:initial_action, val[3])
+          @grammar.initial_action = Grammar::Code::InitialActionCode.new(type: :initial_action, token_code: val[3])
         }
     | ";"

@@ -83,7 +81,10 @@ rule
         }
       "}"
         {
-          @grammar.set_union(@grammar.build_code(:union, val[3]), val[3].line)
+          @grammar.set_union(
+            Grammar::Code::NoReferenceCode.new(type: :union, token_code: val[3]),
+            val[3].line
+          )
         }
     | symbol_declaration
     | "%destructor" "{"
@@ -105,7 +106,11 @@ rule
         }
       "}" generic_symlist
         {
-          @grammar.add_printer(ident_or_tags: val[6], code: @grammar.build_code(:printer, val[3]), lineno: val[3].line)
+          @grammar.add_printer(
+            ident_or_tags: val[6],
+            token_code: val[3],
+            lineno: val[3].line
+          )
         }
     | "%error-token" "{"
         {
@@ -117,7 +122,11 @@ rule
         }
       "}" generic_symlist
         {
-          @grammar.add_error_token(ident_or_tags: val[6], code: @grammar.build_code(:error_token, val[3]), lineno: val[3].line)
+          @grammar.add_error_token(
+            ident_or_tags: val[6],
+            token_code: val[3],
+            lineno: val[3].line
+          )
         }

   symbol_declaration: "%token" token_declarations
@@ -252,9 +261,9 @@ rule
         {
           result = [{tag: val[0], tokens: val[1]}]
         }
-    | token_declarations_for_precedence token_declaration_list_for_precedence
+    | token_declarations_for_precedence TAG token_declaration_list_for_precedence
         {
-          result = val[0].append({tag: nil, tokens: val[1]})
+          result = val[0].append({tag: val[1], tokens: val[2]})
         }

   token_declaration_list_for_precedence: token_declaration_for_precedence { result = [val[0]] }
@@ -262,8 +271,8 @@ rule

   token_declaration_for_precedence: id

-  id: IDENTIFIER { raise "Ident after %prec" if @prec_seen }
-    | CHARACTER { raise "Char after %prec" if @prec_seen }
+  id: IDENTIFIER { on_action_error("ident after %prec", val[0]) if @prec_seen }
+    | CHARACTER { on_action_error("char after %prec", val[0]) if @prec_seen }

   grammar: rules_or_grammar_declaration
     | grammar rules_or_grammar_declaration
@@ -275,56 +284,74 @@ rule
        {
          lhs = val[0]
          lhs.alias_name = val[1]
-         val[3].each {|hash|
-           @grammar.add_rule(lhs: lhs, rhs: hash[:rhs], lineno: hash[:lineno])
-         }
+         val[3].each do |builder|
+           builder.lhs = lhs
+           builder.complete_input
+           @grammar.add_rule_builder(builder)
+         end
        }

   rhs_list: rhs
        {
-         result = [{rhs: val[0], lineno: val[0].first&.line || @lexer.line - 1}]
+         builder = val[0]
+         if !builder.line
+           builder.line = @lexer.line - 1
+         end
+         result = [builder]
        }
     | rhs_list "|" rhs
        {
-         result = val[0].append({rhs: val[2], lineno: val[2].first&.line || @lexer.line - 1})
+         builder = val[2]
+         if !builder.line
+           builder.line = @lexer.line - 1
+         end
+         result = val[0].append(builder)
        }
     | rhs_list ";"

   rhs: /* empty */
        {
          reset_precs
-         result = []
+         result = Grammar::RuleBuilder.new(@rule_counter, @midrule_action_counter)
        }
     | "%empty"
        {
          reset_precs
-         result = []
+         result = Grammar::RuleBuilder.new(@rule_counter, @midrule_action_counter)
        }
     | rhs symbol named_ref_opt
        {
          token = val[1]
          token.alias_name = val[2]
-         result = val[0].append(token)
+         builder = val[0]
+         builder.add_rhs(token)
+         result = builder
        }
-    | rhs "?"
-       {
-         token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1])
-         result = val[0].append(token)
-       }
-    | rhs "+"
-       {
-         token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1])
-         result = val[0].append(token)
-       }
-    | rhs "*"
-       {
-         token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1])
-         result = val[0].append(token)
-       }
+    | rhs IDENTIFIER parameterizing_suffix
+       {
+         token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[2], location: @lexer.location, args: [val[1]])
+         builder = val[0]
+         builder.add_rhs(token)
+         result = builder
+       }
+    | rhs IDENTIFIER "(" symbol ")"
+       {
+         token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1].s_value, location: @lexer.location, args: [val[3]])
+         builder = val[0]
+         builder.add_rhs(token)
+         result = builder
+       }
+    | rhs IDENTIFIER "(" symbol "," symbol ")"
+       {
+         token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1].s_value, location: @lexer.location, args: [val[3], val[5]])
+         builder = val[0]
+         builder.add_rhs(token)
+         result = builder
+       }
     | rhs "{"
        {
          if @prec_seen
-           raise "Multiple User_code after %prec" if @code_after_prec
+           on_action_error("multiple User_code after %prec", val[0]) if @code_after_prec
            @code_after_prec = true
          end
          begin_c_declaration("}")
@@ -337,15 +364,23 @@ rule
        {
          token = val[3]
          token.alias_name = val[6]
-         result = val[0].append(token)
+         builder = val[0]
+         builder.user_code = token
+         result = builder
        }
     | rhs "%prec" symbol
        {
          sym = @grammar.find_symbol_by_id!(val[2])
-         result = val[0].append(sym)
          @prec_seen = true
+         builder = val[0]
+         builder.precedence_sym = sym
+         result = builder
        }

+  parameterizing_suffix: "?"
+    | "+"
+    | "*"
+
   named_ref_opt: # empty
     | '[' IDENTIFIER ']' { result = val[1].s_value }

@@ -387,18 +422,18 @@ def initialize(text, path, debug = false)
   @text = text
   @path = path
   @yydebug = debug
+  @rule_counter = Lrama::Grammar::Counter.new(0)
+  @midrule_action_counter = Lrama::Grammar::Counter.new(1)
 end

 def parse
   report_duration(:parse) do
     @lexer = Lrama::Lexer.new(@text)
-    @grammar = Lrama::Grammar.new
+    @grammar = Lrama::Grammar.new(@rule_counter)
     @precedence_number = 0
     reset_precs
     do_parse
     @grammar.prepare
-    @grammar.compute_nullable
-    @grammar.compute_first_set
     @grammar.validate!
     @grammar
   end
@@ -409,18 +444,40 @@ def next_token
 end

 def on_error(error_token_id, error_value, value_stack)
-  if error_value.respond_to?(:line) && error_value.respond_to?(:column)
-    line = error_value.line
-    first_column = error_value.column
+  if error_value.is_a?(Lrama::Lexer::Token)
+    line = error_value.first_line
+    first_column = error_value.first_column
+    last_column = error_value.last_column
+    value = "'#{error_value.s_value}'"
+  else
+    line = @lexer.line
+    first_column = @lexer.head_column
+    last_column = @lexer.column
+    value = error_value.inspect
+  end
+
+  raise ParseError, <<~ERROR
+    #{@path}:#{line}:#{first_column}: parse error on value #{value} (#{token_to_str(error_token_id) || '?'})
+    #{@text.split("\n")[line - 1]}
+    #{carrets(first_column, last_column)}
+  ERROR
+end
+
+def on_action_error(error_message, error_value)
+  if error_value.is_a?(Lrama::Lexer::Token)
+    line = error_value.first_line
+    first_column = error_value.first_column
+    last_column = error_value.last_column
   else
     line = @lexer.line
     first_column = @lexer.head_column
+    last_column = @lexer.column
   end

   raise ParseError, <<~ERROR
-    #{@path}:#{line}:#{first_column}: parse error on value #{error_value.inspect} (#{token_to_str(error_token_id) || '?'})
+    #{@path}:#{line}: #{error_message}
     #{@text.split("\n")[line - 1]}
-    #{carrets(first_column)}
+    #{carrets(first_column, last_column)}
   ERROR
 end

@@ -441,6 +498,6 @@ def end_c_declaration
   @lexer.end_symbol = nil
 end

-def carrets(first_column)
-  ' ' * (first_column + 1) + '^' * (@lexer.column - first_column)
+def carrets(first_column, last_column)
+  ' ' * (first_column + 1) + '^' * (last_column - first_column)
 end
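Taken together, the parser.y changes above mean each right-hand-side alternative now accumulates into a Grammar::RuleBuilder (numbered through the two Counter objects created in initialize) instead of a plain hash, and the finished builders are handed to the grammar via add_rule_builder. A rough, self-contained sketch of that accumulation pattern, using hypothetical stand-in classes rather than the real Lrama API:

# Stand-in sketch only; names and shapes are simplified, not Lrama's code.
Token = Struct.new(:s_value, :alias_name, :line, keyword_init: true)

class RuleBuilder
  attr_accessor :lhs, :line, :user_code, :precedence_sym
  attr_reader :rhs

  def initialize
    @rhs = []
  end

  # Mirrors builder.add_rhs(token) in the actions above.
  def add_rhs(token)
    @line ||= token.line
    @rhs << token
  end

  # Called once the lhs is known and the alternative is fully read.
  def complete_input
    raise "lhs is not set" unless @lhs
  end
end

class Grammar
  def initialize
    @rule_builders = []
  end

  def add_rule_builder(builder)
    @rule_builders << builder
  end
end

# Roughly what the rule action does for one alternative "expr: expr '+' ...":
grammar = Grammar.new
builder = RuleBuilder.new
builder.add_rhs(Token.new(s_value: "expr", line: 12))
builder.add_rhs(Token.new(s_value: "'+'", line: 12))
builder.lhs = Token.new(s_value: "expr")
builder.complete_input
grammar.add_rule_builder(builder)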
data/rbs_collection.lock.yaml CHANGED
@@ -2,7 +2,7 @@
 sources:
 - type: git
   name: ruby/gem_rbs_collection
-  revision: 2de2d4535caba275f3b8533684aab110d921f553
+  revision: 25286c51a19927f28623aee3cd36655f902399ba
   remote: https://github.com/ruby/gem_rbs_collection.git
   repo_dir: gems
   path: ".gem_rbs_collection"
@@ -15,6 +15,10 @@ gems:
   version: '0'
   source:
     type: stdlib
+- name: forwardable
+  version: '0'
+  source:
+    type: stdlib
 - name: rake
   version: '13.0'
   source:
data/rbs_collection.yaml CHANGED
@@ -20,3 +20,4 @@ gems:
   # It's unnecessary if you don't use rbs as a library.
   - name: rbs
     ignore: true
+  - name: forwardable
data/sig/lrama/grammar/code/printer_code.rbs ADDED
@@ -0,0 +1,15 @@
+module Lrama
+  class Grammar
+    class Code
+      class PrinterCode < Code
+        @tag: untyped
+        def initialize: (?type: untyped, ?token_code: untyped, ?tag: untyped) -> void
+
+        private
+
+        # ref: Lrama::Grammar::Code.token_code.references
+        def reference_to_c: (untyped ref) -> untyped
+      end
+    end
+  end
+end
data/sig/lrama/grammar/code.rbs ADDED
@@ -0,0 +1,24 @@
+module Lrama
+  class Grammar
+    class Code
+      extend Forwardable
+
+      attr_accessor type: untyped
+      attr_accessor token_code: untyped
+
+      # delegated
+      def s_value: -> String
+      def line: -> Integer
+      def column: -> untyped
+      def references: -> untyped
+
+      def initialize: (?type: untyped, ?token_code: untyped) -> void
+
+      def translated_code: () -> String
+
+      private
+
+      def reference_to_c: (untyped ref) -> untyped
+    end
+  end
+end
data/sig/lrama/grammar/counter.rbs ADDED
@@ -0,0 +1,11 @@
+module Lrama
+  class Grammar
+    class Counter
+      @number: Integer
+
+      def initialize: (Integer number) -> void
+
+      def increment: () -> Integer
+    end
+  end
+end
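The body of lib/lrama/grammar/counter.rb is not displayed in this diff, but given the signature above and the way the parser seeds Counter.new(0) and Counter.new(1), a plausible reading is a simple post-increment counter (a sketch, not necessarily the exact implementation):

module Lrama
  class Grammar
    class Counter
      def initialize(number)
        @number = number
      end

      # Returns the current number, then advances for the next caller.
      def increment
        n = @number
        @number += 1
        n
      end
    end
  end
end

rule_counter = Lrama::Grammar::Counter.new(0)
rule_counter.increment # => 0
rule_counter.increment # => 1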
data/sig/lrama/grammar/parameterizing_rules/builder.rbs ADDED
@@ -0,0 +1,10 @@
+module Lrama
+  class Grammar
+    class ParameterizingRules
+      class Builder
+        def initialize: (Lexer::Token token, Counter rule_counter, Lexer::Token lhs, Lexer::Token::UserCode? user_code, Lexer::Token? precedence_sym, Integer? line) -> void
+        def build: () -> Array[Rule]
+      end
+    end
+  end
+end
data/sig/lrama/grammar/precedence.rbs ADDED
@@ -0,0 +1,11 @@
+module Lrama
+  class Grammar
+    class Precedence
+      include Comparable
+      attr_accessor type: Symbol
+      attr_accessor precedence: Integer
+
+      def <=>: (Precedence other) -> Integer
+    end
+  end
+end
data/sig/lrama/grammar/printer.rbs ADDED
@@ -0,0 +1,11 @@
+module Lrama
+  class Grammar
+    class Printer
+      attr_accessor ident_or_tags: Array[Lexer::Token::Ident|Lexer::Token::Tag]
+      attr_accessor token_code: Grammar::Code
+      attr_accessor lineno: Integer
+
+      def translated_code: (Lexer::Token member) -> String
+    end
+  end
+end
data/sig/lrama/grammar/reference.rbs CHANGED
@@ -1,21 +1,21 @@
 module Lrama
   class Grammar
     class Reference
-      # TODO: Replace untyped referring_symbol with (Grammar::Symbol|Lexer::Token)
       attr_accessor type: Symbol
-      attr_accessor value: (String|Integer)
+      attr_accessor name: String
+      attr_accessor index: Integer
       attr_accessor ex_tag: Lexer::Token?
       attr_accessor first_column: Integer
       attr_accessor last_column: Integer
-      attr_accessor referring_symbol: untyped
      attr_accessor position_in_rhs: Integer?

      def initialize: (
-        type: Symbol, value: (String|Integer), ex_tag: Lexer::Token?,
+        type: Symbol, ?name: String, ?index: Integer, ?ex_tag: Lexer::Token?,
        first_column: Integer, last_column: Integer,
-        referring_symbol: untyped, position_in_rhs: Integer?
+        ?position_in_rhs: Integer?
      ) -> void

+      def value: () -> (String|Integer)
      def tag: () -> untyped
    end
  end
data/sig/lrama/grammar/rule.rbs ADDED
@@ -0,0 +1,13 @@
+module Lrama
+  class Grammar
+    class Rule
+      attr_accessor original_rule: Rule
+
+      def initialize: (
+        ?id: untyped, ?_lhs: untyped, ?lhs: untyped, ?_rhs: untyped, ?rhs: untyped,
+        ?token_code: untyped, ?position_in_original_rule_rhs: untyped, ?nullable: untyped,
+        ?precedence_sym: untyped, ?lineno: untyped
+      ) -> void
+    end
+  end
+end
data/sig/lrama/grammar/rule_builder.rbs ADDED
@@ -0,0 +1,41 @@
+module Lrama
+  class Grammar
+    class RuleBuilder
+      attr_accessor lhs: Lexer::Token
+      attr_accessor line: Integer?
+      attr_reader rhs: Array[Lexer::Token]
+      attr_reader user_code: Lexer::Token::UserCode?
+      attr_reader precedence_sym: Lexer::Token?
+
+      @rule_counter: Counter
+      @midrule_action_counter: Counter
+      @position_in_original_rule_rhs: Integer?
+      @skip_preprocess_references: bool
+      @user_code: Lexer::Token::UserCode?
+      @rule_builders_for_derived_rules: Array[RuleBuilder]
+      @rules: Array[Rule]
+      @replaced_rhs: Array[Lexer::Token]
+      @parameterizing_rules: Array[Rule]
+      @midrule_action_rules: Array[Rule]
+
+      def initialize: (Counter rule_counter, Counter midrule_action_counter, ?Integer position_in_original_rule_rhs, ?skip_preprocess_references: bool) -> void
+      def add_rhs: (Lexer::Token rhs) -> void
+      def user_code=: (Lexer::Token::UserCode user_code) -> void
+      def precedence_sym=: (Lexer::Token user_code) -> void
+      def complete_input: () -> void
+      def setup_rules: () -> void
+      def parameterizing_rules: () -> Array[Rule]
+      def midrule_action_rules: () -> Array[Rule]
+      def rules: () -> Array[Rule]
+
+      private
+
+      def freeze_rhs: () -> void
+      def preprocess_references: () -> void
+      def build_rules: () -> void
+      def process_rhs: () -> void
+      def numberize_references: () -> void
+      def flush_user_code: () -> void
+    end
+  end
+end
data/sig/lrama/lexer/location.rbs ADDED
@@ -0,0 +1,14 @@
+module Lrama
+  class Lexer
+    class Location
+      attr_reader first_line: Integer
+      attr_reader first_column: Integer
+      attr_reader last_line: Integer
+      attr_reader last_column: Integer
+
+      def initialize: (first_line: Integer, first_column: Integer, last_line: Integer, last_column: Integer) -> void
+
+      def ==: (Location other) -> bool
+    end
+  end
+end
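lib/lrama/lexer/location.rb is likewise only listed above, not shown, so the following is a sketch of a value object matching this signature; the equality semantics (field-by-field comparison) are assumed:

module Lrama
  class Lexer
    class Location
      attr_reader :first_line, :first_column, :last_line, :last_column

      def initialize(first_line:, first_column:, last_line:, last_column:)
        @first_line = first_line
        @first_column = first_column
        @last_line = last_line
        @last_column = last_column
      end

      # Assumed: two locations are equal when all four coordinates match.
      def ==(other)
        other.is_a?(Location) &&
          first_line == other.first_line && first_column == other.first_column &&
          last_line == other.last_line && last_column == other.last_column
      end
    end
  end
end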
data/sig/lrama/lexer/token/parameterizing.rbs CHANGED
@@ -2,6 +2,13 @@ module Lrama
   class Lexer
     class Token
       class Parameterizing < Token
+        attr_accessor args: Array[Lrama::Lexer::Token]
+
+        def option?: () -> bool
+        def nonempty_list?: () -> bool
+        def list?: () -> bool
+        def separated_nonempty_list?: () -> bool
+        def separated_list?: () -> bool
       end
     end
   end
data/sig/lrama/lexer/token/tag.rbs CHANGED
@@ -2,6 +2,7 @@ module Lrama
   class Lexer
     class Token
       class Tag < Token
+        def member: () -> String
       end
     end
   end
data/sig/lrama/lexer/token/user_code.rbs CHANGED
@@ -2,7 +2,14 @@ module Lrama
   class Lexer
     class Token
       class UserCode < Token
-        attr_accessor references: Array[[Symbol, (String|Integer), Token?, Integer, Integer]]
+        @references: Array[Lrama::Grammar::Reference]
+
+        def references: () -> Array[Lrama::Grammar::Reference]
+
+        private
+
+        def _references: () -> Array[Lrama::Grammar::Reference]
+        def scan_reference: (StringScanner scanner) -> Lrama::Grammar::Reference?
       end
     end
   end
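Per this signature, references are no longer stored as raw tuples; they are built as Grammar::Reference objects by scanning the action body with a StringScanner. The sketch below is illustrative only and much simpler than the real scan_reference (tags and @-references are omitted, and the Reference struct here is a stand-in):

require "strscan"

Reference = Struct.new(:type, :name, :index, :first_column, :last_column, keyword_init: true)

# Scan a C action body for $$, $1, $name style references (simplified).
def scan_references(code)
  scanner = StringScanner.new(code)
  refs = []
  until scanner.eos?
    start = scanner.pos
    if scanner.scan(/\$\$/)
      refs << Reference.new(type: :dollar, name: "$", first_column: start, last_column: scanner.pos)
    elsif scanner.scan(/\$(\d+)/)
      refs << Reference.new(type: :dollar, index: scanner[1].to_i, first_column: start, last_column: scanner.pos)
    elsif scanner.scan(/\$([a-zA-Z_][a-zA-Z0-9_]*)/)
      refs << Reference.new(type: :dollar, name: scanner[1], first_column: start, last_column: scanner.pos)
    else
      scanner.getch # not part of a reference, skip one character
    end
  end
  refs
end

scan_references("$$ = $1 + $rhs;") # => references for $$, $1 and $rhs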
data/sig/lrama/lexer/token.rbs CHANGED
@@ -3,15 +3,20 @@ module Lrama
     class Token
       attr_accessor s_value: String
       attr_accessor alias_name: String
-      attr_accessor line: Integer
-      attr_accessor column: Integer
+      attr_accessor location: Location
       attr_accessor referred: bool

-      def initialize: (?s_value: String, ?alias_name: String) -> void
+      def initialize: (?s_value: String, ?alias_name: String, ?location: Location) -> void

       def to_s: () -> String
       def referred_by?: (String string) -> bool
       def ==: (Token other) -> bool
+      def first_line: () -> Integer
+      def first_column: () -> Integer
+      def last_line: () -> Integer
+      def last_column: () -> Integer
+      alias line first_line
+      alias column first_column
     end
   end
 end
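With line and column now aliases of first_line and first_column, existing callers keep working while the underlying data moves into a Location object. An assumed sketch of that compatibility layer (the token.rb body itself is not displayed here):

require "ostruct"

class Token
  attr_accessor :location

  def initialize(location:)
    @location = location
  end

  # Delegate the four coordinates to the Location value object.
  def first_line;   location.first_line;   end
  def first_column; location.first_column; end
  def last_line;    location.last_line;    end
  def last_column;  location.last_column;  end

  # Keep the old accessor names working.
  alias line first_line
  alias column first_column
end

loc = OpenStruct.new(first_line: 3, first_column: 5, last_line: 3, last_column: 9)
Token.new(location: loc).line # => 3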
data/sig/stdlib/strscan/string_scanner.rbs ADDED
@@ -0,0 +1,5 @@
+class StringScanner
+  # TODO: Is it better to define `StringScanner#fetch` whose type '(Integer) -> String' ?
+  def []: (Integer) -> String
+        | ...
+end
data/template/bison/yacc.c CHANGED
@@ -69,11 +69,13 @@


 <%# b4_user_pre_prologue -%>
+<%- if output.aux.prologue -%>
 /* First part of user prologue. */
 #line <%= output.aux.prologue_first_lineno %> "<%= output.grammar_file_path %>"

 <%= output.aux.prologue %>
 #line [@oline@] [@ofile@]
+<%- end -%>

 <%# b4_cast_define -%>
 # ifndef YY_CAST
@@ -1483,6 +1485,7 @@ YYLTYPE yylloc = yyloc_default;
 <%# b4_declare_parser_state_variables -%>
   /* Number of syntax errors so far. */
   int yynerrs = 0;
+  YY_USE (yynerrs); /* Silence compiler warning. */

   yy_state_fast_t yystate = 0;
   /* Number of tokens to shift before error messages enabled. */
@@ -2043,7 +2046,9 @@ yyreturnlab:
 }

 <%# b4_percent_code_get([[epilogue]]) -%>
+<%- if output.aux.epilogue -%>
 #line <%= output.aux.epilogue_first_lineno - 1 %> "<%= output.grammar_file_path %>"

 <%= output.aux.epilogue -%>
+<%- end -%>