lrama 0.5.11 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yaml +2 -2
- data/Gemfile +1 -1
- data/LEGAL.md +1 -0
- data/NEWS.md +187 -0
- data/README.md +15 -4
- data/Steepfile +3 -0
- data/lib/lrama/grammar/code/printer_code.rb +1 -1
- data/lib/lrama/grammar/code/rule_action.rb +19 -3
- data/lib/lrama/grammar/code.rb +19 -7
- data/lib/lrama/grammar/parameterizing_rule.rb +6 -0
- data/lib/lrama/grammar/parameterizing_rule_builder.rb +34 -0
- data/lib/lrama/grammar/parameterizing_rule_resolver.rb +30 -0
- data/lib/lrama/grammar/parameterizing_rule_rhs_builder.rb +53 -0
- data/lib/lrama/grammar/rule_builder.rb +26 -22
- data/lib/lrama/grammar.rb +15 -41
- data/lib/lrama/lexer/grammar_file.rb +21 -0
- data/lib/lrama/lexer/location.rb +77 -2
- data/lib/lrama/lexer/token/instantiate_rule.rb +18 -0
- data/lib/lrama/lexer/token/user_code.rb +10 -10
- data/lib/lrama/lexer/token.rb +1 -1
- data/lib/lrama/lexer.rb +21 -11
- data/lib/lrama/parser.rb +619 -454
- data/lib/lrama/states_reporter.rb +1 -1
- data/lib/lrama/version.rb +1 -1
- data/parser.y +95 -30
- data/sig/lrama/grammar/code/printer_code.rbs +1 -1
- data/sig/lrama/grammar/code.rbs +5 -5
- data/sig/lrama/grammar/parameterizing_rule.rbs +10 -0
- data/sig/lrama/grammar/parameterizing_rule_builder.rbs +19 -0
- data/sig/lrama/grammar/parameterizing_rule_resolver.rbs +16 -0
- data/sig/lrama/grammar/parameterizing_rule_rhs_builder.rbs +18 -0
- data/sig/lrama/grammar/parameterizing_rules/builder/base.rbs +5 -3
- data/sig/lrama/grammar/parameterizing_rules/builder/separated_list.rbs +2 -0
- data/sig/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rbs +2 -0
- data/sig/lrama/grammar/parameterizing_rules/builder.rbs +4 -3
- data/sig/lrama/grammar/rule_builder.rbs +2 -4
- data/sig/lrama/lexer/grammar_file.rbs +15 -0
- data/sig/lrama/lexer/location.rbs +13 -1
- data/sig/lrama/lexer/token/instantiate_rule.rbs +12 -0
- metadata +16 -6
- data/doc/TODO.md +0 -59
- data/lib/lrama/lexer/token/parameterizing.rb +0 -34
- data/sig/lrama/lexer/token/parameterizing.rbs +0 -17
data/lib/lrama/version.rb
CHANGED
data/parser.y
CHANGED
@@ -29,6 +29,7 @@ rule
                    | bison_declarations bison_declaration
 
   bison_declaration: grammar_declaration
+                   | rule_declaration
                    | "%expect" INTEGER { @grammar.expect = val[1] }
                    | "%define" variable value
                    | "%param" params
@@ -202,6 +203,85 @@ rule
 
   token_declaration: id int_opt alias { result = val }
 
+  rule_declaration: "%rule" IDENTIFIER "(" rule_args ")" ":" rule_rhs_list
+                      {
+                        builder = Grammar::ParameterizingRuleBuilder.new(val[1].s_value, val[3], val[6])
+                        @grammar.add_parameterizing_rule_builder(builder)
+                      }
+
+  rule_args: IDENTIFIER { result = [val[0]] }
+           | rule_args "," IDENTIFIER { result = val[0].append(val[2]) }
+
+  rule_rhs_list: rule_rhs
+                   {
+                     builder = val[0]
+                     result = [builder]
+                   }
+               | rule_rhs_list "|" rule_rhs
+                   {
+                     builder = val[2]
+                     result = val[0].append(builder)
+                   }
+
+  rule_rhs: /* empty */
+              {
+                reset_precs
+                result = Grammar::ParameterizingRuleRhsBuilder.new
+              }
+          | "%empty"
+              {
+                reset_precs
+                result = Grammar::ParameterizingRuleRhsBuilder.new
+              }
+          | rule_rhs symbol named_ref_opt
+              {
+                token = val[1]
+                token.alias_name = val[2]
+                builder = val[0]
+                builder.symbols << token
+                result = builder
+              }
+          | rule_rhs IDENTIFIER parameterizing_suffix
+              {
+                builder = val[0]
+                builder.symbols << Lrama::Lexer::Token::InstantiateRule.new(s_value: val[2], location: @lexer.location, args: [val[1]])
+                result = builder
+              }
+          | rule_rhs IDENTIFIER "(" parameterizing_args ")"
+              {
+                builder = val[0]
+                builder.symbols << Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, location: @lexer.location, args: val[3])
+                result = builder
+              }
+          | rule_rhs "{"
+              {
+                if @prec_seen
+                  on_action_error("multiple User_code after %prec", val[0]) if @code_after_prec
+                  @code_after_prec = true
+                end
+                begin_c_declaration("}")
+              }
+            C_DECLARATION
+              {
+                end_c_declaration
+              }
+            "}" named_ref_opt
+              {
+                user_code = val[3]
+                user_code.alias_name = val[6]
+                builder = val[0]
+                builder.user_code = user_code
+                result = builder
+              }
+          | rule_rhs "%prec" symbol
+              {
+                sym = @grammar.find_symbol_by_id!(val[2])
+                @prec_seen = true
+                builder = val[0]
+                builder.precedence_sym = sym
+                result = builder
+              }
+
   int_opt: # empty
          | INTEGER
 
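The block above is the core of this release: a grammar file can now declare its own parameterizing rules with `%rule name(args): ...` and call them from a right-hand side either with a suffix or with an explicit argument list. As a rough, hypothetical illustration of the syntax accepted by these productions (the grammar text and file name below are invented, not taken from the gem's own examples or tests):

  require "lrama"

  # %rule declares a parameterizing rule; pair(NUM, NUM) instantiates it on a RHS.
  grammar_src = <<~GRAMMAR
    %token NUM COMMA

    %rule pair(X, Y): X COMMA Y

    %%

    program: pair(NUM, NUM)
           ;
  GRAMMAR

  # Parser#initialize(text, path, debug = false) and #parse appear in the hunks
  # above; what #parse returns is not shown in this diff, so this only sketches
  # how such a grammar would be fed to the parser.
  Lrama::Parser.new(grammar_src, "example.y").parse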
@@ -326,9 +406,9 @@ rule
                 builder.add_rhs(token)
                 result = builder
               }
-          | rhs
+          | rhs symbol parameterizing_suffix tag_opt
               {
-                token = Lrama::Lexer::Token::
+                token = Lrama::Lexer::Token::InstantiateRule.new(s_value: val[2], location: @lexer.location, args: [val[1]])
                 builder = val[0]
                 builder.add_rhs(token)
                 builder.lhs_tag = val[3]
@@ -337,7 +417,7 @@ rule
               }
           | rhs IDENTIFIER "(" parameterizing_args ")" tag_opt
               {
-                token = Lrama::Lexer::Token::
+                token = Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, location: @lexer.location, args: val[3])
                 builder = val[0]
                 builder.add_rhs(token)
                 builder.lhs_tag = val[5]
@@ -421,8 +501,7 @@ end
   include Lrama::Report::Duration
 
   def initialize(text, path, debug = false)
-    @
-    @path = path
+    @grammar_file = Lrama::Lexer::GrammarFile.new(path, text)
     @yydebug = debug
     @rule_counter = Lrama::Grammar::Counter.new(0)
     @midrule_action_counter = Lrama::Grammar::Counter.new(1)
@@ -430,7 +509,7 @@ end
 
   def parse
     report_duration(:parse) do
-      @lexer = Lrama::Lexer.new(@
+      @lexer = Lrama::Lexer.new(@grammar_file)
       @grammar = Lrama::Grammar.new(@rule_counter)
       @precedence_number = 0
       reset_precs
@@ -447,40 +526,26 @@ end
 
   def on_error(error_token_id, error_value, value_stack)
     if error_value.is_a?(Lrama::Lexer::Token)
-
-      first_column = error_value.first_column
-      last_column = error_value.last_column
+      location = error_value.location
       value = "'#{error_value.s_value}'"
     else
-
-      first_column = @lexer.head_column
-      last_column = @lexer.column
+      location = @lexer.location
       value = error_value.inspect
     end
 
-
-
-
-        #{carrets(first_column, last_column)}
-      ERROR
+    error_message = "parse error on value #{value} (#{token_to_str(error_token_id) || '?'})"
+
+    raise_parse_error(error_message, location)
   end
 
   def on_action_error(error_message, error_value)
     if error_value.is_a?(Lrama::Lexer::Token)
-
-      first_column = error_value.first_column
-      last_column = error_value.last_column
+      location = error_value.location
     else
-
-      first_column = @lexer.head_column
-      last_column = @lexer.column
+      location = @lexer.location
     end
 
-
-        #{@path}:#{line}: #{error_message}
-        #{@text.split("\n")[line - 1]}
-        #{carrets(first_column, last_column)}
-      ERROR
+    raise_parse_error(error_message, location)
   end
 
   private
@@ -500,6 +565,6 @@ def end_c_declaration
     @lexer.end_symbol = nil
   end
 
-  def
-
+  def raise_parse_error(error_message, location)
+    raise ParseError, location.generate_error_message(error_message)
   end
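Both error paths now collect a Lrama::Lexer::Location and hand it to the new raise_parse_error, which delegates formatting to Location#generate_error_message. Judging from the removed heredoc fragments above ("#{@path}:#{line}: ...", the offending source line, a caret underline) and the Location/GrammarFile signatures added later in this diff, the formatting plausibly works roughly like the sketch below; this is an approximation, not the gem's actual implementation, and the column arithmetic in particular is a guess:

  module Lrama
    class Lexer
      class Location
        # Sketch: "path:line: message", then the offending source line with carets
        # under the reported column span. The readers used here (grammar_file,
        # first_line, first_column, last_column) come from the new location.rbs.
        def generate_error_message(error_message)
          "#{path}:#{first_line}: #{error_message}\n#{line_with_carets}\n"
        end

        def line_with_carets
          src = grammar_file.lines[first_line - 1]
          carets = (" " * first_column) + ("^" * [last_column - first_column, 1].max)
          "#{src}\n#{carets}"
        end

        private

        def path
          grammar_file.path
        end
      end
    end
  end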
data/sig/lrama/grammar/code.rbs
CHANGED
@@ -3,16 +3,16 @@ module Lrama
   class Code
     extend Forwardable
 
-    attr_accessor type:
-    attr_accessor token_code:
+    attr_accessor type: Symbol
+    attr_accessor token_code: Lexer::Token::UserCode
 
     # delegated
     def s_value: -> String
     def line: -> Integer
-    def column: ->
-    def references: ->
+    def column: -> Integer
+    def references: -> Array[Lrama::Grammar::Reference]
 
-    def initialize: (
+    def initialize: (type: Symbol, token_code: Lexer::Token::UserCode) -> void
 
     def translated_code: () -> String
 
data/sig/lrama/grammar/parameterizing_rule_builder.rbs
ADDED
@@ -0,0 +1,19 @@
+module Lrama
+  class Grammar
+    class ParameterizingRuleBuilder
+      attr_reader name: String
+      attr_reader parameters: Array[Lexer::Token]
+      attr_reader rhs: Array[Grammar::ParameterizingRuleRhsBuilder]
+
+      @required_parameters_count: Integer
+
+      def initialize: (String name, Array[Lexer::Token] parameters, Array[Grammar::ParameterizingRuleRhsBuilder] rhs) -> void
+      def build_rules: (Lexer::Token::InstantiateRule token, Array[Lexer::Token] actual_args, Counter rule_counter, untyped lhs_tag, Integer? line, Array[ParameterizingRuleBuilder] rule_builders) -> Array[Grammar::ParameterizingRule]
+
+      private
+
+      def validate_argument_number!: (Lexer::Token::InstantiateRule token) -> void
+      def lhs: (Array[Lexer::Token] actual_args) -> Lexer::Token
+    end
+  end
+end
data/sig/lrama/grammar/parameterizing_rule_resolver.rbs
ADDED
@@ -0,0 +1,16 @@
+module Lrama
+  class Grammar
+    class ParameterizingRuleResolver
+      @parameterizing_rule_builders: Array[Grammar::ParameterizingRuleBuilder]
+
+      def initialize: () -> void
+      def add_parameterizing_rule_builder: (Grammar::ParameterizingRuleBuilder builder) -> void
+      def defined?: (String) -> bool
+      def build_rules: (Lexer::Token::InstantiateRule token, Counter rule_counter, untyped lhs_tag, Integer? line) -> Array[Grammar::ParameterizingRule]
+
+      private
+
+      def rule_builders: (String) -> Array[Grammar::ParameterizingRuleBuilder]
+    end
+  end
+end
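Taken together with ParameterizingRuleBuilder above, this signature suggests the resolver is a small registry: each %rule declaration registers a builder under its name, and a call site is expanded by the builders sharing that name. A minimal Ruby sketch consistent with the signature (the real code in lib/lrama/grammar/parameterizing_rule_resolver.rb may differ, for example in how multiple definitions of the same name are combined):

  module Lrama
    class Grammar
      class ParameterizingRuleResolver
        def initialize
          @parameterizing_rule_builders = []
        end

        def add_parameterizing_rule_builder(builder)
          @parameterizing_rule_builders << builder
        end

        # True once a %rule with this name has been declared.
        def defined?(name)
          !rule_builders(name).empty?
        end

        # Expand a call site such as pair(NUM, NUM) into concrete rules by asking
        # each matching builder to instantiate itself with the actual arguments.
        def build_rules(token, rule_counter, lhs_tag, line)
          builders = rule_builders(token.rule_name)
          builders.flat_map do |builder|
            builder.build_rules(token, token.args, rule_counter, lhs_tag, line, builders)
          end
        end

        private

        def rule_builders(name)
          @parameterizing_rule_builders.select { |builder| builder.name == name }
        end
      end
    end
  end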
data/sig/lrama/grammar/parameterizing_rule_rhs_builder.rbs
ADDED
@@ -0,0 +1,18 @@
+module Lrama
+  class Grammar
+    class ParameterizingRuleRhsBuilder
+      attr_reader symbols: Array[untyped]
+      attr_reader user_code: Lexer::Token::UserCode?
+      attr_reader precedence_sym: Lexer::Token?
+
+      def initialize: () -> void
+      def build_rules: (Lexer::Token::InstantiateRule token, Array[Lexer::Token] actual_args, Array[Lexer::Token] parameters, Counter rule_counter, Lexer::Token lhs, untyped lhs_tag, Integer? line, Array[ParameterizingRuleBuilder] rule_builders) -> Grammar::ParameterizingRule
+
+      private
+
+      def build_nested_rules: (Lexer::Token::InstantiateRule token, Array[Lexer::Token] actual_args, Array[Lexer::Token] parameters, Counter rule_counter, untyped lhs_tag, Integer? line, Array[ParameterizingRuleBuilder] rule_builders) -> Array[Grammar::ParameterizingRule]
+      def nested_actual_args: (Array[Lexer::Token] actual_args, Array[Lexer::Token] parameters, Integer idx) -> Array[Lexer::Token]
+      def rhs: (Lexer::Token token, Array[Lexer::Token] actual_args, Array[Lexer::Token] parameters, ParameterizingRule? nested_rule) -> Array[Lexer::Token]
+    end
+  end
+end
data/sig/lrama/grammar/parameterizing_rules/builder/base.rbs
CHANGED
@@ -3,14 +3,16 @@ module Lrama
     class ParameterizingRules
       class Builder
         class Base
-
+          attr_reader build_token: Lexer::Token::Ident
+
+          def initialize: (Lexer::Token::InstantiateRule token, Counter rule_counter, untyped lhs_tag, Lexer::Token::UserCode? user_code, Lexer::Token? precedence_sym, Integer? line) -> void
           def build: () -> Array[Rule]
 
           @args: Array[Lexer::Token]
-          @token: Lexer::Token::
+          @token: Lexer::Token::InstantiateRule
           @key: Symbol
           @rule_counter: Counter
-          @
+          @lhs_tag: untyped
           @user_code: Lexer::Token::UserCode?
           @precedence_sym: Lexer::Token?
           @line: Integer?
data/sig/lrama/grammar/parameterizing_rules/builder/separated_list.rbs
CHANGED
@@ -3,6 +3,8 @@ module Lrama
     class ParameterizingRules
       class Builder
         class SeparatedList < Base
+          @separator: Lexer::Token
+
           def initialize: (Lexer::Token token, Counter rule_counter, untyped lhs_tag, Lexer::Token::UserCode? user_code, Lexer::Token? precedence_sym, Integer? line) -> void
         end
       end
data/sig/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rbs
CHANGED
@@ -3,6 +3,8 @@ module Lrama
     class ParameterizingRules
       class Builder
         class SeparatedNonemptyList < Base
+          @separator: Lexer::Token
+
           def initialize: (Lexer::Token token, Counter rule_counter, untyped lhs_tag, Lexer::Token::UserCode? user_code, Lexer::Token? precedence_sym, Integer? line) -> void
         end
       end
data/sig/lrama/grammar/parameterizing_rules/builder.rbs
CHANGED
@@ -4,17 +4,18 @@ module Lrama
     class Builder
       RULES: Hash[Symbol, singleton(Base)]
 
-      @token: Lexer::Token::
+      @token: Lexer::Token::InstantiateRule
       @key: Symbol
       @rule_counter: Counter
-      @
+      @lhs_tag: untyped
       @user_code: Lexer::Token::UserCode?
       @precedence_sym: Lexer::Token?
       @line: Integer?
+      @builder: Grammar::ParameterizingRules::Builder::Base
 
       def initialize: (Lexer::Token token, Counter rule_counter, untyped lhs_tag, Lexer::Token::UserCode? user_code, Lexer::Token? precedence_sym, Integer? line) -> void
       def build: () -> Array[Rule]
-      def build_token: () ->
+      def build_token: () -> Lexer::Token
       def create_builder: () -> void
       def validate_key!: () -> void
     end
data/sig/lrama/grammar/rule_builder.rbs
CHANGED
@@ -24,9 +24,7 @@ module Lrama
     def user_code=: (Lexer::Token::UserCode user_code) -> void
     def precedence_sym=: (Lexer::Token user_code) -> void
     def complete_input: () -> void
-    def setup_rules: () -> void
-    def parameterizing_rules: () -> Array[Rule]
-    def midrule_action_rules: () -> Array[Rule]
+    def setup_rules: (Grammar::ParameterizingRuleResolver parameterizing_resolver) -> void
     def rules: () -> Array[Rule]
 
     private
@@ -34,7 +32,7 @@ module Lrama
     def freeze_rhs: () -> void
     def preprocess_references: () -> void
     def build_rules: () -> void
-    def process_rhs: () -> void
+    def process_rhs: (Grammar::ParameterizingRuleResolver parameterizing_resolver) -> void
     def numberize_references: () -> void
     def flush_user_code: () -> void
   end
data/sig/lrama/lexer/grammar_file.rbs
ADDED
@@ -0,0 +1,15 @@
+module Lrama
+  class Lexer
+    class GrammarFile
+      attr_reader path: String
+      attr_reader text: String
+
+      @lines: Array[String]
+
+      def initialize: (String path, String text) -> void
+
+      def ==: (GrammarFile other) -> bool
+      def lines: () -> Array[String]
+    end
+  end
+end
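GrammarFile is the small value object that Lrama::Parser#initialize now builds from path and text (see the parser.y hunk above), so that every Location can reach back to the source when rendering error messages. A sketch consistent with this signature (the shipped lib/lrama/lexer/grammar_file.rb may differ, notably in what == compares):

  module Lrama
    class Lexer
      class GrammarFile
        attr_reader :path, :text

        def initialize(path, text)
          @path = path
          @text = text.freeze
        end

        # Equality semantics are a guess here; the rbs only promises a bool.
        def ==(other)
          self.class == other.class && path == other.path
        end

        # Memoized line split, used when printing the offending line with carets.
        def lines
          @lines ||= text.split("\n")
        end
      end
    end
  end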
data/sig/lrama/lexer/location.rbs
CHANGED
@@ -1,14 +1,26 @@
 module Lrama
   class Lexer
     class Location
+      attr_reader grammar_file: GrammarFile
       attr_reader first_line: Integer
       attr_reader first_column: Integer
       attr_reader last_line: Integer
       attr_reader last_column: Integer
 
-      def initialize: (first_line: Integer, first_column: Integer, last_line: Integer, last_column: Integer) -> void
+      def initialize: (grammar_file: GrammarFile, first_line: Integer, first_column: Integer, last_line: Integer, last_column: Integer) -> void
 
       def ==: (Location other) -> bool
+      def partial_location: (Integer, Integer) -> Location
+      def generate_error_message: (String) -> String
+      def line_with_carets: () -> String
+
+      private
+
+      def path: () -> String
+      def blanks: () -> String
+      def carets: () -> String
+      def text: () -> String
+      def _text: () -> Array[String]
     end
   end
 end
data/sig/lrama/lexer/token/instantiate_rule.rbs
ADDED
@@ -0,0 +1,12 @@
+module Lrama
+  class Lexer
+    class Token
+      class InstantiateRule < Token
+        attr_accessor args: Array[Lexer::Token]
+
+        def initialize: (s_value: String, ?alias_name: String, ?location: Location, ?args: Array[Lexer::Token]) -> void
+        def rule_name: () -> String
+      end
+    end
+  end
+end
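InstantiateRule replaces the deleted Token::Parameterizing (removed at the bottom of this diff): instead of a token that answers option?/list?-style queries, the token now carries the name of the rule being instantiated plus its actual argument tokens. The parser.y hunks above construct it as follows; the values here are illustrative only:

  # Mirrors the InstantiateRule construction in parser.y above; `location` and
  # `num_token` are assumed to exist in scope for this example.
  token = Lrama::Lexer::Token::InstantiateRule.new(
    s_value: "pair",              # name of the parameterizing rule being called
    location: location,
    args: [num_token, num_token]  # actual argument tokens
  )

  token.rule_name  # => presumably "pair" (rule_name is declared in the rbs above)
  token.args.size  # => 2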
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: lrama
 version: !ruby/object:Gem::Version
-  version: 0.5.11
+  version: 0.6.0
 platform: ruby
 authors:
 - Yuichiro Kaneko
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-12-
+date: 2023-12-25 00:00:00.000000000 Z
 dependencies: []
 description: LALR (1) parser generator written by Ruby
 email:
@@ -27,10 +27,10 @@ files:
 - Gemfile
 - LEGAL.md
 - MIT
+- NEWS.md
 - README.md
 - Rakefile
 - Steepfile
-- doc/TODO.md
 - exe/lrama
 - lib/lrama.rb
 - lib/lrama/bitmap.rb
@@ -55,6 +55,10 @@ files:
 - lib/lrama/grammar/code/rule_action.rb
 - lib/lrama/grammar/counter.rb
 - lib/lrama/grammar/error_token.rb
+- lib/lrama/grammar/parameterizing_rule.rb
+- lib/lrama/grammar/parameterizing_rule_builder.rb
+- lib/lrama/grammar/parameterizing_rule_resolver.rb
+- lib/lrama/grammar/parameterizing_rule_rhs_builder.rb
 - lib/lrama/grammar/parameterizing_rules/builder.rb
 - lib/lrama/grammar/parameterizing_rules/builder/base.rb
 - lib/lrama/grammar/parameterizing_rules/builder/list.rb
@@ -72,11 +76,12 @@ files:
 - lib/lrama/grammar/type.rb
 - lib/lrama/grammar/union.rb
 - lib/lrama/lexer.rb
+- lib/lrama/lexer/grammar_file.rb
 - lib/lrama/lexer/location.rb
 - lib/lrama/lexer/token.rb
 - lib/lrama/lexer/token/char.rb
 - lib/lrama/lexer/token/ident.rb
-- lib/lrama/lexer/token/parameterizing.rb
+- lib/lrama/lexer/token/instantiate_rule.rb
 - lib/lrama/lexer/token/tag.rb
 - lib/lrama/lexer/token/user_code.rb
 - lib/lrama/option_parser.rb
@@ -111,6 +116,10 @@ files:
 - sig/lrama/grammar/code/printer_code.rbs
 - sig/lrama/grammar/counter.rbs
 - sig/lrama/grammar/error_token.rbs
+- sig/lrama/grammar/parameterizing_rule.rbs
+- sig/lrama/grammar/parameterizing_rule_builder.rbs
+- sig/lrama/grammar/parameterizing_rule_resolver.rbs
+- sig/lrama/grammar/parameterizing_rule_rhs_builder.rbs
 - sig/lrama/grammar/parameterizing_rules/builder.rbs
 - sig/lrama/grammar/parameterizing_rules/builder/base.rbs
 - sig/lrama/grammar/parameterizing_rules/builder/list.rbs
@@ -125,11 +134,12 @@ files:
 - sig/lrama/grammar/rule.rbs
 - sig/lrama/grammar/rule_builder.rbs
 - sig/lrama/grammar/symbol.rbs
+- sig/lrama/lexer/grammar_file.rbs
 - sig/lrama/lexer/location.rbs
 - sig/lrama/lexer/token.rbs
 - sig/lrama/lexer/token/char.rbs
 - sig/lrama/lexer/token/ident.rbs
-- sig/lrama/lexer/token/parameterizing.rbs
+- sig/lrama/lexer/token/instantiate_rule.rbs
 - sig/lrama/lexer/token/tag.rbs
 - sig/lrama/lexer/token/user_code.rbs
 - sig/lrama/report/duration.rbs
@@ -158,7 +168,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.5.
+rubygems_version: 3.5.1
 signing_key:
 specification_version: 4
 summary: LALR (1) parser generator written by Ruby
data/doc/TODO.md
DELETED
@@ -1,59 +0,0 @@
-# TODO
-
-* command
-* lexer
-* [x] Basic functionalities
-* parser
-* [x] Basic functionalities
-* [x] Precedence in grammar
-* LALR
-* [x] compute_nullable
-* [x] compute_lr0_states
-* [x] Direct Read Sets
-* [x] Reads Relation
-* [x] Read Sets
-* [x] Includes Relation
-* [x] Lookback Relation
-* [x] Follow Sets
-* [x] Look-Ahead Sets
-* [x] Precedence support
-* [x] Conflict check
-* [x] Algorithm Digraph
-* [ ] Conflict resolution
-* [x] Do not generate default action if states have conflicts
-* [ ] Fix number of s/r conflicts of basic.y. See basic.output file generated by Bison.
-* Rendering
-* [x] Table compaction
-* [x] -d option
-* yacc.c
-* [x] %lex-param
-* [x] %parse-param
-* [x] %printer
-* [x] Replace $, @ in user codes
-* [x] `[@oline@]`
-* [ ] b4_symbol (for eof, error and so on)
-* Assumption
-* b4_locations_if is true
-* b4_pure_if is true
-* b4_pull_if is false
-* b4_lac_if is false
-* Performance improvement
-* [ ]
-* Licenses
-* [x] Write down something about licenses
-* Reporting
-* [ ] Bison style
-* [ ] Wrap not selected reduce with "[]". See basic.output file generated by Bison.
-* Counterexamples
-* [x] Nonunifying Counterexamples
-* [ ] Unifying Counterexamples
-* [ ] Performance improvement using reverse_transitions and reverse_productions
-* Error Tolerance
-* [x] Corchuelo et al. algorithm with N = 1 (this means the next token when error is raised)
-* [x] Add new decl for error token semantic value initialization (%error-token)
-* [x] Use YYMALLOC & YYFREE
-* Lex state
-* CI
-* [x] Setup CI
-* [x] Add ruby 3.1 or under
-* [x] Add integration tests which installs Lrama, build ruby and run `make test`
data/lib/lrama/lexer/token/parameterizing.rb
DELETED
@@ -1,34 +0,0 @@
-module Lrama
-  class Lexer
-    class Token
-      class Parameterizing < Token
-        attr_accessor :args
-
-        def initialize(s_value:, alias_name: nil, location: nil, args: [])
-          super s_value: s_value, alias_name: alias_name, location: location
-          @args = args
-        end
-
-        def option?
-          %w(option ?).include?(self.s_value)
-        end
-
-        def nonempty_list?
-          %w(nonempty_list +).include?(self.s_value)
-        end
-
-        def list?
-          %w(list *).include?(self.s_value)
-        end
-
-        def separated_nonempty_list?
-          %w(separated_nonempty_list).include?(self.s_value)
-        end
-
-        def separated_list?
-          %w(separated_list).include?(self.s_value)
-        end
-      end
-    end
-  end
-end
data/sig/lrama/lexer/token/parameterizing.rbs
DELETED
@@ -1,17 +0,0 @@
-module Lrama
-  class Lexer
-    class Token
-      class Parameterizing < Token
-        attr_accessor args: Array[Lrama::Lexer::Token]
-
-        def initialize: (s_value: String, ?alias_name: String, ?location: Location, ?args: Array[Lrama::Lexer::Token]) -> void
-
-        def option?: () -> bool
-        def nonempty_list?: () -> bool
-        def list?: () -> bool
-        def separated_nonempty_list?: () -> bool
-        def separated_list?: () -> bool
-      end
-    end
-  end
-end
|