rbs-inline 0.3.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. checksums.yaml +4 -4
  2. data/README.md +10 -7
  3. data/Rakefile +12 -0
  4. data/lib/rbs/inline/annotation_parser/tokenizer.rb +361 -0
  5. data/lib/rbs/inline/annotation_parser.rb +548 -326
  6. data/lib/rbs/inline/ast/annotations.rb +446 -136
  7. data/lib/rbs/inline/ast/comment_lines.rb +32 -18
  8. data/lib/rbs/inline/ast/declarations.rb +67 -28
  9. data/lib/rbs/inline/ast/members.rb +137 -140
  10. data/lib/rbs/inline/ast/tree.rb +104 -5
  11. data/lib/rbs/inline/cli.rb +12 -12
  12. data/lib/rbs/inline/node_utils.rb +4 -0
  13. data/lib/rbs/inline/parser.rb +140 -59
  14. data/lib/rbs/inline/version.rb +1 -1
  15. data/lib/rbs/inline/writer.rb +243 -94
  16. data/lib/rbs/inline.rb +4 -0
  17. data/rbs_collection.lock.yaml +3 -7
  18. data/rbs_collection.yaml +2 -0
  19. data/sig/generated/rbs/inline/annotation_parser/tokenizer.rbs +221 -0
  20. data/sig/generated/rbs/inline/annotation_parser.rbs +148 -92
  21. data/sig/generated/rbs/inline/ast/annotations.rbs +142 -36
  22. data/sig/generated/rbs/inline/ast/comment_lines.rbs +35 -0
  23. data/sig/generated/rbs/inline/ast/declarations.rbs +29 -10
  24. data/sig/generated/rbs/inline/ast/members.rbs +33 -24
  25. data/sig/generated/rbs/inline/ast/tree.rbs +132 -0
  26. data/sig/generated/rbs/inline/cli.rbs +3 -3
  27. data/sig/generated/rbs/inline/node_utils.rbs +11 -0
  28. data/sig/generated/rbs/inline/parser.rbs +38 -18
  29. data/sig/generated/rbs/inline/version.rbs +7 -0
  30. data/sig/generated/rbs/inline/writer.rbs +104 -0
  31. data/sig/generated/rbs/inline.rbs +7 -0
  32. metadata +14 -14
  33. data/sig/rbs/inline/annotation_parser.rbs +0 -0
  34. data/sig/rbs/inline/ast/comment_lines.rbs +0 -27
  35. data/sig/rbs/inline/ast/tree.rbs +0 -98
  36. data/sig/rbs/inline/node_utils.rbs +0 -7
  37. data/sig/rbs/inline/writer.rbs +0 -27
  38. data/sig/rbs/inline.rbs +0 -41
  39. data/yard-samples/hello.rb +0 -6
  40. data/yard-samples/sample1.rb +0 -26
@@ -3,13 +3,43 @@
 module RBS
   module Inline
     class AnnotationParser
+      # ParsingResult groups consecutive comments, which may contain several annotations
+      #
+      # *Consecutive comments* are defined below.
+      # They are basically comments that follow from the previous line, but there are some more requirements.
+      #
+      # ```ruby
+      # # Line 1
+      # # Line 2          #=> Line 1 and Line 2 are consecutive
+      #
+      #    # Line 3
+      # # Line 4          #=> Line 3 and Line 4 are not consecutive, because the starting columns are different
+      #
+      # # Line 5
+      # foo() # Line 6    #=> Line 5 and Line 6 are not consecutive, because Line 6 has leading code
+      # ```
+      #
       class ParsingResult
-        attr_reader :comments #:: Array[Prism::Comment]
-        attr_reader :annotations #:: Array[AST::Annotations::t]
-        attr_reader :first_comment_offset #:: Integer
+        attr_reader :comments #: Array[Prism::Comment]
+        attr_reader :annotations #: Array[AST::Annotations::t | AST::CommentLines]
+        attr_reader :first_comment_offset #: Integer
+
+        #: () { (AST::Annotations::t) -> void } -> void
+        #: () -> Enumerator[AST::Annotations::t, void]
+        def each_annotation(&block)
+          if block
+            annotations.each do |annot|
+              if annot.is_a?(AST::Annotations::Base)
+                yield annot
+              end
+            end
+          else
+            enum_for :each_annotation
+          end
+        end
 
         # @rbs first_comment: Prism::Comment
-        def initialize(first_comment)
+        def initialize(first_comment) #: void
           @comments = [first_comment]
           @annotations = []
           content = first_comment.location.slice
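This hunk switches the gem's own trailing attribute annotations from the old `#::` marker to the single-colon `#:` form and adds `ParsingResult#each_annotation`. As a rough illustration of the same `#:` style in user code targeting rbs-inline 0.5.0 (the `Ticket` class and its attributes are invented for this sketch, not taken from the gem):

```ruby
# rbs_inline: enabled

class Ticket
  attr_reader :title #: String

  attr_reader :labels #: Array[String]

  # @rbs title: String
  # @rbs labels: Array[String]
  def initialize(title, labels) #: void
    @title = title
    @labels = labels
  end
end
```

Running the rbs-inline generator over such a file should emit an RBS declaration with typed `attr_reader`s and `initialize`, matching the annotations above.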
@@ -17,7 +47,7 @@ module RBS
           @first_comment_offset = index
         end
 
-        # @rbs returns Range[Integer]
+        # @rbs return: Range[Integer]
         def line_range
           first = comments.first or raise
           last = comments.last or raise
@@ -25,13 +55,13 @@ module RBS
           first.location.start_line .. last.location.end_line
         end
 
-        # @rbs returns Prism::Comment
+        # @rbs return: Prism::Comment
         def last_comment
           comments.last or raise
         end
 
         # @rbs comment: Prism::Comment
-        # @rbs returns self?
+        # @rbs return: self?
        def add_comment(comment)
          if last_comment.location.end_line + 1 == comment.location.start_line
            if last_comment.location.start_column == comment.location.start_column
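The two hunks above are representative of a rename that runs through the whole file: the `@rbs returns T` keyword becomes `@rbs return: T`. A hedged example of the new spelling in application code (the `Pager` class and its method are made up for illustration):

```ruby
# rbs_inline: enabled

class Pager
  # @rbs page: Integer -- 1-origin page number
  # @rbs per_page: Integer
  # @rbs return: Range[Integer]
  def offsets(page, per_page)
    start = (page - 1) * per_page
    start...(start + per_page)
  end
end
```

The `-- description` trailer after a parameter type is the same optional comment syntax that `parse_var_decl` handles later in this diff.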
@@ -46,40 +76,46 @@ module RBS
           end
         end
 
-        # @rbs returns Array[[String, Prism::Comment]]
-        def lines
-          comments.map do |comment|
-            slice = comment.location.slice
-            index = slice.index(/[^#\s]/) || slice.size
-            string = if index > first_comment_offset
-              slice[first_comment_offset..] || ""
-            else
-              slice[index..] || ""
-            end
-            [string, comment]
+        # @rbs trim: bool -- `true` to trim the leading whitespaces
+        def content(trim: false) #: String
+          if trim
+            leading_spaces = lines[0][/\A\s*/]
+            offset = leading_spaces ? leading_spaces.length : 0
+
+            lines.map do |line|
+              prefix = line[0..offset] || ""
+              if prefix.strip.empty?
+                line[offset..]
+              else
+                line.lstrip
+              end
+            end.join("\n")
+          else
+            lines.join("\n")
           end
         end
 
-        # @rbs returns String
-        def content
-          lines.map(&:first).join("\n")
+        def lines #: Array[String]
+          comments.map { _1.location.slice[1...] || "" }
         end
       end
 
-      attr_reader :input #:: Array[Prism::Comment]
+      include Tokens
+
+      attr_reader :input #: Array[Prism::Comment]
 
       # @rbs input: Array[Prism::Comment]
-      def initialize(input)
+      def initialize(input) #: void
         @input = input
       end
 
       # @rbs input: Array[Prism::Comment]
-      # @rbs returns Array[ParsingResult]
+      # @rbs return: Array[ParsingResult]
       def self.parse(input)
         new(input).parse
       end
 
-      # @rbs returns Array[ParsingResult]
+      # @rbs return: Array[ParsingResult]
      def parse
        results = [] #: Array[ParsingResult]
 
@@ -97,9 +133,13 @@ module RBS
         end
 
         results.each do |result|
-          each_annotation_paragraph(result) do |comments|
-            if annot = parse_annotation(AST::CommentLines.new(comments))
+          each_annotation_paragraph(result) do |comments, annotation|
+            lines = AST::CommentLines.new(comments)
+
+            if annotation && annot = parse_annotation(lines)
               result.annotations << annot
+            else
+              result.annotations << lines
             end
           end
         end
@@ -109,308 +149,311 @@ module RBS
 
       private
 
-      # @rbs result: ParsingResult
-      # @rbs block: ^(Array[Prism::Comment]) -> void
-      # @rbs returns void
-      def each_annotation_paragraph(result, &block)
-        lines = result.lines
-
-        while true
-          line, comment = lines.shift
-          break unless line && comment
-
-          next_line, next_comment = lines.first
-
-          possible_annotation = false
-          possible_annotation ||= line.start_with?('@rbs', '@rbs!')
-          possible_annotation ||= comment.location.slice.start_with?("#::", "#[") # No leading whitespace is allowed
-
-          if possible_annotation
-            line_offset = line.index(/\S/) || raise
-
-            comments = [comment]
-
-            while true
-              break unless next_line && next_comment
-              next_offset = next_line.index(/\S/) || 0
-              break unless next_offset > line_offset
-
-              comments << next_comment
-              lines.shift
+      # Test if the comment is an annotation comment
+      #
+      # - Returns `nil` if the comment is not an annotation.
+      # - Returns `true` if the comment is a `#:` or `#[` annotation. (Offset is `1`)
+      # - Returns an Integer if the comment is a `#@rbs` annotation. (Offset is the number of leading spaces including `#`)
+      #
+      #: (Prism::Comment) -> (Integer | true | nil)
+      def annotation_comment?(comment)
+        line = comment.location.slice
 
-              next_line, next_comment = lines.first
-            end
+        # No leading whitespace is allowed
+        return true if line.start_with?("#:")
+        return true if line.start_with?("#[")
 
-            yield comments
-          end
+        if match = line.match(/\A#(\s*)@rbs(\b|!)/)
+          leading_spaces = match[1] or raise
+          leading_spaces.size + 1
         end
       end
 
-      class Tokenizer
-        attr_reader :scanner #:: StringScanner
-        attr_reader :current_token #:: token?
-
-        KEYWORDS = {
-          "returns" => :kRETURNS,
-          "inherits" => :kINHERITS,
-          "as" => :kAS,
-          "override" => :kOVERRIDE,
-          "use" => :kUSE,
-          "module-self" => :kMODULESELF,
-          "generic" => :kGENERIC,
-          "in" => :kIN,
-          "out" => :kOUT,
-          "unchecked" => :kUNCHECKED,
-          "self" => :kSELF,
-          "skip" => :kSKIP,
-          "yields" => :kYIELDS,
-        } #:: Hash[String, Symbol]
-        KW_RE = /#{Regexp.union(KEYWORDS.keys)}\b/
-
-        PUNCTS = {
-          "[optional]" => :kOPTIONAL,
-          "::" => :kCOLON2,
-          ":" => :kCOLON,
-          "[" => :kLBRACKET,
-          "]" => :kRBRACKET,
-          "," => :kCOMMA,
-          "*" => :kSTAR,
-          "--" => :kMINUS2,
-          "<" => :kLT,
-          "." => :kDOT,
-        } #:: Hash[String, Symbol]
-        PUNCTS_RE = Regexp.union(PUNCTS.keys) #:: Regexp
-
-        # @rbs scanner: StringScanner
-        # @rbs returns void
-        def initialize(scanner)
-          @scanner = scanner
-          @current_token = nil
-        end
-
-        # @rbs tree: AST::Tree
-        # @rbs returns token?
-        def advance(tree)
-          last = current_token
+      # Split lines of comments in `result` into paragraphs
+      #
+      # A paragraph consists of:
+      #
+      # * An annotation syntax construct -- starting with `@rbs` or `::`, or
+      # * Lines of something else
+      #
+      # Yields an array of comments, and a boolean indicating if the comments may be an annotation.
+      #
+      #: (ParsingResult) { (Array[Prism::Comment], bool is_annotation) -> void } -> void
+      def each_annotation_paragraph(result, &block)
+        yield_paragraph([], result.comments.dup, &block)
+      end
+
+      # The first annotation line is already detected and consumed.
+      # The annotation comment is already in `comments`.
+      #
+      # @rbs comments: Array[Prism::Comment] -- Annotation comments
+      # @rbs lines: Array[Prism::Comment] -- Lines to be consumed
+      # @rbs offset: Integer -- Offset of the first character of the first annotation comment from the `#` (>= 1)
+      # @rbs allow_empty_lines: bool -- `true` if empty line is allowed inside the annotation comments
+      # @rbs &block: (Array[Prism::Comment], bool is_annotation) -> void
+      # @rbs return: void
+      def yield_annotation(comments, lines, offset, allow_empty_lines:, &block)
+        first_comment = lines.first
+
+        if first_comment
+          nonspace_index = first_comment.location.slice.index(/\S/, 1)
 
           case
-          when s = scanner.scan(/\s+/)
-            tree << [:tWHITESPACE, s] if tree
-            advance(tree)
-          when s = scanner.scan(/@rbs!/)
-            @current_token = [:kRBSE, s]
-          when s = scanner.scan(/@rbs\b/)
-            @current_token = [:kRBS, s]
-          when s = scanner.scan(PUNCTS_RE)
-            @current_token = [PUNCTS.fetch(s), s]
-          when s = scanner.scan(KW_RE)
-            @current_token = [KEYWORDS.fetch(s), s]
-          when s = scanner.scan(/[A-Z]\w*/)
-            @current_token = [:tUIDENT, s]
-          when s = scanner.scan(/_[A-Z]\w*/)
-            @current_token = [:tIFIDENT, s]
-          when s = scanner.scan(/[a-z]\w*/)
-            @current_token = [:tLVAR, s]
-          when s = scanner.scan(/![a-z]\w*/)
-            @current_token = [:tELVAR, s]
-          when s = scanner.scan(/@\w+/)
-            @current_token = [:tATIDENT, s]
-          when s = scanner.scan(/%a\{[^}]+\}/)
-            @current_token = [:tANNOTATION, s]
-          when s = scanner.scan(/%a\[[^\]]+\]/)
-            @current_token = [:tANNOTATION, s]
-          when s = scanner.scan(/%a\([^)]+\)/)
-            @current_token = [:tANNOTATION, s]
+          when nonspace_index.nil?
+            if allow_empty_lines
+              lines.shift
+              yield_empty_annotation(comments, [first_comment], lines, offset, &block)
+            else
+              # Starting next paragraph (or annotation)
+              yield(comments, true)
+              yield_paragraph([], lines, &block)
+            end
+          when nonspace_index > offset
+            # Continuation of the annotation
+            lines.shift
+            comments.push(first_comment)
+            yield_annotation(comments, lines, offset, allow_empty_lines: allow_empty_lines, &block)
           else
-            @current_token = nil
+            # Starting next paragraph (or annotation)
+            yield(comments, true)
+            yield_paragraph([], lines, &block)
           end
-
-          last
+        else
+          yield(comments, true)
         end
+      end
 
-        # Consume given token type and inserts the token to the tree or `nil`
-        #
-        # @rbs type: Array[Symbol]
-        # @rbs tree: AST::Tree
-        # @rbs returns void
-        def consume_token(*types, tree:)
-          if type?(*types)
-            tree << advance(tree)
+      # The first line is NOT consumed.
+      #
+      # The `comments` may be empty.
+      #
+      # @rbs comments: Array[Prism::Comment] -- Leading comments
+      # @rbs lines: Array[Prism::Comment] -- Lines to be consumed
+      # @rbs &block: (Array[Prism::Comment], bool is_annotation) -> void
+      # @rbs return: void
+      def yield_paragraph(comments, lines, &block)
+        while first_comment = lines.first
+          if offset = annotation_comment?(first_comment)
+            yield comments, false unless comments.empty?
+            lines.shift
+            case offset
+            when Integer
+              yield_annotation([first_comment], lines, offset, allow_empty_lines: true, &block)
+            when true
+              yield_annotation([first_comment], lines, 1, allow_empty_lines: false, &block)
+            end
+            return
           else
-            tree << nil
+            lines.shift
+            comments.push(first_comment)
           end
         end
 
-        # Consume given token type and inserts the token to the tree or raise
-        #
-        # @rbs type: Array[Symbol]
-        # @rbs tree: AST::Tree
-        # @rbs returns void
-        def consume_token!(*types, tree:)
-          type!(*types)
-          tree << advance(tree)
-        end
-
-        # Test if current token has specified `type`
-        #
-        # @rbs type: Array[Symbol]
-        # @rbs returns bool
-        def type?(*type)
-          type.any? { current_token && current_token[0] == _1 }
-        end
-
-        # Ensure current token is one of the specified in types
-        #
-        # @rbs types: Array[Symbol]
-        # @rbs returns void
-        def type!(*types)
-          raise "Unexpected token: #{current_token&.[](0)}, where expected token: #{types.join(",")}" unless type?(*types)
-        end
+        yield comments, false unless comments.empty?
+      end
 
-        # Reset the current_token to incoming comment `--`
-        #
-        # Reset to the end of the input if `--` token cannot be found.
-        #
-        # @rbs returns String -- String that is skipped
-        def skip_to_comment
-          return "" if type?(:kMINUS2)
+      # Consumes empty lines between annotation lines
+      #
+      # An empty line is already detected and consumed.
+      # The line is already removed from `lines` and put in `empty_comments`.
+      #
+      # Note that the arguments, `comments`, `empty_comments`, and `lines` are modified in place.
+      #
+      # @rbs comments: Array[Prism::Comment] -- Non empty annotation comments
+      # @rbs empty_comments: Array[Prism::Comment] -- Empty comments that may be part of the annotation
+      # @rbs lines: Array[Prism::Comment] -- Lines
+      # @rbs offset: Integer -- Offset of the first character of the annotation
+      # @rbs &block: (Array[Prism::Comment], bool is_annotation) -> void
+      # @rbs return: void
+      def yield_empty_annotation(comments, empty_comments, lines, offset, &block)
+        first_comment = lines.first
+
+        if first_comment
+          nonspace_index = first_comment.location.slice.index(/\S/, 1)
 
-          if string = scanner.scan_until(/--/)
-            @current_token = [:kMINUS2, "--"]
-            string.delete_suffix("--")
+          case
+          when nonspace_index.nil?
+            # Empty line, possibly continues the annotation
+            lines.shift
+            empty_comments << first_comment
+            yield_empty_annotation(comments, empty_comments, lines, offset, &block)
+          when nonspace_index > offset
+            # Continuation of the annotation
+            lines.shift
+            comments.concat(empty_comments)
+            comments.push(first_comment)
+            yield_annotation(comments, lines, offset, allow_empty_lines: true, &block)
          else
-            s = scanner.rest
-            @current_token = [:kEOF, ""]
-            scanner.terminate
-            s
+            yield comments, true
+            yield_paragraph(empty_comments, lines, &block)
          end
+        else
+          # EOF
+          yield comments, true
+          yield empty_comments, false
        end
      end
 
      # @rbs comments: AST::CommentLines
-      # @rbs returns AST::Annotations::t?
+      # @rbs return: AST::Annotations::t?
      def parse_annotation(comments)
        scanner = StringScanner.new(comments.string)
        tokenizer = Tokenizer.new(scanner)
 
        tree = AST::Tree.new(:rbs_annotation)
        tokenizer.advance(tree)
+        tokenizer.advance(tree)
 
        case
-        when tokenizer.type?(:kRBSE)
-          tree << tokenizer.current_token
-          tree << [:EMBEDDED_RBS, tokenizer.scanner.rest]
+        when tokenizer.type?(K_RBSE)
+          tokenizer.consume_trivias(tree)
+          tree << tokenizer.lookahead1
+          rest = tokenizer.rest
+          rest.delete_prefix!("@rbs!")
+          tree << [:EMBEDDED_RBS, rest]
          tokenizer.scanner.terminate
          AST::Annotations::Embedded.new(tree, comments)
-        when tokenizer.type?(:kRBS)
-          tree << tokenizer.current_token
-
-          tokenizer.advance(tree)
+        when tokenizer.type?(K_RBS)
+          tokenizer.advance(tree, eat: true)
 
          case
-          when tokenizer.type?(:tLVAR, :tELVAR)
+          when tokenizer.type?(T_LVAR, :tELVAR)
+            tree << parse_var_decl(tokenizer)
+            AST::Annotations::VarType.new(tree, comments)
+          when tokenizer.type?(K_SKIP, K_INHERITS, K_OVERRIDE, K_USE, K_GENERIC, K_MODULE, K_CLASS) &&
+            tokenizer.type2?(K_COLON)
            tree << parse_var_decl(tokenizer)
            AST::Annotations::VarType.new(tree, comments)
-          when tokenizer.type?(:kSKIP)
+          when tokenizer.type?(K_MODULE)
+            tree << parse_module_decl(tokenizer)
+            AST::Annotations::ModuleDecl.new(tree, comments)
+          when tokenizer.type?(K_CLASS)
+            tree << parse_class_decl(tokenizer)
+            AST::Annotations::ClassDecl.new(tree, comments)
+          when tokenizer.type?(K_SKIP)
            AST::Annotations::Skip.new(tree, comments)
-          when tokenizer.type?(:kRETURNS)
+          when tokenizer.type?(K_RETURN)
            tree << parse_return_type_decl(tokenizer)
            AST::Annotations::ReturnType.new(tree, comments)
-          when tokenizer.type?(:tANNOTATION)
+          when tokenizer.type?(T_ANNOTATION)
            tree << parse_rbs_annotation(tokenizer)
            AST::Annotations::RBSAnnotation.new(tree, comments)
-          when tokenizer.type?(:kINHERITS)
+          when tokenizer.type?(K_INHERITS)
            tree << parse_inherits(tokenizer)
            AST::Annotations::Inherits.new(tree, comments)
-          when tokenizer.type?(:kOVERRIDE)
+          when tokenizer.type?(K_OVERRIDE)
            tree << parse_override(tokenizer)
            AST::Annotations::Override.new(tree, comments)
-          when tokenizer.type?(:kUSE)
+          when tokenizer.type?(K_USE)
            tree << parse_use(tokenizer)
            AST::Annotations::Use.new(tree, comments)
-          when tokenizer.type?(:kMODULESELF)
+          when tokenizer.type?(K_MODULE_SELF)
            tree << parse_module_self(tokenizer)
            AST::Annotations::ModuleSelf.new(tree, comments)
-          when tokenizer.type?(:kGENERIC)
+          when tokenizer.type?(K_GENERIC)
            tree << parse_generic(tokenizer)
            AST::Annotations::Generic.new(tree, comments)
-          when tokenizer.type?(:kSELF, :tATIDENT)
+          when tokenizer.type?(K_SELF, T_ATIDENT)
            tree << parse_ivar_type(tokenizer)
            AST::Annotations::IvarType.new(tree, comments)
-          when tokenizer.type?(:kYIELDS)
-            tree << parse_yields(tokenizer)
-            AST::Annotations::Yields.new(tree, comments)
+          when tokenizer.type?(K_STAR)
+            tree << parse_splat_param_type(tokenizer)
+            AST::Annotations::SplatParamType.new(tree, comments)
+          when tokenizer.type?(K_STAR2)
+            tree << parse_splat_param_type(tokenizer)
+            AST::Annotations::DoubleSplatParamType.new(tree, comments)
+          when tokenizer.type?(K_AMP)
+            tree << parse_block_type(tokenizer)
+            AST::Annotations::BlockType.new(tree, comments)
+          when tokenizer.type?(K_LPAREN, K_ARROW, K_LBRACE, K_LBRACKET, K_DOT3)
+            tree << parse_method_type_annotation(tokenizer)
+            AST::Annotations::Method.new(tree, comments)
          end
-        when tokenizer.type?(:kCOLON2)
-          tree << tokenizer.current_token
-          tokenizer.advance(tree)
-          tree << parse_type_method_type(tokenizer, tree)
-          AST::Annotations::Assertion.new(tree, comments)
-        when tokenizer.type?(:kLBRACKET)
+        when tokenizer.type?(K_COLON)
+          tokenizer.advance(tree, eat: true)
+
+          if tokenizer.type?(K_DOT3)
+            tokenizer.advance(tree, eat: true)
+            AST::Annotations::Dot3Assertion.new(tree, comments)
+          else
+            type = parse_type_method_type(tokenizer, tree)
+            tree << type
+
+            case type
+            when MethodType
+              AST::Annotations::MethodTypeAssertion.new(tree, comments)
+            when AST::Tree, nil
+              AST::Annotations::SyntaxErrorAssertion.new(tree, comments)
+            else
+              AST::Annotations::TypeAssertion.new(tree, comments)
+            end
+          end
+        when tokenizer.type?(K_LBRACKET)
          tree << parse_type_app(tokenizer)
          AST::Annotations::Application.new(tree, comments)
        end
      end
 
      # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
      def parse_var_decl(tokenizer)
        tree = AST::Tree.new(:var_decl)
 
-        tokenizer.consume_token!(:tLVAR, :tELVAR, tree: tree)
+        tokenizer.advance(tree, eat: true)
 
-        if tokenizer.type?(:kCOLON)
-          tree << tokenizer.current_token
+        if tokenizer.type?(K_COLON)
+          tree << tokenizer.lookahead1
          tokenizer.advance(tree)
        else
          tree << nil
        end
 
+        tokenizer.consume_trivias(tree)
        tree << parse_type(tokenizer, tree)
 
-        if tokenizer.type?(:kMINUS2)
-          tree << parse_comment(tokenizer)
-        else
-          tree << nil
+        tree << parse_optional(tokenizer, K_MINUS2, tree: tree) do
+          parse_comment(tokenizer)
        end
 
        tree
      end
 
      # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
      def parse_return_type_decl(tokenizer)
        tree = AST::Tree.new(:return_type_decl)
 
-        tokenizer.consume_token!(:kRETURNS, tree: tree)
+        tokenizer.consume_token!(K_RETURN, tree: tree)
+        tokenizer.consume_token(K_COLON, tree: tree)
        tree << parse_type(tokenizer, tree)
-        tree << parse_optional(tokenizer, :kMINUS2) { parse_comment(tokenizer) }
+        tree << parse_optional(tokenizer, K_MINUS2, tree: tree) do
+          parse_comment(tokenizer)
+        end
 
        tree
      end
 
      # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
      def parse_comment(tokenizer)
        tree = AST::Tree.new(:comment)
 
-        tokenizer.type!(:kMINUS2)
+        tokenizer.consume_token(K_MINUS2, tree: tree)
 
-        tree << tokenizer.current_token
-        rest = tokenizer.scanner.rest || ""
+        rest = tokenizer.rest
        tokenizer.scanner.terminate
-        tree << [:tCOMMENT, rest]
+        tree << [T_COMMENT, rest]
 
        tree
      end
 
      # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
      def parse_type_app(tokenizer)
        tree = AST::Tree.new(:tapp)
 
-        if tokenizer.type?(:kLBRACKET)
-          tree << tokenizer.current_token
+        if tokenizer.type?(K_LBRACKET)
+          tree << tokenizer.lookahead1
          tokenizer.advance(tree)
        end
 
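The rewritten `parse_annotation` above dispatches on shared token constants (from the `Tokenizer` apparently extracted to the new `annotation_parser/tokenizer.rb`) and recognizes several annotation kinds that 0.3.0 did not: splat and double-splat parameter types, block parameter types, method type annotations, `@rbs module`/`@rbs class` declarations, and `...`/method-type assertions after `#:`. A hedged sketch of how these look in annotated code, inferred from this parser rather than from the gem's documentation (class and method names invented):

```ruby
# rbs_inline: enabled

class Runner
  # @rbs *commands: String -- splat parameter type
  # @rbs **env: String -- double splat parameter type
  # @rbs &block: (String) -> void -- block parameter type
  def run(*commands, **env, &block)
    commands.each { |c| block.call(c) }
  end

  #: (Integer) -> String
  def format(value)
    text = value.to_s #: String
    text
  end
end
```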
@@ -422,25 +465,55 @@ module RBS
           break unless type
           break if type.is_a?(AST::Tree)
 
-          if tokenizer.type?(:kCOMMA)
-            types << tokenizer.current_token
+          if tokenizer.type?(K_COMMA)
+            types << tokenizer.lookahead1
             tokenizer.advance(types)
           end
 
-          if tokenizer.type?(:kRBRACKET)
+          if tokenizer.type?(K_RBRACKET)
             break
           end
         end
         tree << types
 
-        if tokenizer.type?(:kRBRACKET)
-          tree << tokenizer.current_token
+        if tokenizer.type?(K_RBRACKET)
+          tree << tokenizer.lookahead1
           tokenizer.advance(tree)
         end
 
         tree
       end
 
+      # @rbs (Tokenizer) -> AST::Tree
+      def parse_method_type_annotation(tokenizer)
+        tree = AST::Tree.new(:method_type_annotation)
+
+        until tokenizer.type?(K_EOF)
+          if tokenizer.type?(K_DOT3)
+            tree << tokenizer.lookahead1
+            tokenizer.advance(tree)
+            break
+          else
+            method_type = parse_method_type(tokenizer, tree)
+            case method_type
+            when MethodType
+              tree << method_type
+
+              if tokenizer.type?(K_VBAR)
+                tokenizer.advance(tree, eat: true)
+              else
+                break
+              end
+            when AST::Tree
+              tree << method_type
+              break
+            end
+          end
+        end
+
+        tree
+      end
+
       # Parse a RBS method type or type and returns it
       #
       # It tries parsing a method type, and then parsing a type if failed.
@@ -451,44 +524,66 @@ module RBS
       #
       # @rbs tokenizer: Tokenizer
       # @rbs parent_tree: AST::Tree
-      # @rbs returns MethodType | AST::Tree | Types::t | nil
+      # @rbs return: MethodType | AST::Tree | Types::t | nil
       def parse_type_method_type(tokenizer, parent_tree)
+        tokenizer.consume_trivias(parent_tree)
         buffer = RBS::Buffer.new(name: "", content: tokenizer.scanner.string)
-        range = (tokenizer.scanner.charpos - (tokenizer.scanner.matched_size || 0) ..)
+        range = (tokenizer.current_position..)
         begin
           if type = RBS::Parser.parse_method_type(buffer, range: range, require_eof: false)
             loc = type.location or raise
-            size = loc.end_pos - loc.start_pos
-            (size - (tokenizer.scanner.matched_size || 0)).times do
-              tokenizer.scanner.skip(/./)
-            end
-            tokenizer.advance(parent_tree)
+            tokenizer.reset(loc.end_pos, parent_tree)
             type
           else
-            tokenizer.advance(parent_tree)
             nil
           end
         rescue RBS::ParsingError
           begin
             if type = RBS::Parser.parse_type(buffer, range: range, require_eof: false)
               loc = type.location or raise
-              size = loc.end_pos - loc.start_pos
-              (size - (tokenizer.scanner.matched_size || 0)).times do
-                tokenizer.scanner.skip(/./)
-              end
-              tokenizer.advance(parent_tree)
+              tokenizer.reset(loc.end_pos, parent_tree)
               type
             else
-              tokenizer.advance(parent_tree)
               nil
             end
           rescue RBS::ParsingError
-            content = (tokenizer.scanner.matched || "") + (tokenizer.scanner.rest || "")
             tree = AST::Tree.new(:type_syntax_error)
-            tree << [:tSOURCE, content]
+            tree << [T_SOURCE, tokenizer.rest]
+            tokenizer.scanner.terminate
+            tree
+          end
+        end
+      end
+
+      # Parse a RBS method type
+      #
+      # If parsing failed, it returns a Tree(`:type_syntax_error`), consuming all of the remaining input.
+      #
+      # Note that this doesn't recognize `--` comment unlike `parse_type`.
+      #
+      # @rbs tokenizer: Tokenizer
+      # @rbs parent_tree: AST::Tree
+      # @rbs return: MethodType | AST::Tree
+      def parse_method_type(tokenizer, parent_tree)
+        tokenizer.consume_trivias(parent_tree)
+        buffer = RBS::Buffer.new(name: "", content: tokenizer.scanner.string)
+        range = (tokenizer.current_position..)
+        begin
+          if type = RBS::Parser.parse_method_type(buffer, range: range, require_eof: false)
+            loc = type.location or raise
+            tokenizer.reset(loc.end_pos, parent_tree)
+            type
+          else
+            tree = AST::Tree.new(:type_syntax_error)
+            tree << [T_SOURCE, tokenizer.rest]
             tokenizer.scanner.terminate
             tree
           end
+        rescue RBS::ParsingError
+          tree = AST::Tree.new(:type_syntax_error)
+          tree << [T_SOURCE, tokenizer.rest]
+          tokenizer.scanner.terminate
+          tree
         end
       end
 
@@ -507,49 +602,45 @@ module RBS
       #
       # @rbs tokenizer: Tokenizer
       # @rbs parent_tree: AST::Tree
-      # @rbs returns Types::t | AST::Tree | nil
+      # @rbs return: Types::t | AST::Tree | nil
       def parse_type(tokenizer, parent_tree)
+        tokenizer.consume_trivias(parent_tree)
         buffer = RBS::Buffer.new(name: "", content: tokenizer.scanner.string)
-        range = (tokenizer.scanner.charpos - (tokenizer.scanner.matched_size || 0) ..)
+        range = (tokenizer.current_position..)
         if type = RBS::Parser.parse_type(buffer, range: range, require_eof: false)
           loc = type.location or raise
-          size = loc.end_pos - loc.start_pos
-          (size - (tokenizer.scanner.matched_size || 0)).times do
-            tokenizer.scanner.skip(/./)
-          end
-          tokenizer.advance(parent_tree)
+          tokenizer.reset(loc.end_pos, parent_tree)
           type
         else
-          tokenizer.advance(parent_tree)
           nil
         end
       rescue RBS::ParsingError
         content = tokenizer.skip_to_comment
         tree = AST::Tree.new(:type_syntax_error)
-        tree << [:tSOURCE, content]
+        tree << [T_SOURCE, content]
         tree
       end
 
       # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
       def parse_rbs_annotation(tokenizer)
         tree = AST::Tree.new(:rbs_annotation)
 
-        while tokenizer.type?(:tANNOTATION)
-          tree << tokenizer.current_token
+        while tokenizer.type?(T_ANNOTATION)
+          tree << tokenizer.lookahead1
           tokenizer.advance(tree)
         end
 
         tree
       end
 
-      # @rbs tokznier: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs tokenizer: Tokenizer
+      # @rbs return: AST::Tree
       def parse_inherits(tokenizer)
         tree = AST::Tree.new(:rbs_inherits)
 
-        if tokenizer.type?(:kINHERITS)
-          tree << tokenizer.current_token
+        if tokenizer.type?(K_INHERITS)
+          tree << tokenizer.lookahead1
          tokenizer.advance(tree)
        end
 
@@ -561,12 +652,12 @@ module RBS
       # Parse `@rbs override` annotation
       #
       # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
       def parse_override(tokenizer)
         tree = AST::Tree.new(:override)
 
-        if tokenizer.type?(:kOVERRIDE)
-          tree << tokenizer.current_token
+        if tokenizer.type?(K_OVERRIDE)
+          tree << tokenizer.lookahead1
          tokenizer.advance(tree)
        end
 
@@ -576,20 +667,20 @@ module RBS
       # Parse `@rbs use [CLAUSES]` annotation
       #
       # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
       def parse_use(tokenizer)
         tree = AST::Tree.new(:use)
 
-        if tokenizer.type?(:kUSE)
-          tree << tokenizer.current_token
+        if tokenizer.type?(K_USE)
+          tree << tokenizer.lookahead1
           tokenizer.advance(tree)
         end
 
-        while tokenizer.type?(:kCOLON2, :tUIDENT, :tIFIDENT, :tLVAR)
+        while tokenizer.type?(K_COLON2, T_UIDENT, :tIFIDENT, :tLVAR)
           tree << parse_use_clause(tokenizer)
 
-          if tokenizer.type?(:kCOMMA)
-            tree << tokenizer.advance(tree)
+          if tokenizer.type?(K_COMMA)
+            tokenizer.advance(tree, eat: true)
          else
            tree << nil
          end
@@ -608,23 +699,23 @@ module RBS
       # * [`::`?, [UIDENT) `::`]*, `*`]
       #
       # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
       def parse_use_clause(tokenizer)
         tree = AST::Tree.new(:use_clause)
 
-        if tokenizer.type?(:kCOLON2)
-          tree << tokenizer.current_token
+        if tokenizer.type?(K_COLON2)
+          tree << tokenizer.lookahead1
           tokenizer.advance(tree)
         end
 
         while true
           case
-          when tokenizer.type?(:tUIDENT)
-            tree << tokenizer.advance(tree)
+          when tokenizer.type?(T_UIDENT)
+            tokenizer.advance(tree, eat: true)
 
             case
-            when tokenizer.type?(:kCOLON2)
-              tree << tokenizer.advance(tree)
+            when tokenizer.type?(K_COLON2)
+              tokenizer.advance(tree, eat: true)
            else
              break
            end
@@ -634,20 +725,20 @@ module RBS
         end
 
         case
-        when tokenizer.type?(:tLVAR)
-          tree << tokenizer.advance(tree)
-        when tokenizer.type?(:tIFIDENT)
-          tree << tokenizer.advance(tree)
-        when tokenizer.type?(:kSTAR)
-          tree << tokenizer.advance(tree)
+        when tokenizer.type?(T_LVAR)
+          tokenizer.advance(tree, eat: true)
+        when tokenizer.type?(T_IFIDENT)
+          tokenizer.advance(tree, eat: true)
+        when tokenizer.type?(K_STAR)
+          tokenizer.advance(tree, eat: true)
           return tree
         end
 
-        if tokenizer.type?(:kAS)
+        if tokenizer.type?(K_AS)
           as_tree = AST::Tree.new(:as)
 
-          tokenizer.consume_token!(:kAS, tree: as_tree)
-          tokenizer.consume_token(:tLVAR, :tIFIDENT, :tUIDENT, tree: as_tree)
+          tokenizer.consume_token!(K_AS, tree: as_tree)
+          tokenizer.consume_token(T_LVAR, T_IFIDENT, T_UIDENT, tree: as_tree)
 
          tree << as_tree
        else
@@ -658,17 +749,15 @@ module RBS
       end
 
       # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
       def parse_module_self(tokenizer)
         tree = AST::Tree.new(:module_self)
 
-        tokenizer.consume_token!(:kMODULESELF, tree: tree)
+        tokenizer.consume_token!(K_MODULE_SELF, tree: tree)
         tree << parse_type(tokenizer, tree)
 
-        if tokenizer.type?(:kMINUS2)
-          tree << parse_comment(tokenizer)
-        else
-          tree << nil
+        tree << parse_optional(tokenizer, K_MINUS2, tree: tree) do
+          parse_comment(tokenizer)
         end
         tree
       end
@@ -679,87 +768,220 @@ module RBS
       # ```rb
       # # Test if tokenize has `--` token, then parse comment or insert `nil` to tree
       #
-      # tree << parse_optional(tokenizer, :kMINUS2) do
+      # tree << parse_optional(tokenizer, K_MINUS2) do
       #   parse_comment(tokenizer)
       # end
       # ```
       #
+      # If `tree:` is given, it consumes trivia tokens before yielding the block.
+      #
       # @rbs tokenizer: Tokenizer
-      # @rbs types: Array[Symbol]
-      # @rbs block: ^() -> AST::Tree
-      # @rbs returns AST::Tree?
-      def parse_optional(tokenizer, *types, &block)
+      # @rbs *types: Symbol
+      # @rbs tree: AST::Tree? -- the parent tree to consume leading trivia tokens
+      # @rbs &block: () -> AST::Tree
+      # @rbs return: AST::Tree?
+      def parse_optional(tokenizer, *types, tree: nil, &block)
         if tokenizer.type?(*types)
+          if tree
+            tokenizer.consume_trivias(tree)
+          end
           yield
         end
       end
 
       # @rbs tokenizer: Tokenizer
-      # @rbs returns AST::Tree
+      # @rbs return: AST::Tree
       def parse_generic(tokenizer)
         tree = AST::Tree.new(:generic)
 
-        tokenizer.consume_token!(:kGENERIC, tree: tree)
+        tokenizer.consume_token!(K_GENERIC, tree: tree)
 
-        tokenizer.consume_token(:kUNCHECKED, tree: tree)
-        tokenizer.consume_token(:kIN, :kOUT, tree: tree)
+        tree << parse_type_param(tokenizer)
 
-        tokenizer.consume_token(:tUIDENT, tree: tree)
+        tree << parse_optional(tokenizer, K_MINUS2, tree: tree) do
+          parse_comment(tokenizer)
+        end
 
-        tree << parse_optional(tokenizer, :kLT) do
+        tree
+      end
+
+      # @rbs (Tokenizer) -> AST::Tree
+      def parse_type_param(tokenizer)
+        tree = AST::Tree.new(:type_param)
+
+        tokenizer.consume_token(K_UNCHECKED, tree: tree)
+        tokenizer.consume_token(K_IN, K_OUT, tree: tree)
+
+        tokenizer.consume_token(T_UIDENT, tree: tree)
+
+        tree << parse_optional(tokenizer, K_LT, tree: tree) do
           bound = AST::Tree.new(:upper_bound)
 
-          tokenizer.consume_token!(:kLT, tree: bound)
+          tokenizer.consume_token!(K_LT, tree: bound)
           bound << parse_type(tokenizer, bound)
 
           bound
         end
 
-        tree << parse_optional(tokenizer, :kMINUS2) do
-          parse_comment(tokenizer)
-        end
-
         tree
       end
 
-      #:: (Tokenizer) -> AST::Tree
+      #: (Tokenizer) -> AST::Tree
       def parse_ivar_type(tokenizer)
         tree = AST::Tree.new(:ivar_type)
 
-        tokenizer.consume_token(:kSELF, tree: tree)
-        tokenizer.consume_token(:kDOT, tree: tree)
+        tokenizer.consume_token(K_SELF, tree: tree)
+        tokenizer.consume_token(K_DOT, tree: tree)
+
+        tokenizer.consume_token(T_ATIDENT, tree: tree)
+        tokenizer.consume_token(K_COLON, tree: tree)
+
+        tree << parse_type(tokenizer, tree)
+
+        tree << parse_optional(tokenizer, K_MINUS2, tree: tree) do
+          parse_comment(tokenizer)
+        end
+
+        tree
+      end
+
+      #: (Tokenizer) -> AST::Tree
+      def parse_splat_param_type(tokenizer)
+        tree = AST::Tree.new(:splat_param_type)
 
-        tokenizer.consume_token(:tATIDENT, tree: tree)
-        tokenizer.consume_token(:kCOLON, tree: tree)
+        tokenizer.consume_token!(K_STAR, :kSTAR2, tree: tree)
+        tokenizer.consume_token(T_LVAR, tree: tree)
+        tokenizer.consume_token(K_COLON, tree: tree)
 
         tree << parse_type(tokenizer, tree)
 
-        tree << parse_optional(tokenizer, :kMINUS2) do
+        tree << parse_optional(tokenizer, K_MINUS2, tree: tree) do
           parse_comment(tokenizer)
         end
 
         tree
       end
 
-      #:: (Tokenizer) -> AST::Tree
-      def parse_yields(tokenizer)
-        tree = AST::Tree.new(:yields)
+      #: (Tokenizer) -> AST::Tree
+      def parse_block_type(tokenizer)
+        tree = AST::Tree.new(:block_type)
+
+        tokenizer.consume_token!(K_AMP, tree: tree)
+        tokenizer.consume_token(T_LVAR, tree: tree)
+        tokenizer.consume_token(K_COLON, tree: tree)
+
+        tokenizer.consume_token(K_QUESTION, tree: tree)
 
-        tokenizer.consume_token!(:kYIELDS, tree: tree)
-        tokenizer.consume_token(:kOPTIONAL, tree: tree)
+        tokenizer.consume_trivias(tree)
 
         unless (string = tokenizer.skip_to_comment()).empty?
-          tree << [:tBLOCKSTR, string]
+          tree << [T_BLOCKSTR, string]
         else
           tree << nil
         end
 
-        tree << parse_optional(tokenizer, :kMINUS2) do
+        tree << parse_optional(tokenizer, K_MINUS2, tree: tree) do
           parse_comment(tokenizer)
         end
 
         tree
       end
+
+      # @rbs (Tokenizer) -> AST::Tree
+      def parse_module_decl(tokenizer)
+        tree = AST::Tree.new(:module_decl)
+
+        tokenizer.consume_token!(K_MODULE, tree: tree)
+
+        tree << parse_module_name(tokenizer)
+
+        tree << parse_optional(tokenizer, K_LBRACKET) do
+          parse_type_params(tokenizer)
+        end
+
+        tree << parse_optional(tokenizer, K_COLON) do
+          parse_module_selfs(tokenizer)
+        end
+
+        tree
+      end
+
+      # @rbs (Tokenizer) -> AST::Tree
+      def parse_class_decl(tokenizer)
+        tree = AST::Tree.new(:class_decl)
+
+        tokenizer.consume_token!(K_CLASS, tree: tree)
+
+        tree << parse_module_name(tokenizer)
+
+        tree << parse_optional(tokenizer, K_LBRACKET) do
+          parse_type_params(tokenizer)
+        end
+
+        tree << parse_optional(tokenizer, K_LT) do
+          super_class = AST::Tree.new(:super_class)
+          tokenizer.consume_token!(K_LT, tree: super_class)
+          super_class << parse_type(tokenizer, super_class)
+          super_class
+        end
+
+        tree
+      end
+
+      # @rbs (Tokenizer) -> AST::Tree
+      def parse_module_name(tokenizer)
+        tree = AST::Tree.new(:module_name)
+
+        tokenizer.consume_token(K_COLON2, tree: tree)
+
+        while tokenizer.type?(T_UIDENT) && tokenizer.type2?(K_COLON2)
+          tokenizer.consume_token!(T_UIDENT, tree: tree)
+          tokenizer.consume_token!(K_COLON2, tree: tree)
+        end
+
+        tokenizer.consume_token(T_UIDENT, tree: tree)
+
+        tree
+      end
+
+      # @rbs (Tokenizer) -> AST::Tree
+      def parse_type_params(tokenizer)
+        tree = AST::Tree.new(:type_params)
+
+        tokenizer.consume_token!(K_LBRACKET, tree: tree)
+
+        while true
+          if type_param = parse_optional(tokenizer, T_UIDENT, K_UNCHECKED, K_IN, K_OUT) { parse_type_param(tokenizer) }
+            tree << type_param
+            break if tokenizer.type?(K_RBRACKET)
+            tokenizer.consume_token(K_COMMA, tree: tree)
+          else
+            break
+          end
+        end
+
+        tokenizer.consume_token(K_RBRACKET, tree: tree)
+
+        tree
+      end
+
+      # @rbs (Tokenizer) -> AST::Tree
+      def parse_module_selfs(tokenizer)
+        tree = AST::Tree.new(:module_selfs)
+
+        tokenizer.consume_token!(K_COLON, tree: tree)
+
+        while true
+          tree << parse_type(tokenizer, tree)
+          if tokenizer.type?(K_COMMA)
+            tokenizer.advance(tree, eat: true)
+          else
+            break
+          end
+        end
+
+        tree
+      end
     end
   end
 end
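The last hunk adds parsers for inline `@rbs module` and `@rbs class` declarations, including generic type parameters with variance and upper bounds, superclasses, and module self-type constraints. Based purely on the grammar of `parse_module_decl`, `parse_class_decl`, `parse_type_params`, and `parse_module_selfs` above (not on the gem's documentation), comments shaped like the following should be accepted; every name here is a placeholder:

```ruby
# rbs_inline: enabled

# @rbs module Acme::Collections[T] : _Each[T]
# @rbs class Acme::Stack[unchecked out T < Object] < BasicObject
```

How the generated RBS uses these declarations is handled in `writer.rb` and `ast/annotations.rb`, whose hunks are not shown in this excerpt.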