format_parser 2.2.1 → 2.4.3

@@ -1,7 +1,7 @@
- # This class provides generic methods for parsing file formats based on QuickTime-style "atoms", such as those seen in
+ # This class provides generic methods for parsing file formats based on QuickTime-style "boxes", such as those seen in
  # the ISO base media file format (ISO/IEC 14496-12), a.k.a MPEG-4, and those that extend it (MP4, CR3, HEIF, etc.).
  #
- # For more information on atoms, see https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html
+ # For more information on boxes, see https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html
  # or https://b.goeswhere.com/ISO_IEC_14496-12_2015.pdf.
  #
  # TODO: The vast majority of the methods have been commented out here. This decision was taken to expedite the release
@@ -9,66 +9,38 @@
  # entirety. We should migrate existing formats that are based on the ISO base media file format and reintroduce these
  # methods with tests down-the-line.

+ require 'matrix'
+ require 'parsers/iso_base_media_file_format/box'
+
  module FormatParser
  module ISOBaseMediaFileFormat
  class Decoder
  include FormatParser::IOUtils

- class Atom < Struct.new(:type, :position, :size, :fields, :children)
- def initialize(type, position, size, fields = nil, children = nil)
- super
- self.fields ||= {}
- self.children ||= []
- end
-
- # Find and return the first descendent (using depth-first search) of a given type.
- #
- # @param [Array<String>] types
- # @return [Atom, nil]
- def find_first_descendent(types)
- children.each do |child|
- return child if types.include?(child.type)
- if (descendent = child.find_first_descendent(types))
- return descendent
- end
- end
- nil
- end
-
- # Find and return all descendents of a given type.
- #
- # @param [Array<String>] types
- # @return [Array<Atom>]
- def select_descendents(types)
- children.map do |child|
- descendents = child.select_descendents(types)
- types.include?(child.type) ? [child] + descendents : descendents
- end.flatten
- end
- end
-
+ # Attempt to build the ISOBMFF box tree represented in the given IO object.
+ #
  # @param [Integer] max_read
- # @param [IO, FormatParser::IOConstraint] io
- # @return [Array<Atom>]
- def build_atom_tree(max_read, io = nil)
+ # @param [IO, StringIO, FormatParser::IOConstraint] io
+ # @return [Array<Box>]
+ def build_box_tree(max_read, io = nil)
  @buf = FormatParser::IOConstraint.new(io) if io
  raise ArgumentError, "IO missing - supply a valid IO object" unless @buf
- atoms = []
+ boxes = []
  max_pos = @buf.pos + max_read
  loop do
  break if @buf.pos >= max_pos
- atom = parse_atom
- break unless atom
- atoms << atom
+ box = parse_box
+ break unless box
+ boxes << box
  end
- atoms
+ boxes
  end

  protected

- # A mapping of atom types to their respective parser methods. Each method must take a single Integer parameter, size,
- # and return the atom's fields and children where appropriate as a Hash and Array of Atoms respectively.
- ATOM_PARSERS = {
+ # A mapping of box types to their respective parser methods. Each method must take a single Integer parameter, size,
+ # and return the box's fields and children where appropriate as a Hash and Array of Boxes respectively.
+ BOX_PARSERS = {
  # 'bxml' => :bxml,
  # 'co64' => :co64,
  # 'cprt' => :cprt,
@@ -81,9 +53,9 @@ module FormatParser
  # 'fiin' => :fiin,
  # 'fire' => :fire,
  # 'fpar' => :fpar,
- # 'ftyp' => :typ,
+ 'ftyp' => :typ,
  # 'gitn' => :gitn,
- # 'hdlr' => :hdlr,
+ 'hdlr' => :hdlr,
  # 'hmhd' => :hmhd,
  # 'iinf' => :iinf,
  # 'iloc' => :iloc,
@@ -91,7 +63,7 @@ module FormatParser
  # 'ipro' => :ipro,
  # 'iref' => :iref,
  # 'leva' => :leva,
- # 'mdhd' => :mdhd,
+ 'mdhd' => :mdhd,
  'mdia' => :container,
  'meco' => :container,
  # 'mehd' => :mehd,
@@ -104,7 +76,7 @@ module FormatParser
  'moof' => :container,
  'moov' => :container,
  'mvex' => :container,
- # 'mvhd' => :mvhd,
+ 'mvhd' => :mvhd,
  'nmhd' => :empty,
  # 'padb' => :padb,
  'paen' => :container,
@@ -131,16 +103,16 @@ module FormatParser
  # 'stri' => :stri,
  'strk' => :container,
  # 'stsc' => :stsc,
- # 'stsd' => :stsd,
+ 'stsd' => :stsd,
  # 'stsh' => :stsh,
  # 'stss' => :stss,
  # 'stsz' => :stsz,
- # 'stts' => :stts,
+ 'stts' => :stts,
  # 'styp' => :typ,
  # 'stz2' => :stz2,
  # 'subs' => :subs,
  # 'tfra' => :tfra,
- # 'tkhd' => :tkhd,
+ 'tkhd' => :tkhd,
  'trak' => :container,
  # 'trex' => :trex,
  # 'tsel' => :tsel,
@@ -152,69 +124,69 @@ module FormatParser
  # 'xml ' => :xml,
  }

- # Parse the atom at the IO's current position.
+ # Parse the box at the IO's current position.
  #
- # @return [Atom, nil]
- def parse_atom
+ # @return [Box, nil]
+ def parse_box
  position = @buf.pos

- size = read_int_32
+ size = read_int
  type = read_string(4)
- size = read_int_64 if size == 1
+ size = read_int(n: 8) if size == 1
  body_size = size - (@buf.pos - position)
- next_atom_position = position + size
-
- if self.class::ATOM_PARSERS.include?(type)
- fields, children = method(self.class::ATOM_PARSERS[type]).call(body_size)
- if @buf.pos != next_atom_position
- # We should never end up in this state. If we do, it likely indicates a bug in the atom's parser method.
- warn("Unexpected IO position after parsing #{type} atom at position #{position}. Atom size: #{size}. Expected position: #{next_atom_position}. Actual position: #{@buf.pos}.")
- @buf.seek(next_atom_position)
+ next_box_position = position + size
+
+ if self.class::BOX_PARSERS.include?(type)
+ fields, children = method(self.class::BOX_PARSERS[type]).call(body_size)
+ if @buf.pos != next_box_position
+ # We should never end up in this state. If we do, it likely indicates a bug in the box's parser method.
+ warn("Unexpected IO position after parsing #{type} box at position #{position}. Box size: #{size}. Expected position: #{next_box_position}. Actual position: #{@buf.pos}.")
+ @buf.seek(next_box_position)
  end
- Atom.new(type, position, size, fields, children)
+ Box.new(type, position, size, fields, children)
  else
  skip_bytes(body_size)
- Atom.new(type, position, size)
+ Box.new(type, position, size)
  end
  rescue FormatParser::IOUtils::InvalidRead
  nil
  end

- # Parse any atom that serves as a container, with only children and no fields of its own.
+ # Parse any box that serves as a container, with only children and no fields of its own.
  def container(size)
- [nil, build_atom_tree(size)]
+ [nil, build_box_tree(size)]
  end

- # Parse only an atom's version and flags, skipping the remainder of the atom's body.
+ # Parse only a box's version and flags, skipping the remainder of the box's body.
  def empty(size)
  fields = read_version_and_flags
  skip_bytes(size - 4)
  [fields, nil]
  end

- # Parse a binary XML atom.
+ # Parse a binary XML box.
  # def bxml(size)
  # fields = read_version_and_flags.merge({
- # data: (size - 4).times.map { read_int_8 }
+ # data: (size - 4).times.map { read_int(n: 1) }
  # })
  # [fields, nil]
  # end

- # Parse a chunk large offset atom.
+ # Parse a chunk large offset box.
  # def co64(_)
  # fields = read_version_and_flags
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
- # entries: entry_count.times.map { { chunk_offset: read_int_64 } }
+ # entries: entry_count.times.map { { chunk_offset: read_int(n: 8) } }
  # })
  # [fields, nil]
  # end

- # Parse a copyright atom.
+ # Parse a copyright box.
  # def cprt(size)
  # fields = read_version_and_flags
- # tmp = read_int_16
+ # tmp = read_int(n: 2)
  # fields.merge!({
  # language: [(tmp >> 10) & 0x1F, (tmp >> 5) & 0x1F, tmp & 0x1F],
  # notice: read_string(size - 6)
@@ -222,45 +194,45 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a composition to decode atom.
+ # Parse a composition to decode box.
  # def cslg(_)
  # fields = read_version_and_flags
  # version = fields[:version]
  # fields.merge!({
- # composition_to_dts_shift: version == 1 ? read_int_64 : read_int_32,
- # least_decode_to_display_delta: version == 1 ? read_int_64 : read_int_32,
- # greatest_decode_to_display_delta: version == 1 ? read_int_64 : read_int_32,
- # composition_start_time: version == 1 ? read_int_64 : read_int_32,
- # composition_end_time: version == 1 ? read_int_64 : read_int_32,
+ # composition_to_dts_shift: version == 1 ? read_int(n: 8) : read_int,
+ # least_decode_to_display_delta: version == 1 ? read_int(n: 8) : read_int,
+ # greatest_decode_to_display_delta: version == 1 ? read_int(n: 8) : read_int,
+ # composition_start_time: version == 1 ? read_int(n: 8) : read_int,
+ # composition_end_time: version == 1 ? read_int(n: 8) : read_int,
  # })
  # [fields, nil]
  # end

- # Parse a composition time to sample atom.
+ # Parse a composition time to sample box.
  # def ctts(_)
  # fields = read_version_and_flags
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
  # entries: entry_count.times.map do
  # {
- # sample_count: read_int_32,
- # sample_offset: read_int_32
+ # sample_count: read_int,
+ # sample_offset: read_int
  # }
  # end
  # })
  # [fields, nil]
  # end

- # Parse a data reference atom.
+ # Parse a data reference box.
  # def dref(size)
  # fields = read_version_and_flags.merge({
- # entry_count: read_int_32
+ # entry_count: read_int
  # })
- # [fields, build_atom_tree(size - 8)]
+ # [fields, build_box_tree(size - 8)]
  # end

- # Parse a data reference URL entry atom.
+ # Parse a data reference URL entry box.
  # def dref_url(size)
  # fields = read_version_and_flags.merge({
  # location: read_string(size - 4)
@@ -268,7 +240,7 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a data reference URN entry atom.
+ # Parse a data reference URN entry box.
  # def dref_urn(size)
  # fields = read_version_and_flags
  # name, location = read_bytes(size - 4).unpack('Z2')
@@ -279,58 +251,58 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse an FEC reservoir atom.
+ # Parse an FEC reservoir box.
  # def fecr(_)
  # fields = read_version_and_flags
  # version = fields[:version]
- # entry_count = version == 0 ? read_int_16 : read_int_32
+ # entry_count = version == 0 ? read_int(n: 2) : read_int
  # fields.merge!({
  # entry_count: entry_count,
  # entries: entry_count.times.map do
  # {
- # item_id: version == 0 ? read_int_16 : read_int_32,
- # symbol_count: read_int_8
+ # item_id: version == 0 ? read_int(n: 2) : read_int,
+ # symbol_count: read_int(n: 1)
  # }
  # end
  # })
  # end

- # Parse an FD item information atom.
+ # Parse an FD item information box.
  # def fiin(size)
  # fields = read_version_and_flags.merge({
- # entry_count: read_int_16
+ # entry_count: read_int(n: 2)
  # })
- # [fields, build_atom_tree(size - 6)]
+ # [fields, build_box_tree(size - 6)]
  # end

- # Parse a file reservoir atom.
+ # Parse a file reservoir box.
  # def fire(_)
  # fields = read_version_and_flags
- # entry_count = version == 0 ? read_int_16 : read_int_32
+ # entry_count = version == 0 ? read_int(n: 2) : read_int
  # fields.merge!({
  # entry_count: entry_count,
  # entries: entry_count.times.map do
  # {
- # item_id: version == 0 ? read_int_16 : read_int_32,
- # symbol_count: read_int_32
+ # item_id: version == 0 ? read_int(n: 2) : read_int,
+ # symbol_count: read_int
  # }
  # end
  # })
  # [fields, nil]
  # end

- # Parse a file partition atom.
+ # Parse a file partition box.
  # def fpar(_)
  # fields = read_version_and_flags
  # version = fields[:version]
  # fields.merge!({
- # item_id: version == 0 ? read_int_16 : read_int_32,
- # packet_payload_size: read_int_16,
- # fec_encoding_id: skip_bytes(1) { read_int_8 },
- # fec_instance_id: read_int_16,
- # max_source_block_length: read_int_16,
- # encoding_symbol_length: read_int_16,
- # max_number_of_encoding_symbols: read_int_16,
+ # item_id: version == 0 ? read_int(n: 2) : read_int,
+ # packet_payload_size: read_int(n: 2),
+ # fec_encoding_id: skip_bytes(1) { read_int(n: 1) },
+ # fec_instance_id: read_int(n: 2),
+ # max_source_block_length: read_int(n: 2),
+ # encoding_symbol_length: read_int(n: 2),
+ # max_number_of_encoding_symbols: read_int(n: 2),
  # })
  # # TODO: Parse scheme_specific_info, entry_count and entries { block_count, block_size }.
  # skip_bytes(size - 20)
@@ -338,10 +310,10 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a group ID to name atom.
+ # Parse a group ID to name box.
  # def gitn(size)
  # fields = read_version_and_flags
- # entry_count = read_int_16
+ # entry_count = read_int(n: 2)
  # fields.merge!({
  # entry_count: entry_count
  # })
@@ -350,43 +322,43 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a handler atom.
- # def hdlr(size)
- # fields = read_version_and_flags.merge({
- # handler_type: skip_bytes(4) { read_int_32 },
- # name: skip_bytes(12) { read_string(size - 24) }
- # })
- # [fields, nil]
- # end
+ # Parse a handler box.
+ def hdlr(size)
+ fields = read_version_and_flags.merge({
+ handler_type: skip_bytes(4) { read_string(4) },
+ name: skip_bytes(12) { read_string(size - 24) }
+ })
+ [fields, nil]
+ end

- # Parse a hint media header atom.
+ # Parse a hint media header box.
  # def hmhd(_)
  # fields = read_version_and_flags.merge({
- # max_pdu_size: read_int_16,
- # avg_pdu_size: read_int_16,
- # max_bitrate: read_int_32,
- # avg_bitrate: read_int_32
+ # max_pdu_size: read_int(n: 2),
+ # avg_pdu_size: read_int(n: 2),
+ # max_bitrate: read_int,
+ # avg_bitrate: read_int
  # })
  # skip_bytes(4)
  # [fields, nil]
  # end

- # Parse an item info atom.
+ # Parse an item info box.
  # def iinf(size)
  # fields = read_version_and_flags.merge({
- # entry_count: version == 0 ? read_int_16 : read_int_32
+ # entry_count: version == 0 ? read_int(n: 2) : read_int
  # })
- # [fields, build_atom_tree(size - 8)]
+ # [fields, build_box_tree(size - 8)]
  # end

- # Parse an item location atom.
+ # Parse an item location box.
  # def iloc(_)
  # fields = read_version_and_flags
- # tmp = read_int_16
+ # tmp = read_int(n: 2)
  # item_count = if version < 2
- # read_int_16
+ # read_int(n: 2)
  # elsif version == 2
- # read_int_32
+ # read_int
  # end
  # offset_size = (tmp >> 12) & 0x7
  # length_size = (tmp >> 8) & 0x7
@@ -400,15 +372,15 @@ module FormatParser
  # items: item_count.times.map do
  # item = {
  # item_id: if version < 2
- # read_int_16
+ # read_int(n: 2)
  # elsif version == 2
- # read_int_32
+ # read_int
  # end
  # }
- # item[:construction_method] = read_int_16 & 0x7 if version == 1 || version == 2
- # item[:data_reference_index] = read_int_16
+ # item[:construction_method] = read_int(n: 2) & 0x7 if version == 1 || version == 2
+ # item[:data_reference_index] = read_int(n: 2)
  # skip_bytes(base_offset_size) # TODO: Dynamically parse base_offset based on base_offset_size
- # extent_count = read_int_16
+ # extent_count = read_int(n: 2)
  # item[:extent_count] = extent_count
  # # TODO: Dynamically parse extent_index, extent_offset and extent_length based on their respective sizes.
  # skip_bytes(extent_count * (offset_size + length_size))
@@ -417,34 +389,34 @@ module FormatParser
  # })
  # end

- # Parse an item info entry atom.
+ # Parse an item info entry box.
  # def infe(size)
- # # TODO: This atom is super-complicated with optional and/or version-dependent fields and children.
+ # # TODO: This box is super-complicated with optional and/or version-dependent fields and children.
  # empty(size)
  # end

- # Parse an item protection atom.
+ # Parse an item protection box.
  # def ipro(size)
  # fields = read_version_and_flags.merge({
- # protection_count: read_int_16
+ # protection_count: read_int(n: 2)
  # })
- # [fields, build_atom_tree(size - 6)]
+ # [fields, build_box_tree(size - 6)]
  # end

- # Parse an item reference atom.
+ # Parse an item reference box.
  # def iref(_)
- # [read_version_and_flags, build_atom_tree(size - 4)]
+ # [read_version_and_flags, build_box_tree(size - 4)]
  # end

- # Parse a level assignment atom.
+ # Parse a level assignment box.
  # def leva(_)
  # fields = read_version_and_flags
- # level_count = read_int_8
+ # level_count = read_int(n: 1)
  # fields.merge!({
  # level_count: level_count,
  # levels: level_count.times.map do
- # track_id = read_int_32
- # tmp = read_int_8
+ # track_id = read_int
+ # tmp = read_int(n: 1)
  # assignment_type = tmp & 0x7F
  # level = {
  # track_id: track_id,
@@ -452,14 +424,14 @@ module FormatParser
  # assignment_type: assignment_type
  # }
  # if assignment_type == 0
- # level[:grouping_type] = read_int_32
+ # level[:grouping_type] = read_int
  # elsif assignment_type == 1
  # level.merge!({
- # grouping_type: read_int_32,
- # grouping_type_parameter: read_int_32
+ # grouping_type: read_int,
+ # grouping_type_parameter: read_int
  # })
  # elsif assignment_type == 4
- # level[:sub_track_id] = read_int_32
+ # level[:sub_track_id] = read_int
  # end
  # level
  # end
@@ -467,87 +439,87 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a media header atom.
- # def mdhd(_)
- # fields = read_version_and_flags
- # version = fields[:version]
- # fields.merge!({
- # creation_time: version == 1 ? read_int_64 : read_int_32,
- # modification_time: version == 1 ? read_int_64 : read_int_32,
- # timescale: read_int_32,
- # duration: version == 1 ? read_int_64 : read_int_32,
- # })
- # tmp = read_int_16
- # fields[:language] = [(tmp >> 10) & 0x1F, (tmp >> 5) & 0x1F, tmp & 0x1F]
- # skip_bytes(2)
- # [fields, nil]
- # end
+ # Parse a media header box.
+ def mdhd(_)
+ fields = read_version_and_flags
+ version = fields[:version]
+ fields.merge!({
+ creation_time: version == 1 ? read_int(n: 8) : read_int,
+ modification_time: version == 1 ? read_int(n: 8) : read_int,
+ timescale: read_int,
+ duration: version == 1 ? read_int(n: 8) : read_int,
+ })
+ tmp = read_int(n: 2)
+ fields[:language] = [(tmp >> 10) & 0x1F, (tmp >> 5) & 0x1F, tmp & 0x1F]
+ skip_bytes(2)
+ [fields, nil]
+ end

- # Parse a movie extends header atom.
+ # Parse a movie extends header box.
  # def mehd(_)
  # fields = read_version_and_flags
  # version = fields[:version]
- # fields[:fragment_duration] = version == 1 ? read_int_64 : read_int_32
+ # fields[:fragment_duration] = version == 1 ? read_int(n: 8) : read_int
  # [fields, nil]
  # end

- # Parse an metabox relation atom.
+ # Parse a metabox relation box.
  # def mere(_)
  # fields = read_version_and_flags.merge({
- # first_metabox_handler_type: read_int_32,
- # second_metabox_handler_type: read_int_32,
- # metabox_relation: read_int_8
+ # first_metabox_handler_type: read_int,
+ # second_metabox_handler_type: read_int,
+ # metabox_relation: read_int(n: 1)
  # })
  # [fields, nil]
  # end

- # Parse a meta atom.
+ # Parse a meta box.
  # def meta(size)
  # fields = read_version_and_flags
- # [fields, build_atom_tree(size - 4)]
+ # [fields, build_box_tree(size - 4)]
  # end

- # Parse a movie fragment header atom.
+ # Parse a movie fragment header box.
  # def mfhd(_)
  # fields = read_version_and_flags.merge({
- # sequence_number: read_int_32
+ # sequence_number: read_int
  # })
  # [fields, nil]
  # end

- # Parse a movie fragment random access offset atom.
+ # Parse a movie fragment random access offset box.
  # def mfro(_)
  # fields = read_version_and_flags.merge({
- # size: read_int_32
+ # size: read_int
  # })
  # [fields, nil]
  # end

- # Parse a movie header atom.
- # def mvhd(_)
- # fields = read_version_and_flags
- # version = fields[:version]
- # fields.merge!({
- # creation_time: version == 1 ? read_int_64 : read_int_32,
- # modification_time: version == 1 ? read_int_64 : read_int_32,
- # timescale: read_int_32,
- # duration: version == 1 ? read_int_64 : read_int_32,
- # rate: read_fixed_point_32,
- # volume: read_fixed_point_16,
- # matrix: skip_bytes(10) { read_matrix },
- # next_trak_id: skip_bytes(24) { read_int_32 },
- # })
- # [fields, nil]
- # end
+ # Parse a movie header box.
+ def mvhd(_)
+ fields = read_version_and_flags
+ version = fields[:version]
+ fields.merge!({
+ creation_time: version == 1 ? read_int(n: 8) : read_int,
+ modification_time: version == 1 ? read_int(n: 8) : read_int,
+ timescale: read_int,
+ duration: version == 1 ? read_int(n: 8) : read_int,
+ rate: read_fixed_point(n: 4),
+ volume: read_fixed_point(n: 2, signed: true),
+ matrix: skip_bytes(10) { read_matrix },
+ next_trak_id: skip_bytes(24) { read_int },
+ })
+ [fields, nil]
+ end

- # Parse a padding bits atom.
+ # Parse a padding bits box.
  # def padb(_)
  # fields = read_version_and_flags
- # sample_count = read_int_32
+ # sample_count = read_int
  # fields.merge!({
  # sample_count: sample_count,
  # padding: ((sample_count + 1) / 2).times.map do
- # tmp = read_int_8
+ # tmp = read_int(n: 1)
  # {
  # padding_1: tmp >> 4,
  # padding_2: tmp & 0x07
@@ -557,170 +529,170 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a progressive download information atom.
+ # Parse a progressive download information box.
  # def pdin(size)
  # fields = read_version_and_flags.merge({
  # entries: ((size - 4) / 8).times.map do
  # {
- # rate: read_int_32,
- # initial_delay: read_int_32
+ # rate: read_int,
+ # initial_delay: read_int
  # }
  # end
  # })
  # [fields, nil]
  # end

- # Parse a primary item atom.
+ # Parse a primary item box.
  # def pitm(_)
  # fields = read_version_and_flags.merge({
- # item_id: version == 0 ? read_int_16 : read_int_32
+ # item_id: version == 0 ? read_int(n: 2) : read_int
  # })
  # [fields, nil]
  # end

- # Parse a producer reference time atom.
+ # Parse a producer reference time box.
  # def prft(_)
  # fields = read_version_and_flags
  # version = fields[:version]
  # fields.merge!({
- # reference_track_id: read_int_32,
- # ntp_timestamp: read_int_64,
- # media_time: version == 0 ? read_int_32 : read_int_64
+ # reference_track_id: read_int,
+ # ntp_timestamp: read_int(n: 8),
+ # media_time: version == 0 ? read_int : read_int(n: 8)
  # })
  # [fields, nil]
  # end

- # Parse a sample auxiliary information offsets atom.
+ # Parse a sample auxiliary information offsets box.
  # def saio(_)
  # fields = read_version_and_flags
  # version = field[:version]
  # flags = fields[:flags]
  # fields.merge!({
- # aux_info_type: read_int_32,
- # aux_info_type_parameter: read_int_32
+ # aux_info_type: read_int,
+ # aux_info_type_parameter: read_int
  # }) if flags & 0x1
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
- # offsets: entry_count.times.map { version == 0 ? read_int_32 : read_int_64 }
+ # offsets: entry_count.times.map { version == 0 ? read_int : read_int(n: 8) }
  # })
  # [fields, nil]
  # end

- # Parse a sample auxiliary information sizes atom.
+ # Parse a sample auxiliary information sizes box.
  # def saiz(_)
  # fields = read_version_and_flags
  # flags = fields[:flags]
  # fields.merge!({
- # aux_info_type: read_int_32,
- # aux_info_type_parameter: read_int_32
+ # aux_info_type: read_int,
+ # aux_info_type_parameter: read_int
  # }) if flags & 0x1
- # default_sample_info_size = read_int_8
- # sample_count = read_int_32
+ # default_sample_info_size = read_int(n: 1)
+ # sample_count = read_int
  # fields.merge!({
  # default_sample_info_size: default_sample_info_size,
  # sample_count: sample_count
  # })
- # fields[:sample_info_sizes] = sample_count.times.map { read_int_8 } if default_sample_info_size == 0
+ # fields[:sample_info_sizes] = sample_count.times.map { read_int(n: 1) } if default_sample_info_size == 0
  # [fields, nil]
  # end

- # Parse a sample to group atom.
+ # Parse a sample to group box.
  # def sbgp(_)
  # fields = read_version_and_flags
- # fields[:grouping_type] = read_int_32
- # fields[:grouping_type_parameter] = read_int_32 if fields[:version] == 1
- # entry_count = read_int_32
+ # fields[:grouping_type] = read_int
+ # fields[:grouping_type_parameter] = read_int if fields[:version] == 1
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
  # entries: entry_count.times.map do
  # {
- # sample_count: read_int_32,
- # group_description_index: read_int_32
+ # sample_count: read_int,
+ # group_description_index: read_int
  # }
  # end
  # })
  # [fields, nil]
  # end

- # Parse a scheme type atom.
+ # Parse a scheme type box.
  # def schm(_)
  # fields = read_version_and_flags.merge({
  # scheme_type: read_string(4),
- # scheme_version: read_int_32,
+ # scheme_version: read_int,
  # })
- # fields[:scheme_uri] = (size - 12).times.map { read_int_8 } if flags & 0x1 != 0
+ # fields[:scheme_uri] = (size - 12).times.map { read_int(n: 1) } if flags & 0x1 != 0
  # [fields, nil]
  # end

- # Parse an independent and disposable samples atom.
+ # Parse an independent and disposable samples box.
  # def sdtp(size)
- # # TODO: Parsing this atom needs the sample_count from the sample size atom (`stsz`).
+ # # TODO: Parsing this box needs the sample_count from the sample size box (`stsz`).
  # empty(size)
  # end

- # Parse an FD session group atom.
+ # Parse an FD session group box.
  # def segr(_)
- # num_session_groups = read_int_16
+ # num_session_groups = read_int(n: 2)
  # fields = {
  # num_session_groups: num_session_groups,
  # session_groups: num_session_groups.times.map do
- # entry_count = read_int_8
+ # entry_count = read_int(n: 1)
  # session_group = {
  # entry_count: entry_count,
- # entries: entry_count.times.map { { group_id: read_int_32 } }
+ # entries: entry_count.times.map { { group_id: read_int } }
  # }
- # num_channels_in_session_group = read_int_16
+ # num_channels_in_session_group = read_int(n: 2)
  # session_group.merge({
  # num_channels_in_session_group: num_channels_in_session_group,
- # channels: num_channels_in_session_group.times.map { { hint_track_id: read_int_32 } }
+ # channels: num_channels_in_session_group.times.map { { hint_track_id: read_int } }
  # })
  # end
  # }
  # [fields, nil]
  # end

- # Parse a sample group description atom.
+ # Parse a sample group description box.
  # def sgpd(_)
  # fields = read_version_and_flags
  # version = fields[:version]
- # fields[:grouping_type] = read_int_32
- # fields[:default_length] = read_int_32 if version == 1
- # fields[:default_sample_description_index] = read_int_32 if version >= 2
- # entry_count = read_int_32
+ # fields[:grouping_type] = read_int
+ # fields[:default_length] = read_int if version == 1
+ # fields[:default_sample_description_index] = read_int if version >= 2
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
  # entries: entry_count.times.map do
  # entry = {}
- # entry[:description_length] = read_int_32 if version == 1 && fields[:default_length] == 0
- # entry[:atom] = parse_atom
+ # entry[:description_length] = read_int if version == 1 && fields[:default_length] == 0
+ # entry[:box] = parse_box
  # end
  # })
  # [fields, nil]
  # end

- # Parse a segment index atom.
+ # Parse a segment index box.
  # def sidx(_)
  # fields = read_version_and_flags.merge({
- # reference_id: read_int_32,
- # timescale: read_int_32
+ # reference_id: read_int,
+ # timescale: read_int
  # })
  # version = fields[:version]
  # fields.merge!({
- # earliest_presentation_time: version == 0 ? read_int_32 : read_int_64,
- # first_offset: version == 0 ? read_int_32 : read_int_64,
+ # earliest_presentation_time: version == 0 ? read_int : read_int(n: 8),
+ # first_offset: version == 0 ? read_int : read_int(n: 8),
  # })
- # reference_count = skip_bytes(2) { read_int_16 }
+ # reference_count = skip_bytes(2) { read_int(n: 2) }
  # fields.merge!({
  # reference_count: reference_count,
  # references: reference_count.times.map do
- # tmp = read_int_32
+ # tmp = read_int
  # reference = {
  # reference_type: tmp >> 31,
  # referenced_size: tmp & 0x7FFFFFFF,
- # subsegment_duration: read_int_32
+ # subsegment_duration: read_int
  # }
- # tmp = read_int_32
+ # tmp = read_int
  # reference.merge({
  # starts_with_sap: tmp >> 31,
  # sap_type: (tmp >> 28) & 0x7,
@@ -731,27 +703,27 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a sound media header atom.
+ # Parse a sound media header box.
  # def smhd(_)
  # fields = read_version_and_flags.merge({
- # balance: read_fixed_point_16,
+ # balance: read_fixed_point(n: 2, signed: true),
  # })
  # skip_bytes(2)
  # [fields, nil]
  # end

- # Parse a subsegment index atom.
+ # Parse a subsegment index box.
  # def ssix(_)
  # fields = read_version_and_flags
- # subsegment_count = read_int_32
+ # subsegment_count = read_int
  # fields.merge!({
  # subsegment_count: subsegment_count,
  # subsegments: subsegment_count.times.map do
- # range_count = read_int_32
+ # range_count = read_int
  # {
  # range_count: range_count,
  # ranges: range_count.times.map do
- # tmp = read_int_32
+ # tmp = read_int
  # {
  # level: tmp >> 24,
  # range_size: tmp & 0x00FFFFFF
@@ -763,142 +735,142 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a chunk offset atom.
+ # Parse a chunk offset box.
  # def stco(_)
  # fields = read_version_and_flags
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
- # entries: entry_count.times.map { { chunk_offset: read_int_32 } }
+ # entries: entry_count.times.map { { chunk_offset: read_int } }
  # })
  # [fields, nil]
  # end

- # Parse a degradation priority atom.
+ # Parse a degradation priority box.
  # def stdp(size)
- # # TODO: Parsing this atom needs the sample_count from the sample size atom (`stsz`).
+ # # TODO: Parsing this box needs the sample_count from the sample size box (`stsz`).
  # empty(size)
  # end

- # Parse a sub track information atom.
+ # Parse a sub track information box.
  # def stri(size)
  # fields = read_version_and_flags.merge({
- # switch_group: read_int_16,
- # alternate_group: read_int_16,
- # sub_track_id: read_int_32,
- # attribute_list: ((size - 12) / 4).times.map { read_int_32 }
+ # switch_group: read_int(n: 2),
+ # alternate_group: read_int(n: 2),
+ # sub_track_id: read_int,
+ # attribute_list: ((size - 12) / 4).times.map { read_int }
  # })
  # [fields, nil]
  # end

- # Parse a sample to chunk atom.
+ # Parse a sample to chunk box.
  # def stsc(_)
  # fields = read_version_and_flags
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
  # entries: entry_count.times.map do
  # {
- # first_chunk: read_int_32,
- # samples_per_chunk: read_int_32,
- # sample_description_index: read_int_32
+ # first_chunk: read_int,
+ # samples_per_chunk: read_int,
+ # sample_description_index: read_int
  # }
  # end
  # })
  # [fields, nil]
  # end

- # Parse a sample descriptions atom.
- # def stsd(size)
- # fields = read_version_and_flags.merge({
- # entry_count: read_int_32
- # })
- # [fields, build_atom_tree(size - 8)]
- # end
+ # Parse a sample descriptions box.
+ def stsd(size)
+ fields = read_version_and_flags.merge({
+ entry_count: read_int
+ })
+ [fields, build_box_tree(size - 8)]
+ end

- # Parse a shadow sync sample atom.
+ # Parse a shadow sync sample box.
  # def stsh(_)
  # fields = read_version_and_flags
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
  # entries: entry_count.times.map {
  # {
- # shadowed_sample_number: read_int_32,
- # sync_sample_number: read_int_32
+ # shadowed_sample_number: read_int,
+ # sync_sample_number: read_int
  # }
  # }
  # })
  # [fields, nil]
  # end

- # Parse a sync sample atom.
+ # Parse a sync sample box.
  # def stss(_)
  # fields = read_version_and_flags
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields.merge!({
  # entry_count: entry_count,
- # entries: entry_count.times.map { { sample_number: read_int_32 } }
+ # entries: entry_count.times.map { { sample_number: read_int } }
  # })
  # [fields, nil]
  # end

- # Parse a sample size atom.
+ # Parse a sample size box.
  # def stsz(_)
  # fields = read_version_and_flags
- # sample_size = read_int_32
- # sample_count = read_int_32
+ # sample_size = read_int
+ # sample_count = read_int
  # fields.merge!({
  # sample_size: sample_size,
  # sample_count: sample_count,
  # })
- # fields[:entries] = sample_count.times.map { { entry_size: read_int_32 } } if sample_size == 0
+ # fields[:entries] = sample_count.times.map { { entry_size: read_int } } if sample_size == 0
  # [fields, nil]
  # end

- # Parse a decoding time to sample atom.
- # def stts(_)
- # fields = read_version_and_flags
- # entry_count = read_int_32
- # fields.merge!({
- # entry_count: entry_count,
- # entries: entry_count.times.map do
- # {
- # sample_count: read_int_32,
- # sample_delta: read_int_32
- # }
- # end
- # })
- # [fields, nil]
- # end
+ # Parse a decoding time to sample box.
+ def stts(_)
+ fields = read_version_and_flags
+ entry_count = read_int
+ fields.merge!({
+ entry_count: entry_count,
+ entries: entry_count.times.map do
+ {
+ sample_count: read_int,
+ sample_delta: read_int
+ }
+ end
+ })
+ [fields, nil]
+ end

- # Parse a compact sample size atom.
+ # Parse a compact sample size box.
  # def stz2(size)
  # fields = read_version_and_flags.merge({
- # field_size: skip_bytes(3) { read_int_8 },
- # sample_count: read_int_32
+ # field_size: skip_bytes(3) { read_int(n: 1) },
+ # sample_count: read_int
  # })
  # # TODO: Handling for parsing entry sizes dynamically based on field size.
  # skip_bytes(size - 12)
  # [fields, nil]
  # end

- # Parse a sub-sample information atom.
+ # Parse a sub-sample information box.
  # def subs(_)
  # fields = read_version_and_flags
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields[:entries] = entry_count.times.map do
- # sample_delta = read_int_32
- # subsample_count = read_int_16
+ # sample_delta = read_int
+ # subsample_count = read_int(n: 2)
  # {
  # sample_delta: sample_delta,
  # subsample_count: subsample_count,
  # subsample_information: subsample_count.times.map do
  # {
- # subsample_size: version == 1 ? read_int_32 : read_int_16,
- # subsample_priority: read_int_8,
- # discardable: read_int_8,
- # codec_specific_parameters: read_int_32
+ # subsample_size: version == 1 ? read_int : read_int(n: 2),
+ # subsample_priority: read_int(n: 1),
+ # discardable: read_int(n: 1),
+ # codec_specific_parameters: read_int
  # }
  # end
  # }
@@ -906,17 +878,17 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a track fragment random access atom.
+ # Parse a track fragment random access box.
  # def tfra(_)
  # fields = read_version_and_flags
  # version = fields[:version]
- # fields[:track_id] = read_int_32
+ # fields[:track_id] = read_int
  # skip_bytes(3)
- # tmp = read_int_8
+ # tmp = read_int(n: 1)
  # size_of_traf_number = (tmp >> 4) & 0x3
  # size_of_trun_number = (tmp >> 2) & 0x3
  # size_of_sample_number = tmp & 0x3
- # entry_count = read_int_32
+ # entry_count = read_int
  # fields.merge!({
  # size_of_traf_number: size_of_traf_number,
  # size_of_trun_number: size_of_trun_number,
@@ -924,8 +896,8 @@ module FormatParser
  # entry_count: entry_count,
  # entries: entry_count.times.map do
  # entry = {
- # time: version == 1 ? read_int_64 : read_int_32,
- # moof_offset: version == 1 ? read_int_64 : read_int_32
+ # time: version == 1 ? read_int(n: 8) : read_int,
+ # moof_offset: version == 1 ? read_int(n: 8) : read_int
  # }
  # # TODO: Handling for parsing traf_number, trun_number and sample_number dynamically based on their sizes.
  # skip_bytes(size_of_traf_number + size_of_trun_number + size_of_sample_number + 3)
@@ -935,74 +907,74 @@ module FormatParser
  # [fields, nil]
  # end

- # Parse a track header atom.
- # def tkhd(_)
- # fields = read_version_and_flags
- # version = fields[:version]
- # fields.merge!({
- # creation_time: version == 1 ? read_int_64 : read_int_32,
- # modification_time: version == 1 ? read_int_64 : read_int_32,
- # track_id: read_int_32,
- # duration: skip_bytes(4) { version == 1 ? read_int_64 : read_int_32 },
- # layer: skip_bytes(8) { read_int_16 },
- # alternate_group: read_int_16,
- # volume: read_fixed_point_16,
- # matrix: skip_bytes(2) { read_matrix },
- # width: read_fixed_point_32,
- # height: read_fixed_point_32
- # })
- # [fields, nil]
- # end
+ # Parse a track header box.
+ def tkhd(_)
+ fields = read_version_and_flags
+ version = fields[:version]
+ fields.merge!({
+ creation_time: version == 1 ? read_int(n: 8) : read_int,
+ modification_time: version == 1 ? read_int(n: 8) : read_int,
+ track_id: read_int,
+ duration: skip_bytes(4) { version == 1 ? read_int(n: 8) : read_int },
+ layer: skip_bytes(8) { read_int(n: 2) },
+ alternate_group: read_int(n: 2),
+ volume: read_fixed_point(n: 2, signed: true),
+ matrix: skip_bytes(2) { read_matrix },
+ width: read_fixed_point(n: 4),
+ height: read_fixed_point(n: 4)
+ })
+ [fields, nil]
+ end

- # Parse a track extends atom.
+ # Parse a track extends box.
  # def trex(_)
  # fields = read_version_and_flags.merge({
- # track_id: read_int_32,
- # default_sample_description_index: read_int_32,
- # default_sample_duration: read_int_32,
- # default_sample_size: read_int_32,
- # default_sample_flags: read_int_32
+ # track_id: read_int,
+ # default_sample_description_index: read_int,
+ # default_sample_duration: read_int,
+ # default_sample_size: read_int,
+ # default_sample_flags: read_int
  # })
  # [fields, nil]
  # end

- # Parse a track selection atom.
+ # Parse a track selection box.
  # def tsel(size)
  # fields = read_version_and_flags.merge({
- # switch_group: read_int_32,
- # attribute_list: ((size - 8) / 4).times.map { read_int_32 }
+ # switch_group: read_int,
+ # attribute_list: ((size - 8) / 4).times.map { read_int }
  # })
  # [fields, nil]
  # end

- # Parse a file/segment type compatibility atom.
- # def typ(size)
- # compatible_brands_count = (size - 8) / 4
- # fields = {
- # major_brand: read_string(4),
- # minor_version: read_int_32,
- # compatible_brands: compatible_brands_count.times.map { read_string(4) }
- # }
- # [fields, nil]
- # end
+ # Parse a file/segment type compatibility box.
+ def typ(size)
+ compatible_brands_count = (size - 8) / 4
+ fields = {
+ major_brand: read_string(4),
+ minor_version: read_int,
+ compatible_brands: compatible_brands_count.times.map { read_string(4) }
+ }
+ [fields, nil]
+ end

- # Parse a UUID atom.
+ # Parse a UUID box.
  def uuid(size)
  fields = { usertype: read_bytes(16).unpack('H*').first }
  skip_bytes(size - 16)
  [fields, nil]
  end

- # Parse a video media header atom.
+ # Parse a video media header box.
  # def vmhd(_)
  # fields = read_version_and_flags.merge({
- # graphics_mode: read_int_16,
- # op_color: (1..3).map { read_int_16 }
+ # graphics_mode: read_int(n: 2),
+ # op_color: (1..3).map { read_int(n: 2) }
  # })
  # [fields, nil]
  # end

- # Parse an XML atom.
+ # Parse an XML box.
  # def xml(size)
  # fields = read_version_and_flags.merge({
  # xml: read_string(size - 4)
@@ -1017,22 +989,16 @@ module FormatParser
  #
  # See https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap4/qtff4.html#//apple_ref/doc/uid/TP40000939-CH206-18737.
  def read_matrix
- 9.times.map do |i|
- if i % 3 == 2
- read_fixed_point_32_2_30
- else
- read_fixed_point_32
- end
- end
+ Matrix.build(3) { |_, c| read_fixed_point(fractional_digits: c % 3 == 2 ? 30 : 16, signed: true) }
  end

- # Parse an atom's version and flags.
+ # Parse a box's version and flags.
  #
- # It's common for atoms to begin with a single byte representing the version followed by three bytes representing any
+ # It's common for boxes to begin with a single byte representing the version followed by three bytes representing any
  # associated flags. Both of these are often 0.
  def read_version_and_flags
  {
- version: read_int_8,
+ version: read_int(n: 1),
  flags: read_bytes(3)
  }
  end
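
To make the renamed entry point concrete, here is a minimal usage sketch (not part of the diff). It assumes the relocated Box class required above ('parsers/iso_base_media_file_format/box') still exposes the find_first_descendent helper that this diff removes from Decoder, and it relies on the timescale/duration fields produced by the new mvhd parser; the file name is purely hypothetical.

```ruby
# Illustrative sketch only, assuming the public API shown in this diff.
require 'format_parser'

File.open('movie.mp4', 'rb') do |io|
  decoder = FormatParser::ISOBaseMediaFileFormat::Decoder.new
  # build_box_tree(max_read, io) walks boxes until max_read bytes have been consumed.
  boxes = decoder.build_box_tree(io.size, io)

  # Assumption: Box#find_first_descendent works as the version removed above did,
  # i.e. a depth-first search of children for the first box of a given type.
  mvhd = boxes.map { |box| box.find_first_descendent(%w[mvhd]) }.compact.first
  if mvhd
    duration_s = mvhd.fields[:duration].to_f / mvhd.fields[:timescale]
    puts "Movie duration: #{duration_s} seconds"
  end
end
```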