format_parser 2.3.0 → 2.4.4

@@ -1,7 +1,7 @@
- # This class provides generic methods for parsing file formats based on QuickTime-style "atoms", such as those seen in
+ # This class provides generic methods for parsing file formats based on QuickTime-style "boxes", such as those seen in
  # the ISO base media file format (ISO/IEC 14496-12), a.k.a MPEG-4, and those that extend it (MP4, CR3, HEIF, etc.).
  #
- # For more information on atoms, see https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html
+ # For more information on boxes, see https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html
  # or https://b.goeswhere.com/ISO_IEC_14496-12_2015.pdf.
  #
  # TODO: The vast majority of the methods have been commented out here. This decision was taken to expedite the release
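For orientation: every ISOBMFF box begins with a 4-byte big-endian size and a 4-byte type code; a size of 1 means a 64-bit size follows the type, and a size of 0 means the box runs to the end of the file, which is exactly the case the reworked parse_box below starts handling. A minimal sketch of that wire layout using only the Ruby standard library (not the gem's own IO helpers):

  require 'stringio'

  # Build a minimal 'ftyp' box: 32-bit big-endian size, 4-byte type, then the body.
  body = ['isom', 0, 'isom', 'mp41'].pack('a4Na4a4')  # major brand, minor version, compatible brands
  box  = [8 + body.bytesize, 'ftyp'].pack('Na4') + body

  io   = StringIO.new(box)
  size = io.read(4).unpack1('N')  # 1 would mean "64-bit size follows", 0 "box extends to EOF"
  type = io.read(4)               # => "ftyp"
  puts "#{type}: #{size} bytes"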
@@ -9,66 +9,38 @@
  # entirety. We should migrate existing formats that are based on the ISO base media file format and reintroduce these
  # methods with tests down-the-line.

+ require 'matrix'
+ require 'parsers/iso_base_media_file_format/box'
+
  module FormatParser
  module ISOBaseMediaFileFormat
  class Decoder
  include FormatParser::IOUtils

- class Atom < Struct.new(:type, :position, :size, :fields, :children)
- def initialize(type, position, size, fields = nil, children = nil)
- super
- self.fields ||= {}
- self.children ||= []
- end
-
- # Find and return the first descendent (using depth-first search) of a given type.
- #
- # @param [Array<String>] types
- # @return [Atom, nil]
- def find_first_descendent(types)
- children.each do |child|
- return child if types.include?(child.type)
- if (descendent = child.find_first_descendent(types))
- return descendent
- end
- end
- nil
- end
-
- # Find and return all descendents of a given type.
- #
- # @param [Array<String>] types
- # @return [Array<Atom>]
- def select_descendents(types)
- children.map do |child|
- descendents = child.select_descendents(types)
- types.include?(child.type) ? [child] + descendents : descendents
- end.flatten
- end
- end
-
+ # Attempt to build the ISOBMFF box tree represented in the given IO object.
+ #
  # @param [Integer] max_read
- # @param [IO, FormatParser::IOConstraint] io
- # @return [Array<Atom>]
- def build_atom_tree(max_read, io = nil)
+ # @param [IO, StringIO, FormatParser::IOConstraint] io
+ # @return [Array<Box>]
+ def build_box_tree(max_read, io = nil)
  @buf = FormatParser::IOConstraint.new(io) if io
  raise ArgumentError, "IO missing - supply a valid IO object" unless @buf
- atoms = []
+ boxes = []
  max_pos = @buf.pos + max_read
  loop do
  break if @buf.pos >= max_pos
- atom = parse_atom
- break unless atom
- atoms << atom
+ box = parse_box
+ break unless box
+ boxes << box
  end
- atoms
+ boxes
  end
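A rough usage sketch of the new entry point (the file name is hypothetical; the Box instances returned here come from the extracted class required above):

  decoder = FormatParser::ISOBaseMediaFileFormat::Decoder.new
  File.open('example.mp4', 'rb') do |io|        # hypothetical input file
    boxes = decoder.build_box_tree(0xFFFF, io)  # read at most 64 KiB of boxes
    puts boxes.map(&:type).inspect              # e.g. ["ftyp", "mdat", "moov"]
  end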

  protected

- # A mapping of atom types to their respective parser methods. Each method must take a single Integer parameter, size,
- # and return the atom's fields and children where appropriate as a Hash and Array of Atoms respectively.
- ATOM_PARSERS = {
+ # A mapping of box types to their respective parser methods. Each method must take a single Integer parameter, size,
+ # and return the box's fields and children where appropriate as a Hash and Array of Boxes respectively.
+ BOX_PARSERS = {
  # 'bxml' => :bxml,
  # 'co64' => :co64,
  # 'cprt' => :cprt,
@@ -81,9 +53,9 @@ module FormatParser
  # 'fiin' => :fiin,
  # 'fire' => :fire,
  # 'fpar' => :fpar,
- # 'ftyp' => :typ,
+ 'ftyp' => :typ,
  # 'gitn' => :gitn,
- # 'hdlr' => :hdlr,
+ 'hdlr' => :hdlr,
  # 'hmhd' => :hmhd,
  # 'iinf' => :iinf,
  # 'iloc' => :iloc,
@@ -91,7 +63,7 @@ module FormatParser
  # 'ipro' => :ipro,
  # 'iref' => :iref,
  # 'leva' => :leva,
- # 'mdhd' => :mdhd,
+ 'mdhd' => :mdhd,
  'mdia' => :container,
  'meco' => :container,
  # 'mehd' => :mehd,
@@ -104,7 +76,7 @@ module FormatParser
  'moof' => :container,
  'moov' => :container,
  'mvex' => :container,
- # 'mvhd' => :mvhd,
+ 'mvhd' => :mvhd,
  'nmhd' => :empty,
  # 'padb' => :padb,
  'paen' => :container,
@@ -131,16 +103,16 @@ module FormatParser
  # 'stri' => :stri,
  'strk' => :container,
  # 'stsc' => :stsc,
- # 'stsd' => :stsd,
+ 'stsd' => :stsd,
  # 'stsh' => :stsh,
  # 'stss' => :stss,
  # 'stsz' => :stsz,
- # 'stts' => :stts,
+ 'stts' => :stts,
  # 'styp' => :typ,
  # 'stz2' => :stz2,
  # 'subs' => :subs,
  # 'tfra' => :tfra,
- # 'tkhd' => :tkhd,
+ 'tkhd' => :tkhd,
  'trak' => :container,
  # 'trex' => :trex,
  # 'tsel' => :tsel,
@@ -152,69 +124,74 @@ module FormatParser
  # 'xml ' => :xml,
  }

- # Parse the atom at the IO's current position.
+ # Parse the box at the IO's current position.
  #
- # @return [Atom, nil]
- def parse_atom
+ # @return [Box, nil]
+ def parse_box
  position = @buf.pos
-
- size = read_int_32
+ size = read_int
  type = read_string(4)
- size = read_int_64 if size == 1
+
+ if size == 1
+ size = read_int(n: 8)
+ elsif size == 0
+ size = @buf.size - position
+ end
+
  body_size = size - (@buf.pos - position)
- next_atom_position = position + size
-
- if self.class::ATOM_PARSERS.include?(type)
- fields, children = method(self.class::ATOM_PARSERS[type]).call(body_size)
- if @buf.pos != next_atom_position
- # We should never end up in this state. If we do, it likely indicates a bug in the atom's parser method.
- warn("Unexpected IO position after parsing #{type} atom at position #{position}. Atom size: #{size}. Expected position: #{next_atom_position}. Actual position: #{@buf.pos}.")
- @buf.seek(next_atom_position)
+ next_box_position = position + size
+
+ if self.class::BOX_PARSERS.include?(type)
+ fields, children = method(self.class::BOX_PARSERS[type]).call(body_size)
+ if @buf.pos != next_box_position
+ # We should never end up in this state. If we do, it likely indicates a bug in the box's parser method.
+ warn("Unexpected IO position after parsing #{type} box at position #{position}. Box size: #{size}. Expected position: #{next_box_position}. Actual position: #{@buf.pos}.")
+ @buf.seek(next_box_position)
  end
- Atom.new(type, position, size, fields, children)
+ Box.new(type, position, size, fields, children)
  else
  skip_bytes(body_size)
- Atom.new(type, position, size)
+ Box.new(type, position, size)
  end
  rescue FormatParser::IOUtils::InvalidRead
  nil
  end

- # Parse any atom that serves as a container, with only children and no fields of its own.
+ # Parse any box that serves as a container, with only children and no fields of its own.
  def container(size)
- [nil, build_atom_tree(size)]
+ [nil, build_box_tree(size)]
  end

- # Parse only an atom's version and flags, skipping the remainder of the atom's body.
+ # Parse only a box's version and flags, skipping the remainder of the box's body.
  def empty(size)
  fields = read_version_and_flags
  skip_bytes(size - 4)
  [fields, nil]
  end

195
- # Parse a binary XML atom.
172
+ # Parse a binary XML box.
196
173
  # def bxml(size)
197
174
  # fields = read_version_and_flags.merge({
198
- # data: (size - 4).times.map { read_int_8 }
175
+ # data: (size - 4).times.map { read_int(n: 1) }
199
176
  # })
200
177
  # [fields, nil]
201
178
  # end
202
179
 
203
- # Parse a chunk large offset atom.
180
+ # Parse a chunk large offset box.
204
181
  # def co64(_)
205
182
  # fields = read_version_and_flags
206
- # entry_count = read_int_32
183
+ # entry_count = read_int
207
184
  # fields.merge!({
208
185
  # entry_count: entry_count,
209
- # entries: entry_count.times.map { { chunk_offset: read_int_64 } }
186
+ # entries: entry_count.times.map { { chunk_offset: read_int(n: 8) } }
210
187
  # })
211
188
  # [fields, nil]
212
189
  # end
213
190
 
214
- # Parse a copyright atom.
191
+ # Parse a copyright box.
215
192
  # def cprt(size)
216
193
  # fields = read_version_and_flags
217
- # tmp = read_int_16
194
+ # tmp = read_int(n: 2)
218
195
  # fields.merge!({
219
196
  # language: [(tmp >> 10) & 0x1F, (tmp >> 5) & 0x1F, tmp & 0x1F],
220
197
  # notice: read_string(size - 6)
@@ -222,45 +199,45 @@ module FormatParser
222
199
  # [fields, nil]
223
200
  # end
224
201
 
225
- # Parse a composition to decode atom.
202
+ # Parse a composition to decode box.
226
203
  # def cslg(_)
227
204
  # fields = read_version_and_flags
228
205
  # version = fields[:version]
229
206
  # fields.merge!({
230
- # composition_to_dts_shift: version == 1 ? read_int_64 : read_int_32,
231
- # least_decode_to_display_delta: version == 1 ? read_int_64 : read_int_32,
232
- # greatest_decode_to_display_delta: version == 1 ? read_int_64 : read_int_32,
233
- # composition_start_time: version == 1 ? read_int_64 : read_int_32,
234
- # composition_end_time: version == 1 ? read_int_64 : read_int_32,
207
+ # composition_to_dts_shift: version == 1 ? read_int(n: 8) : read_int,
208
+ # least_decode_to_display_delta: version == 1 ? read_int(n: 8) : read_int,
209
+ # greatest_decode_to_display_delta: version == 1 ? read_int(n: 8) : read_int,
210
+ # composition_start_time: version == 1 ? read_int(n: 8) : read_int,
211
+ # composition_end_time: version == 1 ? read_int(n: 8) : read_int,
235
212
  # })
236
213
  # [fields, nil]
237
214
  # end
238
215
 
239
- # Parse a composition time to sample atom.
216
+ # Parse a composition time to sample box.
240
217
  # def ctts(_)
241
218
  # fields = read_version_and_flags
242
- # entry_count = read_int_32
219
+ # entry_count = read_int
243
220
  # fields.merge!({
244
221
  # entry_count: entry_count,
245
222
  # entries: entry_count.times.map do
246
223
  # {
247
- # sample_count: read_int_32,
248
- # sample_offset: read_int_32
224
+ # sample_count: read_int,
225
+ # sample_offset: read_int
249
226
  # }
250
227
  # end
251
228
  # })
252
229
  # [fields, nil]
253
230
  # end
254
231
 
255
- # Parse a data reference atom.
232
+ # Parse a data reference box.
256
233
  # def dref(size)
257
234
  # fields = read_version_and_flags.merge({
258
- # entry_count: read_int_32
235
+ # entry_count: read_int
259
236
  # })
260
- # [fields, build_atom_tree(size - 8)]
237
+ # [fields, build_box_tree(size - 8)]
261
238
  # end
262
239
 
263
- # Parse a data reference URL entry atom.
240
+ # Parse a data reference URL entry box.
264
241
  # def dref_url(size)
265
242
  # fields = read_version_and_flags.merge({
266
243
  # location: read_string(size - 4)
@@ -268,7 +245,7 @@ module FormatParser
268
245
  # [fields, nil]
269
246
  # end
270
247
 
271
- # Parse a data reference URN entry atom.
248
+ # Parse a data reference URN entry box.
272
249
  # def dref_urn(size)
273
250
  # fields = read_version_and_flags
274
251
  # name, location = read_bytes(size - 4).unpack('Z2')
@@ -279,58 +256,58 @@ module FormatParser
279
256
  # [fields, nil]
280
257
  # end
281
258
 
282
- # Parse an FEC reservoir atom.
259
+ # Parse an FEC reservoir box.
283
260
  # def fecr(_)
284
261
  # fields = read_version_and_flags
285
262
  # version = fields[:version]
286
- # entry_count = version == 0 ? read_int_16 : read_int_32
263
+ # entry_count = version == 0 ? read_int(n: 2) : read_int
287
264
  # fields.merge!({
288
265
  # entry_count: entry_count,
289
266
  # entries: entry_count.times.map do
290
267
  # {
291
- # item_id: version == 0 ? read_int_16 : read_int_32,
292
- # symbol_count: read_int_8
268
+ # item_id: version == 0 ? read_int(n: 2) : read_int,
269
+ # symbol_count: read_int(n: 1)
293
270
  # }
294
271
  # end
295
272
  # })
296
273
  # end
297
274
 
298
- # Parse an FD item information atom.
275
+ # Parse an FD item information box.
299
276
  # def fiin(size)
300
277
  # fields = read_version_and_flags.merge({
301
- # entry_count: read_int_16
278
+ # entry_count: read_int(n: 2)
302
279
  # })
303
- # [fields, build_atom_tree(size - 6)]
280
+ # [fields, build_box_tree(size - 6)]
304
281
  # end
305
282
 
306
- # Parse a file reservoir atom.
283
+ # Parse a file reservoir box.
307
284
  # def fire(_)
308
285
  # fields = read_version_and_flags
309
- # entry_count = version == 0 ? read_int_16 : read_int_32
286
+ # entry_count = version == 0 ? read_int(n: 2) : read_int
310
287
  # fields.merge!({
311
288
  # entry_count: entry_count,
312
289
  # entries: entry_count.times.map do
313
290
  # {
314
- # item_id: version == 0 ? read_int_16 : read_int_32,
315
- # symbol_count: read_int_32
291
+ # item_id: version == 0 ? read_int(n: 2) : read_int,
292
+ # symbol_count: read_int
316
293
  # }
317
294
  # end
318
295
  # })
319
296
  # [fields, nil]
320
297
  # end
321
298
 
322
- # Parse a file partition atom.
299
+ # Parse a file partition box.
323
300
  # def fpar(_)
324
301
  # fields = read_version_and_flags
325
302
  # version = fields[:version]
326
303
  # fields.merge!({
327
- # item_id: version == 0 ? read_int_16 : read_int_32,
328
- # packet_payload_size: read_int_16,
329
- # fec_encoding_id: skip_bytes(1) { read_int_8 },
330
- # fec_instance_id: read_int_16,
331
- # max_source_block_length: read_int_16,
332
- # encoding_symbol_length: read_int_16,
333
- # max_number_of_encoding_symbols: read_int_16,
304
+ # item_id: version == 0 ? read_int(n: 2) : read_int,
305
+ # packet_payload_size: read_int(n: 2),
306
+ # fec_encoding_id: skip_bytes(1) { read_int(n: 1) },
307
+ # fec_instance_id: read_int(n: 2),
308
+ # max_source_block_length: read_int(n: 2),
309
+ # encoding_symbol_length: read_int(n: 2),
310
+ # max_number_of_encoding_symbols: read_int(n: 2),
334
311
  # })
335
312
  # # TODO: Parse scheme_specific_info, entry_count and entries { block_count, block_size }.
336
313
  # skip_bytes(size - 20)
@@ -338,10 +315,10 @@ module FormatParser
338
315
  # [fields, nil]
339
316
  # end
340
317
 
341
- # Parse a group ID to name atom.
318
+ # Parse a group ID to name box.
342
319
  # def gitn(size)
343
320
  # fields = read_version_and_flags
344
- # entry_count = read_int_16
321
+ # entry_count = read_int(n: 2)
345
322
  # fields.merge!({
346
323
  # entry_count: entry_count
347
324
  # })
@@ -350,43 +327,43 @@ module FormatParser
350
327
  # [fields, nil]
351
328
  # end
352
329
 
- # Parse a handler atom.
- # def hdlr(size)
- # fields = read_version_and_flags.merge({
- # handler_type: skip_bytes(4) { read_int_32 },
- # name: skip_bytes(12) { read_string(size - 24) }
- # })
- # [fields, nil]
- # end
+ # Parse a handler box.
+ def hdlr(size)
+ fields = read_version_and_flags.merge({
+ handler_type: skip_bytes(4) { read_string(4) },
+ name: skip_bytes(12) { read_string(size - 24) }
+ })
+ [fields, nil]
+ end
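handler_type is now read as the four-character handler code from the spec ('vide', 'soun', 'meta', ...), which is what lets callers tell track kinds apart. A sketch, assuming the extracted Box class keeps a find_first_descendent helper equivalent to the one removed from Atom above:

  hdlr = moov_box.find_first_descendent(%w[hdlr])    # moov_box: a Box returned by build_box_tree
  is_video = hdlr && hdlr.fields[:handler_type] == 'vide'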
361
338
 
362
- # Parse a hint media header atom.
339
+ # Parse a hint media header box.
363
340
  # def hmhd(_)
364
341
  # fields = read_version_and_flags.merge({
365
- # max_pdu_size: read_int_16,
366
- # avg_pdu_size: read_int_16,
367
- # max_bitrate: read_int_32,
368
- # avg_bitrate: read_int_32
342
+ # max_pdu_size: read_int(n: 2),
343
+ # avg_pdu_size: read_int(n: 2),
344
+ # max_bitrate: read_int,
345
+ # avg_bitrate: read_int
369
346
  # })
370
347
  # skip_bytes(4)
371
348
  # [fields, nil]
372
349
  # end
373
350
 
374
- # Parse an item info atom.
351
+ # Parse an item info box.
375
352
  # def iinf(size)
376
353
  # fields = read_version_and_flags.merge({
377
- # entry_count: version == 0 ? read_int_16 : read_int_32
354
+ # entry_count: version == 0 ? read_int(n: 2) : read_int
378
355
  # })
379
- # [fields, build_atom_tree(size - 8)]
356
+ # [fields, build_box_tree(size - 8)]
380
357
  # end
381
358
 
382
- # Parse an item location atom.
359
+ # Parse an item location box.
383
360
  # def iloc(_)
384
361
  # fields = read_version_and_flags
385
- # tmp = read_int_16
362
+ # tmp = read_int(n: 2)
386
363
  # item_count = if version < 2
387
- # read_int_16
364
+ # read_int(n: 2)
388
365
  # elsif version == 2
389
- # read_int_32
366
+ # read_int
390
367
  # end
391
368
  # offset_size = (tmp >> 12) & 0x7
392
369
  # length_size = (tmp >> 8) & 0x7
@@ -400,15 +377,15 @@ module FormatParser
400
377
  # items: item_count.times.map do
401
378
  # item = {
402
379
  # item_id: if version < 2
403
- # read_int_16
380
+ # read_int(n: 2)
404
381
  # elsif version == 2
405
- # read_int_32
382
+ # read_int
406
383
  # end
407
384
  # }
408
- # item[:construction_method] = read_int_16 & 0x7 if version == 1 || version == 2
409
- # item[:data_reference_index] = read_int_16
385
+ # item[:construction_method] = read_int(n: 2) & 0x7 if version == 1 || version == 2
386
+ # item[:data_reference_index] = read_int(n: 2)
410
387
  # skip_bytes(base_offset_size) # TODO: Dynamically parse base_offset based on base_offset_size
411
- # extent_count = read_int_16
388
+ # extent_count = read_int(n: 2)
412
389
  # item[:extent_count] = extent_count
413
390
  # # TODO: Dynamically parse extent_index, extent_offset and extent_length based on their respective sizes.
414
391
  # skip_bytes(extent_count * (offset_size + length_size))
@@ -417,34 +394,34 @@ module FormatParser
417
394
  # })
418
395
  # end
419
396
 
420
- # Parse an item info entry atom.
397
+ # Parse an item info entry box.
421
398
  # def infe(size)
422
- # # TODO: This atom is super-complicated with optional and/or version-dependent fields and children.
399
+ # # TODO: This box is super-complicated with optional and/or version-dependent fields and children.
423
400
  # empty(size)
424
401
  # end
425
402
 
426
- # Parse an item protection atom.
403
+ # Parse an item protection box.
427
404
  # def ipro(size)
428
405
  # fields = read_version_and_flags.merge({
429
- # protection_count: read_int_16
406
+ # protection_count: read_int(n: 2)
430
407
  # })
431
- # [fields, build_atom_tree(size - 6)]
408
+ # [fields, build_box_tree(size - 6)]
432
409
  # end
433
410
 
434
- # Parse an item reference atom.
411
+ # Parse an item reference box.
435
412
  # def iref(_)
436
- # [read_version_and_flags, build_atom_tree(size - 4)]
413
+ # [read_version_and_flags, build_box_tree(size - 4)]
437
414
  # end
438
415
 
439
- # Parse a level assignment atom.
416
+ # Parse a level assignment box.
440
417
  # def leva(_)
441
418
  # fields = read_version_and_flags
442
- # level_count = read_int_8
419
+ # level_count = read_int(n: 1)
443
420
  # fields.merge!({
444
421
  # level_count: level_count,
445
422
  # levels: level_count.times.map do
446
- # track_id = read_int_32
447
- # tmp = read_int_8
423
+ # track_id = read_int
424
+ # tmp = read_int(n: 1)
448
425
  # assignment_type = tmp & 0x7F
449
426
  # level = {
450
427
  # track_id: track_id,
@@ -452,14 +429,14 @@ module FormatParser
452
429
  # assignment_type: assignment_type
453
430
  # }
454
431
  # if assignment_type == 0
455
- # level[:grouping_type] = read_int_32
432
+ # level[:grouping_type] = read_int
456
433
  # elsif assignment_type == 1
457
434
  # level.merge!({
458
- # grouping_type: read_int_32,
459
- # grouping_type_parameter: read_int_32
435
+ # grouping_type: read_int,
436
+ # grouping_type_parameter: read_int
460
437
  # })
461
438
  # elsif assignment_type == 4
462
- # level[:sub_track_id] = read_int_32
439
+ # level[:sub_track_id] = read_int
463
440
  # end
464
441
  # level
465
442
  # end
@@ -467,87 +444,87 @@ module FormatParser
467
444
  # [fields, nil]
468
445
  # end
469
446
 
- # Parse a media header atom.
- # def mdhd(_)
- # fields = read_version_and_flags
- # version = fields[:version]
- # fields.merge!({
- # creation_time: version == 1 ? read_int_64 : read_int_32,
- # modification_time: version == 1 ? read_int_64 : read_int_32,
- # timescale: read_int_32,
- # duration: version == 1 ? read_int_64 : read_int_32,
- # })
- # tmp = read_int_16
- # fields[:language] = [(tmp >> 10) & 0x1F, (tmp >> 5) & 0x1F, tmp & 0x1F]
- # skip_bytes(2)
- # [fields, nil]
- # end
+ # Parse a media header box.
+ def mdhd(_)
+ fields = read_version_and_flags
+ version = fields[:version]
+ fields.merge!({
+ creation_time: version == 1 ? read_int(n: 8) : read_int,
+ modification_time: version == 1 ? read_int(n: 8) : read_int,
+ timescale: read_int,
+ duration: version == 1 ? read_int(n: 8) : read_int,
+ })
+ tmp = read_int(n: 2)
+ fields[:language] = [(tmp >> 10) & 0x1F, (tmp >> 5) & 0x1F, tmp & 0x1F]
+ skip_bytes(2)
+ [fields, nil]
+ end
485
462
 
486
- # Parse a movie extends header atom.
463
+ # Parse a movie extends header box.
487
464
  # def mehd(_)
488
465
  # fields = read_version_and_flags
489
466
  # version = fields[:version]
490
- # fields[:fragment_duration] = version == 1 ? read_int_64 : read_int_32
467
+ # fields[:fragment_duration] = version == 1 ? read_int(n: 8) : read_int
491
468
  # [fields, nil]
492
469
  # end
493
470
 
494
- # Parse an metabox relation atom.
471
+ # Parse an metabox relation box.
495
472
  # def mere(_)
496
473
  # fields = read_version_and_flags.merge({
497
- # first_metabox_handler_type: read_int_32,
498
- # second_metabox_handler_type: read_int_32,
499
- # metabox_relation: read_int_8
474
+ # first_metabox_handler_type: read_int,
475
+ # second_metabox_handler_type: read_int,
476
+ # metabox_relation: read_int(n: 1)
500
477
  # })
501
478
  # [fields, nil]
502
479
  # end
503
480
 
504
- # Parse a meta atom.
481
+ # Parse a meta box.
505
482
  # def meta(size)
506
483
  # fields = read_version_and_flags
507
- # [fields, build_atom_tree(size - 4)]
484
+ # [fields, build_box_tree(size - 4)]
508
485
  # end
509
486
 
510
- # Parse a movie fragment header atom.
487
+ # Parse a movie fragment header box.
511
488
  # def mfhd(_)
512
489
  # fields = read_version_and_flags.merge({
513
- # sequence_number: read_int_32
490
+ # sequence_number: read_int
514
491
  # })
515
492
  # [fields, nil]
516
493
  # end
517
494
 
518
- # Parse a movie fragment random access offset atom.
495
+ # Parse a movie fragment random access offset box.
519
496
  # def mfro(_)
520
497
  # fields = read_version_and_flags.merge({
521
- # size: read_int_32
498
+ # size: read_int
522
499
  # })
523
500
  # [fields, nil]
524
501
  # end
525
502
 
- # Parse a movie header atom.
- # def mvhd(_)
- # fields = read_version_and_flags
- # version = fields[:version]
- # fields.merge!({
- # creation_time: version == 1 ? read_int_64 : read_int_32,
- # modification_time: version == 1 ? read_int_64 : read_int_32,
- # timescale: read_int_32,
- # duration: version == 1 ? read_int_64 : read_int_32,
- # rate: read_fixed_point_32,
- # volume: read_fixed_point_16,
- # matrix: skip_bytes(10) { read_matrix },
- # next_trak_id: skip_bytes(24) { read_int_32 },
- # })
- # [fields, nil]
- # end
+ # Parse a movie header box.
+ def mvhd(_)
+ fields = read_version_and_flags
+ version = fields[:version]
+ fields.merge!({
+ creation_time: version == 1 ? read_int(n: 8) : read_int,
+ modification_time: version == 1 ? read_int(n: 8) : read_int,
+ timescale: read_int,
+ duration: version == 1 ? read_int(n: 8) : read_int,
+ rate: read_fixed_point(n: 4),
+ volume: read_fixed_point(n: 2, signed: true),
+ matrix: skip_bytes(10) { read_matrix },
+ next_trak_id: skip_bytes(24) { read_int },
+ })
+ [fields, nil]
+ end
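mvhd carries the presentation timescale (ticks per second) and the duration in those ticks, so the wall-clock length is duration divided by timescale. A worked example with the kind of fields this parser returns (illustrative values only):

  fields  = { timescale: 600, duration: 45_000 }
  seconds = fields[:duration] / fields[:timescale].to_f  # => 75.0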
542
519
 
543
- # Parse a padding bits atom.
520
+ # Parse a padding bits box.
544
521
  # def padb(_)
545
522
  # fields = read_version_and_flags
546
- # sample_count = read_int_32
523
+ # sample_count = read_int
547
524
  # fields.merge!({
548
525
  # sample_count: sample_count,
549
526
  # padding: ((sample_count + 1) / 2).times.map do
550
- # tmp = read_int_8
527
+ # tmp = read_int(n: 1)
551
528
  # {
552
529
  # padding_1: tmp >> 4,
553
530
  # padding_2: tmp & 0x07
@@ -557,170 +534,170 @@ module FormatParser
557
534
  # [fields, nil]
558
535
  # end
559
536
 
560
- # Parse a progressive download information atom.
537
+ # Parse a progressive download information box.
561
538
  # def pdin(size)
562
539
  # fields = read_version_and_flags.merge({
563
540
  # entries: ((size - 4) / 8).times.map do
564
541
  # {
565
- # rate: read_int_32,
566
- # initial_delay: read_int_32
542
+ # rate: read_int,
543
+ # initial_delay: read_int
567
544
  # }
568
545
  # end
569
546
  # })
570
547
  # [fields, nil]
571
548
  # end
572
549
 
573
- # Parse a primary item atom.
550
+ # Parse a primary item box.
574
551
  # def pitm(_)
575
552
  # fields = read_version_and_flags.merge({
576
- # item_id: version == 0 ? read_int_16 : read_int_32
553
+ # item_id: version == 0 ? read_int(n: 2) : read_int
577
554
  # })
578
555
  # [fields, nil]
579
556
  # end
580
557
 
581
- # Parse a producer reference time atom.
558
+ # Parse a producer reference time box.
582
559
  # def prft(_)
583
560
  # fields = read_version_and_flags
584
561
  # version = fields[:version]
585
562
  # fields.merge!({
586
- # reference_track_id: read_int_32,
587
- # ntp_timestamp: read_int_64,
588
- # media_time: version == 0 ? read_int_32 : read_int_64
563
+ # reference_track_id: read_int,
564
+ # ntp_timestamp: read_int(n: 8),
565
+ # media_time: version == 0 ? read_int : read_int(n: 8)
589
566
  # })
590
567
  # [fields, nil]
591
568
  # end
592
569
 
593
- # Parse a sample auxiliary information offsets atom.
570
+ # Parse a sample auxiliary information offsets box.
594
571
  # def saio(_)
595
572
  # fields = read_version_and_flags
596
573
  # version = field[:version]
597
574
  # flags = fields[:flags]
598
575
  # fields.merge!({
599
- # aux_info_type: read_int_32,
600
- # aux_info_type_parameter: read_int_32
576
+ # aux_info_type: read_int,
577
+ # aux_info_type_parameter: read_int
601
578
  # }) if flags & 0x1
602
- # entry_count = read_int_32
579
+ # entry_count = read_int
603
580
  # fields.merge!({
604
581
  # entry_count: entry_count,
605
- # offsets: entry_count.times.map { version == 0 ? read_int_32 : read_int_64 }
582
+ # offsets: entry_count.times.map { version == 0 ? read_int : read_int(n: 8) }
606
583
  # })
607
584
  # [fields, nil]
608
585
  # end
609
586
 
610
- # Parse a sample auxiliary information sizes atom.
587
+ # Parse a sample auxiliary information sizes box.
611
588
  # def saiz(_)
612
589
  # fields = read_version_and_flags
613
590
  # flags = fields[:flags]
614
591
  # fields.merge!({
615
- # aux_info_type: read_int_32,
616
- # aux_info_type_parameter: read_int_32
592
+ # aux_info_type: read_int,
593
+ # aux_info_type_parameter: read_int
617
594
  # }) if flags & 0x1
618
- # default_sample_info_size = read_int_8
619
- # sample_count = read_int_32
595
+ # default_sample_info_size = read_int(n: 1)
596
+ # sample_count = read_int
620
597
  # fields.merge!({
621
598
  # default_sample_info_size: default_sample_info_size,
622
599
  # sample_count: sample_count
623
600
  # })
624
- # fields[:sample_info_sizes] = sample_count.times.map { read_int_8 } if default_sample_info_size == 0
601
+ # fields[:sample_info_sizes] = sample_count.times.map { read_int(n: 1) } if default_sample_info_size == 0
625
602
  # [fields, nil]
626
603
  # end
627
604
 
628
- # Parse a sample to group atom.
605
+ # Parse a sample to group box.
629
606
  # def sbgp(_)
630
607
  # fields = read_version_and_flags
631
- # fields[:grouping_type] = read_int_32
632
- # fields[:grouping_type_parameter] = read_int_32 if fields[:version] == 1
633
- # entry_count = read_int_32
608
+ # fields[:grouping_type] = read_int
609
+ # fields[:grouping_type_parameter] = read_int if fields[:version] == 1
610
+ # entry_count = read_int
634
611
  # fields.merge!({
635
612
  # entry_count: entry_count,
636
613
  # entries: entry_count.times.map do
637
614
  # {
638
- # sample_count: read_int_32,
639
- # group_description_index: read_int_32
615
+ # sample_count: read_int,
616
+ # group_description_index: read_int
640
617
  # }
641
618
  # end
642
619
  # })
643
620
  # [fields, nil]
644
621
  # end
645
622
 
646
- # Parse a scheme type atom.
623
+ # Parse a scheme type box.
647
624
  # def schm(_)
648
625
  # fields = read_version_and_flags.merge({
649
626
  # scheme_type: read_string(4),
650
- # scheme_version: read_int_32,
627
+ # scheme_version: read_int,
651
628
  # })
652
- # fields[:scheme_uri] = (size - 12).times.map { read_int_8 } if flags & 0x1 != 0
629
+ # fields[:scheme_uri] = (size - 12).times.map { read_int(n: 1) } if flags & 0x1 != 0
653
630
  # [fields, nil]
654
631
  # end
655
632
 
656
- # Parse an independent and disposable samples atom.
633
+ # Parse an independent and disposable samples box.
657
634
  # def sdtp(size)
658
- # # TODO: Parsing this atom needs the sample_count from the sample size atom (`stsz`).
635
+ # # TODO: Parsing this box needs the sample_count from the sample size box (`stsz`).
659
636
  # empty(size)
660
637
  # end
661
638
 
662
- # Parse an FD session group atom.
639
+ # Parse an FD session group box.
663
640
  # def segr(_)
664
- # num_session_groups = read_int_16
641
+ # num_session_groups = read_int(n: 2)
665
642
  # fields = {
666
643
  # num_session_groups: num_session_groups,
667
644
  # session_groups: num_session_groups.times.map do
668
- # entry_count = read_int_8
645
+ # entry_count = read_int(n: 1)
669
646
  # session_group = {
670
647
  # entry_count: entry_count,
671
- # entries: entry_count.times.map { { group_id: read_int_32 } }
648
+ # entries: entry_count.times.map { { group_id: read_int } }
672
649
  # }
673
- # num_channels_in_session_group = read_int_16
650
+ # num_channels_in_session_group = read_int(n: 2)
674
651
  # session_group.merge({
675
652
  # num_channels_in_session_group: num_channels_in_session_group,
676
- # channels: num_channels_in_session_group.times.map { { hint_track_id: read_int_32 } }
653
+ # channels: num_channels_in_session_group.times.map { { hint_track_id: read_int } }
677
654
  # })
678
655
  # end
679
656
  # }
680
657
  # [fields, nil]
681
658
  # end
682
659
 
683
- # Parse a sample group description atom.
660
+ # Parse a sample group description box.
684
661
  # def sgpd(_)
685
662
  # fields = read_version_and_flags
686
663
  # version = fields[:version]
687
- # fields[:grouping_type] = read_int_32
688
- # fields[:default_length] = read_int_32 if version == 1
689
- # fields[:default_sample_description_index] = read_int_32 if version >= 2
690
- # entry_count = read_int_32
664
+ # fields[:grouping_type] = read_int
665
+ # fields[:default_length] = read_int if version == 1
666
+ # fields[:default_sample_description_index] = read_int if version >= 2
667
+ # entry_count = read_int
691
668
  # fields.merge!({
692
669
  # entry_count: entry_count,
693
670
  # entries: entry_count.times.map do
694
671
  # entry = {}
695
- # entry[:description_length] = read_int_32 if version == 1 && fields[:default_length] == 0
696
- # entry[:atom] = parse_atom
672
+ # entry[:description_length] = read_int if version == 1 && fields[:default_length] == 0
673
+ # entry[:box] = parse_box
697
674
  # end
698
675
  # })
699
676
  # [fields, nil]
700
677
  # end
701
678
 
702
- # Parse a segment index atom.
679
+ # Parse a segment index box.
703
680
  # def sidx(_)
704
681
  # fields = read_version_and_flags.merge({
705
- # reference_id: read_int_32,
706
- # timescale: read_int_32
682
+ # reference_id: read_int,
683
+ # timescale: read_int
707
684
  # })
708
685
  # version = fields[:version]
709
686
  # fields.merge!({
710
- # earliest_presentation_time: version == 0 ? read_int_32 : read_int_64,
711
- # first_offset: version == 0 ? read_int_32 : read_int_64,
687
+ # earliest_presentation_time: version == 0 ? read_int : read_int(n: 8),
688
+ # first_offset: version == 0 ? read_int : read_int(n: 8),
712
689
  # })
713
- # reference_count = skip_bytes(2) { read_int_16 }
690
+ # reference_count = skip_bytes(2) { read_int(n: 2) }
714
691
  # fields.merge!({
715
692
  # reference_count: reference_count,
716
693
  # references: reference_count.times.map do
717
- # tmp = read_int_32
694
+ # tmp = read_int
718
695
  # reference = {
719
696
  # reference_type: tmp >> 31,
720
697
  # referenced_size: tmp & 0x7FFFFFFF,
721
- # subsegment_duration: read_int_32
698
+ # subsegment_duration: read_int
722
699
  # }
723
- # tmp = read_int_32
700
+ # tmp = read_int
724
701
  # reference.merge({
725
702
  # starts_with_sap: tmp >> 31,
726
703
  # sap_type: (tmp >> 28) & 0x7,
@@ -731,27 +708,27 @@ module FormatParser
731
708
  # [fields, nil]
732
709
  # end
733
710
 
734
- # Parse a sound media header atom.
711
+ # Parse a sound media header box.
735
712
  # def smhd(_)
736
713
  # fields = read_version_and_flags.merge({
737
- # balance: read_fixed_point_16,
714
+ # balance: read_fixed_point(n: 2, signed: true),
738
715
  # })
739
716
  # skip_bytes(2)
740
717
  # [fields, nil]
741
718
  # end
742
719
 
743
- # Parse a subsegment index atom.
720
+ # Parse a subsegment index box.
744
721
  # def ssix(_)
745
722
  # fields = read_version_and_flags
746
- # subsegment_count = read_int_32
723
+ # subsegment_count = read_int
747
724
  # fields.merge!({
748
725
  # subsegment_count: subsegment_count,
749
726
  # subsegments: subsegment_count.times.map do
750
- # range_count = read_int_32
727
+ # range_count = read_int
751
728
  # {
752
729
  # range_count: range_count,
753
730
  # ranges: range_count.times.map do
754
- # tmp = read_int_32
731
+ # tmp = read_int
755
732
  # {
756
733
  # level: tmp >> 24,
757
734
  # range_size: tmp & 0x00FFFFFF
@@ -763,142 +740,142 @@ module FormatParser
763
740
  # [fields, nil]
764
741
  # end
765
742
 
766
- # Parse a chunk offset atom.
743
+ # Parse a chunk offset box.
767
744
  # def stco(_)
768
745
  # fields = read_version_and_flags
769
- # entry_count = read_int_32
746
+ # entry_count = read_int
770
747
  # fields.merge!({
771
748
  # entry_count: entry_count,
772
- # entries: entry_count.times.map { { chunk_offset: read_int_32 } }
749
+ # entries: entry_count.times.map { { chunk_offset: read_int } }
773
750
  # })
774
751
  # [fields, nil]
775
752
  # end
776
753
 
777
- # Parse a degradation priority atom.
754
+ # Parse a degradation priority box.
778
755
  # def stdp(size)
779
- # # TODO: Parsing this atom needs the sample_count from the sample size atom (`stsz`).
756
+ # # TODO: Parsing this box needs the sample_count from the sample size box (`stsz`).
780
757
  # empty(size)
781
758
  # end
782
759
 
783
- # Parse a sub track information atom.
760
+ # Parse a sub track information box.
784
761
  # def stri(size)
785
762
  # fields = read_version_and_flags.merge({
786
- # switch_group: read_int_16,
787
- # alternate_group: read_int_16,
788
- # sub_track_id: read_int_32,
789
- # attribute_list: ((size - 12) / 4).times.map { read_int_32 }
763
+ # switch_group: read_int(n: 2),
764
+ # alternate_group: read_int(n: 2),
765
+ # sub_track_id: read_int,
766
+ # attribute_list: ((size - 12) / 4).times.map { read_int }
790
767
  # })
791
768
  # [fields, nil]
792
769
  # end
793
770
 
794
- # Parse a sample to chunk atom.
771
+ # Parse a sample to chunk box.
795
772
  # def stsc(_)
796
773
  # fields = read_version_and_flags
797
- # entry_count = read_int_32
774
+ # entry_count = read_int
798
775
  # fields.merge!({
799
776
  # entry_count: entry_count,
800
777
  # entries: entry_count.times.map do
801
778
  # {
802
- # first_chunk: read_int_32,
803
- # samples_per_chunk: read_int_32,
804
- # sample_description_index: read_int_32
779
+ # first_chunk: read_int,
780
+ # samples_per_chunk: read_int,
781
+ # sample_description_index: read_int
805
782
  # }
806
783
  # end
807
784
  # })
808
785
  # [fields, nil]
809
786
  # end
810
787
 
- # Parse a sample descriptions atom.
- # def stsd(size)
- # fields = read_version_and_flags.merge({
- # entry_count: read_int_32
- # })
- # [fields, build_atom_tree(size - 8)]
- # end
+ # Parse a sample descriptions box.
+ def stsd(size)
+ fields = read_version_and_flags.merge({
+ entry_count: read_int
+ })
+ [fields, build_box_tree(size - 8)]
+ end
818
795
 
819
- # Parse a shadow sync sample atom.
796
+ # Parse a shadow sync sample box.
820
797
  # def stsh(_)
821
798
  # fields = read_version_and_flags
822
- # entry_count = read_int_32
799
+ # entry_count = read_int
823
800
  # fields.merge!({
824
801
  # entry_count: entry_count,
825
802
  # entries: entry_count.times.map {
826
803
  # {
827
- # shadowed_sample_number: read_int_32,
828
- # sync_sample_number: read_int_32
804
+ # shadowed_sample_number: read_int,
805
+ # sync_sample_number: read_int
829
806
  # }
830
807
  # }
831
808
  # })
832
809
  # [fields, nil]
833
810
  # end
834
811
 
835
- # Parse a sync sample atom.
812
+ # Parse a sync sample box.
836
813
  # def stss(_)
837
814
  # fields = read_version_and_flags
838
- # entry_count = read_int_32
815
+ # entry_count = read_int
839
816
  # fields.merge!({
840
817
  # entry_count: entry_count,
841
- # entries: entry_count.times.map { { sample_number: read_int_32 } }
818
+ # entries: entry_count.times.map { { sample_number: read_int } }
842
819
  # })
843
820
  # [fields, nil]
844
821
  # end
845
822
 
846
- # Parse a sample size atom.
823
+ # Parse a sample size box.
847
824
  # def stsz(_)
848
825
  # fields = read_version_and_flags
849
- # sample_size = read_int_32
850
- # sample_count = read_int_32
826
+ # sample_size = read_int
827
+ # sample_count = read_int
851
828
  # fields.merge!({
852
829
  # sample_size: sample_size,
853
830
  # sample_count: sample_count,
854
831
  # })
855
- # fields[:entries] = sample_count.times.map { { entry_size: read_int_32 } } if sample_size == 0
832
+ # fields[:entries] = sample_count.times.map { { entry_size: read_int } } if sample_size == 0
856
833
  # [fields, nil]
857
834
  # end
858
835
 
- # Parse a decoding time to sample atom.
- # def stts(_)
- # fields = read_version_and_flags
- # entry_count = read_int_32
- # fields.merge!({
- # entry_count: entry_count,
- # entries: entry_count.times.map do
- # {
- # sample_count: read_int_32,
- # sample_delta: read_int_32
- # }
- # end
- # })
- # [fields, nil]
- # end
+ # Parse a decoding time to sample box.
+ def stts(_)
+ fields = read_version_and_flags
+ entry_count = read_int
+ fields.merge!({
+ entry_count: entry_count,
+ entries: entry_count.times.map do
+ {
+ sample_count: read_int,
+ sample_delta: read_int
+ }
+ end
+ })
+ [fields, nil]
+ end
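stts run-length-encodes sample durations: each entry contributes sample_count * sample_delta ticks, so summing the entries yields the track's media duration in mdhd timescale units. For example:

  entries = [
    { sample_count: 100, sample_delta: 512 },  # 100 samples of 512 ticks each
    { sample_count: 1,   sample_delta: 256 }
  ]
  total_ticks = entries.sum { |e| e[:sample_count] * e[:sample_delta] }  # => 51_456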
874
851
 
875
- # Parse a compact sample size atom.
852
+ # Parse a compact sample size box.
876
853
  # def stz2(size)
877
854
  # fields = read_version_and_flags.merge({
878
- # field_size: skip_bytes(3) { read_int_8 },
879
- # sample_count: read_int_32
855
+ # field_size: skip_bytes(3) { read_int(n: 1) },
856
+ # sample_count: read_int
880
857
  # })
881
858
  # # TODO: Handling for parsing entry sizes dynamically based on field size.
882
859
  # skip_bytes(size - 12)
883
860
  # [fields, nil]
884
861
  # end
885
862
 
886
- # Parse a sub-sample information atom.
863
+ # Parse a sub-sample information box.
887
864
  # def subs(_)
888
865
  # fields = read_version_and_flags
889
- # entry_count = read_int_32
866
+ # entry_count = read_int
890
867
  # fields[:entries] = entry_count.times.map do
891
- # sample_delta = read_int_32
892
- # subsample_count = read_int_16
868
+ # sample_delta = read_int
869
+ # subsample_count = read_int(n: 2)
893
870
  # {
894
871
  # sample_delta: sample_delta,
895
872
  # subsample_count: subsample_count,
896
873
  # subsample_information: subsample_count.times.map do
897
874
  # {
898
- # subsample_size: version == 1 ? read_int_32 : read_int_16,
899
- # subsample_priority: read_int_8,
900
- # discardable: read_int_8,
901
- # codec_specific_parameters: read_int_32
875
+ # subsample_size: version == 1 ? read_int : read_int(n: 2),
876
+ # subsample_priority: read_int(n: 1),
877
+ # discardable: read_int(n: 1),
878
+ # codec_specific_parameters: read_int
902
879
  # }
903
880
  # end
904
881
  # }
@@ -906,17 +883,17 @@ module FormatParser
906
883
  # [fields, nil]
907
884
  # end
908
885
 
909
- # Parse a track fragment random access atom.
886
+ # Parse a track fragment random access box.
910
887
  # def tfra(_)
911
888
  # fields = read_version_and_flags
912
889
  # version = fields[:version]
913
- # fields[:track_id] = read_int_32
890
+ # fields[:track_id] = read_int
914
891
  # skip_bytes(3)
915
- # tmp = read_int_8
892
+ # tmp = read_int(n: 1)
916
893
  # size_of_traf_number = (tmp >> 4) & 0x3
917
894
  # size_of_trun_number = (tmp >> 2) & 0x3
918
895
  # size_of_sample_number = tmp & 0x3
919
- # entry_count = read_int_32
896
+ # entry_count = read_int
920
897
  # fields.merge!({
921
898
  # size_of_traf_number: size_of_traf_number,
922
899
  # size_of_trun_number: size_of_trun_number,
@@ -924,8 +901,8 @@ module FormatParser
924
901
  # entry_count: entry_count,
925
902
  # entries: entry_count.times.map do
926
903
  # entry = {
927
- # time: version == 1 ? read_int_64 : read_int_32,
928
- # moof_offset: version == 1 ? read_int_64 : read_int_32
904
+ # time: version == 1 ? read_int(n: 8) : read_int,
905
+ # moof_offset: version == 1 ? read_int(n: 8) : read_int
929
906
  # }
930
907
  # # TODO: Handling for parsing traf_number, trun_number and sample_number dynamically based on their sizes.
931
908
  # skip_bytes(size_of_traf_number + size_of_trun_number + size_of_sample_number + 3)
@@ -935,74 +912,74 @@ module FormatParser
935
912
  # [fields, nil]
936
913
  # end
937
914
 
- # Parse a track header atom.
- # def tkhd(_)
- # fields = read_version_and_flags
- # version = fields[:version]
- # fields.merge!({
- # creation_time: version == 1 ? read_int_64 : read_int_32,
- # modification_time: version == 1 ? read_int_64 : read_int_32,
- # track_id: read_int_32,
- # duration: skip_bytes(4) { version == 1 ? read_int_64 : read_int_32 },
- # layer: skip_bytes(8) { read_int_16 },
- # alternate_group: read_int_16,
- # volume: read_fixed_point_16,
- # matrix: skip_bytes(2) { read_matrix },
- # width: read_fixed_point_32,
- # height: read_fixed_point_32
- # })
- # [fields, nil]
- # end
+ # Parse a track header box.
+ def tkhd(_)
+ fields = read_version_and_flags
+ version = fields[:version]
+ fields.merge!({
+ creation_time: version == 1 ? read_int(n: 8) : read_int,
+ modification_time: version == 1 ? read_int(n: 8) : read_int,
+ track_id: read_int,
+ duration: skip_bytes(4) { version == 1 ? read_int(n: 8) : read_int },
+ layer: skip_bytes(8) { read_int(n: 2) },
+ alternate_group: read_int(n: 2),
+ volume: read_fixed_point(n: 2, signed: true),
+ matrix: skip_bytes(2) { read_matrix },
+ width: read_fixed_point(n: 4),
+ height: read_fixed_point(n: 4)
+ })
+ [fields, nil]
+ end
956
933
 
957
- # Parse a track extends atom.
934
+ # Parse a track extends box.
958
935
  # def trex(_)
959
936
  # fields = read_version_and_flags.merge({
960
- # track_id: read_int_32,
961
- # default_sample_description_index: read_int_32,
962
- # default_sample_duration: read_int_32,
963
- # default_sample_size: read_int_32,
964
- # default_sample_flags: read_int_32
937
+ # track_id: read_int,
938
+ # default_sample_description_index: read_int,
939
+ # default_sample_duration: read_int,
940
+ # default_sample_size: read_int,
941
+ # default_sample_flags: read_int
965
942
  # })
966
943
  # [fields, nil]
967
944
  # end
968
945
 
969
- # Parse a track selection atom.
946
+ # Parse a track selection box.
970
947
  # def tsel(size)
971
948
  # fields = read_version_and_flags.merge({
972
- # switch_group: read_int_32,
973
- # attribute_list: ((size - 8) / 4).times.map { read_int_32 }
949
+ # switch_group: read_int,
950
+ # attribute_list: ((size - 8) / 4).times.map { read_int }
974
951
  # })
975
952
  # [fields, nil]
976
953
  # end
977
954
 
- # Parse a file/segment type compatibility atom.
- # def typ(size)
- # compatible_brands_count = (size - 8) / 4
- # fields = {
- # major_brand: read_string(4),
- # minor_version: read_int_32,
- # compatible_brands: compatible_brands_count.times.map { read_string(4) }
- # }
- # [fields, nil]
- # end
+ # Parse a file/segment type compatibility box.
+ def typ(size)
+ compatible_brands_count = (size - 8) / 4
+ fields = {
+ major_brand: read_string(4),
+ minor_version: read_int,
+ compatible_brands: compatible_brands_count.times.map { read_string(4) }
+ }
+ [fields, nil]
+ end
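The ftyp/styp payload is just a major brand, a minor version and a list of compatible brands, which is how callers distinguish plain MP4 from HEIF, CR3 and friends. Representative output for a typical MP4 (not captured from a real file):

  { major_brand: 'isom', minor_version: 512, compatible_brands: ['isom', 'iso2', 'avc1', 'mp41'] }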
988
965
 
989
- # Parse a UUID atom.
966
+ # Parse a UUID box.
990
967
  def uuid(size)
991
968
  fields = { usertype: read_bytes(16).unpack('H*').first }
992
969
  skip_bytes(size - 16)
993
970
  [fields, nil]
994
971
  end
995
972
 
996
- # Parse a video media header atom.
973
+ # Parse a video media header box.
997
974
  # def vmhd(_)
998
975
  # fields = read_version_and_flags.merge({
999
- # graphics_mode: read_int_16,
1000
- # op_color: (1..3).map { read_int_16 }
976
+ # graphics_mode: read_int(n: 2),
977
+ # op_color: (1..3).map { read_int(n: 2) }
1001
978
  # })
1002
979
  # [fields, nil]
1003
980
  # end
1004
981
 
1005
- # Parse an XML atom.
982
+ # Parse an XML box.
1006
983
  # def xml(size)
1007
984
  # fields = read_version_and_flags.merge({
1008
985
  # xml: read_string(size - 4)
@@ -1017,22 +994,16 @@ module FormatParser
  #
  # See https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap4/qtff4.html#//apple_ref/doc/uid/TP40000939-CH206-18737.
  def read_matrix
- 9.times.map do |i|
- if i % 3 == 2
- read_fixed_point_32_2_30
- else
- read_fixed_point_32
- end
- end
+ Matrix.build(3) { |_, c| read_fixed_point(fractional_digits: c % 3 == 2 ? 30 : 16, signed: true) }
  end
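The matrix mixes 16.16 and 2.30 fixed-point values: every third column holds the 2.30 u/v/w terms, which is what the fractional_digits switch above selects. Assuming read_fixed_point divides the raw integer by 2 ** fractional_digits, the identity matrix decodes like this:

  raw = [0x00010000, 0, 0, 0, 0x00010000, 0, 0, 0, 0x40000000]  # row-major identity matrix
  decoded = raw.each_with_index.map { |v, i| v / (2.0 ** (i % 3 == 2 ? 30 : 16)) }
  # => [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]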

- # Parse an atom's version and flags.
+ # Parse a box's version and flags.
  #
- # It's common for atoms to begin with a single byte representing the version followed by three bytes representing any
+ # It's common for boxes to begin with a single byte representing the version followed by three bytes representing any
  # associated flags. Both of these are often 0.
  def read_version_and_flags
  {
- version: read_int_8,
+ version: read_int(n: 1),
  flags: read_bytes(3)
  }
  end