@kenzuya/mediabunny 1.26.0 → 1.28.5

This diff shows the content changes between publicly released versions of this package, as they appear in the supported public registries. It is provided for informational purposes only.
Files changed (237)
  1. package/README.md +1 -1
  2. package/dist/bundles/{mediabunny.mjs → mediabunny.js} +21963 -21388
  3. package/dist/bundles/mediabunny.min.js +490 -0
  4. package/dist/modules/shared/mp3-misc.d.ts.map +1 -1
  5. package/dist/modules/src/adts/adts-demuxer.d.ts +6 -6
  6. package/dist/modules/src/adts/adts-demuxer.d.ts.map +1 -1
  7. package/dist/modules/src/adts/adts-muxer.d.ts +4 -4
  8. package/dist/modules/src/adts/adts-muxer.d.ts.map +1 -1
  9. package/dist/modules/src/adts/adts-reader.d.ts +1 -1
  10. package/dist/modules/src/adts/adts-reader.d.ts.map +1 -1
  11. package/dist/modules/src/avi/avi-demuxer.d.ts +44 -0
  12. package/dist/modules/src/avi/avi-demuxer.d.ts.map +1 -0
  13. package/dist/modules/src/avi/avi-misc.d.ts +88 -0
  14. package/dist/modules/src/avi/avi-misc.d.ts.map +1 -0
  15. package/dist/modules/src/avi/avi-muxer.d.ts +45 -0
  16. package/dist/modules/src/avi/avi-muxer.d.ts.map +1 -0
  17. package/dist/modules/src/avi/riff-writer.d.ts +26 -0
  18. package/dist/modules/src/avi/riff-writer.d.ts.map +1 -0
  19. package/dist/modules/src/codec-data.d.ts +8 -3
  20. package/dist/modules/src/codec-data.d.ts.map +1 -1
  21. package/dist/modules/src/codec.d.ts +10 -10
  22. package/dist/modules/src/codec.d.ts.map +1 -1
  23. package/dist/modules/src/conversion.d.ts +33 -16
  24. package/dist/modules/src/conversion.d.ts.map +1 -1
  25. package/dist/modules/src/custom-coder.d.ts +8 -8
  26. package/dist/modules/src/custom-coder.d.ts.map +1 -1
  27. package/dist/modules/src/demuxer.d.ts +3 -3
  28. package/dist/modules/src/demuxer.d.ts.map +1 -1
  29. package/dist/modules/src/encode.d.ts +8 -8
  30. package/dist/modules/src/encode.d.ts.map +1 -1
  31. package/dist/modules/src/flac/flac-demuxer.d.ts +7 -7
  32. package/dist/modules/src/flac/flac-demuxer.d.ts.map +1 -1
  33. package/dist/modules/src/flac/flac-misc.d.ts +3 -3
  34. package/dist/modules/src/flac/flac-misc.d.ts.map +1 -1
  35. package/dist/modules/src/flac/flac-muxer.d.ts +5 -5
  36. package/dist/modules/src/flac/flac-muxer.d.ts.map +1 -1
  37. package/dist/modules/src/id3.d.ts +3 -3
  38. package/dist/modules/src/id3.d.ts.map +1 -1
  39. package/dist/modules/src/index.d.ts +20 -20
  40. package/dist/modules/src/index.d.ts.map +1 -1
  41. package/dist/modules/src/input-format.d.ts +22 -0
  42. package/dist/modules/src/input-format.d.ts.map +1 -1
  43. package/dist/modules/src/input-track.d.ts +8 -8
  44. package/dist/modules/src/input-track.d.ts.map +1 -1
  45. package/dist/modules/src/input.d.ts +12 -12
  46. package/dist/modules/src/isobmff/isobmff-boxes.d.ts +2 -2
  47. package/dist/modules/src/isobmff/isobmff-boxes.d.ts.map +1 -1
  48. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts +12 -12
  49. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts.map +1 -1
  50. package/dist/modules/src/isobmff/isobmff-misc.d.ts.map +1 -1
  51. package/dist/modules/src/isobmff/isobmff-muxer.d.ts +11 -11
  52. package/dist/modules/src/isobmff/isobmff-muxer.d.ts.map +1 -1
  53. package/dist/modules/src/isobmff/isobmff-reader.d.ts +2 -2
  54. package/dist/modules/src/isobmff/isobmff-reader.d.ts.map +1 -1
  55. package/dist/modules/src/matroska/ebml.d.ts +3 -3
  56. package/dist/modules/src/matroska/ebml.d.ts.map +1 -1
  57. package/dist/modules/src/matroska/matroska-demuxer.d.ts +13 -13
  58. package/dist/modules/src/matroska/matroska-demuxer.d.ts.map +1 -1
  59. package/dist/modules/src/matroska/matroska-input.d.ts +33 -0
  60. package/dist/modules/src/matroska/matroska-input.d.ts.map +1 -0
  61. package/dist/modules/src/matroska/matroska-misc.d.ts.map +1 -1
  62. package/dist/modules/src/matroska/matroska-muxer.d.ts +5 -5
  63. package/dist/modules/src/matroska/matroska-muxer.d.ts.map +1 -1
  64. package/dist/modules/src/media-sink.d.ts +5 -5
  65. package/dist/modules/src/media-sink.d.ts.map +1 -1
  66. package/dist/modules/src/media-source.d.ts +22 -4
  67. package/dist/modules/src/media-source.d.ts.map +1 -1
  68. package/dist/modules/src/metadata.d.ts +2 -2
  69. package/dist/modules/src/metadata.d.ts.map +1 -1
  70. package/dist/modules/src/misc.d.ts +5 -4
  71. package/dist/modules/src/misc.d.ts.map +1 -1
  72. package/dist/modules/src/mp3/mp3-demuxer.d.ts +7 -7
  73. package/dist/modules/src/mp3/mp3-demuxer.d.ts.map +1 -1
  74. package/dist/modules/src/mp3/mp3-muxer.d.ts +4 -4
  75. package/dist/modules/src/mp3/mp3-muxer.d.ts.map +1 -1
  76. package/dist/modules/src/mp3/mp3-reader.d.ts +2 -2
  77. package/dist/modules/src/mp3/mp3-reader.d.ts.map +1 -1
  78. package/dist/modules/src/mp3/mp3-writer.d.ts +1 -1
  79. package/dist/modules/src/mp3/mp3-writer.d.ts.map +1 -1
  80. package/dist/modules/src/muxer.d.ts +4 -4
  81. package/dist/modules/src/muxer.d.ts.map +1 -1
  82. package/dist/modules/src/node.d.ts +1 -1
  83. package/dist/modules/src/ogg/ogg-demuxer.d.ts +7 -7
  84. package/dist/modules/src/ogg/ogg-demuxer.d.ts.map +1 -1
  85. package/dist/modules/src/ogg/ogg-misc.d.ts +1 -1
  86. package/dist/modules/src/ogg/ogg-misc.d.ts.map +1 -1
  87. package/dist/modules/src/ogg/ogg-muxer.d.ts +5 -5
  88. package/dist/modules/src/ogg/ogg-muxer.d.ts.map +1 -1
  89. package/dist/modules/src/ogg/ogg-reader.d.ts +1 -1
  90. package/dist/modules/src/ogg/ogg-reader.d.ts.map +1 -1
  91. package/dist/modules/src/output-format.d.ts +51 -6
  92. package/dist/modules/src/output-format.d.ts.map +1 -1
  93. package/dist/modules/src/output.d.ts +13 -13
  94. package/dist/modules/src/output.d.ts.map +1 -1
  95. package/dist/modules/src/packet.d.ts +1 -1
  96. package/dist/modules/src/packet.d.ts.map +1 -1
  97. package/dist/modules/src/pcm.d.ts.map +1 -1
  98. package/dist/modules/src/reader.d.ts +2 -2
  99. package/dist/modules/src/reader.d.ts.map +1 -1
  100. package/dist/modules/src/sample.d.ts +57 -15
  101. package/dist/modules/src/sample.d.ts.map +1 -1
  102. package/dist/modules/src/source.d.ts +3 -3
  103. package/dist/modules/src/source.d.ts.map +1 -1
  104. package/dist/modules/src/subtitles.d.ts +1 -1
  105. package/dist/modules/src/subtitles.d.ts.map +1 -1
  106. package/dist/modules/src/target.d.ts +2 -2
  107. package/dist/modules/src/target.d.ts.map +1 -1
  108. package/dist/modules/src/tsconfig.tsbuildinfo +1 -1
  109. package/dist/modules/src/wave/riff-writer.d.ts +1 -1
  110. package/dist/modules/src/wave/riff-writer.d.ts.map +1 -1
  111. package/dist/modules/src/wave/wave-demuxer.d.ts +6 -6
  112. package/dist/modules/src/wave/wave-demuxer.d.ts.map +1 -1
  113. package/dist/modules/src/wave/wave-muxer.d.ts +4 -4
  114. package/dist/modules/src/wave/wave-muxer.d.ts.map +1 -1
  115. package/dist/modules/src/writer.d.ts +1 -1
  116. package/dist/modules/src/writer.d.ts.map +1 -1
  117. package/dist/packages/eac3/eac3.wasm +0 -0
  118. package/dist/packages/eac3/mediabunny-eac3.js +1058 -0
  119. package/dist/packages/eac3/mediabunny-eac3.min.js +44 -0
  120. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.js +694 -0
  121. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.min.js +58 -0
  122. package/dist/packages/mpeg4/mediabunny-mpeg4.js +1198 -0
  123. package/dist/packages/mpeg4/mediabunny-mpeg4.min.js +44 -0
  124. package/dist/packages/mpeg4/xvid.wasm +0 -0
  125. package/package.json +18 -57
  126. package/dist/bundles/mediabunny.cjs +0 -26140
  127. package/dist/bundles/mediabunny.min.cjs +0 -147
  128. package/dist/bundles/mediabunny.min.mjs +0 -146
  129. package/dist/mediabunny.d.ts +0 -3319
  130. package/dist/modules/shared/mp3-misc.js +0 -147
  131. package/dist/modules/src/adts/adts-demuxer.js +0 -239
  132. package/dist/modules/src/adts/adts-muxer.js +0 -80
  133. package/dist/modules/src/adts/adts-reader.js +0 -63
  134. package/dist/modules/src/codec-data.js +0 -1730
  135. package/dist/modules/src/codec.js +0 -869
  136. package/dist/modules/src/conversion.js +0 -1459
  137. package/dist/modules/src/custom-coder.js +0 -117
  138. package/dist/modules/src/demuxer.js +0 -12
  139. package/dist/modules/src/encode.js +0 -442
  140. package/dist/modules/src/flac/flac-demuxer.js +0 -504
  141. package/dist/modules/src/flac/flac-misc.js +0 -135
  142. package/dist/modules/src/flac/flac-muxer.js +0 -222
  143. package/dist/modules/src/id3.js +0 -848
  144. package/dist/modules/src/index.js +0 -28
  145. package/dist/modules/src/input-format.js +0 -480
  146. package/dist/modules/src/input-track.js +0 -372
  147. package/dist/modules/src/input.js +0 -188
  148. package/dist/modules/src/isobmff/isobmff-boxes.js +0 -1480
  149. package/dist/modules/src/isobmff/isobmff-demuxer.js +0 -2618
  150. package/dist/modules/src/isobmff/isobmff-misc.js +0 -20
  151. package/dist/modules/src/isobmff/isobmff-muxer.js +0 -966
  152. package/dist/modules/src/isobmff/isobmff-reader.js +0 -72
  153. package/dist/modules/src/matroska/ebml.js +0 -653
  154. package/dist/modules/src/matroska/matroska-demuxer.js +0 -2133
  155. package/dist/modules/src/matroska/matroska-misc.js +0 -20
  156. package/dist/modules/src/matroska/matroska-muxer.js +0 -1017
  157. package/dist/modules/src/media-sink.js +0 -1736
  158. package/dist/modules/src/media-source.js +0 -1825
  159. package/dist/modules/src/metadata.js +0 -193
  160. package/dist/modules/src/misc.js +0 -623
  161. package/dist/modules/src/mp3/mp3-demuxer.js +0 -285
  162. package/dist/modules/src/mp3/mp3-muxer.js +0 -123
  163. package/dist/modules/src/mp3/mp3-reader.js +0 -26
  164. package/dist/modules/src/mp3/mp3-writer.js +0 -78
  165. package/dist/modules/src/muxer.js +0 -50
  166. package/dist/modules/src/node.js +0 -9
  167. package/dist/modules/src/ogg/ogg-demuxer.js +0 -763
  168. package/dist/modules/src/ogg/ogg-misc.js +0 -78
  169. package/dist/modules/src/ogg/ogg-muxer.js +0 -353
  170. package/dist/modules/src/ogg/ogg-reader.js +0 -65
  171. package/dist/modules/src/output-format.js +0 -527
  172. package/dist/modules/src/output.js +0 -300
  173. package/dist/modules/src/packet.js +0 -182
  174. package/dist/modules/src/pcm.js +0 -85
  175. package/dist/modules/src/reader.js +0 -236
  176. package/dist/modules/src/sample.js +0 -1056
  177. package/dist/modules/src/source.js +0 -1182
  178. package/dist/modules/src/subtitles.js +0 -575
  179. package/dist/modules/src/target.js +0 -140
  180. package/dist/modules/src/wave/riff-writer.js +0 -30
  181. package/dist/modules/src/wave/wave-demuxer.js +0 -447
  182. package/dist/modules/src/wave/wave-muxer.js +0 -318
  183. package/dist/modules/src/writer.js +0 -370
  184. package/src/adts/adts-demuxer.ts +0 -331
  185. package/src/adts/adts-muxer.ts +0 -111
  186. package/src/adts/adts-reader.ts +0 -85
  187. package/src/codec-data.ts +0 -2078
  188. package/src/codec.ts +0 -1092
  189. package/src/conversion.ts +0 -2112
  190. package/src/custom-coder.ts +0 -197
  191. package/src/demuxer.ts +0 -24
  192. package/src/encode.ts +0 -739
  193. package/src/flac/flac-demuxer.ts +0 -730
  194. package/src/flac/flac-misc.ts +0 -164
  195. package/src/flac/flac-muxer.ts +0 -320
  196. package/src/id3.ts +0 -925
  197. package/src/index.ts +0 -221
  198. package/src/input-format.ts +0 -541
  199. package/src/input-track.ts +0 -529
  200. package/src/input.ts +0 -235
  201. package/src/isobmff/isobmff-boxes.ts +0 -1719
  202. package/src/isobmff/isobmff-demuxer.ts +0 -3190
  203. package/src/isobmff/isobmff-misc.ts +0 -29
  204. package/src/isobmff/isobmff-muxer.ts +0 -1348
  205. package/src/isobmff/isobmff-reader.ts +0 -91
  206. package/src/matroska/ebml.ts +0 -730
  207. package/src/matroska/matroska-demuxer.ts +0 -2481
  208. package/src/matroska/matroska-misc.ts +0 -29
  209. package/src/matroska/matroska-muxer.ts +0 -1276
  210. package/src/media-sink.ts +0 -2179
  211. package/src/media-source.ts +0 -2243
  212. package/src/metadata.ts +0 -320
  213. package/src/misc.ts +0 -798
  214. package/src/mp3/mp3-demuxer.ts +0 -383
  215. package/src/mp3/mp3-muxer.ts +0 -166
  216. package/src/mp3/mp3-reader.ts +0 -34
  217. package/src/mp3/mp3-writer.ts +0 -120
  218. package/src/muxer.ts +0 -88
  219. package/src/node.ts +0 -11
  220. package/src/ogg/ogg-demuxer.ts +0 -1053
  221. package/src/ogg/ogg-misc.ts +0 -116
  222. package/src/ogg/ogg-muxer.ts +0 -497
  223. package/src/ogg/ogg-reader.ts +0 -93
  224. package/src/output-format.ts +0 -945
  225. package/src/output.ts +0 -488
  226. package/src/packet.ts +0 -263
  227. package/src/pcm.ts +0 -112
  228. package/src/reader.ts +0 -323
  229. package/src/sample.ts +0 -1461
  230. package/src/source.ts +0 -1688
  231. package/src/subtitles.ts +0 -711
  232. package/src/target.ts +0 -204
  233. package/src/tsconfig.json +0 -16
  234. package/src/wave/riff-writer.ts +0 -36
  235. package/src/wave/wave-demuxer.ts +0 -529
  236. package/src/wave/wave-muxer.ts +0 -371
  237. package/src/writer.ts +0 -490
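
Judging by the file list above, this release appears to drop the CommonJS bundles (mediabunny.cjs, mediabunny.min.cjs) and renames the ESM bundle from mediabunny.mjs to mediabunny.js, so the package now seems to ship ESM-only builds. As a rough orientation for the API whose bundled declaration file (mediabunny.d.ts) is removed below, here is a minimal usage sketch. It assumes the Input({ source, formats }) constructor options and the getPrimaryAudioTrack() accessor, which are not visible in this excerpt; ALL_FORMATS, BlobSource, and AudioBufferSink.getBuffer() are declared in the diff itself.

// Minimal sketch, not part of the diff. Assumes the ESM entry of @kenzuya/mediabunny and the
// Input({ source, formats }) / getPrimaryAudioTrack() API shape; ALL_FORMATS, BlobSource and
// AudioBufferSink are declared in the removed mediabunny.d.ts shown below.
import { ALL_FORMATS, BlobSource, Input, AudioBufferSink } from '@kenzuya/mediabunny';

async function readFirstAudioBuffer(file: File) {
  // A File is a Blob, so BlobSource is the source to use for files read off disk.
  const input = new Input({ source: new BlobSource(file), formats: ALL_FORMATS });

  // Assumed accessor (not shown in this excerpt): resolves to an InputAudioTrack or null.
  const track = await input.getPrimaryAudioTrack();
  if (!track) return null;

  // AudioBufferSink.getBuffer() is declared below: returns the last audio buffer
  // (in presentation order) starting at or before the given timestamp.
  const sink = new AudioBufferSink(track);
  return sink.getBuffer(0); // Promise<WrappedAudioBuffer | null>
}
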
package/dist/mediabunny.d.ts
@@ -1,3319 +0,0 @@
1
- /// <reference types="dom-mediacapture-transform" />
2
- /// <reference types="dom-webcodecs" />
3
-
4
- /**
5
- * ADTS input format singleton.
6
- * @group Input formats
7
- * @public
8
- */
9
- export declare const ADTS: AdtsInputFormat;
10
-
11
- /**
12
- * ADTS file format.
13
- *
14
- * Do not instantiate this class; use the {@link ADTS} singleton instead.
15
- *
16
- * @group Input formats
17
- * @public
18
- */
19
- export declare class AdtsInputFormat extends InputFormat {
20
- get name(): string;
21
- get mimeType(): string;
22
- }
23
-
24
- /**
25
- * ADTS file format.
26
- * @group Output formats
27
- * @public
28
- */
29
- export declare class AdtsOutputFormat extends OutputFormat {
30
- /** Creates a new {@link AdtsOutputFormat} configured with the specified `options`. */
31
- constructor(options?: AdtsOutputFormatOptions);
32
- getSupportedTrackCounts(): TrackCountLimits;
33
- get fileExtension(): string;
34
- get mimeType(): string;
35
- getSupportedCodecs(): MediaCodec[];
36
- get supportsVideoRotationMetadata(): boolean;
37
- }
38
-
39
- /**
40
- * ADTS-specific output options.
41
- * @group Output formats
42
- * @public
43
- */
44
- export declare type AdtsOutputFormatOptions = {
45
- /**
46
- * Will be called for each ADTS frame that is written.
47
- *
48
- * @param data - The raw bytes.
49
- * @param position - The byte offset of the data in the file.
50
- */
51
- onFrame?: (data: Uint8Array, position: number) => unknown;
52
- };
53
-
54
- /**
55
- * List of all input format singletons. If you don't need to support all input formats, you should specify the
56
- * formats individually for better tree shaking.
57
- * @group Input formats
58
- * @public
59
- */
60
- export declare const ALL_FORMATS: InputFormat[];
61
-
62
- /**
63
- * List of all track types.
64
- * @group Miscellaneous
65
- * @public
66
- */
67
- export declare const ALL_TRACK_TYPES: readonly ["video", "audio", "subtitle"];
68
-
69
- /**
70
- * Sync or async iterable.
71
- * @group Miscellaneous
72
- * @public
73
- */
74
- export declare type AnyIterable<T> = Iterable<T> | AsyncIterable<T>;
75
-
76
- /**
77
- * A file attached to a media file.
78
- *
79
- * @group Metadata tags
80
- * @public
81
- */
82
- export declare class AttachedFile {
83
- /** The raw file data. */
84
- data: Uint8Array;
85
- /** An RFC 6838 MIME type (e.g. image/jpeg, image/png, font/ttf, etc.) */
86
- mimeType?: string | undefined;
87
- /** The name of the file. */
88
- name?: string | undefined;
89
- /** A description of the file. */
90
- description?: string | undefined;
91
- /** Creates a new {@link AttachedFile}. */
92
- constructor(
93
- /** The raw file data. */
94
- data: Uint8Array,
95
- /** An RFC 6838 MIME type (e.g. image/jpeg, image/png, font/ttf, etc.) */
96
- mimeType?: string | undefined,
97
- /** The name of the file. */
98
- name?: string | undefined,
99
- /** A description of the file. */
100
- description?: string | undefined);
101
- }
102
-
103
- /**
104
- * An embedded image such as cover art, booklet scan, artwork or preview frame.
105
- *
106
- * @group Metadata tags
107
- * @public
108
- */
109
- export declare type AttachedImage = {
110
- /** The raw image data. */
111
- data: Uint8Array;
112
- /** An RFC 6838 MIME type (e.g. image/jpeg, image/png, etc.) */
113
- mimeType: string;
114
- /** The kind or purpose of the image. */
115
- kind: 'coverFront' | 'coverBack' | 'unknown';
116
- /** The name of the image file. */
117
- name?: string;
118
- /** A description of the image. */
119
- description?: string;
120
- };
121
-
122
- /**
123
- * List of known audio codecs, ordered by encoding preference.
124
- * @group Codecs
125
- * @public
126
- */
127
- export declare const AUDIO_CODECS: readonly ["aac", "opus", "mp3", "vorbis", "flac", "pcm-s16", "pcm-s16be", "pcm-s24", "pcm-s24be", "pcm-s32", "pcm-s32be", "pcm-f32", "pcm-f32be", "pcm-f64", "pcm-f64be", "pcm-u8", "pcm-s8", "ulaw", "alaw"];
128
-
129
- /**
130
- * A sink that retrieves decoded audio samples from an audio track and converts them to `AudioBuffer` instances. This is
131
- * often more useful than directly retrieving audio samples, as audio buffers can be directly used with the
132
- * Web Audio API.
133
- * @group Media sinks
134
- * @public
135
- */
136
- export declare class AudioBufferSink {
137
- /** Creates a new {@link AudioBufferSink} for the given {@link InputAudioTrack}. */
138
- constructor(audioTrack: InputAudioTrack);
139
- /**
140
- * Retrieves the audio buffer corresponding to the given timestamp, in seconds. More specifically, returns
141
- * the last audio buffer (in presentation order) with a start timestamp less than or equal to the given timestamp.
142
- * Returns null if the timestamp is before the track's first timestamp.
143
- *
144
- * @param timestamp - The timestamp used for retrieval, in seconds.
145
- */
146
- getBuffer(timestamp: number): Promise<WrappedAudioBuffer | null>;
147
- /**
148
- * Creates an async iterator that yields audio buffers of this track in presentation order. This method
149
- * will intelligently pre-decode a few buffers ahead to enable fast iteration.
150
- *
151
- * @param startTimestamp - The timestamp in seconds at which to start yielding buffers (inclusive).
152
- * @param endTimestamp - The timestamp in seconds at which to stop yielding buffers (exclusive).
153
- */
154
- buffers(startTimestamp?: number, endTimestamp?: number): AsyncGenerator<WrappedAudioBuffer, void, unknown>;
155
- /**
156
- * Creates an async iterator that yields an audio buffer for each timestamp in the argument. This method
157
- * uses an optimized decoding pipeline if these timestamps are monotonically sorted, decoding each packet at most
158
- * once, and is therefore more efficient than manually getting the buffer for every timestamp. The iterator may
159
- * yield null if no buffer is available for a given timestamp.
160
- *
161
- * @param timestamps - An iterable or async iterable of timestamps in seconds.
162
- */
163
- buffersAtTimestamps(timestamps: AnyIterable<number>): AsyncGenerator<WrappedAudioBuffer | null, void, unknown>;
164
- }
165
-
166
- /**
167
- * This source can be used to add audio data from an AudioBuffer to the output track. This is useful when working with
168
- * the Web Audio API.
169
- * @group Media sources
170
- * @public
171
- */
172
- export declare class AudioBufferSource extends AudioSource {
173
- /**
174
- * Creates a new {@link AudioBufferSource} whose `AudioBuffer` instances are encoded according to the specified
175
- * {@link AudioEncodingConfig}.
176
- */
177
- constructor(encodingConfig: AudioEncodingConfig);
178
- /**
179
- * Converts an AudioBuffer to audio samples, encodes them and adds them to the output. The first AudioBuffer will
180
- * be played at timestamp 0, and any subsequent AudioBuffer will have a timestamp equal to the total duration of
181
- * all previous AudioBuffers.
182
- *
183
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
184
- * to respect writer and encoder backpressure.
185
- */
186
- add(audioBuffer: AudioBuffer): Promise<void>;
187
- }
188
-
189
- /**
190
- * Union type of known audio codecs.
191
- * @group Codecs
192
- * @public
193
- */
194
- export declare type AudioCodec = typeof AUDIO_CODECS[number];
195
-
196
- /**
197
- * Additional options that control audio encoding.
198
- * @group Encoding
199
- * @public
200
- */
201
- export declare type AudioEncodingAdditionalOptions = {
202
- /** Configures the bitrate mode. */
203
- bitrateMode?: 'constant' | 'variable';
204
- /**
205
- * The full codec string as specified in the WebCodecs Codec Registry. This string must match the codec
206
- * specified in `codec`. When not set, a fitting codec string will be constructed automatically by the library.
207
- */
208
- fullCodecString?: string;
209
- };
210
-
211
- /**
212
- * Configuration object that controls audio encoding. Can be used to set codec, quality, and more.
213
- * @group Encoding
214
- * @public
215
- */
216
- export declare type AudioEncodingConfig = {
217
- /** The audio codec that should be used for encoding the audio samples. */
218
- codec: AudioCodec;
219
- /**
220
- * The target bitrate for the encoded audio, in bits per second. Alternatively, a subjective {@link Quality} can
221
- * be provided. Required for compressed audio codecs, unused for PCM codecs.
222
- */
223
- bitrate?: number | Quality;
224
- /** Called for each successfully encoded packet. Both the packet and the encoding metadata are passed. */
225
- onEncodedPacket?: (packet: EncodedPacket, meta: EncodedAudioChunkMetadata | undefined) => unknown;
226
- /**
227
- * Called when the internal [encoder config](https://www.w3.org/TR/webcodecs/#audio-encoder-config), as used by the
228
- * WebCodecs API, is created.
229
- */
230
- onEncoderConfig?: (config: AudioEncoderConfig) => unknown;
231
- } & AudioEncodingAdditionalOptions;
232
-
233
- /**
234
- * Represents a raw, unencoded audio sample. Mainly used as an expressive wrapper around WebCodecs API's
235
- * [`AudioData`](https://developer.mozilla.org/en-US/docs/Web/API/AudioData), but can also be used standalone.
236
- * @group Samples
237
- * @public
238
- */
239
- export declare class AudioSample implements Disposable {
240
- /**
241
- * The audio sample format.
242
- * [See sample formats](https://developer.mozilla.org/en-US/docs/Web/API/AudioData/format)
243
- */
244
- readonly format: AudioSampleFormat;
245
- /** The audio sample rate in hertz. */
246
- readonly sampleRate: number;
247
- /**
248
- * The number of audio frames in the sample, per channel. In other words, the length of this audio sample in frames.
249
- */
250
- readonly numberOfFrames: number;
251
- /** The number of audio channels. */
252
- readonly numberOfChannels: number;
253
- /** The duration of the sample in seconds. */
254
- readonly duration: number;
255
- /**
256
- * The presentation timestamp of the sample in seconds. May be negative. Samples with negative end timestamps should
257
- * not be presented.
258
- */
259
- readonly timestamp: number;
260
- /** The presentation timestamp of the sample in microseconds. */
261
- get microsecondTimestamp(): number;
262
- /** The duration of the sample in microseconds. */
263
- get microsecondDuration(): number;
264
- /**
265
- * Creates a new {@link AudioSample}, either from an existing
266
- * [`AudioData`](https://developer.mozilla.org/en-US/docs/Web/API/AudioData) or from raw bytes specified in
267
- * {@link AudioSampleInit}.
268
- */
269
- constructor(init: AudioData | AudioSampleInit);
270
- /** Returns the number of bytes required to hold the audio sample's data as specified by the given options. */
271
- allocationSize(options: AudioSampleCopyToOptions): number;
272
- /** Copies the audio sample's data to an ArrayBuffer or ArrayBufferView as specified by the given options. */
273
- copyTo(destination: AllowSharedBufferSource, options: AudioSampleCopyToOptions): void;
274
- /** Clones this audio sample. */
275
- clone(): AudioSample;
276
- /**
277
- * Closes this audio sample, releasing held resources. Audio samples should be closed as soon as they are not
278
- * needed anymore.
279
- */
280
- close(): void;
281
- /**
282
- * Converts this audio sample to an AudioData for use with the WebCodecs API. The AudioData returned by this
283
- * method *must* be closed separately from this audio sample.
284
- */
285
- toAudioData(): AudioData;
286
- /** Convert this audio sample to an AudioBuffer for use with the Web Audio API. */
287
- toAudioBuffer(): AudioBuffer;
288
- /** Sets the presentation timestamp of this audio sample, in seconds. */
289
- setTimestamp(newTimestamp: number): void;
290
- /** Calls `.close()`. */
291
- [Symbol.dispose](): void;
292
- /**
293
- * Creates AudioSamples from an AudioBuffer, starting at the given timestamp in seconds. Typically creates exactly
294
- * one sample, but may create multiple if the AudioBuffer is exceedingly large.
295
- */
296
- static fromAudioBuffer(audioBuffer: AudioBuffer, timestamp: number): AudioSample[];
297
- }
298
-
299
- /**
300
- * Options used for copying audio sample data.
301
- * @group Samples
302
- * @public
303
- */
304
- export declare type AudioSampleCopyToOptions = {
305
- /**
306
- * The index identifying the plane to copy from. This must be 0 if using a non-planar (interleaved) output format.
307
- */
308
- planeIndex: number;
309
- /**
310
- * The output format for the destination data. Defaults to the AudioSample's format.
311
- * [See sample formats](https://developer.mozilla.org/en-US/docs/Web/API/AudioData/format)
312
- */
313
- format?: AudioSampleFormat;
314
- /** An offset into the source plane data indicating which frame to begin copying from. Defaults to 0. */
315
- frameOffset?: number;
316
- /**
317
- * The number of frames to copy. If not provided, the copy will include all frames in the plane beginning
318
- * with frameOffset.
319
- */
320
- frameCount?: number;
321
- };
322
-
323
- /**
324
- * Metadata used for AudioSample initialization.
325
- * @group Samples
326
- * @public
327
- */
328
- export declare type AudioSampleInit = {
329
- /** The audio data for this sample. */
330
- data: AllowSharedBufferSource;
331
- /**
332
- * The audio sample format. [See sample formats](https://developer.mozilla.org/en-US/docs/Web/API/AudioData/format)
333
- */
334
- format: AudioSampleFormat;
335
- /** The number of audio channels. */
336
- numberOfChannels: number;
337
- /** The audio sample rate in hertz. */
338
- sampleRate: number;
339
- /** The presentation timestamp of the sample in seconds. */
340
- timestamp: number;
341
- };
342
-
343
- /**
344
- * Sink for retrieving decoded audio samples from an audio track.
345
- * @group Media sinks
346
- * @public
347
- */
348
- export declare class AudioSampleSink extends BaseMediaSampleSink<AudioSample> {
349
- /** Creates a new {@link AudioSampleSink} for the given {@link InputAudioTrack}. */
350
- constructor(audioTrack: InputAudioTrack);
351
- /**
352
- * Retrieves the audio sample corresponding to the given timestamp, in seconds. More specifically, returns
353
- * the last audio sample (in presentation order) with a start timestamp less than or equal to the given timestamp.
354
- * Returns null if the timestamp is before the track's first timestamp.
355
- *
356
- * @param timestamp - The timestamp used for retrieval, in seconds.
357
- */
358
- getSample(timestamp: number): Promise<AudioSample | null>;
359
- /**
360
- * Creates an async iterator that yields the audio samples of this track in presentation order. This method
361
- * will intelligently pre-decode a few samples ahead to enable fast iteration.
362
- *
363
- * @param startTimestamp - The timestamp in seconds at which to start yielding samples (inclusive).
364
- * @param endTimestamp - The timestamp in seconds at which to stop yielding samples (exclusive).
365
- */
366
- samples(startTimestamp?: number, endTimestamp?: number): AsyncGenerator<AudioSample, void, unknown>;
367
- /**
368
- * Creates an async iterator that yields an audio sample for each timestamp in the argument. This method
369
- * uses an optimized decoding pipeline if these timestamps are monotonically sorted, decoding each packet at most
370
- * once, and is therefore more efficient than manually getting the sample for every timestamp. The iterator may
371
- * yield null if no sample is available for a given timestamp.
372
- *
373
- * @param timestamps - An iterable or async iterable of timestamps in seconds.
374
- */
375
- samplesAtTimestamps(timestamps: AnyIterable<number>): AsyncGenerator<AudioSample | null, void, unknown>;
376
- }
377
-
378
- /**
379
- * This source can be used to add raw, unencoded audio samples to an output audio track. These samples will
380
- * automatically be encoded and then piped into the output.
381
- * @group Media sources
382
- * @public
383
- */
384
- export declare class AudioSampleSource extends AudioSource {
385
- /**
386
- * Creates a new {@link AudioSampleSource} whose samples are encoded according to the specified
387
- * {@link AudioEncodingConfig}.
388
- */
389
- constructor(encodingConfig: AudioEncodingConfig);
390
- /**
391
- * Encodes an audio sample and then adds it to the output.
392
- *
393
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
394
- * to respect writer and encoder backpressure.
395
- */
396
- add(audioSample: AudioSample): Promise<void>;
397
- }
398
-
399
- /**
400
- * Base class for audio sources - sources for audio tracks.
401
- * @group Media sources
402
- * @public
403
- */
404
- export declare abstract class AudioSource extends MediaSource_2 {
405
- /** Internal constructor. */
406
- constructor(codec: AudioCodec);
407
- }
408
-
409
- /**
410
- * Additional metadata for audio tracks.
411
- * @group Output files
412
- * @public
413
- */
414
- export declare type AudioTrackMetadata = BaseTrackMetadata & {};
415
-
416
- /**
417
- * Base class for decoded media sample sinks.
418
- * @group Media sinks
419
- * @public
420
- */
421
- export declare abstract class BaseMediaSampleSink<MediaSample extends VideoSample | AudioSample> {
422
- }
423
-
424
- /**
425
- * Base track metadata, applicable to all tracks.
426
- * @group Output files
427
- * @public
428
- */
429
- export declare type BaseTrackMetadata = {
430
- /** The three-letter, ISO 639-2/T language code specifying the language of this track. */
431
- languageCode?: string;
432
- /** A user-defined name for this track, like "English" or "Director Commentary". */
433
- name?: string;
434
- /** The track's disposition, i.e. information about its intended usage. */
435
- disposition?: Partial<TrackDisposition>;
436
- /**
437
- * The maximum amount of encoded packets that will be added to this track. Setting this field provides the muxer
438
- * with an additional signal that it can use to preallocate space in the file.
439
- *
440
- * When this field is set, it is an error to provide more packets than whatever this field specifies.
441
- *
442
- * Predicting the maximum packet count requires considering both the maximum duration as well as the codec.
443
- * - For video codecs, you can assume one packet per frame.
444
- * - For audio codecs, there is one packet for each "audio chunk", the duration of which depends on the codec. For
445
- * simplicity, you can assume each packet is roughly 10 ms or 512 samples long, whichever is shorter.
446
- * - For subtitles, assume each cue and each gap in the subtitles adds a packet.
447
- *
448
- * If you're not fully sure, make sure to add a buffer of around 33% to make sure you stay below the maximum.
449
- */
450
- maximumPacketCount?: number;
451
- };
452
-
453
- /**
454
- * A source backed by a [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob). Since a
455
- * [`File`](https://developer.mozilla.org/en-US/docs/Web/API/File) is also a `Blob`, this is the source to use when
456
- * reading files off the disk.
457
- * @group Input sources
458
- * @public
459
- */
460
- export declare class BlobSource extends Source {
461
- /**
462
- * Creates a new {@link BlobSource} backed by the specified
463
- * [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob).
464
- */
465
- constructor(blob: Blob, options?: BlobSourceOptions);
466
- }
467
-
468
- /**
469
- * Options for {@link BlobSource}.
470
- * @group Input sources
471
- * @public
472
- */
473
- export declare type BlobSourceOptions = {
474
- /** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 8 MiB. */
475
- maxCacheSize?: number;
476
- };
477
-
478
- /**
479
- * A source backed by an ArrayBuffer or ArrayBufferView, with the entire file held in memory.
480
- * @group Input sources
481
- * @public
482
- */
483
- declare class BufferSource_2 extends Source {
484
- /** Creates a new {@link BufferSource} backed the specified `ArrayBuffer` or `ArrayBufferView`. */
485
- constructor(buffer: ArrayBuffer | ArrayBufferView);
486
- }
487
- export { BufferSource_2 as BufferSource }
488
-
489
- /**
490
- * A target that writes data directly into an ArrayBuffer in memory. Great for performance, but not suitable for very
491
- * large files. The buffer will be available once the output has been finalized.
492
- * @group Output targets
493
- * @public
494
- */
495
- export declare class BufferTarget extends Target {
496
- /** Stores the final output buffer. Until the output is finalized, this will be `null`. */
497
- buffer: ArrayBuffer | null;
498
- }
499
-
500
- /**
501
- * Checks if the browser is able to encode the given codec.
502
- * @group Encoding
503
- * @public
504
- */
505
- export declare const canEncode: (codec: MediaCodec) => Promise<boolean>;
506
-
507
- /**
508
- * Checks if the browser is able to encode the given audio codec with the given parameters.
509
- * @group Encoding
510
- * @public
511
- */
512
- export declare const canEncodeAudio: (codec: AudioCodec, options?: {
513
- numberOfChannels?: number;
514
- sampleRate?: number;
515
- bitrate?: number | Quality;
516
- } & AudioEncodingAdditionalOptions) => Promise<boolean>;
517
-
518
- /**
519
- * Checks if the browser is able to encode the given subtitle codec.
520
- * @group Encoding
521
- * @public
522
- */
523
- export declare const canEncodeSubtitles: (codec: SubtitleCodec) => Promise<boolean>;
524
-
525
- /**
526
- * Checks if the browser is able to encode the given video codec with the given parameters.
527
- * @group Encoding
528
- * @public
529
- */
530
- export declare const canEncodeVideo: (codec: VideoCodec, options?: {
531
- width?: number;
532
- height?: number;
533
- bitrate?: number | Quality;
534
- } & VideoEncodingAdditionalOptions) => Promise<boolean>;
535
-
536
- /**
537
- * A sink that renders video samples (frames) of the given video track to canvases. This is often more useful than
538
- * directly retrieving frames, as it comes with common preprocessing steps such as resizing or applying rotation
539
- * metadata.
540
- *
541
- * This sink will yield `HTMLCanvasElement`s when in a DOM context, and `OffscreenCanvas`es otherwise.
542
- *
543
- * @group Media sinks
544
- * @public
545
- */
546
- export declare class CanvasSink {
547
- /** Creates a new {@link CanvasSink} for the given {@link InputVideoTrack}. */
548
- constructor(videoTrack: InputVideoTrack, options?: CanvasSinkOptions);
549
- /**
550
- * Retrieves a canvas with the video frame corresponding to the given timestamp, in seconds. More specifically,
551
- * returns the last video frame (in presentation order) with a start timestamp less than or equal to the given
552
- * timestamp. Returns null if the timestamp is before the track's first timestamp.
553
- *
554
- * @param timestamp - The timestamp used for retrieval, in seconds.
555
- */
556
- getCanvas(timestamp: number): Promise<WrappedCanvas | null>;
557
- /**
558
- * Creates an async iterator that yields canvases with the video frames of this track in presentation order. This
559
- * method will intelligently pre-decode a few frames ahead to enable fast iteration.
560
- *
561
- * @param startTimestamp - The timestamp in seconds at which to start yielding canvases (inclusive).
562
- * @param endTimestamp - The timestamp in seconds at which to stop yielding canvases (exclusive).
563
- */
564
- canvases(startTimestamp?: number, endTimestamp?: number): AsyncGenerator<WrappedCanvas, void, unknown>;
565
- /**
566
- * Creates an async iterator that yields a canvas for each timestamp in the argument. This method uses an optimized
567
- * decoding pipeline if these timestamps are monotonically sorted, decoding each packet at most once, and is
568
- * therefore more efficient than manually getting the canvas for every timestamp. The iterator may yield null if
569
- * no frame is available for a given timestamp.
570
- *
571
- * @param timestamps - An iterable or async iterable of timestamps in seconds.
572
- */
573
- canvasesAtTimestamps(timestamps: AnyIterable<number>): AsyncGenerator<WrappedCanvas | null, void, unknown>;
574
- }
575
-
576
- /**
577
- * Options for constructing a CanvasSink.
578
- * @group Media sinks
579
- * @public
580
- */
581
- export declare type CanvasSinkOptions = {
582
- /**
583
- * Whether the output canvases should have transparency instead of a black background. Defaults to `false`. Set
584
- * this to `true` when using this sink to read transparent videos.
585
- */
586
- alpha?: boolean;
587
- /**
588
- * The width of the output canvas in pixels, defaulting to the display width of the video track. If height is not
589
- * set, it will be deduced automatically based on aspect ratio.
590
- */
591
- width?: number;
592
- /**
593
- * The height of the output canvas in pixels, defaulting to the display height of the video track. If width is not
594
- * set, it will be deduced automatically based on aspect ratio.
595
- */
596
- height?: number;
597
- /**
598
- * The fitting algorithm in case both width and height are set.
599
- *
600
- * - `'fill'` will stretch the image to fill the entire box, potentially altering aspect ratio.
601
- * - `'contain'` will contain the entire image within the box while preserving aspect ratio. This may lead to
602
- * letterboxing.
603
- * - `'cover'` will scale the image until the entire box is filled, while preserving aspect ratio.
604
- */
605
- fit?: 'fill' | 'contain' | 'cover';
606
- /**
607
- * The clockwise rotation by which to rotate the raw video frame. Defaults to the rotation set in the file metadata.
608
- * Rotation is applied before resizing.
609
- */
610
- rotation?: Rotation;
611
- /**
612
- * Specifies the rectangular region of the input video to crop to. The crop region will automatically be clamped to
613
- * the dimensions of the input video track. Cropping is performed after rotation but before resizing.
614
- */
615
- crop?: CropRectangle;
616
- /**
617
- * When set, specifies the number of canvases in the pool. These canvases will be reused in a ring buffer /
618
- * round-robin type fashion. This keeps the amount of allocated VRAM constant and relieves the browser from
619
- * constantly allocating/deallocating canvases. A pool size of 0 or `undefined` disables the pool and means a new
620
- * canvas is created each time.
621
- */
622
- poolSize?: number;
623
- };
624
-
625
- /**
626
- * This source can be used to add video frames to the output track from a fixed canvas element. Since canvases are often
627
- * used for rendering, this source provides a convenient wrapper around {@link VideoSampleSource}.
628
- * @group Media sources
629
- * @public
630
- */
631
- export declare class CanvasSource extends VideoSource {
632
- /**
633
- * Creates a new {@link CanvasSource} from a canvas element or `OffscreenCanvas` whose samples are encoded
634
- * according to the specified {@link VideoEncodingConfig}.
635
- */
636
- constructor(canvas: HTMLCanvasElement | OffscreenCanvas, encodingConfig: VideoEncodingConfig);
637
- /**
638
- * Captures the current canvas state as a video sample (frame), encodes it and adds it to the output.
639
- *
640
- * @param timestamp - The timestamp of the sample, in seconds.
641
- * @param duration - The duration of the sample, in seconds.
642
- *
643
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
644
- * to respect writer and encoder backpressure.
645
- */
646
- add(timestamp: number, duration?: number, encodeOptions?: VideoEncoderEncodeOptions): Promise<void>;
647
- }
648
-
649
- /**
650
- * Represents a media file conversion process, used to convert one media file into another. In addition to conversion,
651
- * this class can be used to resize and rotate video, resample audio, drop tracks, or trim to a specific time range.
652
- * @group Conversion
653
- * @public
654
- */
655
- export declare class Conversion {
656
- /** The input file. */
657
- readonly input: Input;
658
- /** The output file. */
659
- readonly output: Output;
660
- /**
661
- * A callback that is fired whenever the conversion progresses. Returns a number between 0 and 1, indicating the
662
- * completion of the conversion. Note that a progress of 1 doesn't necessarily mean the conversion is complete;
663
- * the conversion is complete once `execute()` resolves.
664
- *
665
- * In order for progress to be computed, this property must be set before `execute` is called.
666
- */
667
- onProgress?: (progress: number) => unknown;
668
- /**
669
- * Whether this conversion, as it has been configured, is valid and can be executed. If this field is `false`, check
670
- * the `discardedTracks` field for reasons.
671
- */
672
- isValid: boolean;
673
- /** The list of tracks that are included in the output file. */
674
- readonly utilizedTracks: InputTrack[];
675
- /** The list of tracks from the input file that have been discarded, alongside the discard reason. */
676
- readonly discardedTracks: DiscardedTrack[];
677
- /** Initializes a new conversion process without starting the conversion. */
678
- static init(options: ConversionOptions): Promise<Conversion>;
679
- /** Creates a new Conversion instance (duh). */
680
- private constructor();
681
- /**
682
- * Adds an external subtitle track to the output. This can be called after `init()` but before `execute()`.
683
- * This is useful for adding subtitle tracks from separate files that are not part of the input video.
684
- *
685
- * @param source - The subtitle source to add
686
- * @param metadata - Optional metadata for the subtitle track
687
- * @param contentProvider - Optional async function that will be called after the output starts to add content to the subtitle source
688
- */
689
- addExternalSubtitleTrack(source: SubtitleSource, metadata?: SubtitleTrackMetadata, contentProvider?: () => Promise<void>): void;
690
- /**
691
- * Executes the conversion process. Resolves once conversion is complete.
692
- *
693
- * Will throw if `isValid` is `false`.
694
- */
695
- execute(): Promise<void>;
696
- /** Cancels the conversion process. Does nothing if the conversion is already complete. */
697
- cancel(): Promise<void>;
698
- }
699
-
700
- /**
701
- * Audio-specific options.
702
- * @group Conversion
703
- * @public
704
- */
705
- export declare type ConversionAudioOptions = {
706
- /** If `true`, all audio tracks will be discarded and will not be present in the output. */
707
- discard?: boolean;
708
- /** The desired channel count of the output audio. */
709
- numberOfChannels?: number;
710
- /** The desired sample rate of the output audio, in hertz. */
711
- sampleRate?: number;
712
- /** The desired output audio codec. */
713
- codec?: AudioCodec;
714
- /** The desired bitrate of the output audio. */
715
- bitrate?: number | Quality;
716
- /** When `true`, audio will always be re-encoded instead of directly copying over the encoded samples. */
717
- forceTranscode?: boolean;
718
- /**
719
- * Allows for custom user-defined processing of audio samples, e.g. for applying audio effects, transformations, or
720
- * timestamp modifications. Will be called for each input audio sample after remixing and resampling.
721
- *
722
- * Must return an {@link AudioSample}, an array of them, or `null` for dropping the sample.
723
- *
724
- * This function can also be used to manually perform remixing or resampling. When doing so, you should signal the
725
- * post-process parameters using the `processedNumberOfChannels` and `processedSampleRate` fields, which enables the
726
- * encoder to better know what to expect. If these fields aren't set, Mediabunny will assume you won't perform
727
- * remixing or resampling.
728
- */
729
- process?: (sample: AudioSample) => MaybePromise<AudioSample | AudioSample[] | null>;
730
- /**
731
- * An optional hint specifying the channel count of audio samples returned by the `process` function, for better
732
- * encoder configuration.
733
- */
734
- processedNumberOfChannels?: number;
735
- /**
736
- * An optional hint specifying the sample rate of audio samples returned by the `process` function, for better
737
- * encoder configuration.
738
- */
739
- processedSampleRate?: number;
740
- };
741
-
742
- /**
743
- * The options for media file conversion.
744
- * @group Conversion
745
- * @public
746
- */
747
- export declare type ConversionOptions = {
748
- /** The input file. */
749
- input: Input;
750
- /** The output file. */
751
- output: Output;
752
- /**
753
- * Video-specific options. When passing an object, the same options are applied to all video tracks. When passing a
754
- * function, it will be invoked for each video track and is expected to return or resolve to the options
755
- * for that specific track. The function is passed an instance of {@link InputVideoTrack} as well as a number `n`,
756
- * which is the 1-based index of the track in the list of all video tracks.
757
- */
758
- video?: ConversionVideoOptions | ((track: InputVideoTrack, n: number) => MaybePromise<ConversionVideoOptions | undefined>);
759
- /**
760
- * Audio-specific options. When passing an object, the same options are applied to all audio tracks. When passing a
761
- * function, it will be invoked for each audio track and is expected to return or resolve to the options
762
- * for that specific track. The function is passed an instance of {@link InputAudioTrack} as well as a number `n`,
763
- * which is the 1-based index of the track in the list of all audio tracks.
764
- */
765
- audio?: ConversionAudioOptions | ((track: InputAudioTrack, n: number) => MaybePromise<ConversionAudioOptions | undefined>);
766
- /**
767
- * Subtitle-specific options. When passing an object, the same options are applied to all subtitle tracks. When passing a
768
- * function, it will be invoked for each subtitle track and is expected to return or resolve to the options
769
- * for that specific track. The function is passed an instance of {@link InputSubtitleTrack} as well as a number `n`,
770
- * which is the 1-based index of the track in the list of all subtitle tracks.
771
- */
772
- subtitle?: ConversionSubtitleOptions | ((track: InputSubtitleTrack, n: number) => MaybePromise<ConversionSubtitleOptions | undefined>);
773
- /** Options to trim the input file. */
774
- trim?: {
775
- /** The time in the input file in seconds at which the output file should start. Must be less than `end`. */
776
- start: number;
777
- /** The time in the input file in seconds at which the output file should end. Must be greater than `start`. */
778
- end: number;
779
- };
780
- /**
781
- * An object or a callback that returns or resolves to an object containing the descriptive metadata tags that
782
- * should be written to the output file. If a function is passed, it will be passed the tags of the input file as
783
- * its first argument, allowing you to modify, augment or extend them.
784
- *
785
- * If no function is set, the input's metadata tags will be copied to the output.
786
- */
787
- tags?: MetadataTags | ((inputTags: MetadataTags) => MaybePromise<MetadataTags>);
788
- /**
789
- * Whether to show potential console warnings about discarded tracks after calling `Conversion.init()`, defaults to
790
- * `true`. Set this to `false` if you're properly handling the `discardedTracks` and `isValid` fields already and
791
- * want to keep the console output clean.
792
- */
793
- showWarnings?: boolean;
794
- };
795
-
796
- /**
797
- * Subtitle-specific options.
798
- * @group Conversion
799
- * @public
800
- */
801
- export declare type ConversionSubtitleOptions = {
802
- /** If `true`, all subtitle tracks will be discarded and will not be present in the output. */
803
- discard?: boolean;
804
- /** The desired output subtitle codec. */
805
- codec?: SubtitleCodec;
806
- };
807
-
808
- /**
809
- * Video-specific options.
810
- * @group Conversion
811
- * @public
812
- */
813
- export declare type ConversionVideoOptions = {
814
- /** If `true`, all video tracks will be discarded and will not be present in the output. */
815
- discard?: boolean;
816
- /**
817
- * The desired width of the output video in pixels, defaulting to the video's natural display width. If height
818
- * is not set, it will be deduced automatically based on aspect ratio.
819
- */
820
- width?: number;
821
- /**
822
- * The desired height of the output video in pixels, defaulting to the video's natural display height. If width
823
- * is not set, it will be deduced automatically based on aspect ratio.
824
- */
825
- height?: number;
826
- /**
827
- * The fitting algorithm in case both width and height are set, or if the input video changes its size over time.
828
- *
829
- * - `'fill'` will stretch the image to fill the entire box, potentially altering aspect ratio.
830
- * - `'contain'` will contain the entire image within the box while preserving aspect ratio. This may lead to
831
- * letterboxing.
832
- * - `'cover'` will scale the image until the entire box is filled, while preserving aspect ratio.
833
- */
834
- fit?: 'fill' | 'contain' | 'cover';
835
- /**
836
- * The angle in degrees to rotate the input video by, clockwise. Rotation is applied before cropping and resizing.
837
- * This rotation is _in addition to_ the natural rotation of the input video as specified in input file's metadata.
838
- */
839
- rotate?: Rotation;
840
- /**
841
- * Specifies the rectangular region of the input video to crop to. The crop region will automatically be clamped to
842
- * the dimensions of the input video track. Cropping is performed after rotation but before resizing.
843
- */
844
- crop?: {
845
- /** The distance in pixels from the left edge of the source frame to the left edge of the crop rectangle. */
846
- left: number;
847
- /** The distance in pixels from the top edge of the source frame to the top edge of the crop rectangle. */
848
- top: number;
849
- /** The width in pixels of the crop rectangle. */
850
- width: number;
851
- /** The height in pixels of the crop rectangle. */
852
- height: number;
853
- };
854
- /**
855
- * The desired frame rate of the output video, in hertz. If not specified, the original input frame rate will
856
- * be used (which may be variable).
857
- */
858
- frameRate?: number;
859
- /** The desired output video codec. */
860
- codec?: VideoCodec;
861
- /** The desired bitrate of the output video. */
862
- bitrate?: number | Quality;
863
- /**
864
- * Whether to discard or keep the transparency information of the input video. The default is `'discard'`. Note that
865
- * for `'keep'` to produce a transparent video, you must use an output config that supports it, such as WebM with
866
- * VP9.
867
- */
868
- alpha?: 'discard' | 'keep';
869
- /**
870
- * The interval, in seconds, of how often frames are encoded as a key frame. The default is 5 seconds. Frequent key
871
- * frames improve seeking behavior but increase file size. When using multiple video tracks, you should give them
872
- * all the same key frame interval.
873
- *
874
- * Setting this fields forces a transcode.
875
- */
876
- keyFrameInterval?: number;
877
- /** When `true`, video will always be re-encoded instead of directly copying over the encoded samples. */
878
- forceTranscode?: boolean;
879
- /**
880
- * Allows for custom user-defined processing of video frames, e.g. for applying overlays, color transformations, or
881
- * timestamp modifications. Will be called for each input video sample after transformations and frame rate
882
- * corrections.
883
- *
884
- * Must return a {@link VideoSample} or a `CanvasImageSource`, an array of them, or `null` for dropping the frame.
885
- * When non-timestamped data is returned, the timestamp and duration from the source sample will be used. Rotation
886
- * metadata of the returned sample will be ignored.
887
- *
888
- * This function can also be used to manually resize frames. When doing so, you should signal the post-process
889
- * dimensions using the `processedWidth` and `processedHeight` fields, which enables the encoder to better know what
890
- * to expect. If these fields aren't set, Mediabunny will assume you won't perform any resizing.
891
- */
892
- process?: (sample: VideoSample) => MaybePromise<CanvasImageSource | VideoSample | (CanvasImageSource | VideoSample)[] | null>;
893
- /**
894
- * An optional hint specifying the width of video samples returned by the `process` function, for better
895
- * encoder configuration.
896
- */
897
- processedWidth?: number;
898
- /**
899
- * An optional hint specifying the height of video samples returned by the `process` function, for better
900
- * encoder configuration.
901
- */
902
- processedHeight?: number;
903
- };
904
-
905
- /**
906
- * Specifies the rectangular cropping region.
907
- * @group Miscellaneous
908
- * @public
909
- */
910
- export declare type CropRectangle = {
911
- /** The distance in pixels from the left edge of the source frame to the left edge of the crop rectangle. */
912
- left: number;
913
- /** The distance in pixels from the top edge of the source frame to the top edge of the crop rectangle. */
914
- top: number;
915
- /** The width in pixels of the crop rectangle. */
916
- width: number;
917
- /** The height in pixels of the crop rectangle. */
918
- height: number;
919
- };
920
-
921
- /**
922
- * Base class for custom audio decoders. To add your own custom audio decoder, extend this class, implement the
923
- * abstract methods and static `supports` method, and register the decoder using {@link registerDecoder}.
924
- * @group Custom coders
925
- * @public
926
- */
927
- export declare abstract class CustomAudioDecoder {
928
- /** The input audio's codec. */
929
- readonly codec: AudioCodec;
930
- /** The input audio's decoder config. */
931
- readonly config: AudioDecoderConfig;
932
- /** The callback to call when a decoded AudioSample is available. */
933
- readonly onSample: (sample: AudioSample) => unknown;
934
- /** Returns true if and only if the decoder can decode the given codec configuration. */
935
- static supports(codec: AudioCodec, config: AudioDecoderConfig): boolean;
936
- /** Called after decoder creation; can be used for custom initialization logic. */
937
- abstract init(): MaybePromise<void>;
938
- /** Decodes the provided encoded packet. */
939
- abstract decode(packet: EncodedPacket): MaybePromise<void>;
940
- /** Decodes all remaining packets and then resolves. */
941
- abstract flush(): MaybePromise<void>;
942
- /** Called when the decoder is no longer needed and its resources can be freed. */
943
- abstract close(): MaybePromise<void>;
944
- }
945
-
946
- /**
947
- * Base class for custom audio encoders. To add your own custom audio encoder, extend this class, implement the
948
- * abstract methods and static `supports` method, and register the encoder using {@link registerEncoder}.
949
- * @group Custom coders
950
- * @public
951
- */
952
- export declare abstract class CustomAudioEncoder {
953
- /** The codec with which to encode the audio. */
954
- readonly codec: AudioCodec;
955
- /** Config for the encoder. */
956
- readonly config: AudioEncoderConfig;
957
- /** The callback to call when an EncodedPacket is available. */
958
- readonly onPacket: (packet: EncodedPacket, meta?: EncodedAudioChunkMetadata) => unknown;
959
- /** Returns true if and only if the encoder can encode the given codec configuration. */
960
- static supports(codec: AudioCodec, config: AudioEncoderConfig): boolean;
961
- /** Called after encoder creation; can be used for custom initialization logic. */
962
- abstract init(): MaybePromise<void>;
963
- /** Encodes the provided audio sample. */
964
- abstract encode(audioSample: AudioSample): MaybePromise<void>;
965
- /** Encodes all remaining audio samples and then resolves. */
966
- abstract flush(): MaybePromise<void>;
967
- /** Called when the encoder is no longer needed and its resources can be freed. */
968
- abstract close(): MaybePromise<void>;
969
- }
970
-
971
- /**
972
- * Base class for custom video decoders. To add your own custom video decoder, extend this class, implement the
973
- * abstract methods and static `supports` method, and register the decoder using {@link registerDecoder}.
974
- * @group Custom coders
975
- * @public
976
- */
977
- export declare abstract class CustomVideoDecoder {
978
- /** The input video's codec. */
979
- readonly codec: VideoCodec;
980
- /** The input video's decoder config. */
981
- readonly config: VideoDecoderConfig;
982
- /** The callback to call when a decoded VideoSample is available. */
983
- readonly onSample: (sample: VideoSample) => unknown;
984
- /** Returns true if and only if the decoder can decode the given codec configuration. */
985
- static supports(codec: VideoCodec, config: VideoDecoderConfig): boolean;
986
- /** Called after decoder creation; can be used for custom initialization logic. */
987
- abstract init(): MaybePromise<void>;
988
- /** Decodes the provided encoded packet. */
989
- abstract decode(packet: EncodedPacket): MaybePromise<void>;
990
- /** Decodes all remaining packets and then resolves. */
991
- abstract flush(): MaybePromise<void>;
992
- /** Called when the decoder is no longer needed and its resources can be freed. */
993
- abstract close(): MaybePromise<void>;
994
- }
995
-
996
- /**
997
- * Base class for custom video encoders. To add your own custom video encoder, extend this class, implement the
998
- * abstract methods and static `supports` method, and register the encoder using {@link registerEncoder}.
999
- * @group Custom coders
1000
- * @public
1001
- */
1002
- export declare abstract class CustomVideoEncoder {
1003
- /** The codec with which to encode the video. */
1004
- readonly codec: VideoCodec;
1005
- /** Config for the encoder. */
1006
- readonly config: VideoEncoderConfig;
1007
- /** The callback to call when an EncodedPacket is available. */
1008
- readonly onPacket: (packet: EncodedPacket, meta?: EncodedVideoChunkMetadata) => unknown;
1009
- /** Returns true if and only if the encoder can encode the given codec configuration. */
1010
- static supports(codec: VideoCodec, config: VideoEncoderConfig): boolean;
1011
- /** Called after encoder creation; can be used for custom initialization logic. */
1012
- abstract init(): MaybePromise<void>;
1013
- /** Encodes the provided video sample. */
1014
- abstract encode(videoSample: VideoSample, options: VideoEncoderEncodeOptions): MaybePromise<void>;
1015
- /** Encodes all remaining video samples and then resolves. */
1016
- abstract flush(): MaybePromise<void>;
1017
- /** Called when the encoder is no longer needed and its resources can be freed. */
1018
- abstract close(): MaybePromise<void>;
1019
- }
1020
-
1021
- /**
1022
- * An input track that was discarded (excluded) from a {@link Conversion} alongside the discard reason.
1023
- * @group Conversion
1024
- * @public
1025
- */
1026
- export declare type DiscardedTrack = {
1027
- /** The track that was discarded. */
1028
- track: InputTrack;
1029
- /**
1030
- * The reason for discarding the track.
1031
- *
1032
- * - `'discarded_by_user'`: You discarded this track by setting `discard: true`.
1033
- * - `'max_track_count_reached'`: The output had no more room for another track.
1034
- * - `'max_track_count_of_type_reached'`: The output had no more room for another track of this type, or the output
1035
- * doesn't support this track type at all.
1036
- * - `'unknown_source_codec'`: We don't know the codec of the input track and therefore don't know what to do
1037
- * with it.
1038
- * - `'undecodable_source_codec'`: The input track's codec is known, but we are unable to decode it.
1039
- * - `'no_encodable_target_codec'`: We can't find a codec that we are able to encode and that can be contained
1040
- * within the output format. This reason can be hit if the environment doesn't support the necessary encoders, or if
1041
- * you requested a codec that cannot be contained within the output format.
1042
- */
1043
- reason: 'discarded_by_user' | 'max_track_count_reached' | 'max_track_count_of_type_reached' | 'unknown_source_codec' | 'undecodable_source_codec' | 'no_encodable_target_codec';
1044
- };
1045
-
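The `Conversion` API itself is not part of this excerpt, so the sketch below assumes its usual shape (`Conversion.init()`, a `discardedTracks` array of the type above, and an `execute()` method), given an existing `input` and `output`:

```ts
declare const input: Input;   // an existing input file
declare const output: Output; // an existing, not-yet-started output

const conversion = await Conversion.init({ input, output }); // assumed Conversion API

// Surface anything the conversion had to drop, with the documented reasons.
for (const discarded of conversion.discardedTracks as DiscardedTrack[]) {
  console.warn(`Track ${discarded.track.id} (${discarded.track.type}) dropped: ${discarded.reason}`);
}

await conversion.execute(); // assumed method that runs the conversion
```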
1046
- /**
1047
- * The most basic audio source; can be used to directly pipe encoded packets into the output file.
1048
- * @group Media sources
1049
- * @public
1050
- */
1051
- export declare class EncodedAudioPacketSource extends AudioSource {
1052
- /** Creates a new {@link EncodedAudioPacketSource} whose packets are encoded using `codec`. */
1053
- constructor(codec: AudioCodec);
1054
- /**
1055
- * Adds an encoded packet to the output audio track. Packets must be added in *decode order*.
1056
- *
1057
- * @param meta - Additional metadata from the encoder. You should pass this for the first call, including a valid
1058
- * decoder config.
1059
- *
1060
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
1061
- * to respect writer and encoder backpressure.
1062
- */
1063
- add(packet: EncodedPacket, meta?: EncodedAudioChunkMetadata): Promise<void>;
1064
- }
1065
-
1066
- /**
1067
- * Represents an encoded chunk of media. Mainly used as an expressive wrapper around WebCodecs API's
1068
- * [`EncodedVideoChunk`](https://developer.mozilla.org/en-US/docs/Web/API/EncodedVideoChunk) and
1069
- * [`EncodedAudioChunk`](https://developer.mozilla.org/en-US/docs/Web/API/EncodedAudioChunk), but can also be used
1070
- * standalone.
1071
- * @group Packets
1072
- * @public
1073
- */
1074
- export declare class EncodedPacket {
1075
- /** The encoded data of this packet. */
1076
- readonly data: Uint8Array;
1077
- /** The type of this packet. */
1078
- readonly type: PacketType;
1079
- /**
1080
- * The presentation timestamp of this packet in seconds. May be negative. Samples with negative end timestamps
1081
- * should not be presented.
1082
- */
1083
- readonly timestamp: number;
1084
- /** The duration of this packet in seconds. */
1085
- readonly duration: number;
1086
- /**
1087
- * The sequence number indicates the decode order of the packets. Packet A must be decoded before packet B if A
1088
- * has a lower sequence number than B. If two packets have the same sequence number, they are the same packet.
1089
- * Otherwise, sequence numbers are arbitrary and are not guaranteed to have any meaning besides their relative
1090
- * ordering. Negative sequence numbers mean the sequence number is undefined.
1091
- */
1092
- readonly sequenceNumber: number;
1093
- /**
1094
- * The actual byte length of the data in this packet. This field is useful for metadata-only packets where the
1095
- * `data` field contains no bytes.
1096
- */
1097
- readonly byteLength: number;
1098
- /** Additional data carried with this packet. */
1099
- readonly sideData: EncodedPacketSideData;
1100
- /** Creates a new {@link EncodedPacket} from raw bytes and timing information. */
1101
- constructor(
1102
- /** The encoded data of this packet. */
1103
- data: Uint8Array,
1104
- /** The type of this packet. */
1105
- type: PacketType,
1106
- /**
1107
- * The presentation timestamp of this packet in seconds. May be negative. Samples with negative end timestamps
1108
- * should not be presented.
1109
- */
1110
- timestamp: number,
1111
- /** The duration of this packet in seconds. */
1112
- duration: number,
1113
- /**
1114
- * The sequence number indicates the decode order of the packets. Packet A must be decoded before packet B if A
1115
- * has a lower sequence number than B. If two packets have the same sequence number, they are the same packet.
1116
- * Otherwise, sequence numbers are arbitrary and are not guaranteed to have any meaning besides their relative
1117
- * ordering. Negative sequence numbers mean the sequence number is undefined.
1118
- */
1119
- sequenceNumber?: number, byteLength?: number, sideData?: EncodedPacketSideData);
1120
- /**
1121
- * If this packet is a metadata-only packet. Metadata-only packets don't contain their packet data. They are the
1122
- * result of retrieving packets with {@link PacketRetrievalOptions.metadataOnly} set to `true`.
1123
- */
1124
- get isMetadataOnly(): boolean;
1125
- /** The timestamp of this packet in microseconds. */
1126
- get microsecondTimestamp(): number;
1127
- /** The duration of this packet in microseconds. */
1128
- get microsecondDuration(): number;
1129
- /** Converts this packet to an
1130
- * [`EncodedVideoChunk`](https://developer.mozilla.org/en-US/docs/Web/API/EncodedVideoChunk) for use with the
1131
- * WebCodecs API. */
1132
- toEncodedVideoChunk(): EncodedVideoChunk;
1133
- /**
1134
- * Converts this packet to an
1135
- * [`EncodedVideoChunk`](https://developer.mozilla.org/en-US/docs/Web/API/EncodedVideoChunk) for use with the
1136
- * WebCodecs API, using the alpha side data instead of the color data. Throws if no alpha side data is defined.
1137
- */
1138
- alphaToEncodedVideoChunk(type?: PacketType): EncodedVideoChunk;
1139
- /** Converts this packet to an
1140
- * [`EncodedAudioChunk`](https://developer.mozilla.org/en-US/docs/Web/API/EncodedAudioChunk) for use with the
1141
- * WebCodecs API. */
1142
- toEncodedAudioChunk(): EncodedAudioChunk;
1143
- /**
1144
- * Creates an {@link EncodedPacket} from an
1145
- * [`EncodedVideoChunk`](https://developer.mozilla.org/en-US/docs/Web/API/EncodedVideoChunk) or
1146
- * [`EncodedAudioChunk`](https://developer.mozilla.org/en-US/docs/Web/API/EncodedAudioChunk). This method is useful
1147
- * for converting chunks from the WebCodecs API to `EncodedPacket` instances.
1148
- */
1149
- static fromEncodedChunk(chunk: EncodedVideoChunk | EncodedAudioChunk, sideData?: EncodedPacketSideData): EncodedPacket;
1150
- /** Clones this packet while optionally updating timing information. */
1151
- clone(options?: {
1152
- /** The timestamp of the cloned packet in seconds. */
1153
- timestamp?: number;
1154
- /** The duration of the cloned packet in seconds. */
1155
- duration?: number;
1156
- }): EncodedPacket;
1157
- }
1158
-
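For orientation, a short sketch of round-tripping between WebCodecs chunks and `EncodedPacket`, using only the members declared above; the `'key'` string is assumed to be a valid `PacketType` value.

```ts
declare const chunk: EncodedVideoChunk; // e.g. from a VideoEncoder output callback

// Wrap a WebCodecs chunk.
const packet = EncodedPacket.fromEncodedChunk(chunk);

packet.timestamp;            // seconds
packet.microsecondTimestamp; // microseconds, the unit WebCodecs uses

// Shift the packet 10 seconds later.
const shifted = packet.clone({ timestamp: packet.timestamp + 10 });

// Hand it back to a WebCodecs VideoDecoder.
const chunkAgain = shifted.toEncodedVideoChunk();

// Or build one from raw bytes: a keyframe at t = 0 lasting one 30 fps frame.
const manual = new EncodedPacket(new Uint8Array([/* bitstream bytes */]), 'key', 0, 1 / 30);
```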
1159
- /**
1160
- * Holds additional data accompanying an {@link EncodedPacket}.
1161
- * @group Packets
1162
- * @public
1163
- */
1164
- export declare type EncodedPacketSideData = {
1165
- /**
1166
- * An encoded alpha frame, encoded with the same codec as the packet. Typically used for transparent videos, where
1167
- * the alpha information is stored separately from the color information.
1168
- */
1169
- alpha?: Uint8Array;
1170
- /**
1171
- * The actual byte length of the alpha data. This field is useful for metadata-only packets where the
1172
- * `alpha` field contains no bytes.
1173
- */
1174
- alphaByteLength?: number;
1175
- };
1176
-
1177
- /**
1178
- * Sink for retrieving encoded packets from an input track.
1179
- * @group Media sinks
1180
- * @public
1181
- */
1182
- export declare class EncodedPacketSink {
1183
- /** Creates a new {@link EncodedPacketSink} for the given {@link InputTrack}. */
1184
- constructor(track: InputTrack);
1185
- /**
1186
- * Retrieves the track's first packet (in decode order), or null if it has no packets. The first packet is very
1187
- * likely to be a key packet.
1188
- */
1189
- getFirstPacket(options?: PacketRetrievalOptions): Promise<EncodedPacket | null>;
1190
- /**
1191
- * Retrieves the packet corresponding to the given timestamp, in seconds. More specifically, returns the last packet
1192
- * (in presentation order) with a start timestamp less than or equal to the given timestamp. This method can be
1193
- * used to retrieve a track's last packet using `getPacket(Infinity)`. The method returns null if the timestamp
1194
- * is before the first packet in the track.
1195
- *
1196
- * @param timestamp - The timestamp used for retrieval, in seconds.
1197
- */
1198
- getPacket(timestamp: number, options?: PacketRetrievalOptions): Promise<EncodedPacket | null>;
1199
- /**
1200
- * Retrieves the packet following the given packet (in decode order), or null if the given packet is the
1201
- * last packet.
1202
- */
1203
- getNextPacket(packet: EncodedPacket, options?: PacketRetrievalOptions): Promise<EncodedPacket | null>;
1204
- /**
1205
- * Retrieves the key packet corresponding to the given timestamp, in seconds. More specifically, returns the last
1206
- * key packet (in presentation order) with a start timestamp less than or equal to the given timestamp. A key packet
1207
- * is a packet that doesn't require previous packets to be decoded. This method can be used to retrieve a track's
1208
- * last key packet using `getKeyPacket(Infinity)`. The method returns null if the timestamp is before the first
1209
- * key packet in the track.
1210
- *
1211
- * To ensure that the returned packet is guaranteed to be a real key frame, enable `options.verifyKeyPackets`.
1212
- *
1213
- * @param timestamp - The timestamp used for retrieval, in seconds.
1214
- */
1215
- getKeyPacket(timestamp: number, options?: PacketRetrievalOptions): Promise<EncodedPacket | null>;
1216
- /**
1217
- * Retrieves the key packet following the given packet (in decode order), or null if the given packet is the last
1218
- * key packet.
1219
- *
1220
- * To ensure that the returned packet is guaranteed to be a real key frame, enable `options.verifyKeyPackets`.
1221
- */
1222
- getNextKeyPacket(packet: EncodedPacket, options?: PacketRetrievalOptions): Promise<EncodedPacket | null>;
1223
- /**
1224
- * Creates an async iterator that yields the packets in this track in decode order. To enable fast iteration, this
1225
- * method will intelligently preload packets based on the speed of the consumer.
1226
- *
1227
- * @param startPacket - (optional) The packet from which iteration should begin. This packet will also be yielded.
1228
- * @param endPacket - (optional) The packet at which iteration should end (exclusive); it will _not_ be yielded.
1229
- */
1230
- packets(startPacket?: EncodedPacket, endPacket?: EncodedPacket, options?: PacketRetrievalOptions): AsyncGenerator<EncodedPacket, void, unknown>;
1231
- }
1232
-
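A sketch of typical sink usage, given a `videoTrack` obtained from an `Input` (see the `Input` class further below). `verifyKeyPackets` and `metadataOnly` are the retrieval options mentioned in the docs; the rest of `PacketRetrievalOptions` is not shown in this excerpt.

```ts
declare const videoTrack: InputVideoTrack;

const sink = new EncodedPacketSink(videoTrack);

// Seek to the key packet at or before t = 10 s, then walk forward in decode order.
const start = await sink.getKeyPacket(10, { verifyKeyPackets: true });
for await (const packet of sink.packets(start ?? undefined)) {
  console.log(packet.timestamp.toFixed(3), packet.type, packet.byteLength);
}

// Cheap probe of the track's last packet without loading its data.
const last = await sink.getPacket(Infinity, { metadataOnly: true });
```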
1233
- /**
1234
- * The most basic video source; can be used to directly pipe encoded packets into the output file.
1235
- * @group Media sources
1236
- * @public
1237
- */
1238
- export declare class EncodedVideoPacketSource extends VideoSource {
1239
- /** Creates a new {@link EncodedVideoPacketSource} whose packets are encoded using `codec`. */
1240
- constructor(codec: VideoCodec);
1241
- /**
1242
- * Adds an encoded packet to the output video track. Packets must be added in *decode order*, while a packet's
1243
- * timestamp must be its *presentation timestamp*. B-frames are handled automatically.
1244
- *
1245
- * @param meta - Additional metadata from the encoder. You should pass this for the first call, including a valid
1246
- * decoder config.
1247
- *
1248
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
1249
- * to respect writer and encoder backpressure.
1250
- */
1251
- add(packet: EncodedPacket, meta?: EncodedVideoChunkMetadata): Promise<void>;
1252
- }
1253
-
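Combining the sink above with this source gives a hand-rolled stream copy (remux) without re-encoding. The sketch assumes an `output` built from the `Output`, format, and target classes elsewhere in this file, not yet started:

```ts
declare const videoTrack: InputVideoTrack;
declare const output: Output;

const source = new EncodedVideoPacketSource(videoTrack.codec!);
output.addVideoTrack(source);
await output.start();

const sink = new EncodedPacketSink(videoTrack);
const decoderConfig = (await videoTrack.getDecoderConfig())!;

let first = true;
for await (const packet of sink.packets()) {
  // Pass the decoder config with the first packet, as the docs above recommend.
  await source.add(packet, first ? { decoderConfig } : undefined);
  first = false;
}

source.close();          // optional, but frees resources earlier
await output.finalize();
```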
1254
- /**
1255
- * A source backed by a path to a file. Intended for server-side usage in Node, Bun, or Deno.
1256
- *
1257
- * Make sure to call `.dispose()` on the corresponding {@link Input} when done to explicitly free the internal file
1258
- * handle acquired by this source.
1259
- * @group Input sources
1260
- * @public
1261
- */
1262
- export declare class FilePathSource extends Source {
1263
- /** Creates a new {@link FilePathSource} backed by the file at the specified file path. */
1264
- constructor(filePath: string, options?: BlobSourceOptions);
1265
- }
1266
-
1267
- /**
1268
- * Options for {@link FilePathSource}.
1269
- * @group Input sources
1270
- * @public
1271
- */
1272
- export declare type FilePathSourceOptions = {
1273
- /** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 8 MiB. */
1274
- maxCacheSize?: number;
1275
- };
1276
-
1277
- /**
1278
- * A target that writes to a file at the specified path. Intended for server-side usage in Node, Bun, or Deno.
1279
- *
1280
- * Writing is chunked by default. The internally held file handle will be closed when `.finalize()` or `.cancel()` are
1281
- * called on the corresponding {@link Output}.
1282
- * @group Output targets
1283
- * @public
1284
- */
1285
- export declare class FilePathTarget extends Target {
1286
- /** Creates a new {@link FilePathTarget} that writes to the file at the specified file path. */
1287
- constructor(filePath: string, options?: FilePathTargetOptions);
1288
- }
1289
-
1290
- /**
1291
- * Options for {@link FilePathTarget}.
1292
- * @group Output targets
1293
- * @public
1294
- */
1295
- export declare type FilePathTargetOptions = StreamTargetOptions;
1296
-
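A server-side sketch tying the file-path source and target together. The `{ format, target }` shape of `OutputOptions` is inferred from the `Output` class's fields and is an assumption of this sketch.

```ts
// Read from one file...
const input = new Input({
  formats: [MP4, MATROSKA], // only accept the formats we expect
  source: new FilePathSource('./in.mp4'),
});

// ...and write to another.
const output = new Output({
  format: new Mp4OutputFormat(),
  target: new FilePathTarget('./out.mp4'),
});

// ...add tracks, start the output, write samples, then:
await output.finalize(); // closes the target's file handle
input.dispose();         // closes the source's file handle
```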
1297
- /**
1298
- * FLAC input format singleton.
1299
- * @group Input formats
1300
- * @public
1301
- */
1302
- export declare const FLAC: FlacInputFormat;
1303
-
1304
- /**
1305
- * FLAC file format.
1306
- *
1307
- * Do not instantiate this class; use the {@link FLAC} singleton instead.
1308
- *
1309
- * @group Input formats
1310
- * @public
1311
- */
1312
- export declare class FlacInputFormat extends InputFormat {
1313
- get name(): string;
1314
- get mimeType(): string;
1315
- }
1316
-
1317
- /**
1318
- * FLAC file format.
1319
- * @group Output formats
1320
- * @public
1321
- */
1322
- export declare class FlacOutputFormat extends OutputFormat {
1323
- /** Creates a new {@link FlacOutputFormat} configured with the specified `options`. */
1324
- constructor(options?: FlacOutputFormatOptions);
1325
- getSupportedTrackCounts(): TrackCountLimits;
1326
- get fileExtension(): string;
1327
- get mimeType(): string;
1328
- getSupportedCodecs(): MediaCodec[];
1329
- get supportsVideoRotationMetadata(): boolean;
1330
- }
1331
-
1332
- /**
1333
- * FLAC-specific output options.
1334
- * @group Output formats
1335
- * @public
1336
- */
1337
- export declare type FlacOutputFormatOptions = {
1338
- /**
1339
- * Will be called for each FLAC frame that is written.
1340
- *
1341
- * @param data - The raw bytes.
1342
- * @param position - The byte offset of the data in the file.
1343
- */
1344
- onFrame?: (data: Uint8Array, position: number) => unknown;
1345
- };
1346
-
1347
- /**
1348
- * Formats seconds to ASS/SSA timestamp format (H:MM:SS.cc).
1349
- * @group Media sources
1350
- * @public
1351
- */
1352
- export declare const formatAssTimestamp: (seconds: number) => string;
1353
-
1354
- /**
1355
- * Formats subtitle cues back to ASS/SSA text format with header.
1356
- * Properly inserts Dialogue/Comment lines within the [Events] section.
1357
- * @group Media sources
1358
- * @public
1359
- */
1360
- export declare const formatCuesToAss: (cues: SubtitleCue[], header: string) => string;
1361
-
1362
- /**
1363
- * Formats subtitle cues back to SRT text format.
1364
- * @group Media sources
1365
- * @public
1366
- */
1367
- export declare const formatCuesToSrt: (cues: SubtitleCue[]) => string;
1368
-
1369
- /**
1370
- * Formats subtitle cues back to WebVTT text format.
1371
- * @group Media sources
1372
- * @public
1373
- */
1374
- export declare const formatCuesToWebVTT: (cues: SubtitleCue[], preamble?: string) => string;
1375
-
1376
- /**
1377
- * Formats seconds to SRT timestamp format (HH:MM:SS,mmm).
1378
- * @group Media sources
1379
- * @public
1380
- */
1381
- export declare const formatSrtTimestamp: (seconds: number) => string;
1382
-
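The formatter outputs follow the patterns given in their descriptions, for example:

```ts
formatSrtTimestamp(3723.5); // '01:02:03,500'  (HH:MM:SS,mmm)
formatAssTimestamp(3723.5); // '1:02:03.50'    (H:MM:SS.cc)
```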
1383
- /**
1384
- * Returns the list of all audio codecs that can be encoded by the browser.
1385
- * @group Encoding
1386
- * @public
1387
- */
1388
- export declare const getEncodableAudioCodecs: (checkedCodecs?: AudioCodec[], options?: {
1389
- numberOfChannels?: number;
1390
- sampleRate?: number;
1391
- bitrate?: number | Quality;
1392
- }) => Promise<AudioCodec[]>;
1393
-
1394
- /**
1395
- * Returns the list of all media codecs that can be encoded by the browser.
1396
- * @group Encoding
1397
- * @public
1398
- */
1399
- export declare const getEncodableCodecs: () => Promise<MediaCodec[]>;
1400
-
1401
- /**
1402
- * Returns the list of all subtitle codecs that can be encoded by the browser.
1403
- * @group Encoding
1404
- * @public
1405
- */
1406
- export declare const getEncodableSubtitleCodecs: (checkedCodecs?: SubtitleCodec[]) => Promise<SubtitleCodec[]>;
1407
-
1408
- /**
1409
- * Returns the list of all video codecs that can be encoded by the browser.
1410
- * @group Encoding
1411
- * @public
1412
- */
1413
- export declare const getEncodableVideoCodecs: (checkedCodecs?: VideoCodec[], options?: {
1414
- width?: number;
1415
- height?: number;
1416
- bitrate?: number | Quality;
1417
- }) => Promise<VideoCodec[]>;
1418
-
1419
- /**
1420
- * Returns the first audio codec from the given list that can be encoded by the browser.
1421
- * @group Encoding
1422
- * @public
1423
- */
1424
- export declare const getFirstEncodableAudioCodec: (checkedCodecs: AudioCodec[], options?: {
1425
- numberOfChannels?: number;
1426
- sampleRate?: number;
1427
- bitrate?: number | Quality;
1428
- }) => Promise<AudioCodec | null>;
1429
-
1430
- /**
1431
- * Returns the first subtitle codec from the given list that can be encoded by the browser.
1432
- * @group Encoding
1433
- * @public
1434
- */
1435
- export declare const getFirstEncodableSubtitleCodec: (checkedCodecs: SubtitleCodec[]) => Promise<SubtitleCodec | null>;
1436
-
1437
- /**
1438
- * Returns the first video codec from the given list that can be encoded by the browser.
1439
- * @group Encoding
1440
- * @public
1441
- */
1442
- export declare const getFirstEncodableVideoCodec: (checkedCodecs: VideoCodec[], options?: {
1443
- width?: number;
1444
- height?: number;
1445
- bitrate?: number | Quality;
1446
- }) => Promise<VideoCodec | null>;
1447
-
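A small negotiation sketch using these helpers. The video codec identifiers (`'av1'`, `'vp9'`, `'avc'`) are assumed `VideoCodec` values; the audio names appear in `NON_PCM_AUDIO_CODECS` further below.

```ts
const videoCodec = await getFirstEncodableVideoCodec(['av1', 'vp9', 'avc'], {
  width: 1920,
  height: 1080,
});
const audioCodec = await getFirstEncodableAudioCodec(['opus', 'aac'], {
  numberOfChannels: 2,
  sampleRate: 48000,
});

if (!videoCodec || !audioCodec) {
  throw new Error('This environment cannot encode any of the preferred codecs.');
}
```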
1448
- /**
1449
- * Specifies an inclusive range of integers.
1450
- * @group Miscellaneous
1451
- * @public
1452
- */
1453
- export declare type InclusiveIntegerRange = {
1454
- /** The integer cannot be less than this. */
1455
- min: number;
1456
- /** The integer cannot be greater than this. */
1457
- max: number;
1458
- };
1459
-
1460
- /**
1461
- * Represents an input media file. This is the root object from which all media read operations start.
1462
- * @group Input files & tracks
1463
- * @public
1464
- */
1465
- export declare class Input<S extends Source = Source> implements Disposable {
1466
- /** True if the input has been disposed. */
1467
- get disposed(): boolean;
1468
- /**
1469
- * Creates a new input file from the specified options. No reading operations will be performed until methods are
1470
- * called on this instance.
1471
- */
1472
- constructor(options: InputOptions<S>);
1473
- /**
1474
- * Returns the source from which this input file reads its data. This is the same source that was passed to the
1475
- * constructor.
1476
- */
1477
- get source(): S;
1478
- /**
1479
- * Returns the format of the input file. You can compare this result directly to the {@link InputFormat} singletons
1480
- * or use `instanceof` checks for subset-aware logic (for example, `format instanceof MatroskaInputFormat` is true
1481
- * for both MKV and WebM).
1482
- */
1483
- getFormat(): Promise<InputFormat>;
1484
- /**
1485
- * Computes the duration of the input file, in seconds. More precisely, returns the largest end timestamp among
1486
- * all tracks.
1487
- */
1488
- computeDuration(): Promise<number>;
1489
- /** Returns the list of all tracks of this input file. */
1490
- getTracks(): Promise<InputTrack[]>;
1491
- /** Returns the list of all video tracks of this input file. */
1492
- getVideoTracks(): Promise<InputVideoTrack[]>;
1493
- /** Returns the list of all audio tracks of this input file. */
1494
- getAudioTracks(): Promise<InputAudioTrack[]>;
1495
- /** Returns the list of all subtitle tracks of this input file. */
1496
- getSubtitleTracks(): Promise<InputSubtitleTrack[]>;
1497
- /** Returns the primary video track of this input file, or null if there are no video tracks. */
1498
- getPrimaryVideoTrack(): Promise<InputVideoTrack | null>;
1499
- /** Returns the primary audio track of this input file, or null if there are no audio tracks. */
1500
- getPrimaryAudioTrack(): Promise<InputAudioTrack | null>;
1501
- /**
1502
- * Returns the list of all subtitle tracks of this input file. This is a convenience property that calls
1503
- * {@link Input.getSubtitleTracks} and caches the result. Note that this property is a promise!
1504
- */
1505
- get subtitleTracks(): Promise<InputSubtitleTrack[]>;
1506
- /**
1507
- * Returns the list of all video tracks of this input file. This is a convenience property that calls
1508
- * {@link Input.getVideoTracks} and caches the result. Note that this property is a promise!
1509
- */
1510
- get videoTracks(): Promise<InputVideoTrack[]>;
1511
- /**
1512
- * Returns the list of all audio tracks of this input file. This is a convenience property that calls
1513
- * {@link Input.getAudioTracks} and caches the result. Note that this property is a promise!
1514
- */
1515
- get audioTracks(): Promise<InputAudioTrack[]>;
1516
- /** Returns the full MIME type of this input file, including track codecs. */
1517
- getMimeType(): Promise<string>;
1518
- /**
1519
- * Returns descriptive metadata tags about the media file, such as title, author, date, cover art, or other
1520
- * attached files.
1521
- */
1522
- getMetadataTags(): Promise<MetadataTags>;
1523
- /**
1524
- * Disposes this input and frees connected resources. When an input is disposed, ongoing read operations will be
1525
- * canceled, all future read operations will fail, any open decoders will be closed, and all ongoing media sink
1526
- * operations will be canceled. Disallowed and canceled operations will throw an {@link InputDisposedError}.
1527
- *
1528
- * You are expected not to use an input after disposing it. While some operations may still work, it is not
1529
- * specified and may change in any future update.
1530
- */
1531
- dispose(): void;
1532
- /**
1533
- * Calls `.dispose()` on the input, implementing the `Disposable` interface for use with
1534
- * JavaScript Explicit Resource Management features.
1535
- */
1536
- [Symbol.dispose](): void;
1537
- }
1538
-
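Typical read-side usage of `Input`, using only members declared in this excerpt (the format singletons and `FilePathSource` appear above):

```ts
const input = new Input({
  formats: [MP4, MATROSKA],
  source: new FilePathSource('./movie.mkv'),
});

console.log((await input.getFormat()).name);
console.log('duration:', await input.computeDuration(), 's');

const video = await input.getPrimaryVideoTrack();
if (video) {
  console.log(`${video.displayWidth}x${video.displayHeight}, rotation ${video.rotation}`);
}

const tags = await input.getMetadataTags();
console.log(tags.title ?? '(untitled)');

input.dispose(); // or rely on `using input = ...` via Symbol.dispose
```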
1539
- /**
1540
- * Represents an audio track in an input file.
1541
- * @group Input files & tracks
1542
- * @public
1543
- */
1544
- export declare class InputAudioTrack extends InputTrack {
1545
- get type(): TrackType;
1546
- get codec(): AudioCodec | null;
1547
- /** The number of audio channels in the track. */
1548
- get numberOfChannels(): number;
1549
- /** The track's audio sample rate in hertz. */
1550
- get sampleRate(): number;
1551
- /**
1552
- * Returns the [decoder configuration](https://www.w3.org/TR/webcodecs/#audio-decoder-config) for decoding the
1553
- * track's packets using an [`AudioDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/AudioDecoder). Returns
1554
- * null if the track's codec is unknown.
1555
- */
1556
- getDecoderConfig(): Promise<AudioDecoderConfig | null>;
1557
- getCodecParameterString(): Promise<string | null>;
1558
- canDecode(): Promise<boolean>;
1559
- determinePacketType(packet: EncodedPacket): Promise<PacketType | null>;
1560
- }
1561
-
1562
- /**
1563
- * Thrown when an operation was prevented because the corresponding {@link Input} has been disposed.
1564
- * @group Input files & tracks
1565
- * @public
1566
- */
1567
- export declare class InputDisposedError extends Error {
1568
- /** Creates a new {@link InputDisposedError}. */
1569
- constructor(message?: string);
1570
- }
1571
-
1572
- /**
1573
- * Base class representing an input media file format.
1574
- * @group Input formats
1575
- * @public
1576
- */
1577
- export declare abstract class InputFormat {
1578
- /** Returns the name of the input format. */
1579
- abstract get name(): string;
1580
- /** Returns the typical base MIME type of the input format. */
1581
- abstract get mimeType(): string;
1582
- }
1583
-
1584
- /**
1585
- * The options for creating an Input object.
1586
- * @group Input files & tracks
1587
- * @public
1588
- */
1589
- export declare type InputOptions<S extends Source = Source> = {
1590
- /** A list of supported formats. If the source file is not of one of these formats, then it cannot be read. */
1591
- formats: InputFormat[];
1592
- /** The source from which data will be read. */
1593
- source: S;
1594
- };
1595
-
1596
- /**
1597
- * Represents a subtitle track in an input file.
1598
- * @group Input files & tracks
1599
- * @public
1600
- */
1601
- export declare class InputSubtitleTrack extends InputTrack {
1602
- get type(): TrackType;
1603
- get codec(): SubtitleCodec | null;
1604
- /**
1605
- * Returns an async iterator that yields all subtitle cues in this track.
1606
- */
1607
- getCues(): AsyncGenerator<SubtitleCue>;
1608
- /**
1609
- * Exports all subtitle cues to text format. If targetFormat is specified,
1610
- * attempts to convert to that format (limited conversion support).
1611
- */
1612
- exportToText(targetFormat?: SubtitleCodec): Promise<string>;
1613
- getCodecParameterString(): Promise<string | null>;
1614
- canDecode(): Promise<boolean>;
1615
- determinePacketType(packet: EncodedPacket): Promise<PacketType | null>;
1616
- }
1617
-
1618
- /**
1619
- * Represents a media track in an input file.
1620
- * @group Input files & tracks
1621
- * @public
1622
- */
1623
- export declare abstract class InputTrack {
1624
- /** The input file this track belongs to. */
1625
- readonly input: Input;
1626
- /** The type of the track. */
1627
- abstract get type(): TrackType;
1628
- /** The codec of the track's packets. */
1629
- abstract get codec(): MediaCodec | null;
1630
- /** Returns the full codec parameter string for this track. */
1631
- abstract getCodecParameterString(): Promise<string | null>;
1632
- /** Checks if this track's packets can be decoded by the browser. */
1633
- abstract canDecode(): Promise<boolean>;
1634
- /**
1635
- * For a given packet of this track, this method determines the actual type of this packet (key/delta) by looking
1636
- * into its bitstream. Returns null if the type couldn't be determined.
1637
- */
1638
- abstract determinePacketType(packet: EncodedPacket): Promise<PacketType | null>;
1639
- /** Returns true if and only if this track is a video track. */
1640
- isVideoTrack(): this is InputVideoTrack;
1641
- /** Returns true if and only if this track is an audio track. */
1642
- isAudioTrack(): this is InputAudioTrack;
1643
- /** Returns true if and only if this track is a subtitle track. */
1644
- isSubtitleTrack(): this is InputSubtitleTrack;
1645
- /** The unique ID of this track in the input file. */
1646
- get id(): number;
1647
- /**
1648
- * The identifier of the codec used internally by the container. It is not homogenized by Mediabunny
1649
- * and depends entirely on the container format.
1650
- *
1651
- * This field can be used to determine the codec of a track in case Mediabunny doesn't know that codec.
1652
- *
1653
- * - For ISOBMFF files, this field returns the name of the Sample Description Box (e.g. `'avc1'`).
1654
- * - For Matroska files, this field returns the value of the `CodecID` element.
1655
- * - For WAVE files, this field returns the value of the format tag in the `'fmt '` chunk.
1656
- * - For ADTS files, this field contains the `MPEG-4 Audio Object Type`.
1657
- * - In all other cases, this field is `null`.
1658
- */
1659
- get internalCodecId(): string | number | Uint8Array<ArrayBufferLike> | null;
1660
- /**
1661
- * The ISO 639-2/T language code for this track. If the language is unknown, this field is `'und'` (undetermined).
1662
- */
1663
- get languageCode(): string;
1664
- /** A user-defined name for this track. */
1665
- get name(): string | null;
1666
- /**
1667
- * A positive number x such that all timestamps and durations of all packets of this track are
1668
- * integer multiples of 1/x.
1669
- */
1670
- get timeResolution(): number;
1671
- /** The track's disposition, i.e. information about its intended usage. */
1672
- get disposition(): TrackDisposition;
1673
- /**
1674
- * Returns the start timestamp of the first packet of this track, in seconds. While often near zero, this value
1675
- * may be positive or even negative. A negative starting timestamp means the track's timing has been offset. Samples
1676
- * with a negative timestamp should not be presented.
1677
- */
1678
- getFirstTimestamp(): Promise<number>;
1679
- /** Returns the end timestamp of the last packet of this track, in seconds. */
1680
- computeDuration(): Promise<number>;
1681
- /**
1682
- * Computes aggregate packet statistics for this track, such as average packet rate or bitrate.
1683
- *
1684
- * @param targetPacketCount - This optional parameter sets a target for how many packets this method must have
1685
- * looked at before it can return early; this means, you can use it to aggregate only a subset (prefix) of all
1686
- * packets. This is very useful for getting a great estimate of video frame rate without having to scan through the
1687
- * entire file.
1688
- */
1689
- computePacketStats(targetPacketCount?: number): Promise<PacketStats>;
1690
- }
1691
-
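As the docs suggest, calling `computePacketStats` with a small target count gives a cheap frame-rate estimate. The `averagePacketRate` and `averageBitrate` field names are assumptions, since `PacketStats` itself is not shown in this excerpt.

```ts
declare const videoTrack: InputVideoTrack;

// Look at roughly the first 200 packets instead of scanning the whole file.
const stats = await videoTrack.computePacketStats(200);
console.log('~fps:', stats.averagePacketRate, '~bitrate:', stats.averageBitrate);
```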
1692
- /**
1693
- * Represents a video track in an input file.
1694
- * @group Input files & tracks
1695
- * @public
1696
- */
1697
- export declare class InputVideoTrack extends InputTrack {
1698
- get type(): TrackType;
1699
- get codec(): VideoCodec | null;
1700
- /** The width in pixels of the track's coded samples, before any transformations or rotations. */
1701
- get codedWidth(): number;
1702
- /** The height in pixels of the track's coded samples, before any transformations or rotations. */
1703
- get codedHeight(): number;
1704
- /** The angle in degrees by which the track's frames should be rotated (clockwise). */
1705
- get rotation(): Rotation;
1706
- /** The width in pixels of the track's frames after rotation. */
1707
- get displayWidth(): number;
1708
- /** The height in pixels of the track's frames after rotation. */
1709
- get displayHeight(): number;
1710
- /** Returns the color space of the track's samples. */
1711
- getColorSpace(): Promise<VideoColorSpaceInit>;
1712
- /** If this method returns true, the track's samples use a high dynamic range (HDR). */
1713
- hasHighDynamicRange(): Promise<boolean>;
1714
- /** Checks if this track may contain transparent samples with alpha data. */
1715
- canBeTransparent(): Promise<boolean>;
1716
- /**
1717
- * Returns the [decoder configuration](https://www.w3.org/TR/webcodecs/#video-decoder-config) for decoding the
1718
- * track's packets using a [`VideoDecoder`](https://developer.mozilla.org/en-US/docs/Web/API/VideoDecoder). Returns
1719
- * null if the track's codec is unknown.
1720
- */
1721
- getDecoderConfig(): Promise<VideoDecoderConfig | null>;
1722
- getCodecParameterString(): Promise<string | null>;
1723
- canDecode(): Promise<boolean>;
1724
- determinePacketType(packet: EncodedPacket): Promise<PacketType | null>;
1725
- }
1726
-
1727
- /**
1728
- * Format representing files compatible with the ISO base media file format (ISOBMFF), like MP4 or MOV files.
1729
- * @group Input formats
1730
- * @public
1731
- */
1732
- export declare abstract class IsobmffInputFormat extends InputFormat {
1733
- }
1734
-
1735
- /**
1736
- * Format representing files compatible with the ISO base media file format (ISOBMFF), like MP4 or MOV files.
1737
- * @group Output formats
1738
- * @public
1739
- */
1740
- export declare abstract class IsobmffOutputFormat extends OutputFormat {
1741
- /** Internal constructor. */
1742
- constructor(options?: IsobmffOutputFormatOptions);
1743
- getSupportedTrackCounts(): TrackCountLimits;
1744
- get supportsVideoRotationMetadata(): boolean;
1745
- }
1746
-
1747
- /**
1748
- * ISOBMFF-specific output options.
1749
- * @group Output formats
1750
- * @public
1751
- */
1752
- export declare type IsobmffOutputFormatOptions = {
1753
- /**
1754
- * Controls the placement of metadata in the file. Placing metadata at the start of the file is known as "Fast
1755
- * Start", which results in better playback at the cost of more required processing or memory.
1756
- *
1757
- * Use `false` to disable Fast Start, placing the metadata at the end of the file. Fastest and uses the least
1758
- * memory.
1759
- *
1760
- * Use `'in-memory'` to produce a file with Fast Start by keeping all media chunks in memory until the file is
1761
- * finalized. This produces a high-quality and compact output at the cost of a more expensive finalization step and
1762
- * higher memory requirements. Data will be written monotonically (in order) when this option is set.
1763
- *
1764
- * Use `'reserve'` to reserve space at the start of the file into which the metadata will be written later. This
1765
- * produces a file with Fast Start but requires knowledge about the expected length of the file beforehand. When
1766
- * using this option, you must set the {@link BaseTrackMetadata.maximumPacketCount} field in the track metadata
1767
- * for all tracks.
1768
- *
1769
- * Use `'fragmented'` to place metadata at the start of the file by creating a fragmented file (fMP4). In a
1770
- * fragmented file, chunks of media and their metadata are written to the file in "fragments", eliminating the need
1771
- * to put all metadata in one place. Fragmented files are useful for streaming contexts, as each fragment can be
1772
- * played individually without requiring knowledge of the other fragments. Furthermore, they remain lightweight to
1773
- * create even for very large files, as they don't require all media to be kept in memory. However, fragmented files
1774
- * are not as widely and wholly supported as regular MP4/MOV files. Data will be written monotonically (in order)
1775
- * when this option is set.
1776
- *
1777
- * When this field is not defined, either `false` or `'in-memory'` will be used, automatically determined based on
1778
- * the type of output target used.
1779
- */
1780
- fastStart?: false | 'in-memory' | 'reserve' | 'fragmented';
1781
- /**
1782
- * When using `fastStart: 'fragmented'`, this field controls the minimum duration of each fragment, in seconds.
1783
- * New fragments will only be created when the current fragment is longer than this value. Defaults to 1 second.
1784
- */
1785
- minimumFragmentDuration?: number;
1786
- /**
1787
- * The metadata format to use for writing metadata tags.
1788
- *
1789
- * - `'auto'` (default): Behaves like `'mdir'` for MP4 and like `'udta'` for QuickTime, matching FFmpeg's default
1790
- * behavior.
1791
- * - `'mdir'`: Write tags into `moov/udta/meta` using the 'mdir' handler format.
1792
- * - `'mdta'`: Write tags into `moov/udta/meta` using the 'mdta' handler format, equivalent to FFmpeg's
1793
- * `use_metadata_tags` flag. This allows for custom keys of arbitrary length.
1794
- * - `'udta'`: Write tags directly into `moov/udta`.
1795
- */
1796
- metadataFormat?: 'auto' | 'mdir' | 'mdta' | 'udta';
1797
- /**
1798
- * Will be called once the ftyp (File Type) box of the output file has been written.
1799
- *
1800
- * @param data - The raw bytes.
1801
- * @param position - The byte offset of the data in the file.
1802
- */
1803
- onFtyp?: (data: Uint8Array, position: number) => unknown;
1804
- /**
1805
- * Will be called once the moov (Movie) box of the output file has been written.
1806
- *
1807
- * @param data - The raw bytes.
1808
- * @param position - The byte offset of the data in the file.
1809
- */
1810
- onMoov?: (data: Uint8Array, position: number) => unknown;
1811
- /**
1812
- * Will be called for each finalized mdat (Media Data) box of the output file. Usage of this callback is not
1813
- * recommended when not using `fastStart: 'fragmented'`, as there will be one monolithic mdat box which might
1814
- * require large amounts of memory.
1815
- *
1816
- * @param data - The raw bytes.
1817
- * @param position - The byte offset of the data in the file.
1818
- */
1819
- onMdat?: (data: Uint8Array, position: number) => unknown;
1820
- /**
1821
- * Will be called for each finalized moof (Movie Fragment) box of the output file.
1822
- *
1823
- * @param data - The raw bytes.
1824
- * @param position - The byte offset of the data in the file.
1825
- * @param timestamp - The start timestamp of the fragment in seconds.
1826
- */
1827
- onMoof?: (data: Uint8Array, position: number, timestamp: number) => unknown;
1828
- };
1829
-
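A streaming-oriented configuration sketch: fragmented fast start with the fragment callbacks documented above. `pushToViewer` is a hypothetical consumer standing in for a WebSocket, an MSE `SourceBuffer`, or similar.

```ts
declare function pushToViewer(...parts: unknown[]): void; // hypothetical consumer

const format = new Mp4OutputFormat({
  fastStart: 'fragmented',
  minimumFragmentDuration: 2, // seconds per fragment, at minimum
  onFtyp: (data, position) => pushToViewer(data, position),
  onMoov: (data, position) => pushToViewer(data, position),
  onMoof: (data, position, timestamp) => pushToViewer(data, position, timestamp),
  onMdat: (data, position) => pushToViewer(data, position),
});

// All data leaves through the callbacks, so nothing needs to be written to disk.
const output = new Output({ format, target: new NullTarget() });
```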
1830
- /**
1831
- * Matroska input format singleton.
1832
- * @group Input formats
1833
- * @public
1834
- */
1835
- export declare const MATROSKA: MatroskaInputFormat;
1836
-
1837
- /**
1838
- * Matroska file format.
1839
- *
1840
- * Do not instantiate this class; use the {@link MATROSKA} singleton instead.
1841
- *
1842
- * @group Input formats
1843
- * @public
1844
- */
1845
- export declare class MatroskaInputFormat extends InputFormat {
1846
- get name(): string;
1847
- get mimeType(): string;
1848
- }
1849
-
1850
- /**
1851
- * T or a promise that resolves to T.
1852
- * @group Miscellaneous
1853
- * @public
1854
- */
1855
- export declare type MaybePromise<T> = T | Promise<T>;
1856
-
1857
- /**
1858
- * Union type of known media codecs.
1859
- * @group Codecs
1860
- * @public
1861
- */
1862
- export declare type MediaCodec = VideoCodec | AudioCodec | SubtitleCodec;
1863
-
1864
- /**
1865
- * Base class for media sources. Media sources are used to add media samples to an output file.
1866
- * @group Media sources
1867
- * @public
1868
- */
1869
- declare abstract class MediaSource_2 {
1870
- /**
1871
- * Closes this source. This prevents future samples from being added and signals to the output file that no further
1872
- * samples will come in for this track. Calling `.close()` is optional but recommended after adding the
1873
- * last sample, as it improves performance and reduces memory usage.
1874
- */
1875
- close(): void;
1876
- }
1877
- export { MediaSource_2 as MediaSource }
1878
-
1879
- /**
1880
- * Audio source that encodes the data of a
1881
- * [`MediaStreamAudioTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack) and pipes it into the
1882
- * output. This is useful for capturing live or real-time audio such as microphones or audio from other media elements.
1883
- * Audio will automatically start being captured once the connected {@link Output} is started, and will keep being
1884
- * captured until the {@link Output} is finalized or this source is closed.
1885
- * @group Media sources
1886
- * @public
1887
- */
1888
- export declare class MediaStreamAudioTrackSource extends AudioSource {
1889
- /** A promise that rejects upon any error within this source. This promise never resolves. */
1890
- get errorPromise(): Promise<void>;
1891
- /**
1892
- * Creates a new {@link MediaStreamAudioTrackSource} from a `MediaStreamAudioTrack`, which will pull audio samples
1893
- * from the stream in real time and encode them according to {@link AudioEncodingConfig}.
1894
- */
1895
- constructor(track: MediaStreamAudioTrack, encodingConfig: AudioEncodingConfig);
1896
- }
1897
-
1898
- /**
1899
- * Video source that encodes the frames of a
1900
- * [`MediaStreamVideoTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack) and pipes them into the
1901
- * output. This is useful for capturing live or real-time data such as webcams or screen captures. Frames will
1902
- * automatically start being captured once the connected {@link Output} is started, and will keep being captured until
1903
- * the {@link Output} is finalized or this source is closed.
1904
- * @group Media sources
1905
- * @public
1906
- */
1907
- export declare class MediaStreamVideoTrackSource extends VideoSource {
1908
- /** A promise that rejects upon any error within this source. This promise never resolves. */
1909
- get errorPromise(): Promise<void>;
1910
- /**
1911
- * Creates a new {@link MediaStreamVideoTrackSource} from a
1912
- * [`MediaStreamVideoTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack), which will pull
1913
- * video samples from the stream in real time and encode them according to {@link VideoEncodingConfig}.
1914
- */
1915
- constructor(track: MediaStreamVideoTrack, encodingConfig: VideoEncodingConfig);
1916
- }
1917
-
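A capture sketch for the two MediaStream-backed sources, given an `Output` that has not been started yet. The `{ codec, bitrate }` shape of the encoding configs is an assumption about `VideoEncodingConfig`/`AudioEncodingConfig`, which are not part of this excerpt.

```ts
declare const output: Output;

const stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: true });

const videoSource = new MediaStreamVideoTrackSource(stream.getVideoTracks()[0], {
  codec: 'avc',        // assumed VideoCodec value
  bitrate: 3_000_000,
});
const audioSource = new MediaStreamAudioTrackSource(stream.getAudioTracks()[0], {
  codec: 'opus',
  bitrate: 128_000,
});

// Surface capture errors somewhere visible; these promises never resolve.
videoSource.errorPromise.catch(console.error);
audioSource.errorPromise.catch(console.error);

output.addVideoTrack(videoSource);
output.addAudioTrack(audioSource);
await output.start();     // capturing begins here
// ...record for a while...
await output.finalize();  // capturing stops and the file is finished
```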
1918
- /**
1919
- * Represents descriptive (non-technical) metadata about a media file, such as title, author, date, cover art, or other
1920
- * attached files. Common tags are normalized by Mediabunny into a uniform format, while the `raw` field can be used to
1921
- * directly read or write the underlying metadata tags (which differ by format).
1922
- *
1923
- * - For MP4/QuickTime files, the metadata refers to the data in `'moov'`-level `'udta'` and `'meta'` atoms.
1924
- * - For WebM/Matroska files, the metadata refers to the Tags and Attachments elements whose target is 50 (MOVIE).
1925
- * - For MP3 files, the metadata refers to the ID3v2 or ID3v1 tags.
1926
- * - For Ogg files, there is no global metadata; instead, the metadata refers to the combined metadata of all tracks,
1927
- * in Vorbis-style comment headers.
1928
- * - For WAVE files, the metadata refers to the chunks within the RIFF INFO chunk.
1929
- * - For ADTS files, there is no metadata.
1930
- * - For FLAC files, the metadata lives in the Vorbis comment block, as Vorbis-style key-value pairs.
1931
- *
1932
- * @group Metadata tags
1933
- * @public
1934
- */
1935
- export declare type MetadataTags = {
1936
- /** Title of the media (e.g. Gangnam Style, Titanic, etc.) */
1937
- title?: string;
1938
- /** Short description or subtitle of the media. */
1939
- description?: string;
1940
- /** Primary artist(s) or creator(s) of the work. */
1941
- artist?: string;
1942
- /** Album, collection, or compilation the media belongs to. */
1943
- album?: string;
1944
- /** Main credited artist for the album/collection as a whole. */
1945
- albumArtist?: string;
1946
- /** Position of this track within its album or collection (1-based). */
1947
- trackNumber?: number;
1948
- /** Total number of tracks in the album or collection. */
1949
- tracksTotal?: number;
1950
- /** Disc index if the release spans multiple discs (1-based). */
1951
- discNumber?: number;
1952
- /** Total number of discs in the release. */
1953
- discsTotal?: number;
1954
- /** Genre or category describing the media's style or content (e.g. Metal, Horror, etc.) */
1955
- genre?: string;
1956
- /** Release, recording or creation date of the media. */
1957
- date?: Date;
1958
- /** Full text lyrics or transcript associated with the media. */
1959
- lyrics?: string;
1960
- /** Freeform notes, remarks or commentary about the media. */
1961
- comment?: string;
1962
- /** Embedded images such as cover art, booklet scans, artwork or preview frames. */
1963
- images?: AttachedImage[];
1964
- /**
1965
- * The raw, underlying metadata tags.
1966
- *
1967
- * This field can be used for both reading and writing. When reading, it represents the original tags that were used
1968
- * to derive the normalized fields, and any additional metadata that Mediabunny doesn't understand. When writing, it
1969
- * can be used to set arbitrary metadata tags in the output file.
1970
- *
1971
- * The format of these tags differs per format:
1972
- * - MP4/QuickTime: By default, the keys refer to the names of the individual atoms in the `'ilst'` atom inside the
1973
- * `'meta'` atom, and the values are derived from the content of the `'data'` atom inside them. When a `'keys'` atom
1974
- * is also used, then the keys reflect the keys specified there (such as `'com.apple.quicktime.version'`).
1975
- * Additionally, any atoms within the `'udta'` atom are dumped into here, however with unknown internal format
1976
- * (`Uint8Array`).
1977
- * - WebM/Matroska: `SimpleTag` elements whose target is 50 (MOVIE), either containing string or `Uint8Array`
1978
- * values. Additionally, all attached files (such as font files) are included here, where the key corresponds to
1979
- * the FileUID and the value is an {@link AttachedFile}.
1980
- * - MP3: The ID3v2 tags, or a single `'TAG'` key with the contents of the ID3v1 tag.
1981
- * - Ogg: The key-value string pairs from the Vorbis-style comment header (see RFC 7845, Section 5.2).
1982
- * Additionally, the `'vendor'` key refers to the vendor string within this header.
1983
- * - WAVE: The individual metadata chunks within the RIFF INFO chunk. Values are always ISO 8859-1 strings.
1984
- * - FLAC: The key-value string pairs from the Vorbis comment metadata block (see RFC 9639, Section D.2.3).
1985
- * Additionally, the `'vendor'` key refers to the vendor string within this block.
1986
- */
1987
- raw?: Record<string, string | Uint8Array | RichImageData | AttachedFile | null>;
1988
- };
1989
-
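Setting the normalized fields plus one raw, format-specific tag on an output that has not been started yet. The `'©too'` atom name is illustrative for MP4 output; raw keys are format-specific, as described above.

```ts
declare const output: Output;

output.setMetadataTags({
  title: 'Field Recording #12',
  artist: 'Jane Doe',
  album: 'Sketches',
  trackNumber: 3,
  tracksTotal: 10,
  date: new Date('2024-05-01'),
  comment: 'Muxed with Mediabunny',
  raw: {
    '©too': 'My Encoder v1.0', // MP4 ilst atom; keys differ per container format
  },
});
```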
1990
- /**
1991
- * Matroska file format.
1992
- *
1993
- * Supports writing transparent video. For a video track to be marked as transparent, the first packet added must
1994
- * contain alpha side data.
1995
- *
1996
- * @group Output formats
1997
- * @public
1998
- */
1999
- export declare class MkvOutputFormat extends OutputFormat {
2000
- /** Creates a new {@link MkvOutputFormat} configured with the specified `options`. */
2001
- constructor(options?: MkvOutputFormatOptions);
2002
- getSupportedTrackCounts(): TrackCountLimits;
2003
- get fileExtension(): string;
2004
- get mimeType(): string;
2005
- getSupportedCodecs(): MediaCodec[];
2006
- get supportsVideoRotationMetadata(): boolean;
2007
- }
2008
-
2009
- /**
2010
- * Matroska-specific output options.
2011
- * @group Output formats
2012
- * @public
2013
- */
2014
- export declare type MkvOutputFormatOptions = {
2015
- /**
2016
- * Configures the output to only append new data at the end, useful for live-streaming the file as it's being
2017
- * created. When enabled, some features such as storing duration and seeking will be disabled or impacted, so don't
2018
- * use this option when you want to write out a clean file for later use.
2019
- */
2020
- appendOnly?: boolean;
2021
- /**
2022
- * This field controls the minimum duration of each Matroska cluster, in seconds. New clusters will only be created
2023
- * when the current cluster is longer than this value. Defaults to 1 second.
2024
- */
2025
- minimumClusterDuration?: number;
2026
- /**
2027
- * Will be called once the EBML header of the output file has been written.
2028
- *
2029
- * @param data - The raw bytes.
2030
- * @param position - The byte offset of the data in the file.
2031
- */
2032
- onEbmlHeader?: (data: Uint8Array, position: number) => void;
2033
- /**
2034
- * Will be called once the header part of the Matroska Segment element has been written. The header data includes
2035
- * the Segment element and everything inside it, up to (but excluding) the first Matroska Cluster.
2036
- *
2037
- * @param data - The raw bytes.
2038
- * @param position - The byte offset of the data in the file.
2039
- */
2040
- onSegmentHeader?: (data: Uint8Array, position: number) => unknown;
2041
- /**
2042
- * Will be called for each finalized Matroska Cluster of the output file.
2043
- *
2044
- * @param data - The raw bytes.
2045
- * @param position - The byte offset of the data in the file.
2046
- * @param timestamp - The start timestamp of the cluster in seconds.
2047
- */
2048
- onCluster?: (data: Uint8Array, position: number, timestamp: number) => unknown;
2049
- };
2050
-
2051
- /**
2052
- * QuickTime File Format (QTFF), often called MOV. Supports all video and audio codecs, but not subtitle codecs.
2053
- * @group Output formats
2054
- * @public
2055
- */
2056
- export declare class MovOutputFormat extends IsobmffOutputFormat {
2057
- /** Creates a new {@link MovOutputFormat} configured with the specified `options`. */
2058
- constructor(options?: IsobmffOutputFormatOptions);
2059
- get fileExtension(): string;
2060
- get mimeType(): string;
2061
- getSupportedCodecs(): MediaCodec[];
2062
- }
2063
-
2064
- /**
2065
- * MP3 input format singleton.
2066
- * @group Input formats
2067
- * @public
2068
- */
2069
- export declare const MP3: Mp3InputFormat;
2070
-
2071
- /**
2072
- * MP3 file format.
2073
- *
2074
- * Do not instantiate this class; use the {@link MP3} singleton instead.
2075
- *
2076
- * @group Input formats
2077
- * @public
2078
- */
2079
- export declare class Mp3InputFormat extends InputFormat {
2080
- get name(): string;
2081
- get mimeType(): string;
2082
- }
2083
-
2084
- /**
2085
- * MP3 file format.
2086
- * @group Output formats
2087
- * @public
2088
- */
2089
- export declare class Mp3OutputFormat extends OutputFormat {
2090
- /** Creates a new {@link Mp3OutputFormat} configured with the specified `options`. */
2091
- constructor(options?: Mp3OutputFormatOptions);
2092
- getSupportedTrackCounts(): TrackCountLimits;
2093
- get fileExtension(): string;
2094
- get mimeType(): string;
2095
- getSupportedCodecs(): MediaCodec[];
2096
- get supportsVideoRotationMetadata(): boolean;
2097
- }
2098
-
2099
- /**
2100
- * MP3-specific output options.
2101
- * @group Output formats
2102
- * @public
2103
- */
2104
- export declare type Mp3OutputFormatOptions = {
2105
- /**
2106
- * Controls whether the Xing header, which contains additional metadata as well as an index, is written to the start
2107
- * of the MP3 file. When disabled, the writing process becomes append-only. Defaults to `true`.
2108
- */
2109
- xingHeader?: boolean;
2110
- /**
2111
- * Will be called once the Xing metadata frame is finalized.
2112
- *
2113
- * @param data - The raw bytes.
2114
- * @param position - The byte offset of the data in the file.
2115
- */
2116
- onXingFrame?: (data: Uint8Array, position: number) => unknown;
2117
- };
2118
-
2119
- /**
2120
- * MP4 input format singleton.
2121
- * @group Input formats
2122
- * @public
2123
- */
2124
- export declare const MP4: Mp4InputFormat;
2125
-
2126
- /**
2127
- * MPEG-4 Part 14 (MP4) file format.
2128
- *
2129
- * Do not instantiate this class; use the {@link MP4} singleton instead.
2130
- *
2131
- * @group Input formats
2132
- * @public
2133
- */
2134
- export declare class Mp4InputFormat extends IsobmffInputFormat {
2135
- get name(): string;
2136
- get mimeType(): string;
2137
- }
2138
-
2139
- /**
2140
- * MPEG-4 Part 14 (MP4) file format. Supports most codecs.
2141
- * @group Output formats
2142
- * @public
2143
- */
2144
- export declare class Mp4OutputFormat extends IsobmffOutputFormat {
2145
- /** Creates a new {@link Mp4OutputFormat} configured with the specified `options`. */
2146
- constructor(options?: IsobmffOutputFormatOptions);
2147
- get fileExtension(): string;
2148
- get mimeType(): string;
2149
- getSupportedCodecs(): MediaCodec[];
2150
- }
2151
-
2152
- /**
2153
- * List of known compressed audio codecs, ordered by encoding preference.
2154
- * @group Codecs
2155
- * @public
2156
- */
2157
- export declare const NON_PCM_AUDIO_CODECS: readonly ["aac", "opus", "mp3", "vorbis", "flac"];
2158
-
2159
- /**
2160
- * This target just discards all incoming data. It is useful for when you need an {@link Output} but extract data from
2161
- * it differently, for example through format-specific callbacks (`onMoof`, `onMdat`, ...) or encoder events.
2162
- * @group Output targets
2163
- * @public
2164
- */
2165
- export declare class NullTarget extends Target {
2166
- }
2167
-
2168
- /**
2169
- * Ogg input format singleton.
2170
- * @group Input formats
2171
- * @public
2172
- */
2173
- export declare const OGG: OggInputFormat;
2174
-
2175
- /**
2176
- * Ogg file format.
2177
- *
2178
- * Do not instantiate this class; use the {@link OGG} singleton instead.
2179
- *
2180
- * @group Input formats
2181
- * @public
2182
- */
2183
- export declare class OggInputFormat extends InputFormat {
2184
- get name(): string;
2185
- get mimeType(): string;
2186
- }
2187
-
2188
- /**
2189
- * Ogg file format.
2190
- * @group Output formats
2191
- * @public
2192
- */
2193
- export declare class OggOutputFormat extends OutputFormat {
2194
- /** Creates a new {@link OggOutputFormat} configured with the specified `options`. */
2195
- constructor(options?: OggOutputFormatOptions);
2196
- getSupportedTrackCounts(): TrackCountLimits;
2197
- get fileExtension(): string;
2198
- get mimeType(): string;
2199
- getSupportedCodecs(): MediaCodec[];
2200
- get supportsVideoRotationMetadata(): boolean;
2201
- }
2202
-
2203
- /**
2204
- * Ogg-specific output options.
2205
- * @group Output formats
2206
- * @public
2207
- */
2208
- export declare type OggOutputFormatOptions = {
2209
- /**
2210
- * Will be called for each Ogg page that is written.
2211
- *
2212
- * @param data - The raw bytes.
2213
- * @param position - The byte offset of the data in the file.
2214
- * @param source - The {@link MediaSource} backing the page's logical bitstream (track).
2215
- */
2216
- onPage?: (data: Uint8Array, position: number, source: MediaSource_2) => unknown;
2217
- };
2218
-
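Usage sketch: the `onPage` callback pairs naturally with the NullTarget declared above, so Ogg pages are consumed through the callback instead of being written anywhere. `handlePage` is a hypothetical consumer.

import { NullTarget, OggOutputFormat, Output } from '@kenzuya/mediabunny';

declare function handlePage(data: Uint8Array, position: number): void; // hypothetical consumer

const output = new Output({
    format: new OggOutputFormat({
        // Called for every Ogg page; `source` identifies the track (logical bitstream) it belongs to.
        onPage: (data, position, source) => handlePage(data, position),
    }),
    target: new NullTarget(),
});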
2219
- /**
2220
- * Main class orchestrating the creation of a new media file.
2221
- * @group Output files
2222
- * @public
2223
- */
2224
- export declare class Output<F extends OutputFormat = OutputFormat, T extends Target = Target> {
2225
- /** The format of the output file. */
2226
- format: F;
2227
- /** The target to which the file will be written. */
2228
- target: T;
2229
- /** The current state of the output. */
2230
- state: 'pending' | 'started' | 'canceled' | 'finalizing' | 'finalized';
2231
- /**
2232
- * Creates a new instance of {@link Output} which can then be used to create a new media file according to the
2233
- * specified {@link OutputOptions}.
2234
- */
2235
- constructor(options: OutputOptions<F, T>);
2236
- /** Adds a video track to the output with the given source. Can only be called before the output is started. */
2237
- addVideoTrack(source: VideoSource, metadata?: VideoTrackMetadata): void;
2238
- /** Adds an audio track to the output with the given source. Can only be called before the output is started. */
2239
- addAudioTrack(source: AudioSource, metadata?: AudioTrackMetadata): void;
2240
- /** Adds a subtitle track to the output with the given source. Can only be called before the output is started. */
2241
- addSubtitleTrack(source: SubtitleSource, metadata?: SubtitleTrackMetadata): void;
2242
- /**
2243
- * Sets descriptive metadata tags about the media file, such as title, author, date, or cover art. When called
2244
- * multiple times, only the metadata from the last call will be used.
2245
- *
2246
- * Can only be called before the output is started.
2247
- */
2248
- setMetadataTags(tags: MetadataTags): void;
2249
- /**
2250
- * Starts the creation of the output file. This method should be called after all tracks have been added. Only after
2251
- * the output has started can media samples be added to the tracks.
2252
- *
2253
- * @returns A promise that resolves when the output has successfully started and is ready to receive media samples.
2254
- */
2255
- start(): Promise<void>;
2256
- /**
2257
- * Resolves with the full MIME type of the output file, including track codecs.
2258
- *
2259
- * The returned promise will resolve only once the precise codec strings of all tracks are known.
2260
- */
2261
- getMimeType(): Promise<string>;
2262
- /**
2263
- * Cancels the creation of the output file, releasing internal resources like encoders and preventing further
2264
- * samples from being added.
2265
- *
2266
- * @returns A promise that resolves once all internal resources have been released.
2267
- */
2268
- cancel(): Promise<void>;
2269
- /**
2270
- * Finalizes the output file. This method must be called after all media samples across all tracks have been added.
2271
- * Once the Promise returned by this method completes, the output file is ready.
2272
- */
2273
- finalize(): Promise<void>;
2274
- }
2275
-
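Usage sketch of the lifecycle described above (add tracks, start, add samples, finalize), using only classes declared in this file; the canvas content and encoding settings are illustrative.

import {
    Mp4OutputFormat, Output, QUALITY_MEDIUM, StreamTarget, StreamTargetChunk, VideoSample, VideoSampleSource,
} from '@kenzuya/mediabunny';

// Collect the written chunks in memory through a plain WritableStream.
const chunks: StreamTargetChunk[] = [];
const writable = new WritableStream<StreamTargetChunk>({
    write(chunk) { chunks.push(chunk); },
});

const output = new Output({
    format: new Mp4OutputFormat(),
    target: new StreamTarget(writable),
});

const videoSource = new VideoSampleSource({ codec: 'avc', bitrate: QUALITY_MEDIUM });
output.addVideoTrack(videoSource, { frameRate: 30 });

await output.start();

const canvas = new OffscreenCanvas(640, 360);
const ctx = canvas.getContext('2d')!;
for (let i = 0; i < 90; i++) {
    ctx.fillStyle = `hsl(${i * 4} 80% 50%)`;
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    const sample = new VideoSample(canvas, { timestamp: i / 30, duration: 1 / 30 });
    await videoSource.add(sample); // await to respect writer and encoder backpressure
    sample.close();
}

await output.finalize(); // the collected chunks now form a complete MP4 file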
2276
- /**
2277
- * Base class representing an output media file format.
2278
- * @group Output formats
2279
- * @public
2280
- */
2281
- export declare abstract class OutputFormat {
2282
- /** The file extension used by this output format, beginning with a dot. */
2283
- abstract get fileExtension(): string;
2284
- /** The base MIME type of the output format. */
2285
- abstract get mimeType(): string;
2286
- /** Returns a list of media codecs that this output format can contain. */
2287
- abstract getSupportedCodecs(): MediaCodec[];
2288
- /** Returns the number of tracks that this output format supports. */
2289
- abstract getSupportedTrackCounts(): TrackCountLimits;
2290
- /** Whether this output format supports video rotation metadata. */
2291
- abstract get supportsVideoRotationMetadata(): boolean;
2292
- /** Returns a list of video codecs that this output format can contain. */
2293
- getSupportedVideoCodecs(): VideoCodec[];
2294
- /** Returns a list of audio codecs that this output format can contain. */
2295
- getSupportedAudioCodecs(): AudioCodec[];
2296
- /** Returns a list of subtitle codecs that this output format can contain. */
2297
- getSupportedSubtitleCodecs(): SubtitleCodec[];
2298
- }
2299
-
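Usage sketch: querying the capability surface of the concrete output formats declared in this file before deciding which one to use.

import { Mp4OutputFormat, OutputFormat, WavOutputFormat, WebMOutputFormat } from '@kenzuya/mediabunny';

const describeFormat = (format: OutputFormat) => {
    console.log(format.mimeType, format.fileExtension);
    console.log('all codecs:', format.getSupportedCodecs());
    console.log('video codecs:', format.getSupportedVideoCodecs());
    console.log('audio codecs:', format.getSupportedAudioCodecs());
    console.log('track limits:', format.getSupportedTrackCounts());
    console.log('rotation metadata supported:', format.supportsVideoRotationMetadata);
};

for (const format of [new Mp4OutputFormat(), new WebMOutputFormat(), new WavOutputFormat()]) {
    describeFormat(format);
}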
2300
- /**
2301
- * The options for creating an Output object.
2302
- * @group Output files
2303
- * @public
2304
- */
2305
- export declare type OutputOptions<F extends OutputFormat = OutputFormat, T extends Target = Target> = {
2306
- /** The format of the output file. */
2307
- format: F;
2308
- /** The target to which the file will be written. */
2309
- target: T;
2310
- };
2311
-
2312
- /**
2313
- * Additional options for controlling packet retrieval.
2314
- * @group Media sinks
2315
- * @public
2316
- */
2317
- export declare type PacketRetrievalOptions = {
2318
- /**
2319
- * When set to `true`, only packet metadata (like timestamp) will be retrieved - the actual packet data will not
2320
- * be loaded.
2321
- */
2322
- metadataOnly?: boolean;
2323
- /**
2324
- * When set to true, key packets will be verified upon retrieval by looking into the packet's bitstream.
2325
- * If not enabled, the packet types will be determined solely by what's stored in the containing file and may be
2326
- * incorrect, potentially leading to decoder errors. Since determining a packet's actual type requires looking into
2327
- * its data, this option cannot be enabled together with `metadataOnly`.
2328
- */
2329
- verifyKeyPackets?: boolean;
2330
- };
2331
-
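Usage sketch: these options are consumed by the packet-level sink declared elsewhere in these typings; EncodedPacketSink and its getFirstPacket/getPacket methods are assumed here and do not appear in this hunk.

import { EncodedPacketSink, InputVideoTrack } from '@kenzuya/mediabunny';

declare const videoTrack: InputVideoTrack; // obtained from an Input opened elsewhere

const sink = new EncodedPacketSink(videoTrack); // assumed API, declared outside this hunk

// Cheap scan: only timestamps/durations are read, packet payloads are not loaded.
const first = await sink.getFirstPacket({ metadataOnly: true });

// Trustworthy seeking: confirm the packet really is a key frame by inspecting its bitstream.
const key = await sink.getPacket(10, { verifyKeyPackets: true });

// Note: `metadataOnly` and `verifyKeyPackets` cannot be combined (see above).
console.log(first?.timestamp, key?.type);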
2332
- /**
2333
- * Contains aggregate statistics about the encoded packets of a track.
2334
- * @group Input files & tracks
2335
- * @public
2336
- */
2337
- export declare type PacketStats = {
2338
- /** The total number of packets. */
2339
- packetCount: number;
2340
- /** The average number of packets per second. For video tracks, this will equal the average frame rate (FPS). */
2341
- averagePacketRate: number;
2342
- /** The average number of bits per second. */
2343
- averageBitrate: number;
2344
- };
2345
-
2346
- /**
2347
- * The type of a packet. Key packets can be decoded without previous packets, while delta packets depend on previous
2348
- * packets.
2349
- * @group Packets
2350
- * @public
2351
- */
2352
- export declare type PacketType = 'key' | 'delta';
2353
-
2354
- /**
2355
- * Parses an ASS/SSA timestamp string (H:MM:SS.cc) to seconds.
2356
- * @group Media sources
2357
- * @public
2358
- */
2359
- export declare const parseAssTimestamp: (timeString: string) => number;
2360
-
2361
- /**
2362
- * Parses an SRT timestamp string (HH:MM:SS,mmm) to seconds.
2363
- * @group Media sources
2364
- * @public
2365
- */
2366
- export declare const parseSrtTimestamp: (timeString: string) => number;
2367
-
2368
- /**
2369
- * List of known PCM (uncompressed) audio codecs, ordered by encoding preference.
2370
- * @group Codecs
2371
- * @public
2372
- */
2373
- export declare const PCM_AUDIO_CODECS: readonly ["pcm-s16", "pcm-s16be", "pcm-s24", "pcm-s24be", "pcm-s32", "pcm-s32be", "pcm-f32", "pcm-f32be", "pcm-f64", "pcm-f64be", "pcm-u8", "pcm-s8", "ulaw", "alaw"];
2374
-
2375
- /**
2376
- * QuickTime File Format input format singleton.
2377
- * @group Input formats
2378
- * @public
2379
- */
2380
- export declare const QTFF: QuickTimeInputFormat;
2381
-
2382
- /**
2383
- * Represents a subjective media quality level.
2384
- * @group Encoding
2385
- * @public
2386
- */
2387
- export declare class Quality {
2388
- }
2389
-
2390
- /**
2391
- * Represents a high media quality.
2392
- * @group Encoding
2393
- * @public
2394
- */
2395
- export declare const QUALITY_HIGH: Quality;
2396
-
2397
- /**
2398
- * Represents a low media quality.
2399
- * @group Encoding
2400
- * @public
2401
- */
2402
- export declare const QUALITY_LOW: Quality;
2403
-
2404
- /**
2405
- * Represents a medium media quality.
2406
- * @group Encoding
2407
- * @public
2408
- */
2409
- export declare const QUALITY_MEDIUM: Quality;
2410
-
2411
- /**
2412
- * Represents a very high media quality.
2413
- * @group Encoding
2414
- * @public
2415
- */
2416
- export declare const QUALITY_VERY_HIGH: Quality;
2417
-
2418
- /**
2419
- * Represents a very low media quality.
2420
- * @group Encoding
2421
- * @public
2422
- */
2423
- export declare const QUALITY_VERY_LOW: Quality;
2424
-
2425
- /**
2426
- * QuickTime File Format (QTFF), often called MOV.
2427
- *
2428
- * Do not instantiate this class; use the {@link QTFF} singleton instead.
2429
- *
2430
- * @group Input formats
2431
- * @public
2432
- */
2433
- export declare class QuickTimeInputFormat extends IsobmffInputFormat {
2434
- get name(): string;
2435
- get mimeType(): string;
2436
- }
2437
-
2438
- /**
2439
- * A source backed by a [`ReadableStream`](https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream) of
2440
- * `Uint8Array`, representing an append-only byte stream of unknown length. This is the source to use for incrementally
2441
- * streaming in input files that are still being constructed and whose size we don't yet know, like for example the
2442
- * output chunks of [MediaRecorder](https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder).
2443
- *
2444
- * This source is *unsized*, meaning calls to `.getSize()` will throw and readers are more limited due to the
2445
- * lack of random file access. You should only use this source with sequential access patterns, such as reading all
2446
- * packets from start to end. This source does not work well with random access patterns unless you increase its
2447
- * max cache size.
2448
- *
2449
- * @group Input sources
2450
- * @public
2451
- */
2452
- export declare class ReadableStreamSource extends Source {
2453
- /** Creates a new {@link ReadableStreamSource} backed by the specified `ReadableStream<Uint8Array>`. */
2454
- constructor(stream: ReadableStream<Uint8Array>, options?: ReadableStreamSourceOptions);
2455
- }
2456
-
2457
- /**
2458
- * Options for {@link ReadableStreamSource}.
2459
- * @group Input sources
2460
- * @public
2461
- */
2462
- export declare type ReadableStreamSourceOptions = {
2463
- /** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 16 MiB. */
2464
- maxCacheSize?: number;
2465
- };
2466
-
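Usage sketch: piping MediaRecorder output into a ReadableStreamSource, as suggested above. The Input class and ALL_FORMATS constant are assumed from elsewhere in these typings.

import { ALL_FORMATS, Input, ReadableStreamSource } from '@kenzuya/mediabunny';

declare const recorder: MediaRecorder; // set up and started elsewhere

// Bridge MediaRecorder's chunk events into an append-only byte stream of unknown length.
const stream = new ReadableStream<Uint8Array>({
    start(controller) {
        recorder.ondataavailable = async (event) => {
            controller.enqueue(new Uint8Array(await event.data.arrayBuffer()));
            if (recorder.state === 'inactive') controller.close();
        };
    },
});

const source = new ReadableStreamSource(stream, { maxCacheSize: 32 * 2 ** 20 });
const input = new Input({ source, formats: ALL_FORMATS }); // Input/ALL_FORMATS: assumed, declared elsewhere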
2467
- /**
2468
- * Registers a custom video or audio decoder. Registered decoders will automatically be used for decoding whenever
2469
- * possible.
2470
- * @group Custom coders
2471
- * @public
2472
- */
2473
- export declare const registerDecoder: (decoder: typeof CustomVideoDecoder | typeof CustomAudioDecoder) => void;
2474
-
2475
- /**
2476
- * Registers a custom video or audio encoder. Registered encoders will automatically be used for encoding whenever
2477
- * possible.
2478
- * @group Custom coders
2479
- * @public
2480
- */
2481
- export declare const registerEncoder: (encoder: typeof CustomVideoEncoder | typeof CustomAudioEncoder) => void;
2482
-
2483
- /**
2484
- * Image data with additional metadata.
2485
- *
2486
- * @group Metadata tags
2487
- * @public
2488
- */
2489
- export declare class RichImageData {
2490
- /** The raw image data. */
2491
- data: Uint8Array;
2492
- /** An RFC 6838 MIME type (e.g. image/jpeg, image/png, etc.) */
2493
- mimeType: string;
2494
- /** Creates a new {@link RichImageData}. */
2495
- constructor(
2496
- /** The raw image data. */
2497
- data: Uint8Array,
2498
- /** An RFC 6838 MIME type (e.g. image/jpeg, image/png, etc.) */
2499
- mimeType: string);
2500
- }
2501
-
2502
- /**
2503
- * Represents a clockwise rotation in degrees.
2504
- * @group Miscellaneous
2505
- * @public
2506
- */
2507
- export declare type Rotation = 0 | 90 | 180 | 270;
2508
-
2509
- /**
2510
- * Sets all keys K of T to be required.
2511
- * @group Miscellaneous
2512
- * @public
2513
- */
2514
- export declare type SetRequired<T, K extends keyof T> = T & Required<Pick<T, K>>;
2515
-
2516
- /**
2517
- * The source base class, representing a resource from which bytes can be read.
2518
- * @group Input sources
2519
- * @public
2520
- */
2521
- export declare abstract class Source {
2522
- /**
2523
- * Resolves with the total size of the file in bytes. This function is memoized, meaning only the first call
2524
- * will retrieve the size.
2525
- *
2526
- * Returns null if the source is unsized.
2527
- */
2528
- getSizeOrNull(): Promise<number | null>;
2529
- /**
2530
- * Resolves with the total size of the file in bytes. This function is memoized, meaning only the first call
2531
- * will retrieve the size.
2532
- *
2533
- * Throws an error if the source is unsized.
2534
- */
2535
- getSize(): Promise<number>;
2536
- /** Called each time data is retrieved from the source. Will be called with the retrieved range (end exclusive). */
2537
- onread: ((start: number, end: number) => unknown) | null;
2538
- }
2539
-
2540
- /**
2541
- * Splits ASS/SSA subtitle text into header (styles) and individual cues.
2542
- * Preserves all sections including [Fonts], [Graphics], and Aegisub sections.
2543
- * Aegisub sections are moved to the end to avoid breaking [Events].
2544
- * @group Media sources
2545
- * @public
2546
- */
2547
- export declare const splitAssIntoCues: (text: string) => {
2548
- header: string;
2549
- cues: SubtitleCue[];
2550
- };
2551
-
2552
- /**
2553
- * Splits SRT subtitle text into individual cues.
2554
- * @group Media sources
2555
- * @public
2556
- */
2557
- export declare const splitSrtIntoCues: (text: string) => SubtitleCue[];
2558
-
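Usage sketch of the two SRT helpers declared nearby (splitSrtIntoCues above, parseSrtTimestamp earlier in this file); the expected values in the comments follow the documented semantics.

import { parseSrtTimestamp, splitSrtIntoCues } from '@kenzuya/mediabunny';

const srt = `1
00:00:01,000 --> 00:00:02,500
Hello there!
`;

const [cue] = splitSrtIntoCues(srt);
console.log(cue?.timestamp, cue?.duration, cue?.text); // expected: 1, 1.5, 'Hello there!'

console.log(parseSrtTimestamp('00:00:02,500')); // expected: 2.5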
2559
- /**
2560
- * A general-purpose, callback-driven source that can get its data from anywhere.
2561
- * @group Input sources
2562
- * @public
2563
- */
2564
- export declare class StreamSource extends Source {
2565
- /** Creates a new {@link StreamSource} whose behavior is specified by `options`. */
2566
- constructor(options: StreamSourceOptions);
2567
- }
2568
-
2569
- /**
2570
- * Options for defining a {@link StreamSource}.
2571
- * @group Input sources
2572
- * @public
2573
- */
2574
- export declare type StreamSourceOptions = {
2575
- /**
2576
- * Called when the size of the entire file is requested. Must return or resolve to the size in bytes. This function
2577
- * is guaranteed to be called before `read`.
2578
- */
2579
- getSize: () => MaybePromise<number>;
2580
- /**
2581
- * Called when data is requested. Must return or resolve to the bytes from the specified byte range, or a stream
2582
- * that yields these bytes.
2583
- */
2584
- read: (start: number, end: number) => MaybePromise<Uint8Array | ReadableStream<Uint8Array>>;
2585
- /**
2586
- * Called when the {@link Input} driven by this source is disposed.
2587
- */
2588
- dispose?: () => unknown;
2589
- /** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 8 MiB. */
2590
- maxCacheSize?: number;
2591
- /**
2592
- * Specifies the prefetch profile that the reader should use with this source. A prefetch profile specifies the
2593
- * pattern with which bytes outside of the requested range are preloaded to reduce latency for future reads.
2594
- *
2595
- * - `'none'` (default): No prefetching; only the data needed in the moment is requested.
2596
- * - `'fileSystem'`: File system-optimized prefetching: a small amount of data is prefetched bidirectionally,
2597
- * aligned with page boundaries.
2598
- * - `'network'`: Network-optimized prefetching, or more generally, prefetching optimized for any high-latency
2599
- * environment: tries to minimize the amount of read calls and aggressively prefetches data when sequential access
2600
- * patterns are detected.
2601
- */
2602
- prefetchProfile?: 'none' | 'fileSystem' | 'network';
2603
- };
2604
-
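Usage sketch: a StreamSource backed by a File/Blob, with the file-system prefetch profile described above.

import { StreamSource } from '@kenzuya/mediabunny';

declare const file: File; // e.g. picked via an <input type="file"> element

const source = new StreamSource({
    getSize: () => file.size,
    read: async (start, end) => new Uint8Array(await file.slice(start, end).arrayBuffer()),
    maxCacheSize: 16 * 2 ** 20,
    prefetchProfile: 'fileSystem',
});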
2605
- /**
2606
- * This target writes data to a [`WritableStream`](https://developer.mozilla.org/en-US/docs/Web/API/WritableStream),
2607
- * making it a general-purpose target for writing data anywhere. It is also compatible with
2608
- * [`FileSystemWritableFileStream`](https://developer.mozilla.org/en-US/docs/Web/API/FileSystemWritableFileStream) for
2609
- * use with the [File System Access API](https://developer.mozilla.org/en-US/docs/Web/API/File_System_API). The
2610
- * `WritableStream` can also apply backpressure, which will propagate to the output and throttle the encoders.
2611
- * @group Output targets
2612
- * @public
2613
- */
2614
- export declare class StreamTarget extends Target {
2615
- /** Creates a new {@link StreamTarget} which writes to the specified `writable`. */
2616
- constructor(writable: WritableStream<StreamTargetChunk>, options?: StreamTargetOptions);
2617
- }
2618
-
2619
- /**
2620
- * A data chunk for {@link StreamTarget}.
2621
- * @group Output targets
2622
- * @public
2623
- */
2624
- export declare type StreamTargetChunk = {
2625
- /** The operation type. */
2626
- type: 'write';
2627
- /** The data to write. */
2628
- data: Uint8Array;
2629
- /** The byte offset in the output file at which to write the data. */
2630
- position: number;
2631
- };
2632
-
2633
- /**
2634
- * Options for {@link StreamTarget}.
2635
- * @group Output targets
2636
- * @public
2637
- */
2638
- export declare type StreamTargetOptions = {
2639
- /**
2640
- * When setting this to true, data created by the output will first be accumulated and only written out
2641
- * once it has reached sufficient size, using a default chunk size of 16 MiB. This is useful for reducing the total
2642
- * amount of writes, at the cost of latency.
2643
- */
2644
- chunked?: boolean;
2645
- /** When using `chunked: true`, this specifies the maximum size of each chunk. Defaults to 16 MiB. */
2646
- chunkSize?: number;
2647
- };
2648
-
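Usage sketch: pairing StreamTarget with the File System Access API mentioned above, buffering writes into 8 MiB chunks; browser support for showSaveFilePicker is assumed.

import { Mp4OutputFormat, Output, StreamTarget } from '@kenzuya/mediabunny';

const handle = await window.showSaveFilePicker({ suggestedName: 'video.mp4' });
const writable = await handle.createWritable();

const output = new Output({
    format: new Mp4OutputFormat(),
    target: new StreamTarget(writable, { chunked: true, chunkSize: 8 * 2 ** 20 }),
});

// ...addVideoTrack()/addAudioTrack(), await output.start(), add samples, then await output.finalize()...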
2649
- /**
2650
- * List of known subtitle codecs, ordered by encoding preference.
2651
- * @group Codecs
2652
- * @public
2653
- */
2654
- export declare const SUBTITLE_CODECS: readonly ["webvtt", "tx3g", "ttml", "srt", "ass", "ssa"];
2655
-
2656
- /**
2657
- * Union type of known subtitle codecs.
2658
- * @group Codecs
2659
- * @public
2660
- */
2661
- export declare type SubtitleCodec = typeof SUBTITLE_CODECS[number];
2662
-
2663
- /**
2664
- * Subtitle configuration data.
2665
- * @group Media sources
2666
- * @public
2667
- */
2668
- export declare type SubtitleConfig = {
2669
- /** Format-specific description (e.g., WebVTT preamble, ASS/SSA header). */
2670
- description: string;
2671
- };
2672
-
2673
- /**
2674
- * Represents a single subtitle cue with timing and text.
2675
- * @group Media sources
2676
- * @public
2677
- */
2678
- export declare type SubtitleCue = {
2679
- /** When the subtitle should appear, in seconds. */
2680
- timestamp: number;
2681
- /** How long the subtitle should be displayed, in seconds. */
2682
- duration: number;
2683
- /** The subtitle text content. */
2684
- text: string;
2685
- /** Optional cue identifier. */
2686
- identifier?: string;
2687
- /** Optional format-specific settings (e.g., VTT positioning). */
2688
- settings?: string;
2689
- /** Optional notes or comments. */
2690
- notes?: string;
2691
- };
2692
-
2693
- /**
2694
- * Metadata associated with subtitle cues.
2695
- * @group Media sources
2696
- * @public
2697
- */
2698
- export declare type SubtitleMetadata = {
2699
- /** Optional subtitle configuration. */
2700
- config?: SubtitleConfig;
2701
- };
2702
-
2703
- /**
2704
- * Base class for subtitle sources - sources for subtitle tracks.
2705
- * @group Media sources
2706
- * @public
2707
- */
2708
- export declare abstract class SubtitleSource extends MediaSource_2 {
2709
- /** Internal constructor. */
2710
- constructor(codec: SubtitleCodec);
2711
- }
2712
-
2713
- /**
2714
- * Additional metadata for subtitle tracks.
2715
- * @group Output files
2716
- * @public
2717
- */
2718
- export declare type SubtitleTrackMetadata = BaseTrackMetadata & {};
2719
-
2720
- /**
2721
- * Base class for targets, specifying where output files are written.
2722
- * @group Output targets
2723
- * @public
2724
- */
2725
- export declare abstract class Target {
2726
- /**
2727
- * Called each time data is written to the target. Will be called with the byte range into which data was written.
2728
- *
2729
- * Use this callback to track the size of the output file as it grows. But be warned, this function is chatty and
2730
- * gets called *extremely* often.
2731
- */
2732
- onwrite: ((start: number, end: number) => unknown) | null;
2733
- }
2734
-
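Usage sketch: a throttled progress readout built on the `onwrite` hook, since the callback itself fires far too often to log directly.

import { NullTarget } from '@kenzuya/mediabunny';

const target = new NullTarget();
let highWaterMark = 0;

target.onwrite = (start, end) => {
    highWaterMark = Math.max(highWaterMark, end);
};

// Report the current file size once a second rather than on every write.
setInterval(() => console.log(`~${(highWaterMark / 2 ** 20).toFixed(1)} MiB written`), 1000);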
2735
- /**
2736
- * This source can be used to add subtitles from a subtitle text file.
2737
- * @group Media sources
2738
- * @public
2739
- */
2740
- export declare class TextSubtitleSource extends SubtitleSource {
2741
- /** Creates a new {@link TextSubtitleSource} where added text chunks are in the specified `codec`. */
2742
- constructor(codec: SubtitleCodec);
2743
- /**
2744
- * Parses the subtitle text according to the specified codec and adds it to the output track. You don't have to
2745
- * add the entire subtitle file at once here; you can provide it in chunks.
2746
- *
2747
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
2748
- * to respect writer and encoder backpressure.
2749
- */
2750
- add(text: string): Promise<void>;
2751
- }
2752
-
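Usage sketch: adding a WebVTT subtitle track from text, assuming the chosen output format accepts a WebVTT track alongside whatever other tracks are configured on the output.

import { NullTarget, Output, TextSubtitleSource, WebMOutputFormat } from '@kenzuya/mediabunny';

const subtitles = new TextSubtitleSource('webvtt');

const output = new Output({ format: new WebMOutputFormat(), target: new NullTarget() });
output.addSubtitleTrack(subtitles);
// ...addVideoTrack()/addAudioTrack() as needed...

await output.start();

// The subtitle file can be provided in chunks; awaiting respects backpressure.
await subtitles.add(`WEBVTT

00:00.000 --> 00:02.000
Hello!
`);

await output.finalize();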
2753
- /**
2754
- * Specifies the number of tracks (for each track type and in total) that an output format supports.
2755
- * @group Output formats
2756
- * @public
2757
- */
2758
- export declare type TrackCountLimits = {
2759
- [K in TrackType]: InclusiveIntegerRange;
2760
- } & {
2761
- /** Specifies the overall allowed range of track counts for the output format. */
2762
- total: InclusiveIntegerRange;
2763
- };
2764
-
2765
- /**
2766
- * Specifies a track's disposition, i.e. information about its intended usage.
2767
- * @public
2768
- * @group Miscellaneous
2769
- */
2770
- export declare type TrackDisposition = {
2771
- /**
2772
- * Indicates that this track is eligible for automatic selection by a player; that it is the main track among other,
2773
- * non-default tracks of the same type.
2774
- */
2775
- default: boolean;
2776
- /**
2777
- * Indicates that players should always display this track by default, even if it goes against the user's default
2778
- * preferences. For example, a subtitle track only containing translations of foreign-language audio.
2779
- */
2780
- forced: boolean;
2781
- /** Indicates that this track is in the content's original language. */
2782
- original: boolean;
2783
- /** Indicates that this track contains commentary. */
2784
- commentary: boolean;
2785
- /** Indicates that this track is intended for hearing-impaired users. */
2786
- hearingImpaired: boolean;
2787
- /** Indicates that this track is intended for visually-impaired users. */
2788
- visuallyImpaired: boolean;
2789
- };
2790
-
2791
- /**
2792
- * Union type of all track types.
2793
- * @group Miscellaneous
2794
- * @public
2795
- */
2796
- export declare type TrackType = typeof ALL_TRACK_TYPES[number];
2797
-
2798
- /**
2799
- * A source backed by a URL. This is useful for reading data from the network. Requests will be made using an optimized
2800
- * reading and prefetching pattern to minimize request count and latency.
2801
- * @group Input sources
2802
- * @public
2803
- */
2804
- export declare class UrlSource extends Source {
2805
- /** Creates a new {@link UrlSource} backed by the resource at the specified URL. */
2806
- constructor(url: string | URL | Request, options?: UrlSourceOptions);
2807
- }
2808
-
2809
- /**
2810
- * Options for {@link UrlSource}.
2811
- * @group Input sources
2812
- * @public
2813
- */
2814
- export declare type UrlSourceOptions = {
2815
- /**
2816
- * The [`RequestInit`](https://developer.mozilla.org/en-US/docs/Web/API/RequestInit) used by the Fetch API. Can be
2817
- * used to further control the requests, such as setting custom headers.
2818
- */
2819
- requestInit?: RequestInit;
2820
- /**
2821
- * A function that returns the delay (in seconds) before retrying a failed request. The function is called
2822
- * with the number of previous, unsuccessful attempts, as well as with the error with which the previous request
2823
- * failed. If the function returns `null`, no more retries will be made.
2824
- *
2825
- * By default, it uses an exponential backoff algorithm that never gives up unless
2826
- * a CORS error is suspected (`fetch()` did reject, `navigator.onLine` is true and origin is different)
2827
- */
2828
- getRetryDelay?: (previousAttempts: number, error: unknown, url: string | URL | Request) => number | null;
2829
- /** The maximum number of bytes the cache is allowed to hold in memory. Defaults to 64 MiB. */
2830
- maxCacheSize?: number;
2831
- /**
2832
- * A WHATWG-compatible fetch function. You can use this field to polyfill the `fetch` function, add missing
2833
- * features, or use a custom implementation.
2834
- */
2835
- fetchFn?: typeof fetch;
2836
- };
2837
-
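Usage sketch: a UrlSource with custom headers and a capped exponential backoff; the format singletons come from this file, while the Input class is assumed from elsewhere in these typings. `onread` is inherited from the Source base class.

import { Input, MP4, QTFF, UrlSource, WEBM } from '@kenzuya/mediabunny';

const source = new UrlSource('https://example.com/movie.mp4', {
    requestInit: { headers: { Authorization: 'Bearer <token>' } },
    // Back off exponentially (0.5 s, 1 s, 2 s, ...) and give up after five attempts.
    getRetryDelay: (previousAttempts) => (previousAttempts >= 5 ? null : 0.5 * 2 ** previousAttempts),
    maxCacheSize: 64 * 2 ** 20,
});

// Inherited from Source: useful for spotting inefficient access patterns.
source.onread = (start, end) => console.log(`read bytes [${start}, ${end})`);

const input = new Input({ source, formats: [MP4, QTFF, WEBM] }); // Input: assumed, declared elsewhere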
2838
- /**
2839
- * List of known video codecs, ordered by encoding preference.
2840
- * @group Codecs
2841
- * @public
2842
- */
2843
- export declare const VIDEO_CODECS: readonly ["avc", "hevc", "vp9", "av1", "vp8"];
2844
-
2845
- /**
2846
- * Union type of known video codecs.
2847
- * @group Codecs
2848
- * @public
2849
- */
2850
- export declare type VideoCodec = typeof VIDEO_CODECS[number];
2851
-
2852
- /**
2853
- * Additional options that control video encoding.
2854
- * @group Encoding
2855
- * @public
2856
- */
2857
- export declare type VideoEncodingAdditionalOptions = {
2858
- /**
2859
- * What to do with alpha data contained in the video samples.
2860
- *
2861
- * - `'discard'` (default): Only the samples' color data is kept; the video is opaque.
2862
- * - `'keep'`: The samples' alpha data is also encoded as side data. Make sure to pair this mode with a container
2863
- * format that supports transparency (such as WebM or Matroska).
2864
- */
2865
- alpha?: 'discard' | 'keep';
2866
- /** Configures the bitrate mode; defaults to `'variable'`. */
2867
- bitrateMode?: 'constant' | 'variable';
2868
- /**
2869
- * The latency mode used by the encoder; controls the performance-quality tradeoff.
2870
- *
2871
- * - `'quality'` (default): The encoder prioritizes quality over latency, and no frames can be dropped.
2872
- * - `'realtime'`: The encoder prioritizes low latency over quality, and may drop frames if the encoder becomes
2873
- * overloaded to keep up with real-time requirements.
2874
- */
2875
- latencyMode?: 'quality' | 'realtime';
2876
- /**
2877
- * The full codec string as specified in the WebCodecs Codec Registry. This string must match the codec
2878
- * specified in `codec`. When not set, a fitting codec string will be constructed automatically by the library.
2879
- */
2880
- fullCodecString?: string;
2881
- /**
2882
- * A hint that configures the hardware acceleration method of this codec. This is best left on `'no-preference'`,
2883
- * the default.
2884
- */
2885
- hardwareAcceleration?: 'no-preference' | 'prefer-hardware' | 'prefer-software';
2886
- /**
2887
- * An encoding scalability mode identifier as defined by
2888
- * [WebRTC-SVC](https://w3c.github.io/webrtc-svc/#scalabilitymodes*).
2889
- */
2890
- scalabilityMode?: string;
2891
- /**
2892
- * An encoding video content hint as defined by
2893
- * [mst-content-hint](https://w3c.github.io/mst-content-hint/#video-content-hints).
2894
- */
2895
- contentHint?: string;
2896
- };
2897
-
2898
- /**
2899
- * Configuration object that controls video encoding. Can be used to set codec, quality, and more.
2900
- * @group Encoding
2901
- * @public
2902
- */
2903
- export declare type VideoEncodingConfig = {
2904
- /** The video codec that should be used for encoding the video samples (frames). */
2905
- codec: VideoCodec;
2906
- /**
2907
- * The target bitrate for the encoded video, in bits per second. Alternatively, a subjective {@link Quality} can
2908
- * be provided.
2909
- */
2910
- bitrate: number | Quality;
2911
- /**
2912
- * The interval, in seconds, of how often frames are encoded as a key frame. The default is 5 seconds. Frequent key
2913
- * frames improve seeking behavior but increase file size. When using multiple video tracks, you should give them
2914
- * all the same key frame interval.
2915
- */
2916
- keyFrameInterval?: number;
2917
- /**
2918
- * Video frames may change size over time. This field controls the behavior in case this happens.
2919
- *
2920
- * - `'deny'` (default) will throw an error, requiring all frames to have the exact same dimensions.
2921
- * - `'passThrough'` will allow the change and directly pass the frame to the encoder.
2922
- * - `'fill'` will stretch the image to fill the entire original box, potentially altering aspect ratio.
2923
- * - `'contain'` will contain the entire image within the original box while preserving aspect ratio. This may lead
2924
- * to letterboxing.
2925
- * - `'cover'` will scale the image until the entire original box is filled, while preserving aspect ratio.
2926
- *
2927
- * The "original box" refers to the dimensions of the first encoded frame.
2928
- */
2929
- sizeChangeBehavior?: 'deny' | 'passThrough' | 'fill' | 'contain' | 'cover';
2930
- /** Called for each successfully encoded packet. Both the packet and the encoding metadata are passed. */
2931
- onEncodedPacket?: (packet: EncodedPacket, meta: EncodedVideoChunkMetadata | undefined) => unknown;
2932
- /**
2933
- * Called when the internal [encoder config](https://www.w3.org/TR/webcodecs/#video-encoder-config), as used by the
2934
- * WebCodecs API, is created.
2935
- */
2936
- onEncoderConfig?: (config: VideoEncoderConfig) => unknown;
2937
- } & VideoEncodingAdditionalOptions;
2938
-
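Usage sketch: an encoding configuration that keeps alpha and tunes the encoder, paired with a WebM output as the `alpha` documentation above recommends; the concrete values are illustrative.

import { NullTarget, Output, VideoSampleSource, WebMOutputFormat } from '@kenzuya/mediabunny';

const videoSource = new VideoSampleSource({
    codec: 'vp9',
    bitrate: 2_000_000,
    keyFrameInterval: 2,
    alpha: 'keep',                 // encode transparency as side data
    latencyMode: 'realtime',       // allow frame dropping to keep up
    sizeChangeBehavior: 'contain', // letterbox frames whose size changes
    onEncoderConfig: (config) => console.log('WebCodecs encoder config:', config),
});

const output = new Output({ format: new WebMOutputFormat(), target: new NullTarget() });
output.addVideoTrack(videoSource);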
2939
- /**
2940
- * Represents a raw, unencoded video sample (frame). Mainly used as an expressive wrapper around WebCodecs API's
2941
- * [`VideoFrame`](https://developer.mozilla.org/en-US/docs/Web/API/VideoFrame), but can also be used standalone.
2942
- * @group Samples
2943
- * @public
2944
- */
2945
- export declare class VideoSample implements Disposable {
2946
- /**
2947
- * The internal pixel format in which the frame is stored.
2948
- * [See pixel formats](https://developer.mozilla.org/en-US/docs/Web/API/VideoFrame/format)
2949
- */
2950
- readonly format: VideoPixelFormat | null;
2951
- /** The width of the frame in pixels. */
2952
- readonly codedWidth: number;
2953
- /** The height of the frame in pixels. */
2954
- readonly codedHeight: number;
2955
- /** The rotation of the frame in degrees, clockwise. */
2956
- readonly rotation: Rotation;
2957
- /**
2958
- * The presentation timestamp of the frame in seconds. May be negative. Frames with negative end timestamps should
2959
- * not be presented.
2960
- */
2961
- readonly timestamp: number;
2962
- /** The duration of the frame in seconds. */
2963
- readonly duration: number;
2964
- /** The color space of the frame. */
2965
- readonly colorSpace: VideoColorSpace;
2966
- /** The width of the frame in pixels after rotation. */
2967
- get displayWidth(): number;
2968
- /** The height of the frame in pixels after rotation. */
2969
- get displayHeight(): number;
2970
- /** The presentation timestamp of the frame in microseconds. */
2971
- get microsecondTimestamp(): number;
2972
- /** The duration of the frame in microseconds. */
2973
- get microsecondDuration(): number;
2974
- /**
2975
- * Whether this sample uses a pixel format that can hold transparency data. Note that this doesn't necessarily mean
2976
- * that the sample is transparent.
2977
- */
2978
- get hasAlpha(): boolean | null;
2979
- /**
2980
- * Creates a new {@link VideoSample} from a
2981
- * [`VideoFrame`](https://developer.mozilla.org/en-US/docs/Web/API/VideoFrame). This is essentially a near zero-cost
2982
- * wrapper around `VideoFrame`. The sample's metadata is optionally refined using the data specified in `init`.
2983
- */
2984
- constructor(data: VideoFrame, init?: VideoSampleInit);
2985
- /**
2986
- * Creates a new {@link VideoSample} from a
2987
- * [`CanvasImageSource`](https://udn.realityripple.com/docs/Web/API/CanvasImageSource), similar to the
2988
- * [`VideoFrame`](https://developer.mozilla.org/en-US/docs/Web/API/VideoFrame) constructor. When `VideoFrame` is
2989
- * available, this is simply a wrapper around its constructor. If not, it will copy the source's image data to an
2990
- * internal canvas for later use.
2991
- */
2992
- constructor(data: CanvasImageSource, init: SetRequired<VideoSampleInit, 'timestamp'>);
2993
- /**
2994
- * Creates a new {@link VideoSample} from raw pixel data specified in `data`. Additional metadata must be provided
2995
- * in `init`.
2996
- */
2997
- constructor(data: AllowSharedBufferSource, init: SetRequired<VideoSampleInit, 'format' | 'codedWidth' | 'codedHeight' | 'timestamp'>);
2998
- /** Clones this video sample. */
2999
- clone(): VideoSample;
3000
- /**
3001
- * Closes this video sample, releasing held resources. Video samples should be closed as soon as they are not
3002
- * needed anymore.
3003
- */
3004
- close(): void;
3005
- /** Returns the number of bytes required to hold this video sample's pixel data. */
3006
- allocationSize(): number;
3007
- /** Copies this video sample's pixel data to an ArrayBuffer or ArrayBufferView. */
3008
- copyTo(destination: AllowSharedBufferSource): Promise<void>;
3009
- /**
3010
- * Converts this video sample to a VideoFrame for use with the WebCodecs API. The VideoFrame returned by this
3011
- * method *must* be closed separately from this video sample.
3012
- */
3013
- toVideoFrame(): VideoFrame;
3014
- /**
3015
- * Draws the video sample to a 2D canvas context. Rotation metadata will be taken into account.
3016
- *
3017
- * @param dx - The x-coordinate in the destination canvas at which to place the top-left corner of the source image.
3018
- * @param dy - The y-coordinate in the destination canvas at which to place the top-left corner of the source image.
3019
- * @param dWidth - The width in pixels with which to draw the image in the destination canvas.
3020
- * @param dHeight - The height in pixels with which to draw the image in the destination canvas.
3021
- */
3022
- draw(context: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D, dx: number, dy: number, dWidth?: number, dHeight?: number): void;
3023
- /**
3024
- * Draws the video sample to a 2D canvas context. Rotation metadata will be taken into account.
3025
- *
3026
- * @param sx - The x-coordinate of the top left corner of the sub-rectangle of the source image to draw into the
3027
- * destination context.
3028
- * @param sy - The y-coordinate of the top left corner of the sub-rectangle of the source image to draw into the
3029
- * destination context.
3030
- * @param sWidth - The width of the sub-rectangle of the source image to draw into the destination context.
3031
- * @param sHeight - The height of the sub-rectangle of the source image to draw into the destination context.
3032
- * @param dx - The x-coordinate in the destination canvas at which to place the top-left corner of the source image.
3033
- * @param dy - The y-coordinate in the destination canvas at which to place the top-left corner of the source image.
3034
- * @param dWidth - The width in pixels with which to draw the image in the destination canvas.
3035
- * @param dHeight - The height in pixels with which to draw the image in the destination canvas.
3036
- */
3037
- draw(context: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D, sx: number, sy: number, sWidth: number, sHeight: number, dx: number, dy: number, dWidth?: number, dHeight?: number): void;
3038
- /**
3039
- * Draws the sample in the middle of the canvas corresponding to the context with the specified fit behavior.
3040
- */
3041
- drawWithFit(context: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D, options: {
3042
- /**
3043
- * Controls the fitting algorithm.
3044
- *
3045
- * - `'fill'` will stretch the image to fill the entire box, potentially altering aspect ratio.
3046
- * - `'contain'` will contain the entire image within the box while preserving aspect ratio. This may lead to
3047
- * letterboxing.
3048
- * - `'cover'` will scale the image until the entire box is filled, while preserving aspect ratio.
3049
- */
3050
- fit: 'fill' | 'contain' | 'cover';
3051
- /** A way to override rotation. Defaults to the rotation of the sample. */
3052
- rotation?: Rotation;
3053
- /**
3054
- * Specifies the rectangular region of the video sample to crop to. The crop region will automatically be
3055
- * clamped to the dimensions of the video sample. Cropping is performed after rotation but before resizing.
3056
- */
3057
- crop?: CropRectangle;
3058
- }): void;
3059
- /**
3060
- * Converts this video sample to a
3061
- * [`CanvasImageSource`](https://udn.realityripple.com/docs/Web/API/CanvasImageSource) for drawing to a canvas.
3062
- *
3063
- * You must use the value returned by this method immediately, as any VideoFrame created internally will
3064
- * automatically be closed in the next microtask.
3065
- */
3066
- toCanvasImageSource(): VideoFrame | OffscreenCanvas;
3067
- /** Sets the rotation metadata of this video sample. */
3068
- setRotation(newRotation: Rotation): void;
3069
- /** Sets the presentation timestamp of this video sample, in seconds. */
3070
- setTimestamp(newTimestamp: number): void;
3071
- /** Sets the duration of this video sample, in seconds. */
3072
- setDuration(newDuration: number): void;
3073
- /** Calls `.close()`. */
3074
- [Symbol.dispose](): void;
3075
- }
3076
-
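Usage sketch: constructing a VideoSample from a canvas, adjusting its metadata, and drawing it into a thumbnail canvas with a fit mode; dimensions and timestamps are illustrative.

import { VideoSample } from '@kenzuya/mediabunny';

const sourceCanvas = new OffscreenCanvas(1920, 1080);
// ...render something onto sourceCanvas...

const sample = new VideoSample(sourceCanvas, { timestamp: 0, duration: 1 / 30 });
sample.setRotation(90);
console.log(sample.displayWidth, sample.displayHeight); // 1080, 1920 after the 90° rotation

const thumbnail = new OffscreenCanvas(320, 320);
const ctx = thumbnail.getContext('2d')!;
sample.drawWithFit(ctx, { fit: 'contain' });

sample.close(); // release resources as soon as the sample is no longer needed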
3077
- /**
3078
- * Metadata used for VideoSample initialization.
3079
- * @group Samples
3080
- * @public
3081
- */
3082
- export declare type VideoSampleInit = {
3083
- /**
3084
- * The internal pixel format in which the frame is stored.
3085
- * [See pixel formats](https://developer.mozilla.org/en-US/docs/Web/API/VideoFrame/format)
3086
- */
3087
- format?: VideoPixelFormat;
3088
- /** The width of the frame in pixels. */
3089
- codedWidth?: number;
3090
- /** The height of the frame in pixels. */
3091
- codedHeight?: number;
3092
- /** The rotation of the frame in degrees, clockwise. */
3093
- rotation?: Rotation;
3094
- /** The presentation timestamp of the frame in seconds. */
3095
- timestamp?: number;
3096
- /** The duration of the frame in seconds. */
3097
- duration?: number;
3098
- /** The color space of the frame. */
3099
- colorSpace?: VideoColorSpaceInit;
3100
- };
3101
-
3102
- /**
3103
- * A sink that retrieves decoded video samples (video frames) from a video track.
3104
- * @group Media sinks
3105
- * @public
3106
- */
3107
- export declare class VideoSampleSink extends BaseMediaSampleSink<VideoSample> {
3108
- /** Creates a new {@link VideoSampleSink} for the given {@link InputVideoTrack}. */
3109
- constructor(videoTrack: InputVideoTrack);
3110
- /**
3111
- * Retrieves the video sample (frame) corresponding to the given timestamp, in seconds. More specifically, returns
3112
- * the last video sample (in presentation order) with a start timestamp less than or equal to the given timestamp.
3113
- * Returns null if the timestamp is before the track's first timestamp.
3114
- *
3115
- * @param timestamp - The timestamp used for retrieval, in seconds.
3116
- */
3117
- getSample(timestamp: number): Promise<VideoSample | null>;
3118
- /**
3119
- * Creates an async iterator that yields the video samples (frames) of this track in presentation order. This method
3120
- * will intelligently pre-decode a few frames ahead to enable fast iteration.
3121
- *
3122
- * @param startTimestamp - The timestamp in seconds at which to start yielding samples (inclusive).
3123
- * @param endTimestamp - The timestamp in seconds at which to stop yielding samples (exclusive).
3124
- */
3125
- samples(startTimestamp?: number, endTimestamp?: number): AsyncGenerator<VideoSample, void, unknown>;
3126
- /**
3127
- * Creates an async iterator that yields a video sample (frame) for each timestamp in the argument. This method
3128
- * uses an optimized decoding pipeline if these timestamps are monotonically sorted, decoding each packet at most
3129
- * once, and is therefore more efficient than manually getting the sample for every timestamp. The iterator may
3130
- * yield null if no frame is available for a given timestamp.
3131
- *
3132
- * @param timestamps - An iterable or async iterable of timestamps in seconds.
3133
- */
3134
- samplesAtTimestamps(timestamps: AnyIterable<number>): AsyncGenerator<VideoSample | null, void, unknown>;
3135
- }
3136
-
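Usage sketch: pulling decoded frames out of a video track; the InputVideoTrack is assumed to come from an Input opened elsewhere in the application.

import { InputVideoTrack, VideoSampleSink } from '@kenzuya/mediabunny';

declare const videoTrack: InputVideoTrack; // obtained from an Input opened elsewhere

const sink = new VideoSampleSink(videoTrack);

// A single frame at (or just before) the 10-second mark.
const frame = await sink.getSample(10);
frame?.close();

// All frames in the first five seconds, in presentation order.
for await (const sample of sink.samples(0, 5)) {
    // ...render or analyze the sample...
    sample.close(); // close each sample once it is no longer needed
}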
3137
- /**
3138
- * This source can be used to add raw, unencoded video samples (frames) to an output video track. These frames will
3139
- * automatically be encoded and then piped into the output.
3140
- * @group Media sources
3141
- * @public
3142
- */
3143
- export declare class VideoSampleSource extends VideoSource {
3144
- /**
3145
- * Creates a new {@link VideoSampleSource} whose samples are encoded according to the specified
3146
- * {@link VideoEncodingConfig}.
3147
- */
3148
- constructor(encodingConfig: VideoEncodingConfig);
3149
- /**
3150
- * Encodes a video sample (frame) and then adds it to the output.
3151
- *
3152
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
3153
- * to respect writer and encoder backpressure.
3154
- */
3155
- add(videoSample: VideoSample, encodeOptions?: VideoEncoderEncodeOptions): Promise<void>;
3156
- }
3157
-
3158
- /**
3159
- * Base class for video sources - sources for video tracks.
3160
- * @group Media sources
3161
- * @public
3162
- */
3163
- export declare abstract class VideoSource extends MediaSource_2 {
3164
- /** Internal constructor. */
3165
- constructor(codec: VideoCodec);
3166
- }
3167
-
3168
- /**
3169
- * Additional metadata for video tracks.
3170
- * @group Output files
3171
- * @public
3172
- */
3173
- export declare type VideoTrackMetadata = BaseTrackMetadata & {
3174
- /** The angle in degrees by which the track's frames should be rotated (clockwise). */
3175
- rotation?: Rotation;
3176
- /**
3177
- * The expected video frame rate in hertz. If set, all timestamps and durations of this track will be snapped to
3178
- * this frame rate. You should avoid adding more frames than the rate allows, as this will lead to multiple frames
3179
- * with the same timestamp.
3180
- */
3181
- frameRate?: number;
3182
- };
3183
-
3184
- /**
3185
- * WAVE input format singleton.
3186
- * @group Input formats
3187
- * @public
3188
- */
3189
- export declare const WAVE: WaveInputFormat;
3190
-
3191
- /**
3192
- * WAVE file format, based on RIFF.
3193
- *
3194
- * Do not instantiate this class; use the {@link WAVE} singleton instead.
3195
- *
3196
- * @group Input formats
3197
- * @public
3198
- */
3199
- export declare class WaveInputFormat extends InputFormat {
3200
- get name(): string;
3201
- get mimeType(): string;
3202
- }
3203
-
3204
- /**
3205
- * WAVE file format, based on RIFF.
3206
- * @group Output formats
3207
- * @public
3208
- */
3209
- export declare class WavOutputFormat extends OutputFormat {
3210
- /** Creates a new {@link WavOutputFormat} configured with the specified `options`. */
3211
- constructor(options?: WavOutputFormatOptions);
3212
- getSupportedTrackCounts(): TrackCountLimits;
3213
- get fileExtension(): string;
3214
- get mimeType(): string;
3215
- getSupportedCodecs(): MediaCodec[];
3216
- get supportsVideoRotationMetadata(): boolean;
3217
- }
3218
-
3219
- /**
3220
- * WAVE-specific output options.
3221
- * @group Output formats
3222
- * @public
3223
- */
3224
- export declare type WavOutputFormatOptions = {
3225
- /**
3226
- * When enabled, an RF64 file will be written, allowing for file sizes to exceed 4 GiB, which is otherwise not
3227
- * possible for regular WAVE files.
3228
- */
3229
- large?: boolean;
3230
- /**
3231
- * The metadata format to use for writing metadata tags.
3232
- *
3233
- * - `'info'` (default): Writes metadata into a RIFF INFO LIST chunk, the default way to contain metadata tags
3234
- * within WAVE. Only allows for a limited subset of tags to be written.
3235
- * - `'id3'`: Writes metadata into an ID3 chunk. Non-default, but used by many taggers in practice. Allows for a
3236
- * much larger and richer set of tags to be written.
3237
- */
3238
- metadataFormat?: 'info' | 'id3';
3239
- /**
3240
- * Will be called once the file header is written. The header consists of the RIFF header, the format chunk,
3241
- * metadata chunks, and the start of the data chunk (with a placeholder size of 0).
3242
- */
3243
- onHeader?: (data: Uint8Array, position: number) => unknown;
3244
- };
3245
-
3246
- /**
3247
- * WebM input format singleton.
3248
- * @group Input formats
3249
- * @public
3250
- */
3251
- export declare const WEBM: WebMInputFormat;
3252
-
3253
- /**
3254
- * WebM file format, based on Matroska.
3255
- *
3256
- * Do not instantiate this class; use the {@link WEBM} singleton instead.
3257
- *
3258
- * @group Input formats
3259
- * @public
3260
- */
3261
- export declare class WebMInputFormat extends MatroskaInputFormat {
3262
- get name(): string;
3263
- get mimeType(): string;
3264
- }
3265
-
3266
- /**
3267
- * WebM file format, based on Matroska.
3268
- *
3269
- * Supports writing transparent video. For a video track to be marked as transparent, the first packet added must
3270
- * contain alpha side data.
3271
- *
3272
- * @group Output formats
3273
- * @public
3274
- */
3275
- export declare class WebMOutputFormat extends MkvOutputFormat {
3276
- /** Creates a new {@link WebMOutputFormat} configured with the specified `options`. */
3277
- constructor(options?: MkvOutputFormatOptions);
3278
- getSupportedCodecs(): MediaCodec[];
3279
- get fileExtension(): string;
3280
- get mimeType(): string;
3281
- }
3282
-
3283
- /**
3284
- * WebM-specific output options.
3285
- * @group Output formats
3286
- * @public
3287
- */
3288
- export declare type WebMOutputFormatOptions = MkvOutputFormatOptions;
3289
-
3290
- /**
3291
- * An AudioBuffer with additional timing information (timestamp & duration).
3292
- * @group Media sinks
3293
- * @public
3294
- */
3295
- export declare type WrappedAudioBuffer = {
3296
- /** An AudioBuffer. */
3297
- buffer: AudioBuffer;
3298
- /** The timestamp of the corresponding audio sample, in seconds. */
3299
- timestamp: number;
3300
- /** The duration of the corresponding audio sample, in seconds. */
3301
- duration: number;
3302
- };
3303
-
3304
- /**
3305
- * A canvas with additional timing information (timestamp & duration).
3306
- * @group Media sinks
3307
- * @public
3308
- */
3309
- export declare type WrappedCanvas = {
3310
- /** A canvas element or offscreen canvas. */
3311
- canvas: HTMLCanvasElement | OffscreenCanvas;
3312
- /** The timestamp of the corresponding video sample, in seconds. */
3313
- timestamp: number;
3314
- /** The duration of the corresponding video sample, in seconds. */
3315
- duration: number;
3316
- };
3317
-
3318
- export { }
3319
- export as namespace Mediabunny;