@kenzuya/mediabunny 1.26.0 → 1.28.5

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (237)
  1. package/README.md +1 -1
  2. package/dist/bundles/{mediabunny.mjs → mediabunny.js} +21963 -21388
  3. package/dist/bundles/mediabunny.min.js +490 -0
  4. package/dist/modules/shared/mp3-misc.d.ts.map +1 -1
  5. package/dist/modules/src/adts/adts-demuxer.d.ts +6 -6
  6. package/dist/modules/src/adts/adts-demuxer.d.ts.map +1 -1
  7. package/dist/modules/src/adts/adts-muxer.d.ts +4 -4
  8. package/dist/modules/src/adts/adts-muxer.d.ts.map +1 -1
  9. package/dist/modules/src/adts/adts-reader.d.ts +1 -1
  10. package/dist/modules/src/adts/adts-reader.d.ts.map +1 -1
  11. package/dist/modules/src/avi/avi-demuxer.d.ts +44 -0
  12. package/dist/modules/src/avi/avi-demuxer.d.ts.map +1 -0
  13. package/dist/modules/src/avi/avi-misc.d.ts +88 -0
  14. package/dist/modules/src/avi/avi-misc.d.ts.map +1 -0
  15. package/dist/modules/src/avi/avi-muxer.d.ts +45 -0
  16. package/dist/modules/src/avi/avi-muxer.d.ts.map +1 -0
  17. package/dist/modules/src/avi/riff-writer.d.ts +26 -0
  18. package/dist/modules/src/avi/riff-writer.d.ts.map +1 -0
  19. package/dist/modules/src/codec-data.d.ts +8 -3
  20. package/dist/modules/src/codec-data.d.ts.map +1 -1
  21. package/dist/modules/src/codec.d.ts +10 -10
  22. package/dist/modules/src/codec.d.ts.map +1 -1
  23. package/dist/modules/src/conversion.d.ts +33 -16
  24. package/dist/modules/src/conversion.d.ts.map +1 -1
  25. package/dist/modules/src/custom-coder.d.ts +8 -8
  26. package/dist/modules/src/custom-coder.d.ts.map +1 -1
  27. package/dist/modules/src/demuxer.d.ts +3 -3
  28. package/dist/modules/src/demuxer.d.ts.map +1 -1
  29. package/dist/modules/src/encode.d.ts +8 -8
  30. package/dist/modules/src/encode.d.ts.map +1 -1
  31. package/dist/modules/src/flac/flac-demuxer.d.ts +7 -7
  32. package/dist/modules/src/flac/flac-demuxer.d.ts.map +1 -1
  33. package/dist/modules/src/flac/flac-misc.d.ts +3 -3
  34. package/dist/modules/src/flac/flac-misc.d.ts.map +1 -1
  35. package/dist/modules/src/flac/flac-muxer.d.ts +5 -5
  36. package/dist/modules/src/flac/flac-muxer.d.ts.map +1 -1
  37. package/dist/modules/src/id3.d.ts +3 -3
  38. package/dist/modules/src/id3.d.ts.map +1 -1
  39. package/dist/modules/src/index.d.ts +20 -20
  40. package/dist/modules/src/index.d.ts.map +1 -1
  41. package/dist/modules/src/input-format.d.ts +22 -0
  42. package/dist/modules/src/input-format.d.ts.map +1 -1
  43. package/dist/modules/src/input-track.d.ts +8 -8
  44. package/dist/modules/src/input-track.d.ts.map +1 -1
  45. package/dist/modules/src/input.d.ts +12 -12
  46. package/dist/modules/src/isobmff/isobmff-boxes.d.ts +2 -2
  47. package/dist/modules/src/isobmff/isobmff-boxes.d.ts.map +1 -1
  48. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts +12 -12
  49. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts.map +1 -1
  50. package/dist/modules/src/isobmff/isobmff-misc.d.ts.map +1 -1
  51. package/dist/modules/src/isobmff/isobmff-muxer.d.ts +11 -11
  52. package/dist/modules/src/isobmff/isobmff-muxer.d.ts.map +1 -1
  53. package/dist/modules/src/isobmff/isobmff-reader.d.ts +2 -2
  54. package/dist/modules/src/isobmff/isobmff-reader.d.ts.map +1 -1
  55. package/dist/modules/src/matroska/ebml.d.ts +3 -3
  56. package/dist/modules/src/matroska/ebml.d.ts.map +1 -1
  57. package/dist/modules/src/matroska/matroska-demuxer.d.ts +13 -13
  58. package/dist/modules/src/matroska/matroska-demuxer.d.ts.map +1 -1
  59. package/dist/modules/src/matroska/matroska-input.d.ts +33 -0
  60. package/dist/modules/src/matroska/matroska-input.d.ts.map +1 -0
  61. package/dist/modules/src/matroska/matroska-misc.d.ts.map +1 -1
  62. package/dist/modules/src/matroska/matroska-muxer.d.ts +5 -5
  63. package/dist/modules/src/matroska/matroska-muxer.d.ts.map +1 -1
  64. package/dist/modules/src/media-sink.d.ts +5 -5
  65. package/dist/modules/src/media-sink.d.ts.map +1 -1
  66. package/dist/modules/src/media-source.d.ts +22 -4
  67. package/dist/modules/src/media-source.d.ts.map +1 -1
  68. package/dist/modules/src/metadata.d.ts +2 -2
  69. package/dist/modules/src/metadata.d.ts.map +1 -1
  70. package/dist/modules/src/misc.d.ts +5 -4
  71. package/dist/modules/src/misc.d.ts.map +1 -1
  72. package/dist/modules/src/mp3/mp3-demuxer.d.ts +7 -7
  73. package/dist/modules/src/mp3/mp3-demuxer.d.ts.map +1 -1
  74. package/dist/modules/src/mp3/mp3-muxer.d.ts +4 -4
  75. package/dist/modules/src/mp3/mp3-muxer.d.ts.map +1 -1
  76. package/dist/modules/src/mp3/mp3-reader.d.ts +2 -2
  77. package/dist/modules/src/mp3/mp3-reader.d.ts.map +1 -1
  78. package/dist/modules/src/mp3/mp3-writer.d.ts +1 -1
  79. package/dist/modules/src/mp3/mp3-writer.d.ts.map +1 -1
  80. package/dist/modules/src/muxer.d.ts +4 -4
  81. package/dist/modules/src/muxer.d.ts.map +1 -1
  82. package/dist/modules/src/node.d.ts +1 -1
  83. package/dist/modules/src/ogg/ogg-demuxer.d.ts +7 -7
  84. package/dist/modules/src/ogg/ogg-demuxer.d.ts.map +1 -1
  85. package/dist/modules/src/ogg/ogg-misc.d.ts +1 -1
  86. package/dist/modules/src/ogg/ogg-misc.d.ts.map +1 -1
  87. package/dist/modules/src/ogg/ogg-muxer.d.ts +5 -5
  88. package/dist/modules/src/ogg/ogg-muxer.d.ts.map +1 -1
  89. package/dist/modules/src/ogg/ogg-reader.d.ts +1 -1
  90. package/dist/modules/src/ogg/ogg-reader.d.ts.map +1 -1
  91. package/dist/modules/src/output-format.d.ts +51 -6
  92. package/dist/modules/src/output-format.d.ts.map +1 -1
  93. package/dist/modules/src/output.d.ts +13 -13
  94. package/dist/modules/src/output.d.ts.map +1 -1
  95. package/dist/modules/src/packet.d.ts +1 -1
  96. package/dist/modules/src/packet.d.ts.map +1 -1
  97. package/dist/modules/src/pcm.d.ts.map +1 -1
  98. package/dist/modules/src/reader.d.ts +2 -2
  99. package/dist/modules/src/reader.d.ts.map +1 -1
  100. package/dist/modules/src/sample.d.ts +57 -15
  101. package/dist/modules/src/sample.d.ts.map +1 -1
  102. package/dist/modules/src/source.d.ts +3 -3
  103. package/dist/modules/src/source.d.ts.map +1 -1
  104. package/dist/modules/src/subtitles.d.ts +1 -1
  105. package/dist/modules/src/subtitles.d.ts.map +1 -1
  106. package/dist/modules/src/target.d.ts +2 -2
  107. package/dist/modules/src/target.d.ts.map +1 -1
  108. package/dist/modules/src/tsconfig.tsbuildinfo +1 -1
  109. package/dist/modules/src/wave/riff-writer.d.ts +1 -1
  110. package/dist/modules/src/wave/riff-writer.d.ts.map +1 -1
  111. package/dist/modules/src/wave/wave-demuxer.d.ts +6 -6
  112. package/dist/modules/src/wave/wave-demuxer.d.ts.map +1 -1
  113. package/dist/modules/src/wave/wave-muxer.d.ts +4 -4
  114. package/dist/modules/src/wave/wave-muxer.d.ts.map +1 -1
  115. package/dist/modules/src/writer.d.ts +1 -1
  116. package/dist/modules/src/writer.d.ts.map +1 -1
  117. package/dist/packages/eac3/eac3.wasm +0 -0
  118. package/dist/packages/eac3/mediabunny-eac3.js +1058 -0
  119. package/dist/packages/eac3/mediabunny-eac3.min.js +44 -0
  120. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.js +694 -0
  121. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.min.js +58 -0
  122. package/dist/packages/mpeg4/mediabunny-mpeg4.js +1198 -0
  123. package/dist/packages/mpeg4/mediabunny-mpeg4.min.js +44 -0
  124. package/dist/packages/mpeg4/xvid.wasm +0 -0
  125. package/package.json +18 -57
  126. package/dist/bundles/mediabunny.cjs +0 -26140
  127. package/dist/bundles/mediabunny.min.cjs +0 -147
  128. package/dist/bundles/mediabunny.min.mjs +0 -146
  129. package/dist/mediabunny.d.ts +0 -3319
  130. package/dist/modules/shared/mp3-misc.js +0 -147
  131. package/dist/modules/src/adts/adts-demuxer.js +0 -239
  132. package/dist/modules/src/adts/adts-muxer.js +0 -80
  133. package/dist/modules/src/adts/adts-reader.js +0 -63
  134. package/dist/modules/src/codec-data.js +0 -1730
  135. package/dist/modules/src/codec.js +0 -869
  136. package/dist/modules/src/conversion.js +0 -1459
  137. package/dist/modules/src/custom-coder.js +0 -117
  138. package/dist/modules/src/demuxer.js +0 -12
  139. package/dist/modules/src/encode.js +0 -442
  140. package/dist/modules/src/flac/flac-demuxer.js +0 -504
  141. package/dist/modules/src/flac/flac-misc.js +0 -135
  142. package/dist/modules/src/flac/flac-muxer.js +0 -222
  143. package/dist/modules/src/id3.js +0 -848
  144. package/dist/modules/src/index.js +0 -28
  145. package/dist/modules/src/input-format.js +0 -480
  146. package/dist/modules/src/input-track.js +0 -372
  147. package/dist/modules/src/input.js +0 -188
  148. package/dist/modules/src/isobmff/isobmff-boxes.js +0 -1480
  149. package/dist/modules/src/isobmff/isobmff-demuxer.js +0 -2618
  150. package/dist/modules/src/isobmff/isobmff-misc.js +0 -20
  151. package/dist/modules/src/isobmff/isobmff-muxer.js +0 -966
  152. package/dist/modules/src/isobmff/isobmff-reader.js +0 -72
  153. package/dist/modules/src/matroska/ebml.js +0 -653
  154. package/dist/modules/src/matroska/matroska-demuxer.js +0 -2133
  155. package/dist/modules/src/matroska/matroska-misc.js +0 -20
  156. package/dist/modules/src/matroska/matroska-muxer.js +0 -1017
  157. package/dist/modules/src/media-sink.js +0 -1736
  158. package/dist/modules/src/media-source.js +0 -1825
  159. package/dist/modules/src/metadata.js +0 -193
  160. package/dist/modules/src/misc.js +0 -623
  161. package/dist/modules/src/mp3/mp3-demuxer.js +0 -285
  162. package/dist/modules/src/mp3/mp3-muxer.js +0 -123
  163. package/dist/modules/src/mp3/mp3-reader.js +0 -26
  164. package/dist/modules/src/mp3/mp3-writer.js +0 -78
  165. package/dist/modules/src/muxer.js +0 -50
  166. package/dist/modules/src/node.js +0 -9
  167. package/dist/modules/src/ogg/ogg-demuxer.js +0 -763
  168. package/dist/modules/src/ogg/ogg-misc.js +0 -78
  169. package/dist/modules/src/ogg/ogg-muxer.js +0 -353
  170. package/dist/modules/src/ogg/ogg-reader.js +0 -65
  171. package/dist/modules/src/output-format.js +0 -527
  172. package/dist/modules/src/output.js +0 -300
  173. package/dist/modules/src/packet.js +0 -182
  174. package/dist/modules/src/pcm.js +0 -85
  175. package/dist/modules/src/reader.js +0 -236
  176. package/dist/modules/src/sample.js +0 -1056
  177. package/dist/modules/src/source.js +0 -1182
  178. package/dist/modules/src/subtitles.js +0 -575
  179. package/dist/modules/src/target.js +0 -140
  180. package/dist/modules/src/wave/riff-writer.js +0 -30
  181. package/dist/modules/src/wave/wave-demuxer.js +0 -447
  182. package/dist/modules/src/wave/wave-muxer.js +0 -318
  183. package/dist/modules/src/writer.js +0 -370
  184. package/src/adts/adts-demuxer.ts +0 -331
  185. package/src/adts/adts-muxer.ts +0 -111
  186. package/src/adts/adts-reader.ts +0 -85
  187. package/src/codec-data.ts +0 -2078
  188. package/src/codec.ts +0 -1092
  189. package/src/conversion.ts +0 -2112
  190. package/src/custom-coder.ts +0 -197
  191. package/src/demuxer.ts +0 -24
  192. package/src/encode.ts +0 -739
  193. package/src/flac/flac-demuxer.ts +0 -730
  194. package/src/flac/flac-misc.ts +0 -164
  195. package/src/flac/flac-muxer.ts +0 -320
  196. package/src/id3.ts +0 -925
  197. package/src/index.ts +0 -221
  198. package/src/input-format.ts +0 -541
  199. package/src/input-track.ts +0 -529
  200. package/src/input.ts +0 -235
  201. package/src/isobmff/isobmff-boxes.ts +0 -1719
  202. package/src/isobmff/isobmff-demuxer.ts +0 -3190
  203. package/src/isobmff/isobmff-misc.ts +0 -29
  204. package/src/isobmff/isobmff-muxer.ts +0 -1348
  205. package/src/isobmff/isobmff-reader.ts +0 -91
  206. package/src/matroska/ebml.ts +0 -730
  207. package/src/matroska/matroska-demuxer.ts +0 -2481
  208. package/src/matroska/matroska-misc.ts +0 -29
  209. package/src/matroska/matroska-muxer.ts +0 -1276
  210. package/src/media-sink.ts +0 -2179
  211. package/src/media-source.ts +0 -2243
  212. package/src/metadata.ts +0 -320
  213. package/src/misc.ts +0 -798
  214. package/src/mp3/mp3-demuxer.ts +0 -383
  215. package/src/mp3/mp3-muxer.ts +0 -166
  216. package/src/mp3/mp3-reader.ts +0 -34
  217. package/src/mp3/mp3-writer.ts +0 -120
  218. package/src/muxer.ts +0 -88
  219. package/src/node.ts +0 -11
  220. package/src/ogg/ogg-demuxer.ts +0 -1053
  221. package/src/ogg/ogg-misc.ts +0 -116
  222. package/src/ogg/ogg-muxer.ts +0 -497
  223. package/src/ogg/ogg-reader.ts +0 -93
  224. package/src/output-format.ts +0 -945
  225. package/src/output.ts +0 -488
  226. package/src/packet.ts +0 -263
  227. package/src/pcm.ts +0 -112
  228. package/src/reader.ts +0 -323
  229. package/src/sample.ts +0 -1461
  230. package/src/source.ts +0 -1688
  231. package/src/subtitles.ts +0 -711
  232. package/src/target.ts +0 -204
  233. package/src/tsconfig.json +0 -16
  234. package/src/wave/riff-writer.ts +0 -36
  235. package/src/wave/wave-demuxer.ts +0 -529
  236. package/src/wave/wave-muxer.ts +0 -371
  237. package/src/writer.ts +0 -490
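
Packaging note: the list above shows the ESM bundle renamed from mediabunny.mjs to mediabunny.js, the CommonJS bundles (mediabunny.cjs, mediabunny.min.cjs, mediabunny.min.mjs) deleted, and the per-module dist/modules JS and src TypeScript files dropped, alongside new optional codec packages (eac3, mp3-encoder, mpeg4) and a new AVI demuxer/muxer. This is consistent with the package shipping as a single ESM bundle from this version on. A minimal consumer sketch, assuming the package's "exports" map (changed in package.json but not shown here) resolves to the new bundle; Output, BufferTarget, Mp4OutputFormat, and addVideoTrack come from mediabunny's documented API rather than this excerpt, and the canvas, codec, and bitrate values are illustrative:

    // ESM-only import; the CJS entry points no longer exist in this version.
    import { Output, BufferTarget, Mp4OutputFormat, CanvasSource } from 'mediabunny';

    const canvas = new OffscreenCanvas(1280, 720); // hypothetical render target
    const output = new Output({ format: new Mp4OutputFormat(), target: new BufferTarget() });
    const source = new CanvasSource(canvas, { codec: 'avc', bitrate: 2_000_000 });
    output.addVideoTrack(source);

    await output.start();
    // Per the doc comments in the diff below, await add() to respect writer and encoder backpressure.
    await source.add(/* timestamp */ 0, /* duration */ 1 / 30);
    source.close();
    await output.finalize();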
@@ -1,1825 +0,0 @@
- /*!
-  * Copyright (c) 2025-present, Vanilagy and contributors
-  *
-  * This Source Code Form is subject to the terms of the Mozilla Public
-  * License, v. 2.0. If a copy of the MPL was not distributed with this
-  * file, You can obtain one at https://mozilla.org/MPL/2.0/.
-  */
- import { AUDIO_CODECS, buildAacAudioSpecificConfig, parseAacAudioSpecificConfig, parsePcmCodec, PCM_AUDIO_CODECS, SUBTITLE_CODECS, VIDEO_CODECS, } from './codec.js';
- import { assert, assertNever, CallSerializer, clamp, isFirefox, last, promiseWithResolvers, setInt24, setUint24, toUint8Array, } from './misc.js';
- import { SubtitleParser } from './subtitles.js';
- import { toAlaw, toUlaw } from './pcm.js';
- import { customVideoEncoders, customAudioEncoders, } from './custom-coder.js';
- import { EncodedPacket } from './packet.js';
- import { AudioSample, VideoSample } from './sample.js';
- import { buildAudioEncoderConfig, buildVideoEncoderConfig, validateAudioEncodingConfig, validateVideoEncodingConfig, } from './encode.js';
- /**
-  * Base class for media sources. Media sources are used to add media samples to an output file.
-  * @group Media sources
-  * @public
-  */
- export class MediaSource {
-     constructor() {
-         /** @internal */
-         this._connectedTrack = null;
-         /** @internal */
-         this._closingPromise = null;
-         /** @internal */
-         this._closed = false;
-         /**
-          * @internal
-          * A time offset in seconds that is added to all timestamps generated by this source.
-          */
-         this._timestampOffset = 0;
-     }
-     /** @internal */
-     _ensureValidAdd() {
-         if (!this._connectedTrack) {
-             throw new Error('Source is not connected to an output track.');
-         }
-         if (this._connectedTrack.output.state === 'canceled') {
-             throw new Error('Output has been canceled.');
-         }
-         if (this._connectedTrack.output.state === 'finalizing' || this._connectedTrack.output.state === 'finalized') {
-             throw new Error('Output has been finalized.');
-         }
-         if (this._connectedTrack.output.state === 'pending') {
-             throw new Error('Output has not started.');
-         }
-         if (this._closed) {
-             throw new Error('Source is closed.');
-         }
-     }
-     /** @internal */
-     async _start() { }
-     /** @internal */
-     // eslint-disable-next-line @typescript-eslint/no-unused-vars
-     async _flushAndClose(forceClose) { }
-     /**
-      * Closes this source. This prevents future samples from being added and signals to the output file that no further
-      * samples will come in for this track. Calling `.close()` is optional but recommended after adding the
-      * last sample - for improved performance and reduced memory usage.
-      */
-     close() {
-         if (this._closingPromise) {
-             return;
-         }
-         const connectedTrack = this._connectedTrack;
-         if (!connectedTrack) {
-             throw new Error('Cannot call close without connecting the source to an output track.');
-         }
-         if (connectedTrack.output.state === 'pending') {
-             throw new Error('Cannot call close before output has been started.');
-         }
-         this._closingPromise = (async () => {
-             await this._flushAndClose(false);
-             this._closed = true;
-             if (connectedTrack.output.state === 'finalizing' || connectedTrack.output.state === 'finalized') {
-                 return;
-             }
-             connectedTrack.output._muxer.onTrackClose(connectedTrack);
-         })();
-     }
-     /** @internal */
-     async _flushOrWaitForOngoingClose(forceClose) {
-         if (this._closingPromise) {
-             // Since closing also flushes, we don't want to do it twice
-             return this._closingPromise;
-         }
-         else {
-             return this._flushAndClose(forceClose);
-         }
-     }
- }
- /**
-  * Base class for video sources - sources for video tracks.
-  * @group Media sources
-  * @public
-  */
- export class VideoSource extends MediaSource {
-     /** Internal constructor. */
-     constructor(codec) {
-         super();
-         /** @internal */
-         this._connectedTrack = null;
-         if (!VIDEO_CODECS.includes(codec)) {
-             throw new TypeError(`Invalid video codec '${codec}'. Must be one of: ${VIDEO_CODECS.join(', ')}.`);
-         }
-         this._codec = codec;
-     }
- }
- /**
-  * The most basic video source; can be used to directly pipe encoded packets into the output file.
-  * @group Media sources
-  * @public
-  */
- export class EncodedVideoPacketSource extends VideoSource {
-     /** Creates a new {@link EncodedVideoPacketSource} whose packets are encoded using `codec`. */
-     constructor(codec) {
-         super(codec);
-     }
-     /**
-      * Adds an encoded packet to the output video track. Packets must be added in *decode order*, while a packet's
-      * timestamp must be its *presentation timestamp*. B-frames are handled automatically.
-      *
-      * @param meta - Additional metadata from the encoder. You should pass this for the first call, including a valid
-      * decoder config.
-      *
-      * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
-      * to respect writer and encoder backpressure.
-      */
-     add(packet, meta) {
-         if (!(packet instanceof EncodedPacket)) {
-             throw new TypeError('packet must be an EncodedPacket.');
-         }
-         if (packet.isMetadataOnly) {
-             throw new TypeError('Metadata-only packets cannot be added.');
-         }
-         if (meta !== undefined && (!meta || typeof meta !== 'object')) {
-             throw new TypeError('meta, when provided, must be an object.');
-         }
-         this._ensureValidAdd();
-         return this._connectedTrack.output._muxer.addEncodedVideoPacket(this._connectedTrack, packet, meta);
-     }
- }
- class VideoEncoderWrapper {
-     constructor(source, encodingConfig) {
-         this.source = source;
-         this.encodingConfig = encodingConfig;
-         this.ensureEncoderPromise = null;
-         this.encoderInitialized = false;
-         this.encoder = null;
-         this.muxer = null;
-         this.lastMultipleOfKeyFrameInterval = -1;
-         this.codedWidth = null;
-         this.codedHeight = null;
-         this.resizeCanvas = null;
-         this.customEncoder = null;
-         this.customEncoderCallSerializer = new CallSerializer();
-         this.customEncoderQueueSize = 0;
-         // Alpha stuff
-         this.alphaEncoder = null;
-         this.splitter = null;
-         this.splitterCreationFailed = false;
-         this.alphaFrameQueue = [];
-         /**
-          * Encoders typically throw their errors "out of band", meaning asynchronously in some other execution context.
-          * However, we want to surface these errors to the user within the normal control flow, so they don't go uncaught.
-          * So, we keep track of the encoder error and throw it as soon as we get the chance.
-          */
-         this.error = null;
-         this.errorNeedsNewStack = true;
-     }
-     async add(videoSample, shouldClose, encodeOptions) {
-         try {
-             this.checkForEncoderError();
-             this.source._ensureValidAdd();
-             // Ensure video sample size remains constant
-             if (this.codedWidth !== null && this.codedHeight !== null) {
-                 if (videoSample.codedWidth !== this.codedWidth || videoSample.codedHeight !== this.codedHeight) {
-                     const sizeChangeBehavior = this.encodingConfig.sizeChangeBehavior ?? 'deny';
-                     if (sizeChangeBehavior === 'passThrough') {
-                         // Do nada
-                     }
-                     else if (sizeChangeBehavior === 'deny') {
-                         throw new Error(`Video sample size must remain constant. Expected ${this.codedWidth}x${this.codedHeight},`
-                             + ` got ${videoSample.codedWidth}x${videoSample.codedHeight}. To allow the sample size to`
-                             + ` change over time, set \`sizeChangeBehavior\` to a value other than 'deny' in the`
-                             + ` encoding options.`);
-                     }
-                     else {
-                         let canvasIsNew = false;
-                         if (!this.resizeCanvas) {
-                             if (typeof document !== 'undefined') {
-                                 // Prefer an HTMLCanvasElement
-                                 this.resizeCanvas = document.createElement('canvas');
-                                 this.resizeCanvas.width = this.codedWidth;
-                                 this.resizeCanvas.height = this.codedHeight;
-                             }
-                             else {
-                                 this.resizeCanvas = new OffscreenCanvas(this.codedWidth, this.codedHeight);
-                             }
-                             canvasIsNew = true;
-                         }
-                         const context = this.resizeCanvas.getContext('2d', {
-                             alpha: isFirefox(), // Firefox has VideoFrame glitches with opaque canvases
-                         });
-                         assert(context);
-                         if (!canvasIsNew) {
-                             if (isFirefox()) {
-                                 context.fillStyle = 'black';
-                                 context.fillRect(0, 0, this.codedWidth, this.codedHeight);
-                             }
-                             else {
-                                 context.clearRect(0, 0, this.codedWidth, this.codedHeight);
-                             }
-                         }
-                         videoSample.drawWithFit(context, { fit: sizeChangeBehavior });
-                         if (shouldClose) {
-                             videoSample.close();
-                         }
-                         videoSample = new VideoSample(this.resizeCanvas, {
-                             timestamp: videoSample.timestamp,
-                             duration: videoSample.duration,
-                             rotation: videoSample.rotation,
-                         });
-                         shouldClose = true;
-                     }
-                 }
-             }
-             else {
-                 this.codedWidth = videoSample.codedWidth;
-                 this.codedHeight = videoSample.codedHeight;
-             }
-             if (!this.encoderInitialized) {
-                 if (!this.ensureEncoderPromise) {
-                     this.ensureEncoder(videoSample);
-                 }
-                 // No, this "if" statement is not useless. Sometimes, the above call to `ensureEncoder` might have
-                 // synchronously completed and the encoder is already initialized. In this case, we don't need to await
-                 // the promise anymore. This also fixes nasty async race condition bugs when multiple code paths are
-                 // calling this method: It's important that the call that initialized the encoder go through this
-                 // code first.
-                 if (!this.encoderInitialized) {
-                     await this.ensureEncoderPromise;
-                 }
-             }
-             assert(this.encoderInitialized);
-             const keyFrameInterval = this.encodingConfig.keyFrameInterval ?? 5;
-             const multipleOfKeyFrameInterval = Math.floor(videoSample.timestamp / keyFrameInterval);
-             // Ensure a key frame every keyFrameInterval seconds. It is important that all video tracks follow the same
-             // "key frame" rhythm, because aligned key frames are required to start new fragments in ISOBMFF or clusters
-             // in Matroska (or at least desirable).
-             const finalEncodeOptions = {
-                 ...encodeOptions,
-                 keyFrame: encodeOptions?.keyFrame
-                     || keyFrameInterval === 0
-                     || multipleOfKeyFrameInterval !== this.lastMultipleOfKeyFrameInterval,
-             };
-             this.lastMultipleOfKeyFrameInterval = multipleOfKeyFrameInterval;
-             if (this.customEncoder) {
-                 this.customEncoderQueueSize++;
-                 // We clone the sample so it cannot be closed on us from the outside before it reaches the encoder
-                 const clonedSample = videoSample.clone();
-                 const promise = this.customEncoderCallSerializer
-                     .call(() => this.customEncoder.encode(clonedSample, finalEncodeOptions))
-                     .then(() => this.customEncoderQueueSize--)
-                     .catch((error) => this.error ??= error)
-                     .finally(() => {
-                         clonedSample.close();
-                         // `videoSample` gets closed in the finally block at the end of the method
-                     });
-                 if (this.customEncoderQueueSize >= 4) {
-                     await promise;
-                 }
-             }
-             else {
-                 assert(this.encoder);
-                 const videoFrame = videoSample.toVideoFrame();
-                 if (!this.alphaEncoder) {
-                     // No alpha encoder, simple case
-                     this.encoder.encode(videoFrame, finalEncodeOptions);
-                     videoFrame.close();
-                 }
-                 else {
-                     // We're expected to encode alpha as well
-                     const frameDefinitelyHasNoAlpha = !!videoFrame.format && !videoFrame.format.includes('A');
-                     if (frameDefinitelyHasNoAlpha || this.splitterCreationFailed) {
-                         this.alphaFrameQueue.push(null);
-                         this.encoder.encode(videoFrame, finalEncodeOptions);
-                         videoFrame.close();
-                     }
-                     else {
-                         const width = videoFrame.displayWidth;
-                         const height = videoFrame.displayHeight;
-                         if (!this.splitter) {
-                             try {
-                                 this.splitter = new ColorAlphaSplitter(width, height);
-                             }
-                             catch (error) {
-                                 console.error('Due to an error, only color data will be encoded.', error);
-                                 this.splitterCreationFailed = true;
-                                 this.alphaFrameQueue.push(null);
-                                 this.encoder.encode(videoFrame, finalEncodeOptions);
-                                 videoFrame.close();
-                             }
-                         }
-                         if (this.splitter) {
-                             const colorFrame = this.splitter.extractColor(videoFrame);
-                             const alphaFrame = this.splitter.extractAlpha(videoFrame);
-                             this.alphaFrameQueue.push(alphaFrame);
-                             this.encoder.encode(colorFrame, finalEncodeOptions);
-                             colorFrame.close();
-                             videoFrame.close();
-                         }
-                     }
-                 }
-                 if (shouldClose) {
-                     videoSample.close();
-                 }
-                 // We need to do this after sending the frame to the encoder as the frame otherwise might be closed
-                 if (this.encoder.encodeQueueSize >= 4) {
-                     await new Promise(resolve => this.encoder.addEventListener('dequeue', resolve, { once: true }));
-                 }
-             }
-             await this.muxer.mutex.currentPromise; // Allow the writer to apply backpressure
-         }
-         finally {
-             if (shouldClose) {
-                 // Make sure it's always closed, even if there was an error
-                 videoSample.close();
-             }
-         }
-     }
-     ensureEncoder(videoSample) {
-         const encoderError = new Error();
-         this.ensureEncoderPromise = (async () => {
-             const encoderConfig = buildVideoEncoderConfig({
-                 width: videoSample.codedWidth,
-                 height: videoSample.codedHeight,
-                 ...this.encodingConfig,
-                 framerate: this.source._connectedTrack?.metadata.frameRate,
-             });
-             this.encodingConfig.onEncoderConfig?.(encoderConfig);
-             const MatchingCustomEncoder = customVideoEncoders.find(x => x.supports(this.encodingConfig.codec, encoderConfig));
-             if (MatchingCustomEncoder) {
-                 // @ts-expect-error "Can't create instance of abstract class 🤓"
-                 this.customEncoder = new MatchingCustomEncoder();
-                 // @ts-expect-error It's technically readonly
-                 this.customEncoder.codec = this.encodingConfig.codec;
-                 // @ts-expect-error It's technically readonly
-                 this.customEncoder.config = encoderConfig;
-                 // @ts-expect-error It's technically readonly
-                 this.customEncoder.onPacket = (packet, meta) => {
-                     if (!(packet instanceof EncodedPacket)) {
-                         throw new TypeError('The first argument passed to onPacket must be an EncodedPacket.');
-                     }
-                     if (meta !== undefined && (!meta || typeof meta !== 'object')) {
-                         throw new TypeError('The second argument passed to onPacket must be an object or undefined.');
-                     }
-                     this.encodingConfig.onEncodedPacket?.(packet, meta);
-                     void this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta)
-                         .catch((error) => {
-                             this.error ??= error;
-                             this.errorNeedsNewStack = false;
-                         });
-                 };
-                 await this.customEncoder.init();
-             }
-             else {
-                 if (typeof VideoEncoder === 'undefined') {
-                     throw new Error('VideoEncoder is not supported by this browser.');
-                 }
-                 encoderConfig.alpha = 'discard'; // Since we handle alpha ourselves
-                 if (this.encodingConfig.alpha === 'keep') {
-                     // Encoding alpha requires using two parallel encoders, so we need to make sure they stay in sync
-                     // and that neither of them drops frames. Setting latencyMode to 'quality' achieves this, because
-                     // "User Agents MUST not drop frames to achieve the target bitrate and/or framerate."
-                     encoderConfig.latencyMode = 'quality';
-                 }
-                 const hasOddDimension = encoderConfig.width % 2 === 1 || encoderConfig.height % 2 === 1;
-                 if (hasOddDimension
-                     && (this.encodingConfig.codec === 'avc' || this.encodingConfig.codec === 'hevc')) {
-                     // Throw a special error for this case as it gets hit often
-                     throw new Error(`The dimensions ${encoderConfig.width}x${encoderConfig.height} are not supported for codec`
-                         + ` '${this.encodingConfig.codec}'; both width and height must be even numbers. Make sure to`
-                         + ` round your dimensions to the nearest even number.`);
-                 }
-                 const support = await VideoEncoder.isConfigSupported(encoderConfig);
-                 if (!support.supported) {
-                     throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,`
-                         + ` ${encoderConfig.width}x${encoderConfig.height}, hardware acceleration:`
-                         + ` ${encoderConfig.hardwareAcceleration ?? 'no-preference'}) is not supported by this browser.`
-                         + ` Consider using another codec or changing your video parameters.`);
-                 }
-                 /** Queue of color chunks waiting for their alpha counterpart. */
-                 const colorChunkQueue = [];
-                 /** Each value is the number of encoded alpha chunks at which a null alpha chunk should be added. */
-                 const nullAlphaChunkQueue = [];
-                 let encodedAlphaChunkCount = 0;
-                 let alphaEncoderQueue = 0;
-                 const addPacket = (colorChunk, alphaChunk, meta) => {
-                     const sideData = {};
-                     if (alphaChunk) {
-                         const alphaData = new Uint8Array(alphaChunk.byteLength);
-                         alphaChunk.copyTo(alphaData);
-                         sideData.alpha = alphaData;
-                     }
-                     const packet = EncodedPacket.fromEncodedChunk(colorChunk, sideData);
-                     this.encodingConfig.onEncodedPacket?.(packet, meta);
-                     void this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta)
-                         .catch((error) => {
-                             this.error ??= error;
-                             this.errorNeedsNewStack = false;
-                         });
-                 };
-                 this.encoder = new VideoEncoder({
-                     output: (chunk, meta) => {
-                         if (!this.alphaEncoder) {
-                             // We're done
-                             addPacket(chunk, null, meta);
-                             return;
-                         }
-                         const alphaFrame = this.alphaFrameQueue.shift();
-                         assert(alphaFrame !== undefined);
-                         if (alphaFrame) {
-                             this.alphaEncoder.encode(alphaFrame, {
-                                 // Crucial: The alpha frame is forced to be a key frame whenever the color frame
-                                 // also is. Without this, playback can glitch and even crash in some browsers.
-                                 // This is the reason why the two encoders are wired in series and not in parallel.
-                                 keyFrame: chunk.type === 'key',
-                             });
-                             alphaEncoderQueue++;
-                             alphaFrame.close();
-                             colorChunkQueue.push({ chunk, meta });
-                         }
-                         else {
-                             // There was no alpha component for this frame
-                             if (alphaEncoderQueue === 0) {
-                                 // No pending alpha encodes either, so we're done
-                                 addPacket(chunk, null, meta);
-                             }
-                             else {
-                                 // There are still alpha encodes pending, so we can't add the packet immediately since
-                                 // we'd end up with out-of-order packets. Instead, let's queue a null alpha chunk to be
-                                 // added in the future, after the current encoder workload has completed:
-                                 nullAlphaChunkQueue.push(encodedAlphaChunkCount + alphaEncoderQueue);
-                                 colorChunkQueue.push({ chunk, meta });
-                             }
-                         }
-                     },
-                     error: (error) => {
-                         error.stack = encoderError.stack; // Provide a more useful stack trace
-                         this.error ??= error;
-                     },
-                 });
-                 this.encoder.configure(encoderConfig);
-                 if (this.encodingConfig.alpha === 'keep') {
-                     // We need to encode alpha as well, which we do with a separate encoder
-                     this.alphaEncoder = new VideoEncoder({
-                         // We ignore the alpha chunk's metadata
-                         // eslint-disable-next-line @typescript-eslint/no-unused-vars
-                         output: (chunk, meta) => {
-                             alphaEncoderQueue--;
-                             // There has to be a color chunk because the encoders are wired in series
-                             const colorChunk = colorChunkQueue.shift();
-                             assert(colorChunk !== undefined);
-                             addPacket(colorChunk.chunk, chunk, colorChunk.meta);
-                             // See if there are any null alpha chunks queued up
-                             encodedAlphaChunkCount++;
-                             while (nullAlphaChunkQueue.length > 0
-                                 && nullAlphaChunkQueue[0] === encodedAlphaChunkCount) {
-                                 nullAlphaChunkQueue.shift();
-                                 const colorChunk = colorChunkQueue.shift();
-                                 assert(colorChunk !== undefined);
-                                 addPacket(colorChunk.chunk, null, colorChunk.meta);
-                             }
-                         },
-                         error: (error) => {
-                             error.stack = encoderError.stack; // Provide a more useful stack trace
-                             this.error ??= error;
-                         },
-                     });
-                     this.alphaEncoder.configure(encoderConfig);
-                 }
-             }
-             assert(this.source._connectedTrack);
-             this.muxer = this.source._connectedTrack.output._muxer;
-             this.encoderInitialized = true;
-         })();
-     }
-     async flushAndClose(forceClose) {
-         if (!forceClose)
-             this.checkForEncoderError();
-         if (this.customEncoder) {
-             if (!forceClose) {
-                 void this.customEncoderCallSerializer.call(() => this.customEncoder.flush());
-             }
-             await this.customEncoderCallSerializer.call(() => this.customEncoder.close());
-         }
-         else if (this.encoder) {
-             if (!forceClose) {
-                 // These are wired in series, therefore they must also be flushed in series
-                 await this.encoder.flush();
-                 await this.alphaEncoder?.flush();
-             }
-             if (this.encoder.state !== 'closed') {
-                 this.encoder.close();
-             }
-             if (this.alphaEncoder && this.alphaEncoder.state !== 'closed') {
-                 this.alphaEncoder.close();
-             }
-             this.alphaFrameQueue.forEach(x => x?.close());
-             this.splitter?.close();
-         }
-         if (!forceClose)
-             this.checkForEncoderError();
-     }
-     getQueueSize() {
-         if (this.customEncoder) {
-             return this.customEncoderQueueSize;
-         }
-         else {
-             // Because the color and alpha encoders are wired in series, there's no need to also include the alpha
-             // encoder's queue size here
-             return this.encoder?.encodeQueueSize ?? 0;
-         }
-     }
-     checkForEncoderError() {
-         if (this.error) {
-             if (this.errorNeedsNewStack) {
-                 this.error.stack = new Error().stack; // Provide an even more useful stack trace
-             }
-             throw this.error;
-         }
-     }
- }
- /** Utility class for splitting a composite frame into separate color and alpha components. */
- class ColorAlphaSplitter {
-     constructor(initialWidth, initialHeight) {
-         this.lastFrame = null;
-         if (typeof OffscreenCanvas !== 'undefined') {
-             this.canvas = new OffscreenCanvas(initialWidth, initialHeight);
-         }
-         else {
-             this.canvas = document.createElement('canvas');
-             this.canvas.width = initialWidth;
-             this.canvas.height = initialHeight;
-         }
-         const gl = this.canvas.getContext('webgl2', {
-             alpha: true, // Needed due to the YUV thing we do for alpha
-         }); // Casting because of some TypeScript weirdness
-         if (!gl) {
-             throw new Error('Couldn\'t acquire WebGL 2 context.');
-         }
-         this.gl = gl;
-         this.colorProgram = this.createColorProgram();
-         this.alphaProgram = this.createAlphaProgram();
-         this.vao = this.createVAO();
-         this.sourceTexture = this.createTexture();
-         this.alphaResolutionLocation = this.gl.getUniformLocation(this.alphaProgram, 'u_resolution');
-         this.gl.useProgram(this.colorProgram);
-         this.gl.uniform1i(this.gl.getUniformLocation(this.colorProgram, 'u_sourceTexture'), 0);
-         this.gl.useProgram(this.alphaProgram);
-         this.gl.uniform1i(this.gl.getUniformLocation(this.alphaProgram, 'u_sourceTexture'), 0);
-     }
-     createVertexShader() {
-         return this.createShader(this.gl.VERTEX_SHADER, `#version 300 es
-             in vec2 a_position;
-             in vec2 a_texCoord;
-             out vec2 v_texCoord;
-
-             void main() {
-                 gl_Position = vec4(a_position, 0.0, 1.0);
-                 v_texCoord = a_texCoord;
-             }
-         `);
-     }
-     createColorProgram() {
-         const vertexShader = this.createVertexShader();
-         // This shader is simple, simply copy the color information while setting alpha to 1
-         const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es
-             precision highp float;
-
-             uniform sampler2D u_sourceTexture;
-             in vec2 v_texCoord;
-             out vec4 fragColor;
-
-             void main() {
-                 vec4 source = texture(u_sourceTexture, v_texCoord);
-                 fragColor = vec4(source.rgb, 1.0);
-             }
-         `);
-         const program = this.gl.createProgram();
-         this.gl.attachShader(program, vertexShader);
-         this.gl.attachShader(program, fragmentShader);
-         this.gl.linkProgram(program);
-         return program;
-     }
-     createAlphaProgram() {
-         const vertexShader = this.createVertexShader();
-         // This shader's more complex. The main reason is that this shader writes data in I420 (yuv420) pixel format
-         // instead of regular RGBA. In other words, we use the shader to write out I420 data into an RGBA canvas, which
-         // we then later read out with JavaScript. The reason being that browsers weirdly encode canvases and mess up
-         // the color spaces, and the only way to have full control over the color space is by outputting YUV data
-         // directly (avoiding the RGB conversion). Doing this conversion in JS is painfully slow, so let's utilize the
- // GPU since we're already calling it anyway.
607
- const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es
608
- precision highp float;
609
-
610
- uniform sampler2D u_sourceTexture;
611
- uniform vec2 u_resolution; // The width and height of the canvas
612
- in vec2 v_texCoord;
613
- out vec4 fragColor;
614
-
615
- // This function determines the value for a single byte in the YUV stream
616
- float getByteValue(float byteOffset) {
617
- float width = u_resolution.x;
618
- float height = u_resolution.y;
619
-
620
- float yPlaneSize = width * height;
621
-
622
- if (byteOffset < yPlaneSize) {
623
- // This byte is in the luma plane. Find the corresponding pixel coordinates to sample from
624
- float y = floor(byteOffset / width);
625
- float x = mod(byteOffset, width);
626
-
627
- // Add 0.5 to sample the center of the texel
628
- vec2 sampleCoord = (vec2(x, y) + 0.5) / u_resolution;
629
-
630
- // The luma value is the alpha from the source texture
631
- return texture(u_sourceTexture, sampleCoord).a;
632
- } else {
633
- // Write a fixed value for chroma and beyond
634
- return 128.0 / 255.0;
635
- }
636
- }
637
-
638
- void main() {
639
- // Each fragment writes 4 bytes (R, G, B, A)
640
- float pixelIndex = floor(gl_FragCoord.y) * u_resolution.x + floor(gl_FragCoord.x);
641
- float baseByteOffset = pixelIndex * 4.0;
642
-
643
- vec4 result;
644
- for (int i = 0; i < 4; i++) {
645
- float currentByteOffset = baseByteOffset + float(i);
646
- result[i] = getByteValue(currentByteOffset);
647
- }
648
-
649
- fragColor = result;
650
- }
651
- `);
652
- const program = this.gl.createProgram();
653
- this.gl.attachShader(program, vertexShader);
654
- this.gl.attachShader(program, fragmentShader);
655
- this.gl.linkProgram(program);
656
- return program;
657
- }
658
- createShader(type, source) {
659
- const shader = this.gl.createShader(type);
660
- this.gl.shaderSource(shader, source);
661
- this.gl.compileShader(shader);
662
- if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {
663
- console.error('Shader compile error:', this.gl.getShaderInfoLog(shader));
664
- }
665
- return shader;
666
- }
667
- createVAO() {
668
- const vao = this.gl.createVertexArray();
669
- this.gl.bindVertexArray(vao);
670
- const vertices = new Float32Array([
671
- -1, -1, 0, 1,
672
- 1, -1, 1, 1,
673
- -1, 1, 0, 0,
674
- 1, 1, 1, 0,
675
- ]);
676
- const buffer = this.gl.createBuffer();
677
- this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);
678
- this.gl.bufferData(this.gl.ARRAY_BUFFER, vertices, this.gl.STATIC_DRAW);
679
- const positionLocation = this.gl.getAttribLocation(this.colorProgram, 'a_position');
680
- const texCoordLocation = this.gl.getAttribLocation(this.colorProgram, 'a_texCoord');
681
- this.gl.enableVertexAttribArray(positionLocation);
682
- this.gl.vertexAttribPointer(positionLocation, 2, this.gl.FLOAT, false, 16, 0);
683
- this.gl.enableVertexAttribArray(texCoordLocation);
684
- this.gl.vertexAttribPointer(texCoordLocation, 2, this.gl.FLOAT, false, 16, 8);
685
- return vao;
686
- }
687
- createTexture() {
688
- const texture = this.gl.createTexture();
689
- this.gl.bindTexture(this.gl.TEXTURE_2D, texture);
690
- this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);
691
- this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);
692
- this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.LINEAR);
693
- this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.LINEAR);
694
- return texture;
695
- }
696
- updateTexture(sourceFrame) {
697
- if (this.lastFrame === sourceFrame) {
698
- return;
699
- }
700
- if (sourceFrame.displayWidth !== this.canvas.width || sourceFrame.displayHeight !== this.canvas.height) {
701
- this.canvas.width = sourceFrame.displayWidth;
702
- this.canvas.height = sourceFrame.displayHeight;
703
- }
704
- this.gl.activeTexture(this.gl.TEXTURE0);
705
- this.gl.bindTexture(this.gl.TEXTURE_2D, this.sourceTexture);
706
- this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGBA, this.gl.RGBA, this.gl.UNSIGNED_BYTE, sourceFrame);
707
- this.lastFrame = sourceFrame;
708
- }
709
- extractColor(sourceFrame) {
710
- this.updateTexture(sourceFrame);
711
- this.gl.useProgram(this.colorProgram);
712
- this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);
713
- this.gl.clear(this.gl.COLOR_BUFFER_BIT);
714
- this.gl.bindVertexArray(this.vao);
715
- this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);
716
- return new VideoFrame(this.canvas, {
717
- timestamp: sourceFrame.timestamp,
718
- duration: sourceFrame.duration ?? undefined,
719
- alpha: 'discard',
720
- });
721
- }
722
- extractAlpha(sourceFrame) {
723
- this.updateTexture(sourceFrame);
724
- this.gl.useProgram(this.alphaProgram);
725
- this.gl.uniform2f(this.alphaResolutionLocation, this.canvas.width, this.canvas.height);
726
- this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);
727
- this.gl.clear(this.gl.COLOR_BUFFER_BIT);
728
- this.gl.bindVertexArray(this.vao);
729
- this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);
730
- const { width, height } = this.canvas;
731
- const chromaSamples = Math.ceil(width / 2) * Math.ceil(height / 2);
732
- const yuvSize = width * height + chromaSamples * 2;
733
- const requiredHeight = Math.ceil(yuvSize / (width * 4));
734
- let yuv = new Uint8Array(4 * width * requiredHeight);
735
- this.gl.readPixels(0, 0, width, requiredHeight, this.gl.RGBA, this.gl.UNSIGNED_BYTE, yuv);
736
- yuv = yuv.subarray(0, yuvSize);
737
- assert(yuv[width * height] === 128); // Where chroma data starts
738
- assert(yuv[yuv.length - 1] === 128); // Assert the YUV data has been fully written
739
- // Defining this separately because TypeScript doesn't know `transfer` and I can't be bothered to do declaration
740
- // merging right now
741
- const init = {
742
- format: 'I420',
743
- codedWidth: width,
744
- codedHeight: height,
745
- timestamp: sourceFrame.timestamp,
746
- duration: sourceFrame.duration ?? undefined,
747
- transfer: [yuv.buffer],
748
- };
749
- return new VideoFrame(yuv, init);
750
- }
751
- close() {
752
- this.gl.getExtension('WEBGL_lose_context')?.loseContext();
753
- this.gl = null;
754
- }
755
- }
756
- /**
757
- * This source can be used to add raw, unencoded video samples (frames) to an output video track. These frames will
758
- * automatically be encoded and then piped into the output.
759
- * @group Media sources
760
- * @public
761
- */
762
- export class VideoSampleSource extends VideoSource {
763
- /**
764
- * Creates a new {@link VideoSampleSource} whose samples are encoded according to the specified
765
- * {@link VideoEncodingConfig}.
766
- */
767
- constructor(encodingConfig) {
768
- validateVideoEncodingConfig(encodingConfig);
769
- super(encodingConfig.codec);
770
- this._encoder = new VideoEncoderWrapper(this, encodingConfig);
771
- }
772
- /**
773
- * Encodes a video sample (frame) and then adds it to the output.
774
- *
775
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
776
- * to respect writer and encoder backpressure.
777
- */
778
- add(videoSample, encodeOptions) {
779
- if (!(videoSample instanceof VideoSample)) {
780
- throw new TypeError('videoSample must be a VideoSample.');
781
- }
782
- return this._encoder.add(videoSample, false, encodeOptions);
783
- }
784
- /** @internal */
785
- _flushAndClose(forceClose) {
786
- return this._encoder.flushAndClose(forceClose);
787
- }
788
- }
789
- /**
790
- * This source can be used to add video frames to the output track from a fixed canvas element. Since canvases are often
791
- * used for rendering, this source provides a convenient wrapper around {@link VideoSampleSource}.
792
- * @group Media sources
793
- * @public
794
- */
795
- export class CanvasSource extends VideoSource {
796
- /**
797
- * Creates a new {@link CanvasSource} from a canvas element or `OffscreenCanvas` whose samples are encoded
798
- * according to the specified {@link VideoEncodingConfig}.
799
- */
800
- constructor(canvas, encodingConfig) {
801
- if (!(typeof HTMLCanvasElement !== 'undefined' && canvas instanceof HTMLCanvasElement)
802
- && !(typeof OffscreenCanvas !== 'undefined' && canvas instanceof OffscreenCanvas)) {
803
- throw new TypeError('canvas must be an HTMLCanvasElement or OffscreenCanvas.');
804
- }
805
- validateVideoEncodingConfig(encodingConfig);
806
- super(encodingConfig.codec);
807
- this._encoder = new VideoEncoderWrapper(this, encodingConfig);
808
- this._canvas = canvas;
809
- }
810
- /**
811
- * Captures the current canvas state as a video sample (frame), encodes it and adds it to the output.
812
- *
813
- * @param timestamp - The timestamp of the sample, in seconds.
814
- * @param duration - The duration of the sample, in seconds.
815
- *
816
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
817
- * to respect writer and encoder backpressure.
818
- */
819
- add(timestamp, duration = 0, encodeOptions) {
820
- if (!Number.isFinite(timestamp) || timestamp < 0) {
821
- throw new TypeError('timestamp must be a non-negative number.');
822
- }
823
- if (!Number.isFinite(duration) || duration < 0) {
824
- throw new TypeError('duration must be a non-negative number.');
825
- }
826
- const sample = new VideoSample(this._canvas, { timestamp, duration });
827
- return this._encoder.add(sample, true, encodeOptions);
828
- }
829
- /** @internal */
830
- _flushAndClose(forceClose) {
831
- return this._encoder.flushAndClose(forceClose);
832
- }
833
- }
834
- /**
835
- * Video source that encodes the frames of a
836
- * [`MediaStreamVideoTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack) and pipes them into the
837
- * output. This is useful for capturing live or real-time data such as webcams or screen captures. Frames will
838
- * automatically start being captured once the connected {@link Output} is started, and will keep being captured until
839
- * the {@link Output} is finalized or this source is closed.
840
- * @group Media sources
841
- * @public
842
- */
843
- export class MediaStreamVideoTrackSource extends VideoSource {
844
- /** A promise that rejects upon any error within this source. This promise never resolves. */
845
- get errorPromise() {
846
- this._errorPromiseAccessed = true;
847
- return this._promiseWithResolvers.promise;
848
- }
849
- /**
850
- * Creates a new {@link MediaStreamVideoTrackSource} from a
851
- * [`MediaStreamVideoTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack), which will pull
852
- * video samples from the stream in real time and encode them according to {@link VideoEncodingConfig}.
853
- */
854
- constructor(track, encodingConfig) {
855
- if (!(track instanceof MediaStreamTrack) || track.kind !== 'video') {
856
- throw new TypeError('track must be a video MediaStreamTrack.');
857
- }
858
- validateVideoEncodingConfig(encodingConfig);
859
- encodingConfig = {
860
- ...encodingConfig,
861
- latencyMode: 'realtime',
862
- };
863
- super(encodingConfig.codec);
864
- /** @internal */
865
- this._abortController = null;
866
- /** @internal */
867
- this._workerTrackId = null;
868
- /** @internal */
869
- this._workerListener = null;
870
- /** @internal */
871
- this._promiseWithResolvers = promiseWithResolvers();
872
- /** @internal */
873
- this._errorPromiseAccessed = false;
874
- this._encoder = new VideoEncoderWrapper(this, encodingConfig);
875
- this._track = track;
876
- }
877
- /** @internal */
878
- async _start() {
879
- if (!this._errorPromiseAccessed) {
880
- console.warn('Make sure not to ignore the `errorPromise` field on MediaStreamVideoTrackSource, so that any internal'
881
- + ' errors get bubbled up properly.');
882
- }
883
- this._abortController = new AbortController();
884
- let firstVideoFrameTimestamp = null;
885
- let errored = false;
886
- const onVideoFrame = (videoFrame) => {
887
- if (errored) {
888
- videoFrame.close();
889
- return;
890
- }
891
- if (firstVideoFrameTimestamp === null) {
892
- firstVideoFrameTimestamp = videoFrame.timestamp / 1e6;
893
- const muxer = this._connectedTrack.output._muxer;
894
- if (muxer.firstMediaStreamTimestamp === null) {
895
- muxer.firstMediaStreamTimestamp = performance.now() / 1000;
896
- this._timestampOffset = -firstVideoFrameTimestamp;
897
- }
898
- else {
899
- this._timestampOffset = (performance.now() / 1000 - muxer.firstMediaStreamTimestamp)
900
- - firstVideoFrameTimestamp;
901
- }
902
- }
903
- if (this._encoder.getQueueSize() >= 4) {
904
- // Drop frames if the encoder is overloaded
905
- videoFrame.close();
906
- return;
907
- }
908
- void this._encoder.add(new VideoSample(videoFrame), true)
909
- .catch((error) => {
910
- errored = true;
911
- this._abortController?.abort();
912
- this._promiseWithResolvers.reject(error);
913
- if (this._workerTrackId !== null) {
914
- // Tell the worker to stop the track
915
- sendMessageToMediaStreamTrackProcessorWorker({
916
- type: 'stopTrack',
917
- trackId: this._workerTrackId,
918
- });
919
- }
920
- });
921
- };
922
- if (typeof MediaStreamTrackProcessor !== 'undefined') {
923
- // We can do it here directly, perfect
924
- const processor = new MediaStreamTrackProcessor({ track: this._track });
925
- const consumer = new WritableStream({ write: onVideoFrame });
926
- processor.readable.pipeTo(consumer, {
927
- signal: this._abortController.signal,
928
- }).catch((error) => {
929
- // Handle AbortError silently
930
- if (error instanceof DOMException && error.name === 'AbortError')
931
- return;
932
- this._promiseWithResolvers.reject(error);
933
- });
934
- }
935
- else {
936
- // It might still be supported in a worker, so let's check that
937
- const supportedInWorker = await mediaStreamTrackProcessorIsSupportedInWorker();
938
- if (supportedInWorker) {
939
- this._workerTrackId = nextMediaStreamTrackProcessorWorkerId++;
940
- sendMessageToMediaStreamTrackProcessorWorker({
941
- type: 'videoTrack',
942
- trackId: this._workerTrackId,
943
- track: this._track,
944
- });
945
- this._workerListener = (event) => {
946
- const message = event.data;
947
- if (message.type === 'videoFrame' && message.trackId === this._workerTrackId) {
948
- onVideoFrame(message.videoFrame);
949
- }
950
- else if (message.type === 'error' && message.trackId === this._workerTrackId) {
951
- this._promiseWithResolvers.reject(message.error);
952
- }
953
- };
954
- mediaStreamTrackProcessorWorker.addEventListener('message', this._workerListener);
955
- }
956
- else {
957
- throw new Error('MediaStreamTrackProcessor is required but not supported by this browser.');
958
- }
959
- }
960
- }
961
- /** @internal */
962
- async _flushAndClose(forceClose) {
963
- if (this._abortController) {
964
- this._abortController.abort();
965
- this._abortController = null;
966
- }
967
- if (this._workerTrackId !== null) {
968
- assert(this._workerListener);
969
- sendMessageToMediaStreamTrackProcessorWorker({
970
- type: 'stopTrack',
971
- trackId: this._workerTrackId,
972
- });
973
- // Wait for the worker to stop the track
974
- await new Promise((resolve) => {
975
- const listener = (event) => {
976
- const message = event.data;
977
- if (message.type === 'trackStopped' && message.trackId === this._workerTrackId) {
978
- assert(this._workerListener);
979
- mediaStreamTrackProcessorWorker.removeEventListener('message', this._workerListener);
980
- mediaStreamTrackProcessorWorker.removeEventListener('message', listener);
981
- resolve();
982
- }
983
- };
984
- mediaStreamTrackProcessorWorker.addEventListener('message', listener);
985
- });
986
- }
987
- await this._encoder.flushAndClose(forceClose);
988
- }
989
- }
990
- /**
991
- * Base class for audio sources - sources for audio tracks.
992
- * @group Media sources
993
- * @public
994
- */
995
- export class AudioSource extends MediaSource {
996
- /** Internal constructor. */
997
- constructor(codec) {
998
- super();
999
- /** @internal */
1000
- this._connectedTrack = null;
1001
- if (!AUDIO_CODECS.includes(codec)) {
1002
- throw new TypeError(`Invalid audio codec '${codec}'. Must be one of: ${AUDIO_CODECS.join(', ')}.`);
1003
- }
1004
- this._codec = codec;
1005
- }
1006
- }
1007
- /**
1008
- * The most basic audio source; can be used to directly pipe encoded packets into the output file.
1009
- * @group Media sources
1010
- * @public
1011
- */
1012
- export class EncodedAudioPacketSource extends AudioSource {
1013
- /** Creates a new {@link EncodedAudioPacketSource} whose packets are encoded using `codec`. */
1014
- constructor(codec) {
1015
- super(codec);
1016
- }
1017
- /**
1018
- * Adds an encoded packet to the output audio track. Packets must be added in *decode order*.
1019
- *
1020
- * @param meta - Additional metadata from the encoder. You should pass this for the first call, including a valid
1021
- * decoder config.
1022
- *
1023
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
1024
- * to respect writer and encoder backpressure.
1025
- */
1026
- add(packet, meta) {
1027
- if (!(packet instanceof EncodedPacket)) {
1028
- throw new TypeError('packet must be an EncodedPacket.');
1029
- }
1030
- if (packet.isMetadataOnly) {
1031
- throw new TypeError('Metadata-only packets cannot be added.');
1032
- }
1033
- if (meta !== undefined && (!meta || typeof meta !== 'object')) {
1034
- throw new TypeError('meta, when provided, must be an object.');
1035
- }
1036
- this._ensureValidAdd();
1037
- return this._connectedTrack.output._muxer.addEncodedAudioPacket(this._connectedTrack, packet, meta);
1038
- }
1039
- }
1040
- class AudioEncoderWrapper {
-     constructor(source, encodingConfig) {
-         this.source = source;
-         this.encodingConfig = encodingConfig;
-         this.ensureEncoderPromise = null;
-         this.encoderInitialized = false;
-         this.encoder = null;
-         this.muxer = null;
-         this.lastNumberOfChannels = null;
-         this.lastSampleRate = null;
-         this.isPcmEncoder = false;
-         this.outputSampleSize = null;
-         this.writeOutputValue = null;
-         this.customEncoder = null;
-         this.customEncoderCallSerializer = new CallSerializer();
-         this.customEncoderQueueSize = 0;
-         this.lastEndSampleIndex = null;
-         /**
-          * Encoders typically throw their errors "out of band", meaning asynchronously in some other execution context.
-          * However, we want to surface these errors to the user within the normal control flow, so they don't go uncaught.
-          * So, we keep track of the encoder error and throw it as soon as we get the chance.
-          */
-         this.error = null;
-         this.errorNeedsNewStack = true;
-     }
-     async add(audioSample, shouldClose) {
-         try {
-             this.checkForEncoderError();
-             this.source._ensureValidAdd();
-             // Ensure audio parameters remain constant
-             if (this.lastNumberOfChannels !== null && this.lastSampleRate !== null) {
-                 if (audioSample.numberOfChannels !== this.lastNumberOfChannels
-                     || audioSample.sampleRate !== this.lastSampleRate) {
-                     throw new Error(`Audio parameters must remain constant. Expected ${this.lastNumberOfChannels} channels at`
-                         + ` ${this.lastSampleRate} Hz, got ${audioSample.numberOfChannels} channels at`
-                         + ` ${audioSample.sampleRate} Hz.`);
-                 }
-             }
-             else {
-                 this.lastNumberOfChannels = audioSample.numberOfChannels;
-                 this.lastSampleRate = audioSample.sampleRate;
-             }
-             if (!this.encoderInitialized) {
-                 if (!this.ensureEncoderPromise) {
-                     this.ensureEncoder(audioSample);
-                 }
-                 // No, this "if" statement is not useless. Sometimes, the above call to `ensureEncoder` might have
-                 // synchronously completed and the encoder is already initialized. In this case, we don't need to await
-                 // the promise anymore. This also fixes nasty async race condition bugs when multiple code paths are
-                 // calling this method: It's important that the call that initialized the encoder go through this
-                 // code first.
-                 if (!this.encoderInitialized) {
-                     await this.ensureEncoderPromise;
-                 }
-             }
-             assert(this.encoderInitialized);
-             // Handle padding of gaps with silence to avoid audio drift over time, like in
-             // https://github.com/Vanilagy/mediabunny/issues/176
-             // TODO An open question is how encoders deal with the first AudioData having a non-zero timestamp, and with
-             // AudioDatas that have an overlapping timestamp range.
-             {
-                 const startSampleIndex = Math.round(audioSample.timestamp * audioSample.sampleRate);
-                 const endSampleIndex = Math.round((audioSample.timestamp + audioSample.duration) * audioSample.sampleRate);
-                 if (this.lastEndSampleIndex !== null && startSampleIndex > this.lastEndSampleIndex) {
-                     const sampleCount = startSampleIndex - this.lastEndSampleIndex;
-                     const fillSample = new AudioSample({
-                         data: new Float32Array(sampleCount * audioSample.numberOfChannels),
-                         format: 'f32-planar',
-                         sampleRate: audioSample.sampleRate,
-                         numberOfChannels: audioSample.numberOfChannels,
-                         numberOfFrames: sampleCount,
-                         timestamp: this.lastEndSampleIndex / audioSample.sampleRate,
-                     });
-                     await this.add(fillSample, true); // Recursive call
-                 }
-                 this.lastEndSampleIndex = endSampleIndex;
-             }
-             if (this.customEncoder) {
-                 this.customEncoderQueueSize++;
-                 // We clone the sample so it cannot be closed on us from the outside before it reaches the encoder
-                 const clonedSample = audioSample.clone();
-                 const promise = this.customEncoderCallSerializer
-                     .call(() => this.customEncoder.encode(clonedSample))
-                     .then(() => this.customEncoderQueueSize--)
-                     .catch((error) => this.error ??= error)
-                     .finally(() => {
-                         clonedSample.close();
-                         // `audioSample` gets closed in the finally block at the end of the method
-                     });
-                 if (this.customEncoderQueueSize >= 4) {
-                     await promise;
-                 }
-                 await this.muxer.mutex.currentPromise; // Allow the writer to apply backpressure
-             }
-             else if (this.isPcmEncoder) {
-                 await this.doPcmEncoding(audioSample, shouldClose);
-             }
-             else {
-                 assert(this.encoder);
-                 const audioData = audioSample.toAudioData();
-                 this.encoder.encode(audioData);
-                 audioData.close();
-                 if (shouldClose) {
-                     audioSample.close();
-                 }
-                 if (this.encoder.encodeQueueSize >= 4) {
-                     await new Promise(resolve => this.encoder.addEventListener('dequeue', resolve, { once: true }));
-                 }
-                 await this.muxer.mutex.currentPromise; // Allow the writer to apply backpressure
-             }
-         }
-         finally {
-             if (shouldClose) {
-                 // Make sure it's always closed, even if there was an error
-                 audioSample.close();
-             }
-         }
-     }
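To make the silence-padding arithmetic in `add` concrete, a small worked example (numbers invented for illustration): at 48000 Hz, if the previous sample ended at index 48000 (t = 1.0 s) and the next one starts at t = 1.005 s, the wrapper synthesizes 240 frames of silence before encoding the new sample:

    const sampleRate = 48000;
    const lastEndSampleIndex = Math.round(1.0 * sampleRate); // 48000
    const startSampleIndex = Math.round(1.005 * sampleRate); // 48240
    const sampleCount = startSampleIndex - lastEndSampleIndex; // 240 missing frames
    // An all-zero Float32Array is silence in 'f32-planar' format (one plane per channel):
    const silence = new Float32Array(sampleCount * 2); // assuming 2 channels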
-     async doPcmEncoding(audioSample, shouldClose) {
-         assert(this.outputSampleSize);
-         assert(this.writeOutputValue);
-         // Need to extract data from the audio data before we close it
-         const { numberOfChannels, numberOfFrames, sampleRate, timestamp } = audioSample;
-         const CHUNK_SIZE = 2048;
-         const outputs = [];
-         // Prepare all of the output buffers, each being bounded by CHUNK_SIZE so we don't generate huge packets
-         for (let frame = 0; frame < numberOfFrames; frame += CHUNK_SIZE) {
-             const frameCount = Math.min(CHUNK_SIZE, audioSample.numberOfFrames - frame);
-             const outputSize = frameCount * numberOfChannels * this.outputSampleSize;
-             const outputBuffer = new ArrayBuffer(outputSize);
-             const outputView = new DataView(outputBuffer);
-             outputs.push({ frameCount, view: outputView });
-         }
-         const allocationSize = audioSample.allocationSize({ planeIndex: 0, format: 'f32-planar' });
-         const floats = new Float32Array(allocationSize / Float32Array.BYTES_PER_ELEMENT);
-         for (let i = 0; i < numberOfChannels; i++) {
-             audioSample.copyTo(floats, { planeIndex: i, format: 'f32-planar' });
-             for (let j = 0; j < outputs.length; j++) {
-                 const { frameCount, view } = outputs[j];
-                 for (let k = 0; k < frameCount; k++) {
-                     this.writeOutputValue(view, (k * numberOfChannels + i) * this.outputSampleSize, floats[j * CHUNK_SIZE + k]);
-                 }
-             }
-         }
-         if (shouldClose) {
-             audioSample.close();
-         }
-         const meta = {
-             decoderConfig: {
-                 codec: this.encodingConfig.codec,
-                 numberOfChannels,
-                 sampleRate,
-             },
-         };
-         for (let i = 0; i < outputs.length; i++) {
-             const { frameCount, view } = outputs[i];
-             const outputBuffer = view.buffer;
-             const startFrame = i * CHUNK_SIZE;
-             const packet = new EncodedPacket(new Uint8Array(outputBuffer), 'key', timestamp + startFrame / sampleRate, frameCount / sampleRate);
-             this.encodingConfig.onEncodedPacket?.(packet, meta);
-             await this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta); // With backpressure
-         }
-     }
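The byte-offset expression in the inner loop above is what interleaves the planar input: frame k of channel i lands at (k * numberOfChannels + i) * outputSampleSize. A plain-arithmetic illustration for 16-bit stereo:

    const numberOfChannels = 2;
    const outputSampleSize = 2; // bytes per 16-bit sample
    const offset = (k, i) => (k * numberOfChannels + i) * outputSampleSize;
    offset(0, 0); // 0 -> frame 0, left
    offset(0, 1); // 2 -> frame 0, right
    offset(1, 0); // 4 -> frame 1, left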
-     ensureEncoder(audioSample) {
-         const encoderError = new Error();
-         this.ensureEncoderPromise = (async () => {
-             const { numberOfChannels, sampleRate } = audioSample;
-             const encoderConfig = buildAudioEncoderConfig({
-                 numberOfChannels,
-                 sampleRate,
-                 ...this.encodingConfig,
-             });
-             this.encodingConfig.onEncoderConfig?.(encoderConfig);
-             const MatchingCustomEncoder = customAudioEncoders.find(x => x.supports(this.encodingConfig.codec, encoderConfig));
-             if (MatchingCustomEncoder) {
-                 // @ts-expect-error "Can't create instance of abstract class 🤓"
-                 this.customEncoder = new MatchingCustomEncoder();
-                 // @ts-expect-error It's technically readonly
-                 this.customEncoder.codec = this.encodingConfig.codec;
-                 // @ts-expect-error It's technically readonly
-                 this.customEncoder.config = encoderConfig;
-                 // @ts-expect-error It's technically readonly
-                 this.customEncoder.onPacket = (packet, meta) => {
-                     if (!(packet instanceof EncodedPacket)) {
-                         throw new TypeError('The first argument passed to onPacket must be an EncodedPacket.');
-                     }
-                     if (meta !== undefined && (!meta || typeof meta !== 'object')) {
-                         throw new TypeError('The second argument passed to onPacket must be an object or undefined.');
-                     }
-                     this.encodingConfig.onEncodedPacket?.(packet, meta);
-                     void this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta)
-                         .catch((error) => {
-                             this.error ??= error;
-                             this.errorNeedsNewStack = false;
-                         });
-                 };
-                 await this.customEncoder.init();
-             }
-             else if (PCM_AUDIO_CODECS.includes(this.encodingConfig.codec)) {
-                 this.initPcmEncoder();
-             }
-             else {
-                 if (typeof AudioEncoder === 'undefined') {
-                     throw new Error('AudioEncoder is not supported by this browser.');
-                 }
-                 const support = await AudioEncoder.isConfigSupported(encoderConfig);
-                 if (!support.supported) {
-                     throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,`
-                         + ` ${encoderConfig.numberOfChannels} channels, ${encoderConfig.sampleRate} Hz) is not`
-                         + ` supported by this browser. Consider using another codec or changing your audio parameters.`);
-                 }
-                 this.encoder = new AudioEncoder({
-                     output: (chunk, meta) => {
-                         // WebKit emits an invalid description for AAC (https://bugs.webkit.org/show_bug.cgi?id=302253),
-                         // which we try to detect here. If detected, we'll provide our own description instead, derived
-                         // from the codec string and audio parameters.
-                         if (this.encodingConfig.codec === 'aac' && meta?.decoderConfig) {
-                             let needsDescriptionOverwrite = false;
-                             if (!meta.decoderConfig.description || meta.decoderConfig.description.byteLength < 2) {
-                                 needsDescriptionOverwrite = true;
-                             }
-                             else {
-                                 const audioSpecificConfig = parseAacAudioSpecificConfig(toUint8Array(meta.decoderConfig.description));
-                                 needsDescriptionOverwrite = audioSpecificConfig.objectType === 0;
-                             }
-                             if (needsDescriptionOverwrite) {
-                                 const objectType = Number(last(encoderConfig.codec.split('.')));
-                                 meta.decoderConfig.description = buildAacAudioSpecificConfig({
-                                     objectType,
-                                     numberOfChannels: meta.decoderConfig.numberOfChannels,
-                                     sampleRate: meta.decoderConfig.sampleRate,
-                                 });
-                             }
-                         }
-                         const packet = EncodedPacket.fromEncodedChunk(chunk);
-                         this.encodingConfig.onEncodedPacket?.(packet, meta);
-                         void this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta)
-                             .catch((error) => {
-                                 this.error ??= error;
-                                 this.errorNeedsNewStack = false;
-                             });
-                     },
-                     error: (error) => {
-                         error.stack = encoderError.stack; // Provide a more useful stack trace
-                         this.error ??= error;
-                     },
-                 });
-                 this.encoder.configure(encoderConfig);
-             }
-             assert(this.source._connectedTrack);
-             this.muxer = this.source._connectedTrack.output._muxer;
-             this.encoderInitialized = true;
-         })();
-     }
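The object type recovered from the codec string above comes from its last dotted segment. As a standalone illustration (inlining the library's `last` helper), the registered AAC codec string 'mp4a.40.2' ends in Audio Object Type 2, i.e. AAC-LC:

    const codecString = 'mp4a.40.2'; // AAC-LC
    const parts = codecString.split('.');
    const objectType = Number(parts[parts.length - 1]); // 2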
-     initPcmEncoder() {
-         this.isPcmEncoder = true;
-         const codec = this.encodingConfig.codec;
-         const { dataType, sampleSize, littleEndian } = parsePcmCodec(codec);
-         this.outputSampleSize = sampleSize;
-         // All these functions receive a float sample as input and map it into the desired format
-         switch (sampleSize) {
-             case 1: {
-                 if (dataType === 'unsigned') {
-                     this.writeOutputValue = (view, byteOffset, value) => view.setUint8(byteOffset, clamp((value + 1) * 127.5, 0, 255));
-                 }
-                 else if (dataType === 'signed') {
-                     this.writeOutputValue = (view, byteOffset, value) => {
-                         view.setInt8(byteOffset, clamp(Math.round(value * 128), -128, 127));
-                     };
-                 }
-                 else if (dataType === 'ulaw') {
-                     this.writeOutputValue = (view, byteOffset, value) => {
-                         const int16 = clamp(Math.floor(value * 32767), -32768, 32767);
-                         view.setUint8(byteOffset, toUlaw(int16));
-                     };
-                 }
-                 else if (dataType === 'alaw') {
-                     this.writeOutputValue = (view, byteOffset, value) => {
-                         const int16 = clamp(Math.floor(value * 32767), -32768, 32767);
-                         view.setUint8(byteOffset, toAlaw(int16));
-                     };
-                 }
-                 else {
-                     assert(false);
-                 }
-                 break;
-             }
-             case 2: {
-                 if (dataType === 'unsigned') {
-                     this.writeOutputValue = (view, byteOffset, value) => view.setUint16(byteOffset, clamp((value + 1) * 32767.5, 0, 65535), littleEndian);
-                 }
-                 else if (dataType === 'signed') {
-                     this.writeOutputValue = (view, byteOffset, value) => view.setInt16(byteOffset, clamp(Math.round(value * 32767), -32768, 32767), littleEndian);
-                 }
-                 else {
-                     assert(false);
-                 }
-                 break;
-             }
-             case 3: {
-                 if (dataType === 'unsigned') {
-                     this.writeOutputValue = (view, byteOffset, value) => setUint24(view, byteOffset, clamp((value + 1) * 8388607.5, 0, 16777215), littleEndian);
-                 }
-                 else if (dataType === 'signed') {
-                     this.writeOutputValue = (view, byteOffset, value) => setInt24(view, byteOffset, clamp(Math.round(value * 8388607), -8388608, 8388607), littleEndian);
-                 }
-                 else {
-                     assert(false);
-                 }
-                 break;
-             }
-             case 4: {
-                 if (dataType === 'unsigned') {
-                     this.writeOutputValue = (view, byteOffset, value) => view.setUint32(byteOffset, clamp((value + 1) * 2147483647.5, 0, 4294967295), littleEndian);
-                 }
-                 else if (dataType === 'signed') {
-                     this.writeOutputValue = (view, byteOffset, value) => view.setInt32(byteOffset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), littleEndian);
-                 }
-                 else if (dataType === 'float') {
-                     this.writeOutputValue = (view, byteOffset, value) => view.setFloat32(byteOffset, value, littleEndian);
-                 }
-                 else {
-                     assert(false);
-                 }
-                 break;
-             }
-             case 8: {
-                 if (dataType === 'float') {
-                     this.writeOutputValue = (view, byteOffset, value) => view.setFloat64(byteOffset, value, littleEndian);
-                 }
-                 else {
-                     assert(false);
-                 }
-                 break;
-             }
-             default: {
-                 assertNever(sampleSize);
-                 assert(false);
-             }
-         }
-     }
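To see what these mapping functions do, here is the signed 16-bit case as plain arithmetic (with a local stand-in for the library's `clamp` helper): a float sample of 0.5 becomes 16384, and out-of-range input saturates:

    const clamp = (x, min, max) => Math.min(Math.max(x, min), max);
    const toS16 = value => clamp(Math.round(value * 32767), -32768, 32767);
    toS16(0.5); // 16384
    toS16(1.2); // 32767 (clamped)
    toS16(-1); // -32767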
-     async flushAndClose(forceClose) {
-         if (!forceClose)
-             this.checkForEncoderError();
-         if (this.customEncoder) {
-             if (!forceClose) {
-                 void this.customEncoderCallSerializer.call(() => this.customEncoder.flush());
-             }
-             await this.customEncoderCallSerializer.call(() => this.customEncoder.close());
-         }
-         else if (this.encoder) {
-             if (!forceClose) {
-                 await this.encoder.flush();
-             }
-             if (this.encoder.state !== 'closed') {
-                 this.encoder.close();
-             }
-         }
-         if (!forceClose)
-             this.checkForEncoderError();
-     }
-     getQueueSize() {
-         if (this.customEncoder) {
-             return this.customEncoderQueueSize;
-         }
-         else if (this.isPcmEncoder) {
-             return 0;
-         }
-         else {
-             return this.encoder?.encodeQueueSize ?? 0;
-         }
-     }
-     checkForEncoderError() {
-         if (this.error) {
-             if (this.errorNeedsNewStack) {
-                 this.error.stack = new Error().stack; // Provide an even more useful stack trace
-             }
-             throw this.error;
-         }
-     }
- }
- /**
-  * This source can be used to add raw, unencoded audio samples to an output audio track. These samples will
-  * automatically be encoded and then piped into the output.
-  * @group Media sources
-  * @public
-  */
- export class AudioSampleSource extends AudioSource {
-     /**
-      * Creates a new {@link AudioSampleSource} whose samples are encoded according to the specified
-      * {@link AudioEncodingConfig}.
-      */
-     constructor(encodingConfig) {
-         validateAudioEncodingConfig(encodingConfig);
-         super(encodingConfig.codec);
-         this._encoder = new AudioEncoderWrapper(this, encodingConfig);
-     }
-     /**
-      * Encodes an audio sample and then adds it to the output.
-      *
-      * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
-      * to respect writer and encoder backpressure.
-      */
-     add(audioSample) {
-         if (!(audioSample instanceof AudioSample)) {
-             throw new TypeError('audioSample must be an AudioSample.');
-         }
-         return this._encoder.add(audioSample, false);
-     }
-     /** @internal */
-     _flushAndClose(forceClose) {
-         return this._encoder.flushAndClose(forceClose);
-     }
- }
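A minimal sketch of feeding generated audio into an AudioSampleSource (the encoding config and Output wiring are assumptions about the calling code; the AudioSample constructor shape matches the one the wrapper itself uses above):

    const source = new AudioSampleSource({ codec: 'opus', bitrate: 128e3 });
    // ...attach the source to an Output's audio track and start the Output, then:
    const sampleRate = 48000;
    const numberOfFrames = sampleRate; // one second
    const data = new Float32Array(numberOfFrames);
    for (let i = 0; i < numberOfFrames; i++) {
        data[i] = 0.5 * Math.sin(2 * Math.PI * 440 * i / sampleRate); // 440 Hz tone
    }
    await source.add(new AudioSample({
        data,
        format: 'f32-planar',
        sampleRate,
        numberOfChannels: 1,
        numberOfFrames,
        timestamp: 0,
    }));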
- /**
-  * This source can be used to add audio data from an AudioBuffer to the output track. This is useful when working with
-  * the Web Audio API.
-  * @group Media sources
-  * @public
-  */
- export class AudioBufferSource extends AudioSource {
-     /**
-      * Creates a new {@link AudioBufferSource} whose `AudioBuffer` instances are encoded according to the specified
-      * {@link AudioEncodingConfig}.
-      */
-     constructor(encodingConfig) {
-         validateAudioEncodingConfig(encodingConfig);
-         super(encodingConfig.codec);
-         /** @internal */
-         this._accumulatedTime = 0;
-         this._encoder = new AudioEncoderWrapper(this, encodingConfig);
-     }
-     /**
-      * Converts an AudioBuffer to audio samples, encodes them and adds them to the output. The first AudioBuffer will
-      * be played at timestamp 0, and any subsequent AudioBuffer will have a timestamp equal to the total duration of
-      * all previous AudioBuffers.
-      *
-      * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
-      * to respect writer and encoder backpressure.
-      */
-     async add(audioBuffer) {
-         if (!(audioBuffer instanceof AudioBuffer)) {
-             throw new TypeError('audioBuffer must be an AudioBuffer.');
-         }
-         const iterator = AudioSample._fromAudioBuffer(audioBuffer, this._accumulatedTime);
-         this._accumulatedTime += audioBuffer.duration;
-         for (const audioSample of iterator) {
-             await this._encoder.add(audioSample, true);
-         }
-     }
-     /** @internal */
-     _flushAndClose(forceClose) {
-         return this._encoder.flushAndClose(forceClose);
-     }
- }
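A hedged Web Audio sketch for AudioBufferSource: render two seconds of audio offline and hand the resulting AudioBuffer to the source (the encoding config is an assumed example; OfflineAudioContext is a standard Web Audio API):

    const source = new AudioBufferSource({ codec: 'aac', bitrate: 192e3 });
    const ctx = new OfflineAudioContext(2, 48000 * 2, 48000); // 2 s of stereo at 48 kHz
    const osc = ctx.createOscillator();
    osc.connect(ctx.destination);
    osc.start();
    const buffer = await ctx.startRendering();
    await source.add(buffer); // plays at timestamp 0; a second buffer would start at 2 s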
- /**
-  * Audio source that encodes the data of a
-  * [`MediaStreamAudioTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack) and pipes it into the
-  * output. This is useful for capturing live or real-time audio such as microphones or audio from other media elements.
-  * Audio will automatically start being captured once the connected {@link Output} is started, and will keep being
-  * captured until the {@link Output} is finalized or this source is closed.
-  * @group Media sources
-  * @public
-  */
- export class MediaStreamAudioTrackSource extends AudioSource {
-     /** A promise that rejects upon any error within this source. This promise never resolves. */
-     get errorPromise() {
-         this._errorPromiseAccessed = true;
-         return this._promiseWithResolvers.promise;
-     }
-     /**
-      * Creates a new {@link MediaStreamAudioTrackSource} from a `MediaStreamAudioTrack`, which will pull audio samples
-      * from the stream in real time and encode them according to {@link AudioEncodingConfig}.
-      */
-     constructor(track, encodingConfig) {
-         if (!(track instanceof MediaStreamTrack) || track.kind !== 'audio') {
-             throw new TypeError('track must be an audio MediaStreamTrack.');
-         }
-         validateAudioEncodingConfig(encodingConfig);
-         super(encodingConfig.codec);
-         /** @internal */
-         this._abortController = null;
-         /** @internal */
-         this._audioContext = null;
-         /** @internal */
-         this._scriptProcessorNode = null; // Deprecated but goated
-         /** @internal */
-         this._promiseWithResolvers = promiseWithResolvers();
-         /** @internal */
-         this._errorPromiseAccessed = false;
-         this._encoder = new AudioEncoderWrapper(this, encodingConfig);
-         this._track = track;
-     }
-     /** @internal */
-     async _start() {
-         if (!this._errorPromiseAccessed) {
-             console.warn('Make sure not to ignore the `errorPromise` field on MediaStreamAudioTrackSource, so that any'
-                 + ' internal errors get bubbled up properly.');
-         }
-         this._abortController = new AbortController();
-         if (typeof MediaStreamTrackProcessor !== 'undefined') {
-             // Great, MediaStreamTrackProcessor is supported, this is the preferred way of doing things
-             let firstAudioDataTimestamp = null;
-             const processor = new MediaStreamTrackProcessor({ track: this._track });
-             const consumer = new WritableStream({
-                 write: (audioData) => {
-                     if (firstAudioDataTimestamp === null) {
-                         firstAudioDataTimestamp = audioData.timestamp / 1e6;
-                         const muxer = this._connectedTrack.output._muxer;
-                         if (muxer.firstMediaStreamTimestamp === null) {
-                             muxer.firstMediaStreamTimestamp = performance.now() / 1000;
-                             this._timestampOffset = -firstAudioDataTimestamp;
-                         }
-                         else {
-                             this._timestampOffset = (performance.now() / 1000 - muxer.firstMediaStreamTimestamp)
-                                 - firstAudioDataTimestamp;
-                         }
-                     }
-                     if (this._encoder.getQueueSize() >= 4) {
-                         // Drop data if the encoder is overloaded
-                         audioData.close();
-                         return;
-                     }
-                     void this._encoder.add(new AudioSample(audioData), true)
-                         .catch((error) => {
-                             this._abortController?.abort();
-                             this._promiseWithResolvers.reject(error);
-                         });
-                 },
-             });
-             processor.readable.pipeTo(consumer, {
-                 signal: this._abortController.signal,
-             }).catch((error) => {
-                 // Handle AbortError silently
-                 if (error instanceof DOMException && error.name === 'AbortError')
-                     return;
-                 this._promiseWithResolvers.reject(error);
-             });
-         }
-         else {
-             // Let's fall back to an AudioContext approach
-             // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-member-access
-             const AudioContext = window.AudioContext || window.webkitAudioContext;
-             this._audioContext = new AudioContext({ sampleRate: this._track.getSettings().sampleRate });
-             const sourceNode = this._audioContext.createMediaStreamSource(new MediaStream([this._track]));
-             this._scriptProcessorNode = this._audioContext.createScriptProcessor(4096);
-             if (this._audioContext.state === 'suspended') {
-                 await this._audioContext.resume();
-             }
-             sourceNode.connect(this._scriptProcessorNode);
-             this._scriptProcessorNode.connect(this._audioContext.destination);
-             let audioReceived = false;
-             let totalDuration = 0;
-             this._scriptProcessorNode.onaudioprocess = (event) => {
-                 const iterator = AudioSample._fromAudioBuffer(event.inputBuffer, totalDuration);
-                 totalDuration += event.inputBuffer.duration;
-                 for (const audioSample of iterator) {
-                     if (!audioReceived) {
-                         audioReceived = true;
-                         const muxer = this._connectedTrack.output._muxer;
-                         if (muxer.firstMediaStreamTimestamp === null) {
-                             muxer.firstMediaStreamTimestamp = performance.now() / 1000;
-                         }
-                         else {
-                             this._timestampOffset = performance.now() / 1000 - muxer.firstMediaStreamTimestamp;
-                         }
-                     }
-                     if (this._encoder.getQueueSize() >= 4) {
-                         // Drop data if the encoder is overloaded
-                         audioSample.close();
-                         continue;
-                     }
-                     void this._encoder.add(audioSample, true)
-                         .catch((error) => {
-                             void this._audioContext.suspend();
-                             this._promiseWithResolvers.reject(error);
-                         });
-                 }
-             };
-         }
-     }
-     /** @internal */
-     async _flushAndClose(forceClose) {
-         if (this._abortController) {
-             this._abortController.abort();
-             this._abortController = null;
-         }
-         if (this._audioContext) {
-             assert(this._scriptProcessorNode);
-             this._scriptProcessorNode.disconnect();
-             await this._audioContext.suspend();
-         }
-         await this._encoder.flushAndClose(forceClose);
-     }
- }
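A capture sketch for MediaStreamAudioTrackSource (getUserMedia and the Output wiring are assumptions about the calling code, not shown in this file):

    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    const [track] = stream.getAudioTracks();
    const source = new MediaStreamAudioTrackSource(track, { codec: 'opus', bitrate: 64e3 });
    // Read errorPromise so internal capture/encode errors surface; it only ever rejects
    source.errorPromise.catch(console.error);
    // ...add the source to an Output; capture begins once the Output is started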
- const mediaStreamTrackProcessorWorkerCode = () => {
-     const sendMessage = (message, transfer) => {
-         if (transfer) {
-             self.postMessage(message, { transfer });
-         }
-         else {
-             self.postMessage(message);
-         }
-     };
-     // Immediately send a message to the main thread, letting them know of the support
-     sendMessage({
-         type: 'support',
-         supported: typeof MediaStreamTrackProcessor !== 'undefined',
-     });
-     const abortControllers = new Map();
-     const activeTracks = new Map();
-     self.addEventListener('message', (event) => {
-         const message = event.data;
-         switch (message.type) {
-             case 'videoTrack': {
-                 activeTracks.set(message.trackId, message.track);
-                 const processor = new MediaStreamTrackProcessor({ track: message.track });
-                 const consumer = new WritableStream({
-                     write: (videoFrame) => {
-                         if (!activeTracks.has(message.trackId)) {
-                             videoFrame.close();
-                             return;
-                         }
-                         // Send it to the main thread
-                         sendMessage({
-                             type: 'videoFrame',
-                             trackId: message.trackId,
-                             videoFrame,
-                         }, [videoFrame]);
-                     },
-                 });
-                 const abortController = new AbortController();
-                 abortControllers.set(message.trackId, abortController);
-                 processor.readable.pipeTo(consumer, {
-                     signal: abortController.signal,
-                 }).catch((error) => {
-                     // Handle AbortError silently
-                     if (error instanceof DOMException && error.name === 'AbortError')
-                         return;
-                     sendMessage({
-                         type: 'error',
-                         trackId: message.trackId,
-                         error,
-                     });
-                 });
-                 break;
-             }
-             case 'stopTrack': {
-                 const abortController = abortControllers.get(message.trackId);
-                 if (abortController) {
-                     abortController.abort();
-                     abortControllers.delete(message.trackId);
-                 }
-                 const track = activeTracks.get(message.trackId);
-                 track?.stop();
-                 activeTracks.delete(message.trackId);
-                 sendMessage({
-                     type: 'trackStopped',
-                     trackId: message.trackId,
-                 });
-                 break;
-             }
-             default: assertNever(message);
-         }
-     });
- };
- let nextMediaStreamTrackProcessorWorkerId = 0;
- let mediaStreamTrackProcessorWorker = null;
- const initMediaStreamTrackProcessorWorker = () => {
-     const blob = new Blob([`(${mediaStreamTrackProcessorWorkerCode.toString()})()`], { type: 'application/javascript' });
-     const url = URL.createObjectURL(blob);
-     mediaStreamTrackProcessorWorker = new Worker(url);
- };
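The init function above uses function stringification: serialize the worker's entry function, wrap it in an IIFE, and boot it from a Blob URL so no separate worker file has to ship. A generic sketch of the same pattern (workerMain is a hypothetical name):

    const workerMain = () => {
        // Runs inside the worker; note that toString() drops any closed-over variables
        self.postMessage({ type: 'ready' });
    };
    const blob = new Blob([`(${workerMain.toString()})()`], { type: 'application/javascript' });
    const worker = new Worker(URL.createObjectURL(blob));
    worker.addEventListener('message', event => console.log(event.data)); // { type: 'ready' }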
- let mediaStreamTrackProcessorIsSupportedInWorkerCache = null;
- const mediaStreamTrackProcessorIsSupportedInWorker = async () => {
-     if (mediaStreamTrackProcessorIsSupportedInWorkerCache !== null) {
-         return mediaStreamTrackProcessorIsSupportedInWorkerCache;
-     }
-     if (!mediaStreamTrackProcessorWorker) {
-         initMediaStreamTrackProcessorWorker();
-     }
-     return new Promise((resolve) => {
-         assert(mediaStreamTrackProcessorWorker);
-         const listener = (event) => {
-             const message = event.data;
-             if (message.type === 'support') {
-                 mediaStreamTrackProcessorIsSupportedInWorkerCache = message.supported;
-                 mediaStreamTrackProcessorWorker.removeEventListener('message', listener);
-                 resolve(message.supported);
-             }
-         };
-         mediaStreamTrackProcessorWorker.addEventListener('message', listener);
-     });
- };
- const sendMessageToMediaStreamTrackProcessorWorker = (message, transfer) => {
-     assert(mediaStreamTrackProcessorWorker);
-     if (transfer) {
-         mediaStreamTrackProcessorWorker.postMessage(message, transfer);
-     }
-     else {
-         mediaStreamTrackProcessorWorker.postMessage(message);
-     }
- };
- /**
-  * Base class for subtitle sources - sources for subtitle tracks.
-  * @group Media sources
-  * @public
-  */
- export class SubtitleSource extends MediaSource {
-     /** Internal constructor. */
-     constructor(codec) {
-         super();
-         /** @internal */
-         this._connectedTrack = null;
-         if (!SUBTITLE_CODECS.includes(codec)) {
-             throw new TypeError(`Invalid subtitle codec '${codec}'. Must be one of: ${SUBTITLE_CODECS.join(', ')}.`);
-         }
-         this._codec = codec;
-     }
- }
- /**
-  * This source can be used to add subtitles from a subtitle text file.
-  * @group Media sources
-  * @public
-  */
- export class TextSubtitleSource extends SubtitleSource {
-     /** Creates a new {@link TextSubtitleSource} where added text chunks are in the specified `codec`. */
-     constructor(codec) {
-         super(codec);
-         /** @internal */
-         this._error = null;
-         this._parser = new SubtitleParser({
-             codec,
-             output: (cue, metadata) => {
-                 void this._connectedTrack?.output._muxer.addSubtitleCue(this._connectedTrack, cue, metadata)
-                     .catch((error) => {
-                         this._error ??= error;
-                     });
-             },
-         });
-     }
-     /**
-      * Parses the subtitle text according to the specified codec and adds it to the output track. You don't have to
-      * add the entire subtitle file at once here; you can provide it in chunks.
-      *
-      * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
-      * to respect writer and encoder backpressure.
-      */
-     add(text) {
-         if (typeof text !== 'string') {
-             throw new TypeError('text must be a string.');
-         }
-         this._checkForError();
-         this._ensureValidAdd();
-         this._parser.parse(text);
-         return this._connectedTrack.output._muxer.mutex.currentPromise;
-     }
-     /** @internal */
-     _checkForError() {
-         if (this._error) {
-             throw this._error;
-         }
-     }
-     /** @internal */
-     async _flushAndClose(forceClose) {
-         if (!forceClose) {
-             this._checkForError();
-         }
-     }
- }
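A closing sketch for TextSubtitleSource, streamed in chunks (assuming 'webvtt' is among the supported subtitle codecs, consistent with the SUBTITLE_CODECS check above; the Output wiring is omitted):

    const source = new TextSubtitleSource('webvtt');
    // ...add the source as a subtitle track on an Output, then:
    await source.add('WEBVTT\n\n');
    await source.add('00:00.000 --> 00:02.500\nHello world!\n\n');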