@kenzuya/mediabunny 1.26.0 → 1.28.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (237) hide show
  1. package/README.md +1 -1
  2. package/dist/bundles/{mediabunny.mjs → mediabunny.js} +21963 -21388
  3. package/dist/bundles/mediabunny.min.js +490 -0
  4. package/dist/modules/shared/mp3-misc.d.ts.map +1 -1
  5. package/dist/modules/src/adts/adts-demuxer.d.ts +6 -6
  6. package/dist/modules/src/adts/adts-demuxer.d.ts.map +1 -1
  7. package/dist/modules/src/adts/adts-muxer.d.ts +4 -4
  8. package/dist/modules/src/adts/adts-muxer.d.ts.map +1 -1
  9. package/dist/modules/src/adts/adts-reader.d.ts +1 -1
  10. package/dist/modules/src/adts/adts-reader.d.ts.map +1 -1
  11. package/dist/modules/src/avi/avi-demuxer.d.ts +44 -0
  12. package/dist/modules/src/avi/avi-demuxer.d.ts.map +1 -0
  13. package/dist/modules/src/avi/avi-misc.d.ts +88 -0
  14. package/dist/modules/src/avi/avi-misc.d.ts.map +1 -0
  15. package/dist/modules/src/avi/avi-muxer.d.ts +45 -0
  16. package/dist/modules/src/avi/avi-muxer.d.ts.map +1 -0
  17. package/dist/modules/src/avi/riff-writer.d.ts +26 -0
  18. package/dist/modules/src/avi/riff-writer.d.ts.map +1 -0
  19. package/dist/modules/src/codec-data.d.ts +8 -3
  20. package/dist/modules/src/codec-data.d.ts.map +1 -1
  21. package/dist/modules/src/codec.d.ts +10 -10
  22. package/dist/modules/src/codec.d.ts.map +1 -1
  23. package/dist/modules/src/conversion.d.ts +33 -16
  24. package/dist/modules/src/conversion.d.ts.map +1 -1
  25. package/dist/modules/src/custom-coder.d.ts +8 -8
  26. package/dist/modules/src/custom-coder.d.ts.map +1 -1
  27. package/dist/modules/src/demuxer.d.ts +3 -3
  28. package/dist/modules/src/demuxer.d.ts.map +1 -1
  29. package/dist/modules/src/encode.d.ts +8 -8
  30. package/dist/modules/src/encode.d.ts.map +1 -1
  31. package/dist/modules/src/flac/flac-demuxer.d.ts +7 -7
  32. package/dist/modules/src/flac/flac-demuxer.d.ts.map +1 -1
  33. package/dist/modules/src/flac/flac-misc.d.ts +3 -3
  34. package/dist/modules/src/flac/flac-misc.d.ts.map +1 -1
  35. package/dist/modules/src/flac/flac-muxer.d.ts +5 -5
  36. package/dist/modules/src/flac/flac-muxer.d.ts.map +1 -1
  37. package/dist/modules/src/id3.d.ts +3 -3
  38. package/dist/modules/src/id3.d.ts.map +1 -1
  39. package/dist/modules/src/index.d.ts +20 -20
  40. package/dist/modules/src/index.d.ts.map +1 -1
  41. package/dist/modules/src/input-format.d.ts +22 -0
  42. package/dist/modules/src/input-format.d.ts.map +1 -1
  43. package/dist/modules/src/input-track.d.ts +8 -8
  44. package/dist/modules/src/input-track.d.ts.map +1 -1
  45. package/dist/modules/src/input.d.ts +12 -12
  46. package/dist/modules/src/isobmff/isobmff-boxes.d.ts +2 -2
  47. package/dist/modules/src/isobmff/isobmff-boxes.d.ts.map +1 -1
  48. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts +12 -12
  49. package/dist/modules/src/isobmff/isobmff-demuxer.d.ts.map +1 -1
  50. package/dist/modules/src/isobmff/isobmff-misc.d.ts.map +1 -1
  51. package/dist/modules/src/isobmff/isobmff-muxer.d.ts +11 -11
  52. package/dist/modules/src/isobmff/isobmff-muxer.d.ts.map +1 -1
  53. package/dist/modules/src/isobmff/isobmff-reader.d.ts +2 -2
  54. package/dist/modules/src/isobmff/isobmff-reader.d.ts.map +1 -1
  55. package/dist/modules/src/matroska/ebml.d.ts +3 -3
  56. package/dist/modules/src/matroska/ebml.d.ts.map +1 -1
  57. package/dist/modules/src/matroska/matroska-demuxer.d.ts +13 -13
  58. package/dist/modules/src/matroska/matroska-demuxer.d.ts.map +1 -1
  59. package/dist/modules/src/matroska/matroska-input.d.ts +33 -0
  60. package/dist/modules/src/matroska/matroska-input.d.ts.map +1 -0
  61. package/dist/modules/src/matroska/matroska-misc.d.ts.map +1 -1
  62. package/dist/modules/src/matroska/matroska-muxer.d.ts +5 -5
  63. package/dist/modules/src/matroska/matroska-muxer.d.ts.map +1 -1
  64. package/dist/modules/src/media-sink.d.ts +5 -5
  65. package/dist/modules/src/media-sink.d.ts.map +1 -1
  66. package/dist/modules/src/media-source.d.ts +22 -4
  67. package/dist/modules/src/media-source.d.ts.map +1 -1
  68. package/dist/modules/src/metadata.d.ts +2 -2
  69. package/dist/modules/src/metadata.d.ts.map +1 -1
  70. package/dist/modules/src/misc.d.ts +5 -4
  71. package/dist/modules/src/misc.d.ts.map +1 -1
  72. package/dist/modules/src/mp3/mp3-demuxer.d.ts +7 -7
  73. package/dist/modules/src/mp3/mp3-demuxer.d.ts.map +1 -1
  74. package/dist/modules/src/mp3/mp3-muxer.d.ts +4 -4
  75. package/dist/modules/src/mp3/mp3-muxer.d.ts.map +1 -1
  76. package/dist/modules/src/mp3/mp3-reader.d.ts +2 -2
  77. package/dist/modules/src/mp3/mp3-reader.d.ts.map +1 -1
  78. package/dist/modules/src/mp3/mp3-writer.d.ts +1 -1
  79. package/dist/modules/src/mp3/mp3-writer.d.ts.map +1 -1
  80. package/dist/modules/src/muxer.d.ts +4 -4
  81. package/dist/modules/src/muxer.d.ts.map +1 -1
  82. package/dist/modules/src/node.d.ts +1 -1
  83. package/dist/modules/src/ogg/ogg-demuxer.d.ts +7 -7
  84. package/dist/modules/src/ogg/ogg-demuxer.d.ts.map +1 -1
  85. package/dist/modules/src/ogg/ogg-misc.d.ts +1 -1
  86. package/dist/modules/src/ogg/ogg-misc.d.ts.map +1 -1
  87. package/dist/modules/src/ogg/ogg-muxer.d.ts +5 -5
  88. package/dist/modules/src/ogg/ogg-muxer.d.ts.map +1 -1
  89. package/dist/modules/src/ogg/ogg-reader.d.ts +1 -1
  90. package/dist/modules/src/ogg/ogg-reader.d.ts.map +1 -1
  91. package/dist/modules/src/output-format.d.ts +51 -6
  92. package/dist/modules/src/output-format.d.ts.map +1 -1
  93. package/dist/modules/src/output.d.ts +13 -13
  94. package/dist/modules/src/output.d.ts.map +1 -1
  95. package/dist/modules/src/packet.d.ts +1 -1
  96. package/dist/modules/src/packet.d.ts.map +1 -1
  97. package/dist/modules/src/pcm.d.ts.map +1 -1
  98. package/dist/modules/src/reader.d.ts +2 -2
  99. package/dist/modules/src/reader.d.ts.map +1 -1
  100. package/dist/modules/src/sample.d.ts +57 -15
  101. package/dist/modules/src/sample.d.ts.map +1 -1
  102. package/dist/modules/src/source.d.ts +3 -3
  103. package/dist/modules/src/source.d.ts.map +1 -1
  104. package/dist/modules/src/subtitles.d.ts +1 -1
  105. package/dist/modules/src/subtitles.d.ts.map +1 -1
  106. package/dist/modules/src/target.d.ts +2 -2
  107. package/dist/modules/src/target.d.ts.map +1 -1
  108. package/dist/modules/src/tsconfig.tsbuildinfo +1 -1
  109. package/dist/modules/src/wave/riff-writer.d.ts +1 -1
  110. package/dist/modules/src/wave/riff-writer.d.ts.map +1 -1
  111. package/dist/modules/src/wave/wave-demuxer.d.ts +6 -6
  112. package/dist/modules/src/wave/wave-demuxer.d.ts.map +1 -1
  113. package/dist/modules/src/wave/wave-muxer.d.ts +4 -4
  114. package/dist/modules/src/wave/wave-muxer.d.ts.map +1 -1
  115. package/dist/modules/src/writer.d.ts +1 -1
  116. package/dist/modules/src/writer.d.ts.map +1 -1
  117. package/dist/packages/eac3/eac3.wasm +0 -0
  118. package/dist/packages/eac3/mediabunny-eac3.js +1058 -0
  119. package/dist/packages/eac3/mediabunny-eac3.min.js +44 -0
  120. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.js +694 -0
  121. package/dist/packages/mp3-encoder/mediabunny-mp3-encoder.min.js +58 -0
  122. package/dist/packages/mpeg4/mediabunny-mpeg4.js +1198 -0
  123. package/dist/packages/mpeg4/mediabunny-mpeg4.min.js +44 -0
  124. package/dist/packages/mpeg4/xvid.wasm +0 -0
  125. package/package.json +18 -57
  126. package/dist/bundles/mediabunny.cjs +0 -26140
  127. package/dist/bundles/mediabunny.min.cjs +0 -147
  128. package/dist/bundles/mediabunny.min.mjs +0 -146
  129. package/dist/mediabunny.d.ts +0 -3319
  130. package/dist/modules/shared/mp3-misc.js +0 -147
  131. package/dist/modules/src/adts/adts-demuxer.js +0 -239
  132. package/dist/modules/src/adts/adts-muxer.js +0 -80
  133. package/dist/modules/src/adts/adts-reader.js +0 -63
  134. package/dist/modules/src/codec-data.js +0 -1730
  135. package/dist/modules/src/codec.js +0 -869
  136. package/dist/modules/src/conversion.js +0 -1459
  137. package/dist/modules/src/custom-coder.js +0 -117
  138. package/dist/modules/src/demuxer.js +0 -12
  139. package/dist/modules/src/encode.js +0 -442
  140. package/dist/modules/src/flac/flac-demuxer.js +0 -504
  141. package/dist/modules/src/flac/flac-misc.js +0 -135
  142. package/dist/modules/src/flac/flac-muxer.js +0 -222
  143. package/dist/modules/src/id3.js +0 -848
  144. package/dist/modules/src/index.js +0 -28
  145. package/dist/modules/src/input-format.js +0 -480
  146. package/dist/modules/src/input-track.js +0 -372
  147. package/dist/modules/src/input.js +0 -188
  148. package/dist/modules/src/isobmff/isobmff-boxes.js +0 -1480
  149. package/dist/modules/src/isobmff/isobmff-demuxer.js +0 -2618
  150. package/dist/modules/src/isobmff/isobmff-misc.js +0 -20
  151. package/dist/modules/src/isobmff/isobmff-muxer.js +0 -966
  152. package/dist/modules/src/isobmff/isobmff-reader.js +0 -72
  153. package/dist/modules/src/matroska/ebml.js +0 -653
  154. package/dist/modules/src/matroska/matroska-demuxer.js +0 -2133
  155. package/dist/modules/src/matroska/matroska-misc.js +0 -20
  156. package/dist/modules/src/matroska/matroska-muxer.js +0 -1017
  157. package/dist/modules/src/media-sink.js +0 -1736
  158. package/dist/modules/src/media-source.js +0 -1825
  159. package/dist/modules/src/metadata.js +0 -193
  160. package/dist/modules/src/misc.js +0 -623
  161. package/dist/modules/src/mp3/mp3-demuxer.js +0 -285
  162. package/dist/modules/src/mp3/mp3-muxer.js +0 -123
  163. package/dist/modules/src/mp3/mp3-reader.js +0 -26
  164. package/dist/modules/src/mp3/mp3-writer.js +0 -78
  165. package/dist/modules/src/muxer.js +0 -50
  166. package/dist/modules/src/node.js +0 -9
  167. package/dist/modules/src/ogg/ogg-demuxer.js +0 -763
  168. package/dist/modules/src/ogg/ogg-misc.js +0 -78
  169. package/dist/modules/src/ogg/ogg-muxer.js +0 -353
  170. package/dist/modules/src/ogg/ogg-reader.js +0 -65
  171. package/dist/modules/src/output-format.js +0 -527
  172. package/dist/modules/src/output.js +0 -300
  173. package/dist/modules/src/packet.js +0 -182
  174. package/dist/modules/src/pcm.js +0 -85
  175. package/dist/modules/src/reader.js +0 -236
  176. package/dist/modules/src/sample.js +0 -1056
  177. package/dist/modules/src/source.js +0 -1182
  178. package/dist/modules/src/subtitles.js +0 -575
  179. package/dist/modules/src/target.js +0 -140
  180. package/dist/modules/src/wave/riff-writer.js +0 -30
  181. package/dist/modules/src/wave/wave-demuxer.js +0 -447
  182. package/dist/modules/src/wave/wave-muxer.js +0 -318
  183. package/dist/modules/src/writer.js +0 -370
  184. package/src/adts/adts-demuxer.ts +0 -331
  185. package/src/adts/adts-muxer.ts +0 -111
  186. package/src/adts/adts-reader.ts +0 -85
  187. package/src/codec-data.ts +0 -2078
  188. package/src/codec.ts +0 -1092
  189. package/src/conversion.ts +0 -2112
  190. package/src/custom-coder.ts +0 -197
  191. package/src/demuxer.ts +0 -24
  192. package/src/encode.ts +0 -739
  193. package/src/flac/flac-demuxer.ts +0 -730
  194. package/src/flac/flac-misc.ts +0 -164
  195. package/src/flac/flac-muxer.ts +0 -320
  196. package/src/id3.ts +0 -925
  197. package/src/index.ts +0 -221
  198. package/src/input-format.ts +0 -541
  199. package/src/input-track.ts +0 -529
  200. package/src/input.ts +0 -235
  201. package/src/isobmff/isobmff-boxes.ts +0 -1719
  202. package/src/isobmff/isobmff-demuxer.ts +0 -3190
  203. package/src/isobmff/isobmff-misc.ts +0 -29
  204. package/src/isobmff/isobmff-muxer.ts +0 -1348
  205. package/src/isobmff/isobmff-reader.ts +0 -91
  206. package/src/matroska/ebml.ts +0 -730
  207. package/src/matroska/matroska-demuxer.ts +0 -2481
  208. package/src/matroska/matroska-misc.ts +0 -29
  209. package/src/matroska/matroska-muxer.ts +0 -1276
  210. package/src/media-sink.ts +0 -2179
  211. package/src/media-source.ts +0 -2243
  212. package/src/metadata.ts +0 -320
  213. package/src/misc.ts +0 -798
  214. package/src/mp3/mp3-demuxer.ts +0 -383
  215. package/src/mp3/mp3-muxer.ts +0 -166
  216. package/src/mp3/mp3-reader.ts +0 -34
  217. package/src/mp3/mp3-writer.ts +0 -120
  218. package/src/muxer.ts +0 -88
  219. package/src/node.ts +0 -11
  220. package/src/ogg/ogg-demuxer.ts +0 -1053
  221. package/src/ogg/ogg-misc.ts +0 -116
  222. package/src/ogg/ogg-muxer.ts +0 -497
  223. package/src/ogg/ogg-reader.ts +0 -93
  224. package/src/output-format.ts +0 -945
  225. package/src/output.ts +0 -488
  226. package/src/packet.ts +0 -263
  227. package/src/pcm.ts +0 -112
  228. package/src/reader.ts +0 -323
  229. package/src/sample.ts +0 -1461
  230. package/src/source.ts +0 -1688
  231. package/src/subtitles.ts +0 -711
  232. package/src/target.ts +0 -204
  233. package/src/tsconfig.json +0 -16
  234. package/src/wave/riff-writer.ts +0 -36
  235. package/src/wave/wave-demuxer.ts +0 -529
  236. package/src/wave/wave-muxer.ts +0 -371
  237. package/src/writer.ts +0 -490
@@ -1,2243 +0,0 @@
1
- /*!
2
- * Copyright (c) 2025-present, Vanilagy and contributors
3
- *
4
- * This Source Code Form is subject to the terms of the Mozilla Public
5
- * License, v. 2.0. If a copy of the MPL was not distributed with this
6
- * file, You can obtain one at https://mozilla.org/MPL/2.0/.
7
- */
8
-
9
- import {
10
- AUDIO_CODECS,
11
- AudioCodec,
12
- buildAacAudioSpecificConfig,
13
- parseAacAudioSpecificConfig,
14
- parsePcmCodec,
15
- PCM_AUDIO_CODECS,
16
- PcmAudioCodec,
17
- SUBTITLE_CODECS,
18
- SubtitleCodec,
19
- VIDEO_CODECS,
20
- VideoCodec,
21
- } from './codec';
22
- import { OutputAudioTrack, OutputSubtitleTrack, OutputTrack, OutputVideoTrack } from './output';
23
- import {
24
- assert,
25
- assertNever,
26
- CallSerializer,
27
- clamp,
28
- isFirefox,
29
- last,
30
- promiseWithResolvers,
31
- setInt24,
32
- setUint24,
33
- toUint8Array,
34
- } from './misc';
35
- import { Muxer } from './muxer';
36
- import { SubtitleParser } from './subtitles';
37
- import { toAlaw, toUlaw } from './pcm';
38
- import {
39
- CustomVideoEncoder,
40
- CustomAudioEncoder,
41
- customVideoEncoders,
42
- customAudioEncoders,
43
- } from './custom-coder';
44
- import { EncodedPacket, EncodedPacketSideData } from './packet';
45
- import { AudioSample, VideoSample } from './sample';
46
- import {
47
- AudioEncodingConfig,
48
- buildAudioEncoderConfig,
49
- buildVideoEncoderConfig,
50
- validateAudioEncodingConfig,
51
- validateVideoEncodingConfig,
52
- VideoEncodingConfig,
53
- } from './encode';
54
-
55
- /**
56
- * Base class for media sources. Media sources are used to add media samples to an output file.
57
- * @group Media sources
58
- * @public
59
- */
60
- export abstract class MediaSource {
61
- /** @internal */
62
- _connectedTrack: OutputTrack | null = null;
63
- /** @internal */
64
- _closingPromise: Promise<void> | null = null;
65
- /** @internal */
66
- _closed = false;
67
- /**
68
- * @internal
69
- * A time offset in seconds that is added to all timestamps generated by this source.
70
- */
71
- _timestampOffset = 0;
72
-
73
- /** @internal */
74
- _ensureValidAdd() {
75
- if (!this._connectedTrack) {
76
- throw new Error('Source is not connected to an output track.');
77
- }
78
-
79
- if (this._connectedTrack.output.state === 'canceled') {
80
- throw new Error('Output has been canceled.');
81
- }
82
-
83
- if (this._connectedTrack.output.state === 'finalizing' || this._connectedTrack.output.state === 'finalized') {
84
- throw new Error('Output has been finalized.');
85
- }
86
-
87
- if (this._connectedTrack.output.state === 'pending') {
88
- throw new Error('Output has not started.');
89
- }
90
-
91
- if (this._closed) {
92
- throw new Error('Source is closed.');
93
- }
94
- }
95
-
96
- /** @internal */
97
- async _start() {}
98
- /** @internal */
99
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
100
- async _flushAndClose(forceClose: boolean) {}
101
-
102
- /**
103
- * Closes this source. This prevents future samples from being added and signals to the output file that no further
104
- * samples will come in for this track. Calling `.close()` is optional but recommended after adding the
105
- * last sample - for improved performance and reduced memory usage.
106
- */
107
- close() {
108
- if (this._closingPromise) {
109
- return;
110
- }
111
-
112
- const connectedTrack = this._connectedTrack;
113
-
114
- if (!connectedTrack) {
115
- throw new Error('Cannot call close without connecting the source to an output track.');
116
- }
117
-
118
- if (connectedTrack.output.state === 'pending') {
119
- throw new Error('Cannot call close before output has been started.');
120
- }
121
-
122
- this._closingPromise = (async () => {
123
- await this._flushAndClose(false);
124
-
125
- this._closed = true;
126
-
127
- if (connectedTrack.output.state === 'finalizing' || connectedTrack.output.state === 'finalized') {
128
- return;
129
- }
130
-
131
- connectedTrack.output._muxer.onTrackClose(connectedTrack);
132
- })();
133
- }
134
-
135
- /** @internal */
136
- async _flushOrWaitForOngoingClose(forceClose: boolean) {
137
- if (this._closingPromise) {
138
- // Since closing also flushes, we don't want to do it twice
139
- return this._closingPromise;
140
- } else {
141
- return this._flushAndClose(forceClose);
142
- }
143
- }
144
- }
145
-
146
- /**
147
- * Base class for video sources - sources for video tracks.
148
- * @group Media sources
149
- * @public
150
- */
151
- export abstract class VideoSource extends MediaSource {
152
- /** @internal */
153
- override _connectedTrack: OutputVideoTrack | null = null;
154
- /** @internal */
155
- _codec: VideoCodec;
156
-
157
- /** Internal constructor. */
158
- constructor(codec: VideoCodec) {
159
- super();
160
-
161
- if (!VIDEO_CODECS.includes(codec)) {
162
- throw new TypeError(`Invalid video codec '${codec}'. Must be one of: ${VIDEO_CODECS.join(', ')}.`);
163
- }
164
-
165
- this._codec = codec;
166
- }
167
- }
168
-
169
- /**
170
- * The most basic video source; can be used to directly pipe encoded packets into the output file.
171
- * @group Media sources
172
- * @public
173
- */
174
- export class EncodedVideoPacketSource extends VideoSource {
175
- /** Creates a new {@link EncodedVideoPacketSource} whose packets are encoded using `codec`. */
176
- constructor(codec: VideoCodec) {
177
- super(codec);
178
- }
179
-
180
- /**
181
- * Adds an encoded packet to the output video track. Packets must be added in *decode order*, while a packet's
182
- * timestamp must be its *presentation timestamp*. B-frames are handled automatically.
183
- *
184
- * @param meta - Additional metadata from the encoder. You should pass this for the first call, including a valid
185
- * decoder config.
186
- *
187
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
188
- * to respect writer and encoder backpressure.
189
- */
190
- add(packet: EncodedPacket, meta?: EncodedVideoChunkMetadata) {
191
- if (!(packet instanceof EncodedPacket)) {
192
- throw new TypeError('packet must be an EncodedPacket.');
193
- }
194
- if (packet.isMetadataOnly) {
195
- throw new TypeError('Metadata-only packets cannot be added.');
196
- }
197
- if (meta !== undefined && (!meta || typeof meta !== 'object')) {
198
- throw new TypeError('meta, when provided, must be an object.');
199
- }
200
-
201
- this._ensureValidAdd();
202
- return this._connectedTrack!.output._muxer.addEncodedVideoPacket(this._connectedTrack!, packet, meta);
203
- }
204
- }
205
-
206
- class VideoEncoderWrapper {
207
- private ensureEncoderPromise: Promise<void> | null = null;
208
- private encoderInitialized = false;
209
- private encoder: VideoEncoder | null = null;
210
- private muxer: Muxer | null = null;
211
- private lastMultipleOfKeyFrameInterval = -1;
212
- private codedWidth: number | null = null;
213
- private codedHeight: number | null = null;
214
- private resizeCanvas: HTMLCanvasElement | OffscreenCanvas | null = null;
215
-
216
- private customEncoder: CustomVideoEncoder | null = null;
217
- private customEncoderCallSerializer = new CallSerializer();
218
- private customEncoderQueueSize = 0;
219
-
220
- // Alpha stuff
221
- private alphaEncoder: VideoEncoder | null = null;
222
- private splitter: ColorAlphaSplitter | null = null;
223
- private splitterCreationFailed = false;
224
- private alphaFrameQueue: (VideoFrame | null)[] = [];
225
-
226
- /**
227
- * Encoders typically throw their errors "out of band", meaning asynchronously in some other execution context.
228
- * However, we want to surface these errors to the user within the normal control flow, so they don't go uncaught.
229
- * So, we keep track of the encoder error and throw it as soon as we get the chance.
230
- */
231
- private error: Error | null = null;
232
- private errorNeedsNewStack = true;
233
-
234
- constructor(private source: VideoSource, private encodingConfig: VideoEncodingConfig) {}
235
-
236
- async add(videoSample: VideoSample, shouldClose: boolean, encodeOptions?: VideoEncoderEncodeOptions) {
237
- try {
238
- this.checkForEncoderError();
239
- this.source._ensureValidAdd();
240
-
241
- // Ensure video sample size remains constant
242
- if (this.codedWidth !== null && this.codedHeight !== null) {
243
- if (videoSample.codedWidth !== this.codedWidth || videoSample.codedHeight !== this.codedHeight) {
244
- const sizeChangeBehavior = this.encodingConfig.sizeChangeBehavior ?? 'deny';
245
-
246
- if (sizeChangeBehavior === 'passThrough') {
247
- // Do nada
248
- } else if (sizeChangeBehavior === 'deny') {
249
- throw new Error(
250
- `Video sample size must remain constant. Expected ${this.codedWidth}x${this.codedHeight},`
251
- + ` got ${videoSample.codedWidth}x${videoSample.codedHeight}. To allow the sample size to`
252
- + ` change over time, set \`sizeChangeBehavior\` to a value other than 'strict' in the`
253
- + ` encoding options.`,
254
- );
255
- } else {
256
- let canvasIsNew = false;
257
-
258
- if (!this.resizeCanvas) {
259
- if (typeof document !== 'undefined') {
260
- // Prefer an HTMLCanvasElement
261
- this.resizeCanvas = document.createElement('canvas');
262
- this.resizeCanvas.width = this.codedWidth;
263
- this.resizeCanvas.height = this.codedHeight;
264
- } else {
265
- this.resizeCanvas = new OffscreenCanvas(this.codedWidth, this.codedHeight);
266
- }
267
-
268
- canvasIsNew = true;
269
- }
270
-
271
- const context = this.resizeCanvas.getContext('2d', {
272
- alpha: isFirefox(), // Firefox has VideoFrame glitches with opaque canvases
273
- }) as CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D;
274
- assert(context);
275
-
276
- if (!canvasIsNew) {
277
- if (isFirefox()) {
278
- context.fillStyle = 'black';
279
- context.fillRect(0, 0, this.codedWidth, this.codedHeight);
280
- } else {
281
- context.clearRect(0, 0, this.codedWidth, this.codedHeight);
282
- }
283
- }
284
-
285
- videoSample.drawWithFit(context, { fit: sizeChangeBehavior });
286
-
287
- if (shouldClose) {
288
- videoSample.close();
289
- }
290
-
291
- videoSample = new VideoSample(this.resizeCanvas, {
292
- timestamp: videoSample.timestamp,
293
- duration: videoSample.duration,
294
- rotation: videoSample.rotation,
295
- });
296
- shouldClose = true;
297
- }
298
- }
299
- } else {
300
- this.codedWidth = videoSample.codedWidth;
301
- this.codedHeight = videoSample.codedHeight;
302
- }
303
-
304
- if (!this.encoderInitialized) {
305
- if (!this.ensureEncoderPromise) {
306
- this.ensureEncoder(videoSample);
307
- }
308
-
309
- // No, this "if" statement is not useless. Sometimes, the above call to `ensureEncoder` might have
310
- // synchronously completed and the encoder is already initialized. In this case, we don't need to await
311
- // the promise anymore. This also fixes nasty async race condition bugs when multiple code paths are
312
- // calling this method: It's important that the call that initialized the encoder go through this
313
- // code first.
314
- if (!this.encoderInitialized) {
315
- await this.ensureEncoderPromise;
316
- }
317
- }
318
- assert(this.encoderInitialized);
319
-
320
- const keyFrameInterval = this.encodingConfig.keyFrameInterval ?? 5;
321
- const multipleOfKeyFrameInterval = Math.floor(videoSample.timestamp / keyFrameInterval);
322
-
323
- // Ensure a key frame every keyFrameInterval seconds. It is important that all video tracks follow the same
324
- // "key frame" rhythm, because aligned key frames are required to start new fragments in ISOBMFF or clusters
325
- // in Matroska (or at least desirable).
326
- const finalEncodeOptions = {
327
- ...encodeOptions,
328
- keyFrame: encodeOptions?.keyFrame
329
- || keyFrameInterval === 0
330
- || multipleOfKeyFrameInterval !== this.lastMultipleOfKeyFrameInterval,
331
- };
332
- this.lastMultipleOfKeyFrameInterval = multipleOfKeyFrameInterval;
333
-
334
- if (this.customEncoder) {
335
- this.customEncoderQueueSize++;
336
-
337
- // We clone the sample so it cannot be closed on us from the outside before it reaches the encoder
338
- const clonedSample = videoSample.clone();
339
-
340
- const promise = this.customEncoderCallSerializer
341
- .call(() => this.customEncoder!.encode(clonedSample, finalEncodeOptions))
342
- .then(() => this.customEncoderQueueSize--)
343
- .catch((error: Error) => this.error ??= error)
344
- .finally(() => {
345
- clonedSample.close();
346
- // `videoSample` gets closed in the finally block at the end of the method
347
- });
348
-
349
- if (this.customEncoderQueueSize >= 4) {
350
- await promise;
351
- }
352
- } else {
353
- assert(this.encoder);
354
-
355
- const videoFrame = videoSample.toVideoFrame();
356
-
357
- if (!this.alphaEncoder) {
358
- // No alpha encoder, simple case
359
- this.encoder.encode(videoFrame, finalEncodeOptions);
360
- videoFrame.close();
361
- } else {
362
- // We're expected to encode alpha as well
363
- const frameDefinitelyHasNoAlpha = !!videoFrame.format && !videoFrame.format.includes('A');
364
-
365
- if (frameDefinitelyHasNoAlpha || this.splitterCreationFailed) {
366
- this.alphaFrameQueue.push(null);
367
- this.encoder.encode(videoFrame, finalEncodeOptions);
368
- videoFrame.close();
369
- } else {
370
- const width = videoFrame.displayWidth;
371
- const height = videoFrame.displayHeight;
372
-
373
- if (!this.splitter) {
374
- try {
375
- this.splitter = new ColorAlphaSplitter(width, height);
376
- } catch (error) {
377
- console.error('Due to an error, only color data will be encoded.', error);
378
-
379
- this.splitterCreationFailed = true;
380
- this.alphaFrameQueue.push(null);
381
- this.encoder.encode(videoFrame, finalEncodeOptions);
382
- videoFrame.close();
383
- }
384
- }
385
-
386
- if (this.splitter) {
387
- const colorFrame = this.splitter.extractColor(videoFrame);
388
- const alphaFrame = this.splitter.extractAlpha(videoFrame);
389
-
390
- this.alphaFrameQueue.push(alphaFrame);
391
- this.encoder.encode(colorFrame, finalEncodeOptions);
392
- colorFrame.close();
393
- videoFrame.close();
394
- }
395
- }
396
- }
397
-
398
- if (shouldClose) {
399
- videoSample.close();
400
- }
401
-
402
- // We need to do this after sending the frame to the encoder as the frame otherwise might be closed
403
- if (this.encoder.encodeQueueSize >= 4) {
404
- await new Promise(resolve => this.encoder!.addEventListener('dequeue', resolve, { once: true }));
405
- }
406
- }
407
-
408
- await this.muxer!.mutex.currentPromise; // Allow the writer to apply backpressure
409
- } finally {
410
- if (shouldClose) {
411
- // Make sure it's always closed, even if there was an error
412
- videoSample.close();
413
- }
414
- }
415
- }
416
-
417
- private ensureEncoder(videoSample: VideoSample) {
418
- const encoderError = new Error();
419
- this.ensureEncoderPromise = (async () => {
420
- const encoderConfig = buildVideoEncoderConfig({
421
- width: videoSample.codedWidth,
422
- height: videoSample.codedHeight,
423
- ...this.encodingConfig,
424
- framerate: this.source._connectedTrack?.metadata.frameRate,
425
- });
426
- this.encodingConfig.onEncoderConfig?.(encoderConfig);
427
-
428
- const MatchingCustomEncoder = customVideoEncoders.find(x => x.supports(
429
- this.encodingConfig.codec,
430
- encoderConfig,
431
- ));
432
-
433
- if (MatchingCustomEncoder) {
434
- // @ts-expect-error "Can't create instance of abstract class 🤓"
435
- this.customEncoder = new MatchingCustomEncoder() as CustomVideoEncoder;
436
- // @ts-expect-error It's technically readonly
437
- this.customEncoder.codec = this.encodingConfig.codec;
438
- // @ts-expect-error It's technically readonly
439
- this.customEncoder.config = encoderConfig;
440
- // @ts-expect-error It's technically readonly
441
- this.customEncoder.onPacket = (packet, meta) => {
442
- if (!(packet instanceof EncodedPacket)) {
443
- throw new TypeError('The first argument passed to onPacket must be an EncodedPacket.');
444
- }
445
- if (meta !== undefined && (!meta || typeof meta !== 'object')) {
446
- throw new TypeError('The second argument passed to onPacket must be an object or undefined.');
447
- }
448
-
449
- this.encodingConfig.onEncodedPacket?.(packet, meta);
450
- void this.muxer!.addEncodedVideoPacket(this.source._connectedTrack!, packet, meta)
451
- .catch((error) => {
452
- this.error ??= error;
453
- this.errorNeedsNewStack = false;
454
- });
455
- };
456
-
457
- await this.customEncoder.init();
458
- } else {
459
- if (typeof VideoEncoder === 'undefined') {
460
- throw new Error('VideoEncoder is not supported by this browser.');
461
- }
462
-
463
- encoderConfig.alpha = 'discard'; // Since we handle alpha ourselves
464
-
465
- if (this.encodingConfig.alpha === 'keep') {
466
- // Encoding alpha requires using two parallel encoders, so we need to make sure they stay in sync
467
- // and that neither of them drops frames. Setting latencyMode to 'quality' achieves this, because
468
- // "User Agents MUST not drop frames to achieve the target bitrate and/or framerate."
469
- encoderConfig.latencyMode = 'quality';
470
- }
471
-
472
- const hasOddDimension = encoderConfig.width % 2 === 1 || encoderConfig.height % 2 === 1;
473
- if (
474
- hasOddDimension
475
- && (this.encodingConfig.codec === 'avc' || this.encodingConfig.codec === 'hevc')
476
- ) {
477
- // Throw a special error for this case as it gets hit often
478
- throw new Error(
479
- `The dimensions ${encoderConfig.width}x${encoderConfig.height} are not supported for codec`
480
- + ` '${this.encodingConfig.codec}'; both width and height must be even numbers. Make sure to`
481
- + ` round your dimensions to the nearest even number.`,
482
- );
483
- }
484
-
485
- const support = await VideoEncoder.isConfigSupported(encoderConfig);
486
- if (!support.supported) {
487
- throw new Error(
488
- `This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,`
489
- + ` ${encoderConfig.width}x${encoderConfig.height}, hardware acceleration:`
490
- + ` ${encoderConfig.hardwareAcceleration ?? 'no-preference'}) is not supported by this browser.`
491
- + ` Consider using another codec or changing your video parameters.`,
492
- );
493
- }
494
-
495
- /** Queue of color chunks waiting for their alpha counterpart. */
496
- const colorChunkQueue: {
497
- chunk: EncodedVideoChunk;
498
- meta: EncodedVideoChunkMetadata | undefined;
499
- }[] = [];
500
- /** Each value is the number of encoded alpha chunks at which a null alpha chunk should be added. */
501
- const nullAlphaChunkQueue: number[] = [];
502
- let encodedAlphaChunkCount = 0;
503
- let alphaEncoderQueue = 0;
504
-
505
- const addPacket = (
506
- colorChunk: EncodedVideoChunk,
507
- alphaChunk: EncodedVideoChunk | null,
508
- meta: EncodedVideoChunkMetadata | undefined,
509
- ) => {
510
- const sideData: EncodedPacketSideData = {};
511
-
512
- if (alphaChunk) {
513
- const alphaData = new Uint8Array(alphaChunk.byteLength);
514
- alphaChunk.copyTo(alphaData);
515
-
516
- sideData.alpha = alphaData;
517
- }
518
-
519
- const packet = EncodedPacket.fromEncodedChunk(colorChunk, sideData);
520
-
521
- this.encodingConfig.onEncodedPacket?.(packet, meta);
522
- void this.muxer!.addEncodedVideoPacket(this.source._connectedTrack!, packet, meta)
523
- .catch((error) => {
524
- this.error ??= error;
525
- this.errorNeedsNewStack = false;
526
- });
527
- };
528
-
529
- this.encoder = new VideoEncoder({
530
- output: (chunk, meta) => {
531
- if (!this.alphaEncoder) {
532
- // We're done
533
- addPacket(chunk, null, meta);
534
- return;
535
- }
536
-
537
- const alphaFrame = this.alphaFrameQueue.shift();
538
- assert(alphaFrame !== undefined);
539
-
540
- if (alphaFrame) {
541
- this.alphaEncoder.encode(alphaFrame, {
542
- // Crucial: The alpha frame is forced to be a key frame whenever the color frame
543
- // also is. Without this, playback can glitch and even crash in some browsers.
544
- // This is the reason why the two encoders are wired in series and not in parallel.
545
- keyFrame: chunk.type === 'key',
546
- });
547
- alphaEncoderQueue++;
548
- alphaFrame.close();
549
- colorChunkQueue.push({ chunk, meta });
550
- } else {
551
- // There was no alpha component for this frame
552
- if (alphaEncoderQueue === 0) {
553
- // No pending alpha encodes either, so we're done
554
- addPacket(chunk, null, meta);
555
- } else {
556
- // There are still alpha encodes pending, so we can't add the packet immediately since
557
- // we'd end up with out-of-order packets. Instead, let's queue a null alpha chunk to be
558
- // added in the future, after the current encoder workload has completed:
559
- nullAlphaChunkQueue.push(encodedAlphaChunkCount + alphaEncoderQueue);
560
- colorChunkQueue.push({ chunk, meta });
561
- }
562
- }
563
- },
564
- error: (error) => {
565
- error.stack = encoderError.stack; // Provide a more useful stack trace
566
- this.error ??= error;
567
- },
568
- });
569
- this.encoder.configure(encoderConfig);
570
-
571
- if (this.encodingConfig.alpha === 'keep') {
572
- // We need to encode alpha as well, which we do with a separate encoder
573
- this.alphaEncoder = new VideoEncoder({
574
- // We ignore the alpha chunk's metadata
575
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
576
- output: (chunk, meta) => {
577
- alphaEncoderQueue--;
578
-
579
- // There has to be a color chunk because the encoders are wired in series
580
- const colorChunk = colorChunkQueue.shift();
581
- assert(colorChunk !== undefined);
582
-
583
- addPacket(colorChunk.chunk, chunk, colorChunk.meta);
584
-
585
- // See if there are any null alpha chunks queued up
586
- encodedAlphaChunkCount++;
587
- while (
588
- nullAlphaChunkQueue.length > 0
589
- && nullAlphaChunkQueue[0] === encodedAlphaChunkCount
590
- ) {
591
- nullAlphaChunkQueue.shift();
592
- const colorChunk = colorChunkQueue.shift();
593
- assert(colorChunk !== undefined);
594
-
595
- addPacket(colorChunk.chunk, null, colorChunk.meta);
596
- }
597
- },
598
- error: (error) => {
599
- error.stack = encoderError.stack; // Provide a more useful stack trace
600
- this.error ??= error;
601
- },
602
- });
603
- this.alphaEncoder.configure(encoderConfig);
604
- }
605
- }
606
-
607
- assert(this.source._connectedTrack);
608
- this.muxer = this.source._connectedTrack.output._muxer;
609
-
610
- this.encoderInitialized = true;
611
- })();
612
- }
613
-
614
- async flushAndClose(forceClose: boolean) {
615
- if (!forceClose) this.checkForEncoderError();
616
-
617
- if (this.customEncoder) {
618
- if (!forceClose) {
619
- void this.customEncoderCallSerializer.call(() => this.customEncoder!.flush());
620
- }
621
-
622
- await this.customEncoderCallSerializer.call(() => this.customEncoder!.close());
623
- } else if (this.encoder) {
624
- if (!forceClose) {
625
- // These are wired in series, therefore they must also be flushed in series
626
- await this.encoder.flush();
627
- await this.alphaEncoder?.flush();
628
- }
629
-
630
- if (this.encoder.state !== 'closed') {
631
- this.encoder.close();
632
- }
633
- if (this.alphaEncoder && this.alphaEncoder.state !== 'closed') {
634
- this.alphaEncoder.close();
635
- }
636
-
637
- this.alphaFrameQueue.forEach(x => x?.close());
638
-
639
- this.splitter?.close();
640
- }
641
-
642
- if (!forceClose) this.checkForEncoderError();
643
- }
644
-
645
- getQueueSize() {
646
- if (this.customEncoder) {
647
- return this.customEncoderQueueSize;
648
- } else {
649
- // Because the color and alpha encoders are wired in series, there's no need to also include the alpha
650
- // encoder's queue size here
651
- return this.encoder?.encodeQueueSize ?? 0;
652
- }
653
- }
654
-
655
- checkForEncoderError() {
656
- if (this.error) {
657
- if (this.errorNeedsNewStack) {
658
- this.error.stack = new Error().stack; // Provide an even more useful stack trace
659
- }
660
-
661
- throw this.error;
662
- }
663
- }
664
- }
665
-
666
/**
 * Utility class for splitting a composite frame into separate color and alpha components.
 *
 * Renders the source VideoFrame into a WebGL 2 canvas twice: once with a trivial shader that
 * drops alpha (color pass), and once with a shader that packs the alpha channel into raw I420
 * bytes written across the canvas's RGBA output (alpha pass), which is then read back with
 * `readPixels` and wrapped in a new I420 `VideoFrame`.
 */
class ColorAlphaSplitter {
	// The render target; OffscreenCanvas when available, otherwise a DOM canvas element
	canvas: OffscreenCanvas | HTMLCanvasElement;

	private gl: WebGL2RenderingContext;
	private colorProgram: WebGLProgram;
	private alphaProgram: WebGLProgram;
	private vao: WebGLVertexArrayObject;
	private sourceTexture: WebGLTexture;
	// Caches the last uploaded frame so extractColor + extractAlpha on the same frame upload only once
	private lastFrame: VideoFrame | null = null;
	private alphaResolutionLocation: WebGLUniformLocation;

	/**
	 * Creates the canvas, acquires a WebGL 2 context, and compiles/links both programs.
	 *
	 * @param initialWidth - Initial canvas width; resized later to match each frame.
	 * @param initialHeight - Initial canvas height; resized later to match each frame.
	 * @throws If a WebGL 2 context cannot be acquired.
	 */
	constructor(initialWidth: number, initialHeight: number) {
		if (typeof OffscreenCanvas !== 'undefined') {
			this.canvas = new OffscreenCanvas(initialWidth, initialHeight);
		} else {
			this.canvas = document.createElement('canvas');
			this.canvas.width = initialWidth;
			this.canvas.height = initialHeight;
		}

		const gl = this.canvas.getContext('webgl2', {
			alpha: true, // Needed due to the YUV thing we do for alpha
		}) as unknown as WebGL2RenderingContext | null; // Casting because of some TypeScript weirdness
		if (!gl) {
			throw new Error('Couldn\'t acquire WebGL 2 context.');
		}

		this.gl = gl;

		this.colorProgram = this.createColorProgram();
		this.alphaProgram = this.createAlphaProgram();
		this.vao = this.createVAO();
		this.sourceTexture = this.createTexture();

		this.alphaResolutionLocation = this.gl.getUniformLocation(this.alphaProgram, 'u_resolution')!;

		// Both programs sample the source frame from texture unit 0
		this.gl.useProgram(this.colorProgram);
		this.gl.uniform1i(this.gl.getUniformLocation(this.colorProgram, 'u_sourceTexture'), 0);

		this.gl.useProgram(this.alphaProgram);
		this.gl.uniform1i(this.gl.getUniformLocation(this.alphaProgram, 'u_sourceTexture'), 0);
	}

	// Fullscreen-quad vertex shader shared by the color and alpha programs
	private createVertexShader(): WebGLShader {
		return this.createShader(this.gl.VERTEX_SHADER, `#version 300 es
			in vec2 a_position;
			in vec2 a_texCoord;
			out vec2 v_texCoord;

			void main() {
				gl_Position = vec4(a_position, 0.0, 1.0);
				v_texCoord = a_texCoord;
			}
		`);
	}

	// Builds the color-pass program: copies RGB from the source, forces alpha to 1
	private createColorProgram(): WebGLProgram {
		const vertexShader = this.createVertexShader();

		// This shader is simple, simply copy the color information while setting alpha to 1
		const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es
			precision highp float;

			uniform sampler2D u_sourceTexture;
			in vec2 v_texCoord;
			out vec4 fragColor;

			void main() {
				vec4 source = texture(u_sourceTexture, v_texCoord);
				fragColor = vec4(source.rgb, 1.0);
			}
		`);

		const program = this.gl.createProgram();
		this.gl.attachShader(program, vertexShader);
		this.gl.attachShader(program, fragmentShader);
		this.gl.linkProgram(program);

		return program;
	}

	// Builds the alpha-pass program, which emits raw I420 bytes instead of displayable RGBA
	private createAlphaProgram(): WebGLProgram {
		const vertexShader = this.createVertexShader();

		// This shader's more complex. The main reason is that this shader writes data in I420 (yuv420) pixel format
		// instead of regular RGBA. In other words, we use the shader to write out I420 data into an RGBA canvas, which
		// we then later read out with JavaScript. The reason being that browsers weirdly encode canvases and mess up
		// the color spaces, and the only way to have full control over the color space is by outputting YUV data
		// directly (avoiding the RGB conversion). Doing this conversion in JS is painfully slow, so let's utlize the
		// GPU since we're already calling it anyway.
		const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es
			precision highp float;

			uniform sampler2D u_sourceTexture;
			uniform vec2 u_resolution; // The width and height of the canvas
			in vec2 v_texCoord;
			out vec4 fragColor;

			// This function determines the value for a single byte in the YUV stream
			float getByteValue(float byteOffset) {
				float width = u_resolution.x;
				float height = u_resolution.y;

				float yPlaneSize = width * height;

				if (byteOffset < yPlaneSize) {
					// This byte is in the luma plane. Find the corresponding pixel coordinates to sample from
					float y = floor(byteOffset / width);
					float x = mod(byteOffset, width);

					// Add 0.5 to sample the center of the texel
					vec2 sampleCoord = (vec2(x, y) + 0.5) / u_resolution;

					// The luma value is the alpha from the source texture
					return texture(u_sourceTexture, sampleCoord).a;
				} else {
					// Write a fixed value for chroma and beyond
					return 128.0 / 255.0;
				}
			}

			void main() {
				// Each fragment writes 4 bytes (R, G, B, A)
				float pixelIndex = floor(gl_FragCoord.y) * u_resolution.x + floor(gl_FragCoord.x);
				float baseByteOffset = pixelIndex * 4.0;

				vec4 result;
				for (int i = 0; i < 4; i++) {
					float currentByteOffset = baseByteOffset + float(i);
					result[i] = getByteValue(currentByteOffset);
				}

				fragColor = result;
			}
		`);

		const program = this.gl.createProgram();
		this.gl.attachShader(program, vertexShader);
		this.gl.attachShader(program, fragmentShader);
		this.gl.linkProgram(program);

		return program;
	}

	// Compiles a shader. NOTE(review): compile errors are only logged, not thrown; a failed
	// shader still gets returned and the subsequent link would fail silently — confirm intended.
	private createShader(type: number, source: string): WebGLShader {
		const shader = this.gl.createShader(type)!;
		this.gl.shaderSource(shader, source);
		this.gl.compileShader(shader);
		if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {
			console.error('Shader compile error:', this.gl.getShaderInfoLog(shader));
		}
		return shader;
	}

	// Creates the VAO for a fullscreen triangle strip; vertices interleave clip-space position
	// and texture coordinates (stride 16 bytes: 2 floats position, 2 floats texcoord)
	private createVAO(): WebGLVertexArrayObject {
		const vao = this.gl.createVertexArray();
		this.gl.bindVertexArray(vao);

		const vertices = new Float32Array([
			-1, -1, 0, 1,
			1, -1, 1, 1,
			-1, 1, 0, 0,
			1, 1, 1, 0,
		]);

		const buffer = this.gl.createBuffer();
		this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);
		this.gl.bufferData(this.gl.ARRAY_BUFFER, vertices, this.gl.STATIC_DRAW);

		const positionLocation = this.gl.getAttribLocation(this.colorProgram, 'a_position');
		const texCoordLocation = this.gl.getAttribLocation(this.colorProgram, 'a_texCoord');

		this.gl.enableVertexAttribArray(positionLocation);
		this.gl.vertexAttribPointer(positionLocation, 2, this.gl.FLOAT, false, 16, 0);

		this.gl.enableVertexAttribArray(texCoordLocation);
		this.gl.vertexAttribPointer(texCoordLocation, 2, this.gl.FLOAT, false, 16, 8);

		return vao;
	}

	// Creates the texture that receives the source frame (edge-clamped, linear filtering)
	private createTexture(): WebGLTexture {
		const texture = this.gl.createTexture();

		this.gl.bindTexture(this.gl.TEXTURE_2D, texture);
		this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);
		this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);
		this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.LINEAR);
		this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.LINEAR);

		return texture;
	}

	// Uploads the frame into the source texture, resizing the canvas to the frame's display
	// size if needed. Skipped entirely when the same frame was already uploaded.
	private updateTexture(sourceFrame: VideoFrame): void {
		if (this.lastFrame === sourceFrame) {
			return;
		}

		if (sourceFrame.displayWidth !== this.canvas.width || sourceFrame.displayHeight !== this.canvas.height) {
			this.canvas.width = sourceFrame.displayWidth;
			this.canvas.height = sourceFrame.displayHeight;
		}

		this.gl.activeTexture(this.gl.TEXTURE0);
		this.gl.bindTexture(this.gl.TEXTURE_2D, this.sourceTexture);
		this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGBA, this.gl.RGBA, this.gl.UNSIGNED_BYTE, sourceFrame);

		this.lastFrame = sourceFrame;
	}

	/**
	 * Renders the color pass and returns a new VideoFrame of the canvas with alpha discarded,
	 * carrying over the source frame's timestamp and duration.
	 */
	extractColor(sourceFrame: VideoFrame) {
		this.updateTexture(sourceFrame);

		this.gl.useProgram(this.colorProgram);
		this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);
		this.gl.clear(this.gl.COLOR_BUFFER_BIT);
		this.gl.bindVertexArray(this.vao);
		this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);

		return new VideoFrame(this.canvas, {
			timestamp: sourceFrame.timestamp,
			duration: sourceFrame.duration ?? undefined,
			alpha: 'discard',
		});
	}

	/**
	 * Renders the alpha pass and reads back the packed I420 bytes. The luma plane holds the
	 * source frame's alpha channel; both chroma planes are the fixed value 128 (neutral), as
	 * written by the shader. Returns a new I420 VideoFrame over the read-back buffer.
	 */
	extractAlpha(sourceFrame: VideoFrame) {
		this.updateTexture(sourceFrame);

		this.gl.useProgram(this.alphaProgram);
		this.gl.uniform2f(this.alphaResolutionLocation, this.canvas.width, this.canvas.height);

		this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);
		this.gl.clear(this.gl.COLOR_BUFFER_BIT);
		this.gl.bindVertexArray(this.vao);
		this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);

		const { width, height } = this.canvas;

		// I420 layout: width*height luma bytes, then two quarter-resolution chroma planes.
		// Each canvas pixel read back as RGBA yields 4 consecutive YUV bytes, so only
		// ceil(yuvSize / (width * 4)) rows of the canvas need to be read.
		const chromaSamples = Math.ceil(width / 2) * Math.ceil(height / 2);
		const yuvSize = width * height + chromaSamples * 2;
		const requiredHeight = Math.ceil(yuvSize / (width * 4));

		let yuv = new Uint8Array(4 * width * requiredHeight);
		this.gl.readPixels(0, 0, width, requiredHeight, this.gl.RGBA, this.gl.UNSIGNED_BYTE, yuv);
		yuv = yuv.subarray(0, yuvSize);

		assert(yuv[width * height] === 128); // Where chroma data starts
		assert(yuv[yuv.length - 1] === 128); // Assert the YUV data has been fully written

		// Defining this separately because TypeScript doesn't know `transfer` and I can't be bothered to do declaration
		// merging right now
		const init = {
			format: 'I420' as const,
			codedWidth: width,
			codedHeight: height,
			timestamp: sourceFrame.timestamp,
			duration: sourceFrame.duration ?? undefined,
			transfer: [yuv.buffer],
		};
		return new VideoFrame(yuv, init);
	}

	// Releases the WebGL context and drops the reference so further use fails fast
	close() {
		this.gl.getExtension('WEBGL_lose_context')?.loseContext();
		this.gl = null as unknown as WebGL2RenderingContext;
	}
}
935
-
936
- /**
937
- * This source can be used to add raw, unencoded video samples (frames) to an output video track. These frames will
938
- * automatically be encoded and then piped into the output.
939
- * @group Media sources
940
- * @public
941
- */
942
- export class VideoSampleSource extends VideoSource {
943
- /** @internal */
944
- private _encoder: VideoEncoderWrapper;
945
-
946
- /**
947
- * Creates a new {@link VideoSampleSource} whose samples are encoded according to the specified
948
- * {@link VideoEncodingConfig}.
949
- */
950
- constructor(encodingConfig: VideoEncodingConfig) {
951
- validateVideoEncodingConfig(encodingConfig);
952
-
953
- super(encodingConfig.codec);
954
- this._encoder = new VideoEncoderWrapper(this, encodingConfig);
955
- }
956
-
957
- /**
958
- * Encodes a video sample (frame) and then adds it to the output.
959
- *
960
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
961
- * to respect writer and encoder backpressure.
962
- */
963
- add(videoSample: VideoSample, encodeOptions?: VideoEncoderEncodeOptions) {
964
- if (!(videoSample instanceof VideoSample)) {
965
- throw new TypeError('videoSample must be a VideoSample.');
966
- }
967
-
968
- return this._encoder.add(videoSample, false, encodeOptions);
969
- }
970
-
971
- /** @internal */
972
- override _flushAndClose(forceClose: boolean) {
973
- return this._encoder.flushAndClose(forceClose);
974
- }
975
- }
976
-
977
- /**
978
- * This source can be used to add video frames to the output track from a fixed canvas element. Since canvases are often
979
- * used for rendering, this source provides a convenient wrapper around {@link VideoSampleSource}.
980
- * @group Media sources
981
- * @public
982
- */
983
- export class CanvasSource extends VideoSource {
984
- /** @internal */
985
- private _encoder: VideoEncoderWrapper;
986
- /** @internal */
987
- private _canvas: HTMLCanvasElement | OffscreenCanvas;
988
-
989
- /**
990
- * Creates a new {@link CanvasSource} from a canvas element or `OffscreenCanvas` whose samples are encoded
991
- * according to the specified {@link VideoEncodingConfig}.
992
- */
993
- constructor(canvas: HTMLCanvasElement | OffscreenCanvas, encodingConfig: VideoEncodingConfig) {
994
- if (
995
- !(typeof HTMLCanvasElement !== 'undefined' && canvas instanceof HTMLCanvasElement)
996
- && !(typeof OffscreenCanvas !== 'undefined' && canvas instanceof OffscreenCanvas)
997
- ) {
998
- throw new TypeError('canvas must be an HTMLCanvasElement or OffscreenCanvas.');
999
- }
1000
- validateVideoEncodingConfig(encodingConfig);
1001
-
1002
- super(encodingConfig.codec);
1003
- this._encoder = new VideoEncoderWrapper(this, encodingConfig);
1004
- this._canvas = canvas;
1005
- }
1006
-
1007
- /**
1008
- * Captures the current canvas state as a video sample (frame), encodes it and adds it to the output.
1009
- *
1010
- * @param timestamp - The timestamp of the sample, in seconds.
1011
- * @param duration - The duration of the sample, in seconds.
1012
- *
1013
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
1014
- * to respect writer and encoder backpressure.
1015
- */
1016
- add(timestamp: number, duration = 0, encodeOptions?: VideoEncoderEncodeOptions) {
1017
- if (!Number.isFinite(timestamp) || timestamp < 0) {
1018
- throw new TypeError('timestamp must be a non-negative number.');
1019
- }
1020
- if (!Number.isFinite(duration) || duration < 0) {
1021
- throw new TypeError('duration must be a non-negative number.');
1022
- }
1023
-
1024
- const sample = new VideoSample(this._canvas, { timestamp, duration });
1025
- return this._encoder.add(sample, true, encodeOptions);
1026
- }
1027
-
1028
- /** @internal */
1029
- override _flushAndClose(forceClose: boolean) {
1030
- return this._encoder.flushAndClose(forceClose);
1031
- }
1032
- }
1033
-
1034
- /**
1035
- * Video source that encodes the frames of a
1036
- * [`MediaStreamVideoTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack) and pipes them into the
1037
- * output. This is useful for capturing live or real-time data such as webcams or screen captures. Frames will
1038
- * automatically start being captured once the connected {@link Output} is started, and will keep being captured until
1039
- * the {@link Output} is finalized or this source is closed.
1040
- * @group Media sources
1041
- * @public
1042
- */
1043
- export class MediaStreamVideoTrackSource extends VideoSource {
1044
- /** @internal */
1045
- private _encoder: VideoEncoderWrapper;
1046
- /** @internal */
1047
- private _abortController: AbortController | null = null;
1048
- /** @internal */
1049
- private _track: MediaStreamVideoTrack;
1050
- /** @internal */
1051
- private _workerTrackId: number | null = null;
1052
- /** @internal */
1053
- private _workerListener: ((event: MessageEvent) => void) | null = null;
1054
- /** @internal */
1055
- private _promiseWithResolvers = promiseWithResolvers();
1056
- /** @internal */
1057
- private _errorPromiseAccessed = false;
1058
-
1059
- /** A promise that rejects upon any error within this source. This promise never resolves. */
1060
- get errorPromise() {
1061
- this._errorPromiseAccessed = true;
1062
- return this._promiseWithResolvers.promise;
1063
- }
1064
-
1065
- /**
1066
- * Creates a new {@link MediaStreamVideoTrackSource} from a
1067
- * [`MediaStreamVideoTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack), which will pull
1068
- * video samples from the stream in real time and encode them according to {@link VideoEncodingConfig}.
1069
- */
1070
- constructor(track: MediaStreamVideoTrack, encodingConfig: VideoEncodingConfig) {
1071
- if (!(track instanceof MediaStreamTrack) || track.kind !== 'video') {
1072
- throw new TypeError('track must be a video MediaStreamTrack.');
1073
- }
1074
- validateVideoEncodingConfig(encodingConfig);
1075
-
1076
- encodingConfig = {
1077
- ...encodingConfig,
1078
- latencyMode: 'realtime',
1079
- };
1080
-
1081
- super(encodingConfig.codec);
1082
- this._encoder = new VideoEncoderWrapper(this, encodingConfig);
1083
- this._track = track;
1084
- }
1085
-
1086
- /** @internal */
1087
- override async _start() {
1088
- if (!this._errorPromiseAccessed) {
1089
- console.warn(
1090
- 'Make sure not to ignore the `errorPromise` field on MediaStreamVideoTrackSource, so that any internal'
1091
- + ' errors get bubbled up properly.',
1092
- );
1093
- }
1094
-
1095
- this._abortController = new AbortController();
1096
-
1097
- let firstVideoFrameTimestamp: number | null = null;
1098
- let errored = false;
1099
-
1100
- const onVideoFrame = (videoFrame: VideoFrame) => {
1101
- if (errored) {
1102
- videoFrame.close();
1103
- return;
1104
- }
1105
-
1106
- if (firstVideoFrameTimestamp === null) {
1107
- firstVideoFrameTimestamp = videoFrame.timestamp / 1e6;
1108
-
1109
- const muxer = this._connectedTrack!.output._muxer;
1110
- if (muxer.firstMediaStreamTimestamp === null) {
1111
- muxer.firstMediaStreamTimestamp = performance.now() / 1000;
1112
- this._timestampOffset = -firstVideoFrameTimestamp;
1113
- } else {
1114
- this._timestampOffset = (performance.now() / 1000 - muxer.firstMediaStreamTimestamp)
1115
- - firstVideoFrameTimestamp;
1116
- }
1117
- }
1118
-
1119
- if (this._encoder.getQueueSize() >= 4) {
1120
- // Drop frames if the encoder is overloaded
1121
- videoFrame.close();
1122
- return;
1123
- }
1124
-
1125
- void this._encoder.add(new VideoSample(videoFrame), true)
1126
- .catch((error) => {
1127
- errored = true;
1128
-
1129
- this._abortController?.abort();
1130
- this._promiseWithResolvers.reject(error);
1131
-
1132
- if (this._workerTrackId !== null) {
1133
- // Tell the worker to stop the track
1134
- sendMessageToMediaStreamTrackProcessorWorker({
1135
- type: 'stopTrack',
1136
- trackId: this._workerTrackId,
1137
- });
1138
- }
1139
- });
1140
- };
1141
-
1142
- if (typeof MediaStreamTrackProcessor !== 'undefined') {
1143
- // We can do it here directly, perfect
1144
- const processor = new MediaStreamTrackProcessor({ track: this._track });
1145
- const consumer = new WritableStream<VideoFrame>({ write: onVideoFrame });
1146
-
1147
- processor.readable.pipeTo(consumer, {
1148
- signal: this._abortController.signal,
1149
- }).catch((error) => {
1150
- // Handle AbortError silently
1151
- if (error instanceof DOMException && error.name === 'AbortError') return;
1152
-
1153
- this._promiseWithResolvers.reject(error);
1154
- });
1155
- } else {
1156
- // It might still be supported in a worker, so let's check that
1157
- const supportedInWorker = await mediaStreamTrackProcessorIsSupportedInWorker();
1158
-
1159
- if (supportedInWorker) {
1160
- this._workerTrackId = nextMediaStreamTrackProcessorWorkerId++;
1161
-
1162
- sendMessageToMediaStreamTrackProcessorWorker({
1163
- type: 'videoTrack',
1164
- trackId: this._workerTrackId,
1165
- track: this._track,
1166
- });
1167
-
1168
- this._workerListener = (event: MessageEvent) => {
1169
- const message = event.data as MediaStreamTrackProcessorWorkerMessage;
1170
-
1171
- if (message.type === 'videoFrame' && message.trackId === this._workerTrackId) {
1172
- onVideoFrame(message.videoFrame);
1173
- } else if (message.type === 'error' && message.trackId === this._workerTrackId) {
1174
- this._promiseWithResolvers.reject(message.error);
1175
- }
1176
- };
1177
-
1178
- mediaStreamTrackProcessorWorker!.addEventListener('message', this._workerListener);
1179
- } else {
1180
- throw new Error('MediaStreamTrackProcessor is required but not supported by this browser.');
1181
- }
1182
- }
1183
- }
1184
-
1185
- /** @internal */
1186
- override async _flushAndClose(forceClose: boolean) {
1187
- if (this._abortController) {
1188
- this._abortController.abort();
1189
- this._abortController = null;
1190
- }
1191
-
1192
- if (this._workerTrackId !== null) {
1193
- assert(this._workerListener);
1194
-
1195
- sendMessageToMediaStreamTrackProcessorWorker({
1196
- type: 'stopTrack',
1197
- trackId: this._workerTrackId,
1198
- });
1199
-
1200
- // Wait for the worker to stop the track
1201
- await new Promise<void>((resolve) => {
1202
- const listener = (event: MessageEvent) => {
1203
- const message = event.data as MediaStreamTrackProcessorWorkerMessage;
1204
-
1205
- if (message.type === 'trackStopped' && message.trackId === this._workerTrackId) {
1206
- assert(this._workerListener);
1207
- mediaStreamTrackProcessorWorker!.removeEventListener('message', this._workerListener);
1208
- mediaStreamTrackProcessorWorker!.removeEventListener('message', listener);
1209
-
1210
- resolve();
1211
- }
1212
- };
1213
-
1214
- mediaStreamTrackProcessorWorker!.addEventListener('message', listener);
1215
- });
1216
- }
1217
-
1218
- await this._encoder.flushAndClose(forceClose);
1219
- }
1220
- }
1221
-
1222
- /**
1223
- * Base class for audio sources - sources for audio tracks.
1224
- * @group Media sources
1225
- * @public
1226
- */
1227
- export abstract class AudioSource extends MediaSource {
1228
- /** @internal */
1229
- override _connectedTrack: OutputAudioTrack | null = null;
1230
- /** @internal */
1231
- _codec: AudioCodec;
1232
-
1233
- /** Internal constructor. */
1234
- constructor(codec: AudioCodec) {
1235
- super();
1236
-
1237
- if (!AUDIO_CODECS.includes(codec)) {
1238
- throw new TypeError(`Invalid audio codec '${codec}'. Must be one of: ${AUDIO_CODECS.join(', ')}.`);
1239
- }
1240
-
1241
- this._codec = codec;
1242
- }
1243
- }
1244
-
1245
- /**
1246
- * The most basic audio source; can be used to directly pipe encoded packets into the output file.
1247
- * @group Media sources
1248
- * @public
1249
- */
1250
- export class EncodedAudioPacketSource extends AudioSource {
1251
- /** Creates a new {@link EncodedAudioPacketSource} whose packets are encoded using `codec`. */
1252
- constructor(codec: AudioCodec) {
1253
- super(codec);
1254
- }
1255
-
1256
- /**
1257
- * Adds an encoded packet to the output audio track. Packets must be added in *decode order*.
1258
- *
1259
- * @param meta - Additional metadata from the encoder. You should pass this for the first call, including a valid
1260
- * decoder config.
1261
- *
1262
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
1263
- * to respect writer and encoder backpressure.
1264
- */
1265
- add(packet: EncodedPacket, meta?: EncodedAudioChunkMetadata) {
1266
- if (!(packet instanceof EncodedPacket)) {
1267
- throw new TypeError('packet must be an EncodedPacket.');
1268
- }
1269
- if (packet.isMetadataOnly) {
1270
- throw new TypeError('Metadata-only packets cannot be added.');
1271
- }
1272
- if (meta !== undefined && (!meta || typeof meta !== 'object')) {
1273
- throw new TypeError('meta, when provided, must be an object.');
1274
- }
1275
-
1276
- this._ensureValidAdd();
1277
- return this._connectedTrack!.output._muxer.addEncodedAudioPacket(this._connectedTrack!, packet, meta);
1278
- }
1279
- }
1280
-
1281
- class AudioEncoderWrapper {
1282
- private ensureEncoderPromise: Promise<void> | null = null;
1283
- private encoderInitialized = false;
1284
- private encoder: AudioEncoder | null = null;
1285
- private muxer: Muxer | null = null;
1286
- private lastNumberOfChannels: number | null = null;
1287
- private lastSampleRate: number | null = null;
1288
-
1289
- private isPcmEncoder = false;
1290
- private outputSampleSize: number | null = null;
1291
- private writeOutputValue: ((view: DataView, byteOffset: number, value: number) => void) | null = null;
1292
-
1293
- private customEncoder: CustomAudioEncoder | null = null;
1294
- private customEncoderCallSerializer = new CallSerializer();
1295
- private customEncoderQueueSize = 0;
1296
-
1297
- private lastEndSampleIndex: number | null = null;
1298
-
1299
- /**
1300
- * Encoders typically throw their errors "out of band", meaning asynchronously in some other execution context.
1301
- * However, we want to surface these errors to the user within the normal control flow, so they don't go uncaught.
1302
- * So, we keep track of the encoder error and throw it as soon as we get the chance.
1303
- */
1304
- private error: Error | null = null;
1305
- private errorNeedsNewStack = true;
1306
-
1307
- constructor(private source: AudioSource, private encodingConfig: AudioEncodingConfig) {}
1308
-
1309
- async add(audioSample: AudioSample, shouldClose: boolean) {
1310
- try {
1311
- this.checkForEncoderError();
1312
- this.source._ensureValidAdd();
1313
-
1314
- // Ensure audio parameters remain constant
1315
- if (this.lastNumberOfChannels !== null && this.lastSampleRate !== null) {
1316
- if (
1317
- audioSample.numberOfChannels !== this.lastNumberOfChannels
1318
- || audioSample.sampleRate !== this.lastSampleRate
1319
- ) {
1320
- throw new Error(
1321
- `Audio parameters must remain constant. Expected ${this.lastNumberOfChannels} channels at`
1322
- + ` ${this.lastSampleRate} Hz, got ${audioSample.numberOfChannels} channels at`
1323
- + ` ${audioSample.sampleRate} Hz.`,
1324
- );
1325
- }
1326
- } else {
1327
- this.lastNumberOfChannels = audioSample.numberOfChannels;
1328
- this.lastSampleRate = audioSample.sampleRate;
1329
- }
1330
-
1331
- if (!this.encoderInitialized) {
1332
- if (!this.ensureEncoderPromise) {
1333
- this.ensureEncoder(audioSample);
1334
- }
1335
-
1336
- // No, this "if" statement is not useless. Sometimes, the above call to `ensureEncoder` might have
1337
- // synchronously completed and the encoder is already initialized. In this case, we don't need to await
1338
- // the promise anymore. This also fixes nasty async race condition bugs when multiple code paths are
1339
- // calling this method: It's important that the call that initialized the encoder go through this
1340
- // code first.
1341
- if (!this.encoderInitialized) {
1342
- await this.ensureEncoderPromise;
1343
- }
1344
- }
1345
- assert(this.encoderInitialized);
1346
-
1347
- // Handle padding of gaps with silence to avoid audio drift over time, like in
1348
- // https://github.com/Vanilagy/mediabunny/issues/176
1349
- // TODO An open question is how encoders deal with the first AudioData having a non-zero timestamp, and with
1350
- // AudioDatas that have an overlapping timestamp range.
1351
- {
1352
- const startSampleIndex = Math.round(
1353
- audioSample.timestamp * audioSample.sampleRate,
1354
- );
1355
- const endSampleIndex = Math.round(
1356
- (audioSample.timestamp + audioSample.duration) * audioSample.sampleRate,
1357
- );
1358
-
1359
- if (this.lastEndSampleIndex !== null && startSampleIndex > this.lastEndSampleIndex) {
1360
- const sampleCount = startSampleIndex - this.lastEndSampleIndex;
1361
- const fillSample = new AudioSample({
1362
- data: new Float32Array(sampleCount * audioSample.numberOfChannels),
1363
- format: 'f32-planar',
1364
- sampleRate: audioSample.sampleRate,
1365
- numberOfChannels: audioSample.numberOfChannels,
1366
- numberOfFrames: sampleCount,
1367
- timestamp: this.lastEndSampleIndex / audioSample.sampleRate,
1368
- });
1369
-
1370
- await this.add(fillSample, true); // Recursive call
1371
- }
1372
-
1373
- this.lastEndSampleIndex = endSampleIndex;
1374
- }
1375
-
1376
- if (this.customEncoder) {
1377
- this.customEncoderQueueSize++;
1378
-
1379
- // We clone the sample so it cannot be closed on us from the outside before it reaches the encoder
1380
- const clonedSample = audioSample.clone();
1381
-
1382
- const promise = this.customEncoderCallSerializer
1383
- .call(() => this.customEncoder!.encode(clonedSample))
1384
- .then(() => this.customEncoderQueueSize--)
1385
- .catch((error: Error) => this.error ??= error)
1386
- .finally(() => {
1387
- clonedSample.close();
1388
- // `audioSample` gets closed in the finally block at the end of the method
1389
- });
1390
-
1391
- if (this.customEncoderQueueSize >= 4) {
1392
- await promise;
1393
- }
1394
-
1395
- await this.muxer!.mutex.currentPromise; // Allow the writer to apply backpressure
1396
- } else if (this.isPcmEncoder) {
1397
- await this.doPcmEncoding(audioSample, shouldClose);
1398
- } else {
1399
- assert(this.encoder);
1400
- const audioData = audioSample.toAudioData();
1401
- this.encoder.encode(audioData);
1402
- audioData.close();
1403
-
1404
- if (shouldClose) {
1405
- audioSample.close();
1406
- }
1407
-
1408
- if (this.encoder.encodeQueueSize >= 4) {
1409
- await new Promise(resolve => this.encoder!.addEventListener('dequeue', resolve, { once: true }));
1410
- }
1411
-
1412
- await this.muxer!.mutex.currentPromise; // Allow the writer to apply backpressure
1413
- }
1414
- } finally {
1415
- if (shouldClose) {
1416
- // Make sure it's always closed, even if there was an error
1417
- audioSample.close();
1418
- }
1419
- }
1420
- }
1421
-
1422
- private async doPcmEncoding(audioSample: AudioSample, shouldClose: boolean) {
1423
- assert(this.outputSampleSize);
1424
- assert(this.writeOutputValue);
1425
-
1426
- // Need to extract data from the audio data before we close it
1427
- const { numberOfChannels, numberOfFrames, sampleRate, timestamp } = audioSample;
1428
-
1429
- const CHUNK_SIZE = 2048;
1430
- const outputs: {
1431
- frameCount: number;
1432
- view: DataView;
1433
- }[] = [];
1434
-
1435
- // Prepare all of the output buffers, each being bounded by CHUNK_SIZE so we don't generate huge packets
1436
- for (let frame = 0; frame < numberOfFrames; frame += CHUNK_SIZE) {
1437
- const frameCount = Math.min(CHUNK_SIZE, audioSample.numberOfFrames - frame);
1438
- const outputSize = frameCount * numberOfChannels * this.outputSampleSize;
1439
- const outputBuffer = new ArrayBuffer(outputSize);
1440
- const outputView = new DataView(outputBuffer);
1441
-
1442
- outputs.push({ frameCount, view: outputView });
1443
- }
1444
-
1445
- const allocationSize = audioSample.allocationSize(({ planeIndex: 0, format: 'f32-planar' }));
1446
- const floats = new Float32Array(allocationSize / Float32Array.BYTES_PER_ELEMENT);
1447
-
1448
- for (let i = 0; i < numberOfChannels; i++) {
1449
- audioSample.copyTo(floats, { planeIndex: i, format: 'f32-planar' });
1450
-
1451
- for (let j = 0; j < outputs.length; j++) {
1452
- const { frameCount, view } = outputs[j]!;
1453
-
1454
- for (let k = 0; k < frameCount; k++) {
1455
- this.writeOutputValue(
1456
- view,
1457
- (k * numberOfChannels + i) * this.outputSampleSize,
1458
- floats[j * CHUNK_SIZE + k]!,
1459
- );
1460
- }
1461
- }
1462
- }
1463
-
1464
- if (shouldClose) {
1465
- audioSample.close();
1466
- }
1467
-
1468
- const meta: EncodedAudioChunkMetadata = {
1469
- decoderConfig: {
1470
- codec: this.encodingConfig.codec,
1471
- numberOfChannels,
1472
- sampleRate,
1473
- },
1474
- };
1475
-
1476
- for (let i = 0; i < outputs.length; i++) {
1477
- const { frameCount, view } = outputs[i]!;
1478
- const outputBuffer = view.buffer;
1479
- const startFrame = i * CHUNK_SIZE;
1480
-
1481
- const packet = new EncodedPacket(
1482
- new Uint8Array(outputBuffer),
1483
- 'key',
1484
- timestamp + startFrame / sampleRate,
1485
- frameCount / sampleRate,
1486
- );
1487
-
1488
- this.encodingConfig.onEncodedPacket?.(packet, meta);
1489
- await this.muxer!.addEncodedAudioPacket(this.source._connectedTrack!, packet, meta); // With backpressure
1490
- }
1491
- }
1492
-
1493
- private ensureEncoder(audioSample: AudioSample) {
1494
- const encoderError = new Error();
1495
- this.ensureEncoderPromise = (async () => {
1496
- const { numberOfChannels, sampleRate } = audioSample;
1497
-
1498
- const encoderConfig = buildAudioEncoderConfig({
1499
- numberOfChannels,
1500
- sampleRate,
1501
- ...this.encodingConfig,
1502
- });
1503
- this.encodingConfig.onEncoderConfig?.(encoderConfig);
1504
-
1505
- const MatchingCustomEncoder = customAudioEncoders.find(x => x.supports(
1506
- this.encodingConfig.codec,
1507
- encoderConfig,
1508
- ));
1509
-
1510
- if (MatchingCustomEncoder) {
1511
- // @ts-expect-error "Can't create instance of abstract class 🤓"
1512
- this.customEncoder = new MatchingCustomEncoder() as CustomAudioEncoder;
1513
- // @ts-expect-error It's technically readonly
1514
- this.customEncoder.codec = this.encodingConfig.codec;
1515
- // @ts-expect-error It's technically readonly
1516
- this.customEncoder.config = encoderConfig;
1517
- // @ts-expect-error It's technically readonly
1518
- this.customEncoder.onPacket = (packet, meta) => {
1519
- if (!(packet instanceof EncodedPacket)) {
1520
- throw new TypeError('The first argument passed to onPacket must be an EncodedPacket.');
1521
- }
1522
- if (meta !== undefined && (!meta || typeof meta !== 'object')) {
1523
- throw new TypeError('The second argument passed to onPacket must be an object or undefined.');
1524
- }
1525
-
1526
- this.encodingConfig.onEncodedPacket?.(packet, meta);
1527
- void this.muxer!.addEncodedAudioPacket(this.source._connectedTrack!, packet, meta)
1528
- .catch((error) => {
1529
- this.error ??= error;
1530
- this.errorNeedsNewStack = false;
1531
- });
1532
- };
1533
-
1534
- await this.customEncoder.init();
1535
- } else if ((PCM_AUDIO_CODECS as readonly string[]).includes(this.encodingConfig.codec)) {
1536
- this.initPcmEncoder();
1537
- } else {
1538
- if (typeof AudioEncoder === 'undefined') {
1539
- throw new Error('AudioEncoder is not supported by this browser.');
1540
- }
1541
-
1542
- const support = await AudioEncoder.isConfigSupported(encoderConfig);
1543
- if (!support.supported) {
1544
- throw new Error(
1545
- `This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,`
1546
- + ` ${encoderConfig.numberOfChannels} channels, ${encoderConfig.sampleRate} Hz) is not`
1547
- + ` supported by this browser. Consider using another codec or changing your audio parameters.`,
1548
- );
1549
- }
1550
-
1551
- this.encoder = new AudioEncoder({
1552
- output: (chunk, meta) => {
1553
- // WebKit emits an invalid description for AAC (https://bugs.webkit.org/show_bug.cgi?id=302253),
1554
- // which we try to detect here. If detected, we'll provide our own description instead, derived
1555
- // from the codec string and audio parameters.
1556
- if (this.encodingConfig.codec === 'aac' && meta?.decoderConfig) {
1557
- let needsDescriptionOverwrite = false;
1558
- if (!meta.decoderConfig.description || meta.decoderConfig.description.byteLength < 2) {
1559
- needsDescriptionOverwrite = true;
1560
- } else {
1561
- const audioSpecificConfig = parseAacAudioSpecificConfig(
1562
- toUint8Array(meta.decoderConfig.description),
1563
- );
1564
-
1565
- needsDescriptionOverwrite = audioSpecificConfig.objectType === 0;
1566
- }
1567
-
1568
- if (needsDescriptionOverwrite) {
1569
- const objectType = Number(last(encoderConfig.codec.split('.')));
1570
-
1571
- meta.decoderConfig.description = buildAacAudioSpecificConfig({
1572
- objectType,
1573
- numberOfChannels: meta.decoderConfig.numberOfChannels,
1574
- sampleRate: meta.decoderConfig.sampleRate,
1575
- });
1576
- }
1577
- }
1578
-
1579
- const packet = EncodedPacket.fromEncodedChunk(chunk);
1580
-
1581
- this.encodingConfig.onEncodedPacket?.(packet, meta);
1582
- void this.muxer!.addEncodedAudioPacket(this.source._connectedTrack!, packet, meta)
1583
- .catch((error) => {
1584
- this.error ??= error;
1585
- this.errorNeedsNewStack = false;
1586
- });
1587
- },
1588
- error: (error) => {
1589
- error.stack = encoderError.stack; // Provide a more useful stack trace
1590
- this.error ??= error;
1591
- },
1592
- });
1593
- this.encoder.configure(encoderConfig);
1594
- }
1595
-
1596
- assert(this.source._connectedTrack);
1597
- this.muxer = this.source._connectedTrack.output._muxer;
1598
-
1599
- this.encoderInitialized = true;
1600
- })();
1601
- }
1602
-
1603
- private initPcmEncoder() {
1604
- this.isPcmEncoder = true;
1605
-
1606
- const codec = this.encodingConfig.codec as PcmAudioCodec;
1607
- const { dataType, sampleSize, littleEndian } = parsePcmCodec(codec);
1608
-
1609
- this.outputSampleSize = sampleSize;
1610
-
1611
- // All these functions receive a float sample as input and map it into the desired format
1612
-
1613
- switch (sampleSize) {
1614
- case 1: {
1615
- if (dataType === 'unsigned') {
1616
- this.writeOutputValue = (view, byteOffset, value) =>
1617
- view.setUint8(byteOffset, clamp((value + 1) * 127.5, 0, 255));
1618
- } else if (dataType === 'signed') {
1619
- this.writeOutputValue = (view, byteOffset, value) => {
1620
- view.setInt8(byteOffset, clamp(Math.round(value * 128), -128, 127));
1621
- };
1622
- } else if (dataType === 'ulaw') {
1623
- this.writeOutputValue = (view, byteOffset, value) => {
1624
- const int16 = clamp(Math.floor(value * 32767), -32768, 32767);
1625
- view.setUint8(byteOffset, toUlaw(int16));
1626
- };
1627
- } else if (dataType === 'alaw') {
1628
- this.writeOutputValue = (view, byteOffset, value) => {
1629
- const int16 = clamp(Math.floor(value * 32767), -32768, 32767);
1630
- view.setUint8(byteOffset, toAlaw(int16));
1631
- };
1632
- } else {
1633
- assert(false);
1634
- }
1635
- }; break;
1636
- case 2: {
1637
- if (dataType === 'unsigned') {
1638
- this.writeOutputValue = (view, byteOffset, value) =>
1639
- view.setUint16(byteOffset, clamp((value + 1) * 32767.5, 0, 65535), littleEndian);
1640
- } else if (dataType === 'signed') {
1641
- this.writeOutputValue = (view, byteOffset, value) =>
1642
- view.setInt16(byteOffset, clamp(Math.round(value * 32767), -32768, 32767), littleEndian);
1643
- } else {
1644
- assert(false);
1645
- }
1646
- }; break;
1647
- case 3: {
1648
- if (dataType === 'unsigned') {
1649
- this.writeOutputValue = (view, byteOffset, value) =>
1650
- setUint24(view, byteOffset, clamp((value + 1) * 8388607.5, 0, 16777215), littleEndian);
1651
- } else if (dataType === 'signed') {
1652
- this.writeOutputValue = (view, byteOffset, value) =>
1653
- setInt24(
1654
- view,
1655
- byteOffset,
1656
- clamp(Math.round(value * 8388607), -8388608, 8388607),
1657
- littleEndian,
1658
- );
1659
- } else {
1660
- assert(false);
1661
- }
1662
- }; break;
1663
- case 4: {
1664
- if (dataType === 'unsigned') {
1665
- this.writeOutputValue = (view, byteOffset, value) =>
1666
- view.setUint32(byteOffset, clamp((value + 1) * 2147483647.5, 0, 4294967295), littleEndian);
1667
- } else if (dataType === 'signed') {
1668
- this.writeOutputValue = (view, byteOffset, value) =>
1669
- view.setInt32(
1670
- byteOffset,
1671
- clamp(Math.round(value * 2147483647), -2147483648, 2147483647),
1672
- littleEndian,
1673
- );
1674
- } else if (dataType === 'float') {
1675
- this.writeOutputValue = (view, byteOffset, value) =>
1676
- view.setFloat32(byteOffset, value, littleEndian);
1677
- } else {
1678
- assert(false);
1679
- }
1680
- }; break;
1681
- case 8: {
1682
- if (dataType === 'float') {
1683
- this.writeOutputValue = (view, byteOffset, value) =>
1684
- view.setFloat64(byteOffset, value, littleEndian);
1685
- } else {
1686
- assert(false);
1687
- }
1688
- }; break;
1689
- default: {
1690
- assertNever(sampleSize);
1691
- assert(false);
1692
- };
1693
- }
1694
- }
1695
-
1696
- async flushAndClose(forceClose: boolean) {
1697
- if (!forceClose) this.checkForEncoderError();
1698
-
1699
- if (this.customEncoder) {
1700
- if (!forceClose) {
1701
- void this.customEncoderCallSerializer.call(() => this.customEncoder!.flush());
1702
- }
1703
-
1704
- await this.customEncoderCallSerializer.call(() => this.customEncoder!.close());
1705
- } else if (this.encoder) {
1706
- if (!forceClose) {
1707
- await this.encoder.flush();
1708
- }
1709
-
1710
- if (this.encoder.state !== 'closed') {
1711
- this.encoder.close();
1712
- }
1713
- }
1714
-
1715
- if (!forceClose) this.checkForEncoderError();
1716
- }
1717
-
1718
- getQueueSize() {
1719
- if (this.customEncoder) {
1720
- return this.customEncoderQueueSize;
1721
- } else if (this.isPcmEncoder) {
1722
- return 0;
1723
- } else {
1724
- return this.encoder?.encodeQueueSize ?? 0;
1725
- }
1726
- }
1727
-
1728
- checkForEncoderError() {
1729
- if (this.error) {
1730
- if (this.errorNeedsNewStack) {
1731
- this.error.stack = new Error().stack; // Provide an even more useful stack trace
1732
- }
1733
-
1734
- throw this.error;
1735
- }
1736
- }
1737
- }
1738
-
1739
- /**
1740
- * This source can be used to add raw, unencoded audio samples to an output audio track. These samples will
1741
- * automatically be encoded and then piped into the output.
1742
- * @group Media sources
1743
- * @public
1744
- */
1745
- export class AudioSampleSource extends AudioSource {
1746
- /** @internal */
1747
- private _encoder: AudioEncoderWrapper;
1748
-
1749
- /**
1750
- * Creates a new {@link AudioSampleSource} whose samples are encoded according to the specified
1751
- * {@link AudioEncodingConfig}.
1752
- */
1753
- constructor(encodingConfig: AudioEncodingConfig) {
1754
- validateAudioEncodingConfig(encodingConfig);
1755
-
1756
- super(encodingConfig.codec);
1757
- this._encoder = new AudioEncoderWrapper(this, encodingConfig);
1758
- }
1759
-
1760
- /**
1761
- * Encodes an audio sample and then adds it to the output.
1762
- *
1763
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
1764
- * to respect writer and encoder backpressure.
1765
- */
1766
- add(audioSample: AudioSample) {
1767
- if (!(audioSample instanceof AudioSample)) {
1768
- throw new TypeError('audioSample must be an AudioSample.');
1769
- }
1770
-
1771
- return this._encoder.add(audioSample, false);
1772
- }
1773
-
1774
- /** @internal */
1775
- override _flushAndClose(forceClose: boolean) {
1776
- return this._encoder.flushAndClose(forceClose);
1777
- }
1778
- }
1779
-
1780
- /**
1781
- * This source can be used to add audio data from an AudioBuffer to the output track. This is useful when working with
1782
- * the Web Audio API.
1783
- * @group Media sources
1784
- * @public
1785
- */
1786
- export class AudioBufferSource extends AudioSource {
1787
- /** @internal */
1788
- private _encoder: AudioEncoderWrapper;
1789
- /** @internal */
1790
- private _accumulatedTime = 0;
1791
-
1792
- /**
1793
- * Creates a new {@link AudioBufferSource} whose `AudioBuffer` instances are encoded according to the specified
1794
- * {@link AudioEncodingConfig}.
1795
- */
1796
- constructor(encodingConfig: AudioEncodingConfig) {
1797
- validateAudioEncodingConfig(encodingConfig);
1798
-
1799
- super(encodingConfig.codec);
1800
- this._encoder = new AudioEncoderWrapper(this, encodingConfig);
1801
- }
1802
-
1803
- /**
1804
- * Converts an AudioBuffer to audio samples, encodes them and adds them to the output. The first AudioBuffer will
1805
- * be played at timestamp 0, and any subsequent AudioBuffer will have a timestamp equal to the total duration of
1806
- * all previous AudioBuffers.
1807
- *
1808
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
1809
- * to respect writer and encoder backpressure.
1810
- */
1811
- async add(audioBuffer: AudioBuffer) {
1812
- if (!(audioBuffer instanceof AudioBuffer)) {
1813
- throw new TypeError('audioBuffer must be an AudioBuffer.');
1814
- }
1815
-
1816
- const iterator = AudioSample._fromAudioBuffer(audioBuffer, this._accumulatedTime);
1817
- this._accumulatedTime += audioBuffer.duration;
1818
-
1819
- for (const audioSample of iterator) {
1820
- await this._encoder.add(audioSample, true);
1821
- }
1822
- }
1823
-
1824
- /** @internal */
1825
- override _flushAndClose(forceClose: boolean) {
1826
- return this._encoder.flushAndClose(forceClose);
1827
- }
1828
- }
1829
-
1830
- /**
1831
- * Audio source that encodes the data of a
1832
- * [`MediaStreamAudioTrack`](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack) and pipes it into the
1833
- * output. This is useful for capturing live or real-time audio such as microphones or audio from other media elements.
1834
- * Audio will automatically start being captured once the connected {@link Output} is started, and will keep being
1835
- * captured until the {@link Output} is finalized or this source is closed.
1836
- * @group Media sources
1837
- * @public
1838
- */
1839
- export class MediaStreamAudioTrackSource extends AudioSource {
1840
- /** @internal */
1841
- private _encoder: AudioEncoderWrapper;
1842
- /** @internal */
1843
- private _abortController: AbortController | null = null;
1844
- /** @internal */
1845
- private _track: MediaStreamAudioTrack;
1846
- /** @internal */
1847
- private _audioContext: AudioContext | null = null;
1848
- /** @internal */
1849
- private _scriptProcessorNode: ScriptProcessorNode | null = null; // Deprecated but goated
1850
- /** @internal */
1851
- private _promiseWithResolvers = promiseWithResolvers();
1852
- /** @internal */
1853
- private _errorPromiseAccessed = false;
1854
-
1855
- /** A promise that rejects upon any error within this source. This promise never resolves. */
1856
- get errorPromise() {
1857
- this._errorPromiseAccessed = true;
1858
- return this._promiseWithResolvers.promise;
1859
- }
1860
-
1861
- /**
1862
- * Creates a new {@link MediaStreamAudioTrackSource} from a `MediaStreamAudioTrack`, which will pull audio samples
1863
- * from the stream in real time and encode them according to {@link AudioEncodingConfig}.
1864
- */
1865
- constructor(track: MediaStreamAudioTrack, encodingConfig: AudioEncodingConfig) {
1866
- if (!(track instanceof MediaStreamTrack) || track.kind !== 'audio') {
1867
- throw new TypeError('track must be an audio MediaStreamTrack.');
1868
- }
1869
- validateAudioEncodingConfig(encodingConfig);
1870
-
1871
- super(encodingConfig.codec);
1872
- this._encoder = new AudioEncoderWrapper(this, encodingConfig);
1873
- this._track = track;
1874
- }
1875
-
1876
- /** @internal */
1877
- override async _start() {
1878
- if (!this._errorPromiseAccessed) {
1879
- console.warn(
1880
- 'Make sure not to ignore the `errorPromise` field on MediaStreamVideoTrackSource, so that any internal'
1881
- + ' errors get bubbled up properly.',
1882
- );
1883
- }
1884
-
1885
- this._abortController = new AbortController();
1886
-
1887
- if (typeof MediaStreamTrackProcessor !== 'undefined') {
1888
- // Great, MediaStreamTrackProcessor is supported, this is the preferred way of doing things
1889
- let firstAudioDataTimestamp: number | null = null;
1890
-
1891
- const processor = new MediaStreamTrackProcessor({ track: this._track });
1892
- const consumer = new WritableStream<AudioData>({
1893
- write: (audioData) => {
1894
- if (firstAudioDataTimestamp === null) {
1895
- firstAudioDataTimestamp = audioData.timestamp / 1e6;
1896
-
1897
- const muxer = this._connectedTrack!.output._muxer;
1898
- if (muxer.firstMediaStreamTimestamp === null) {
1899
- muxer.firstMediaStreamTimestamp = performance.now() / 1000;
1900
- this._timestampOffset = -firstAudioDataTimestamp;
1901
- } else {
1902
- this._timestampOffset = (performance.now() / 1000 - muxer.firstMediaStreamTimestamp)
1903
- - firstAudioDataTimestamp;
1904
- }
1905
- }
1906
-
1907
- if (this._encoder.getQueueSize() >= 4) {
1908
- // Drop data if the encoder is overloaded
1909
- audioData.close();
1910
- return;
1911
- }
1912
-
1913
- void this._encoder.add(new AudioSample(audioData), true)
1914
- .catch((error) => {
1915
- this._abortController?.abort();
1916
- this._promiseWithResolvers.reject(error);
1917
- });
1918
- },
1919
- });
1920
-
1921
- processor.readable.pipeTo(consumer, {
1922
- signal: this._abortController.signal,
1923
- }).catch((error) => {
1924
- // Handle AbortError silently
1925
- if (error instanceof DOMException && error.name === 'AbortError') return;
1926
-
1927
- this._promiseWithResolvers.reject(error);
1928
- });
1929
- } else {
1930
- // Let's fall back to an AudioContext approach
1931
-
1932
- // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-member-access
1933
- const AudioContext = window.AudioContext || (window as any).webkitAudioContext;
1934
-
1935
- this._audioContext = new AudioContext({ sampleRate: this._track.getSettings().sampleRate });
1936
- const sourceNode = this._audioContext.createMediaStreamSource(new MediaStream([this._track]));
1937
- this._scriptProcessorNode = this._audioContext.createScriptProcessor(4096);
1938
-
1939
- if (this._audioContext.state === 'suspended') {
1940
- await this._audioContext.resume();
1941
- }
1942
-
1943
- sourceNode.connect(this._scriptProcessorNode);
1944
- this._scriptProcessorNode.connect(this._audioContext.destination);
1945
-
1946
- let audioReceived = false;
1947
- let totalDuration = 0;
1948
-
1949
- this._scriptProcessorNode.onaudioprocess = (event) => {
1950
- const iterator = AudioSample._fromAudioBuffer(event.inputBuffer, totalDuration);
1951
- totalDuration += event.inputBuffer.duration;
1952
-
1953
- for (const audioSample of iterator) {
1954
- if (!audioReceived) {
1955
- audioReceived = true;
1956
-
1957
- const muxer = this._connectedTrack!.output._muxer;
1958
- if (muxer.firstMediaStreamTimestamp === null) {
1959
- muxer.firstMediaStreamTimestamp = performance.now() / 1000;
1960
- } else {
1961
- this._timestampOffset = performance.now() / 1000 - muxer.firstMediaStreamTimestamp;
1962
- }
1963
- }
1964
-
1965
- if (this._encoder.getQueueSize() >= 4) {
1966
- // Drop data if the encoder is overloaded
1967
- audioSample.close();
1968
- continue;
1969
- }
1970
-
1971
- void this._encoder.add(audioSample, true)
1972
- .catch((error) => {
1973
- void this._audioContext!.suspend();
1974
- this._promiseWithResolvers.reject(error);
1975
- });
1976
- }
1977
- };
1978
- }
1979
- }
1980
-
1981
- /** @internal */
1982
- override async _flushAndClose(forceClose: boolean) {
1983
- if (this._abortController) {
1984
- this._abortController.abort();
1985
- this._abortController = null;
1986
- }
1987
-
1988
- if (this._audioContext) {
1989
- assert(this._scriptProcessorNode);
1990
-
1991
- this._scriptProcessorNode.disconnect();
1992
- await this._audioContext.suspend();
1993
- }
1994
-
1995
- await this._encoder.flushAndClose(forceClose);
1996
- }
1997
- }
1998
-
1999
- // === MEDIA STREAM TRACK PROCESSOR WORKER ===
2000
-
2001
- type MediaStreamTrackProcessorWorkerMessage = {
2002
- type: 'support';
2003
- supported: boolean;
2004
- } | {
2005
- type: 'videoFrame';
2006
- trackId: number;
2007
- videoFrame: VideoFrame;
2008
- } | {
2009
- type: 'trackStopped';
2010
- trackId: number;
2011
- } | {
2012
- type: 'error';
2013
- trackId: number;
2014
- error: Error;
2015
- };
2016
-
2017
- type MediaStreamTrackProcessorControllerMessage = {
2018
- type: 'videoTrack';
2019
- trackId: number;
2020
- track: MediaStreamVideoTrack;
2021
- } | {
2022
- type: 'stopTrack';
2023
- trackId: number;
2024
- };
2025
-
2026
- const mediaStreamTrackProcessorWorkerCode = () => {
2027
- const sendMessage = (message: MediaStreamTrackProcessorWorkerMessage, transfer?: Transferable[]) => {
2028
- if (transfer) {
2029
- self.postMessage(message, { transfer });
2030
- } else {
2031
- self.postMessage(message);
2032
- }
2033
- };
2034
-
2035
- // Immediately send a message to the main thread, letting them know of the support
2036
- sendMessage({
2037
- type: 'support',
2038
- supported: typeof MediaStreamTrackProcessor !== 'undefined',
2039
- });
2040
-
2041
- const abortControllers = new Map<number, AbortController>();
2042
- const activeTracks = new Map<number, MediaStreamVideoTrack>();
2043
-
2044
- self.addEventListener('message', (event) => {
2045
- const message = event.data as MediaStreamTrackProcessorControllerMessage;
2046
-
2047
- switch (message.type) {
2048
- case 'videoTrack': {
2049
- activeTracks.set(message.trackId, message.track);
2050
-
2051
- const processor = new MediaStreamTrackProcessor({ track: message.track });
2052
- const consumer = new WritableStream<VideoFrame>({
2053
- write: (videoFrame) => {
2054
- if (!activeTracks.has(message.trackId)) {
2055
- videoFrame.close();
2056
- return;
2057
- }
2058
-
2059
- // Send it to the main thread
2060
- sendMessage({
2061
- type: 'videoFrame',
2062
- trackId: message.trackId,
2063
- videoFrame,
2064
- }, [videoFrame]);
2065
- },
2066
- });
2067
-
2068
- const abortController = new AbortController();
2069
- abortControllers.set(message.trackId, abortController);
2070
-
2071
- processor.readable.pipeTo(consumer, {
2072
- signal: abortController.signal,
2073
- }).catch((error: Error) => {
2074
- // Handle AbortError silently
2075
- if (error instanceof DOMException && error.name === 'AbortError') return;
2076
-
2077
- sendMessage({
2078
- type: 'error',
2079
- trackId: message.trackId,
2080
- error,
2081
- });
2082
- });
2083
- }; break;
2084
-
2085
- case 'stopTrack': {
2086
- const abortController = abortControllers.get(message.trackId);
2087
- if (abortController) {
2088
- abortController.abort();
2089
- abortControllers.delete(message.trackId);
2090
- }
2091
-
2092
- const track = activeTracks.get(message.trackId);
2093
- track?.stop();
2094
- activeTracks.delete(message.trackId);
2095
-
2096
- sendMessage({
2097
- type: 'trackStopped',
2098
- trackId: message.trackId,
2099
- });
2100
- }; break;
2101
-
2102
- default: assertNever(message);
2103
- }
2104
- });
2105
- };
2106
-
2107
- let nextMediaStreamTrackProcessorWorkerId = 0;
2108
- let mediaStreamTrackProcessorWorker: Worker | null = null;
2109
-
2110
- const initMediaStreamTrackProcessorWorker = () => {
2111
- const blob = new Blob(
2112
- [`(${mediaStreamTrackProcessorWorkerCode.toString()})()`],
2113
- { type: 'application/javascript' },
2114
- );
2115
- const url = URL.createObjectURL(blob);
2116
-
2117
- mediaStreamTrackProcessorWorker = new Worker(url);
2118
- };
2119
-
2120
- let mediaStreamTrackProcessorIsSupportedInWorkerCache: boolean | null = null;
2121
- const mediaStreamTrackProcessorIsSupportedInWorker = async () => {
2122
- if (mediaStreamTrackProcessorIsSupportedInWorkerCache !== null) {
2123
- return mediaStreamTrackProcessorIsSupportedInWorkerCache;
2124
- }
2125
-
2126
- if (!mediaStreamTrackProcessorWorker) {
2127
- initMediaStreamTrackProcessorWorker();
2128
- }
2129
-
2130
- return new Promise<boolean>((resolve) => {
2131
- assert(mediaStreamTrackProcessorWorker);
2132
-
2133
- const listener = (event: MessageEvent) => {
2134
- const message = event.data as MediaStreamTrackProcessorWorkerMessage;
2135
-
2136
- if (message.type === 'support') {
2137
- mediaStreamTrackProcessorIsSupportedInWorkerCache = message.supported;
2138
- mediaStreamTrackProcessorWorker!.removeEventListener('message', listener);
2139
-
2140
- resolve(message.supported);
2141
- }
2142
- };
2143
-
2144
- mediaStreamTrackProcessorWorker.addEventListener('message', listener);
2145
- });
2146
- };
2147
-
2148
- const sendMessageToMediaStreamTrackProcessorWorker = (
2149
- message: MediaStreamTrackProcessorControllerMessage,
2150
- transfer?: Transferable[],
2151
- ) => {
2152
- assert(mediaStreamTrackProcessorWorker);
2153
-
2154
- if (transfer) {
2155
- mediaStreamTrackProcessorWorker.postMessage(message, transfer);
2156
- } else {
2157
- mediaStreamTrackProcessorWorker.postMessage(message);
2158
- }
2159
- };
2160
-
2161
- /**
2162
- * Base class for subtitle sources - sources for subtitle tracks.
2163
- * @group Media sources
2164
- * @public
2165
- */
2166
- export abstract class SubtitleSource extends MediaSource {
2167
- /** @internal */
2168
- override _connectedTrack: OutputSubtitleTrack | null = null;
2169
- /** @internal */
2170
- _codec: SubtitleCodec;
2171
-
2172
- /** Internal constructor. */
2173
- constructor(codec: SubtitleCodec) {
2174
- super();
2175
-
2176
- if (!SUBTITLE_CODECS.includes(codec)) {
2177
- throw new TypeError(`Invalid subtitle codec '${codec}'. Must be one of: ${SUBTITLE_CODECS.join(', ')}.`);
2178
- }
2179
-
2180
- this._codec = codec;
2181
- }
2182
- }
2183
-
2184
- /**
2185
- * This source can be used to add subtitles from a subtitle text file.
2186
- * @group Media sources
2187
- * @public
2188
- */
2189
- export class TextSubtitleSource extends SubtitleSource {
2190
- /** @internal */
2191
- private _parser: SubtitleParser;
2192
- /** @internal */
2193
- private _error: Error | null = null;
2194
-
2195
- /** Creates a new {@link TextSubtitleSource} where added text chunks are in the specified `codec`. */
2196
- constructor(codec: SubtitleCodec) {
2197
- super(codec);
2198
-
2199
- this._parser = new SubtitleParser({
2200
- codec,
2201
- output: (cue, metadata) => {
2202
- void this._connectedTrack?.output._muxer.addSubtitleCue(this._connectedTrack, cue, metadata)
2203
- .catch((error) => {
2204
- this._error ??= error;
2205
- });
2206
- },
2207
- });
2208
- }
2209
-
2210
- /**
2211
- * Parses the subtitle text according to the specified codec and adds it to the output track. You don't have to
2212
- * add the entire subtitle file at once here; you can provide it in chunks.
2213
- *
2214
- * @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
2215
- * to respect writer and encoder backpressure.
2216
- */
2217
- add(text: string) {
2218
- if (typeof text !== 'string') {
2219
- throw new TypeError('text must be a string.');
2220
- }
2221
-
2222
- this._checkForError();
2223
-
2224
- this._ensureValidAdd();
2225
- this._parser.parse(text);
2226
-
2227
- return this._connectedTrack!.output._muxer.mutex.currentPromise;
2228
- }
2229
-
2230
- /** @internal */
2231
- _checkForError() {
2232
- if (this._error) {
2233
- throw this._error;
2234
- }
2235
- }
2236
-
2237
- /** @internal */
2238
- override async _flushAndClose(forceClose: boolean) {
2239
- if (!forceClose) {
2240
- this._checkForError();
2241
- }
2242
- }
2243
- }