@meframe/core 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (159)
  1. package/README.md +17 -4
  2. package/dist/Meframe.d.ts.map +1 -1
  3. package/dist/Meframe.js +2 -4
  4. package/dist/Meframe.js.map +1 -1
  5. package/dist/cache/CacheManager.d.ts.map +1 -1
  6. package/dist/cache/CacheManager.js +8 -1
  7. package/dist/cache/CacheManager.js.map +1 -1
  8. package/dist/config/defaults.d.ts.map +1 -1
  9. package/dist/config/defaults.js +2 -9
  10. package/dist/config/defaults.js.map +1 -1
  11. package/dist/config/types.d.ts +3 -4
  12. package/dist/config/types.d.ts.map +1 -1
  13. package/dist/controllers/PlaybackController.d.ts +4 -2
  14. package/dist/controllers/PlaybackController.d.ts.map +1 -1
  15. package/dist/controllers/PlaybackController.js +7 -13
  16. package/dist/controllers/PlaybackController.js.map +1 -1
  17. package/dist/controllers/PreRenderService.d.ts +3 -2
  18. package/dist/controllers/PreRenderService.d.ts.map +1 -1
  19. package/dist/controllers/PreRenderService.js.map +1 -1
  20. package/dist/controllers/PreviewHandle.d.ts +2 -0
  21. package/dist/controllers/PreviewHandle.d.ts.map +1 -1
  22. package/dist/controllers/PreviewHandle.js +6 -0
  23. package/dist/controllers/PreviewHandle.js.map +1 -1
  24. package/dist/controllers/index.d.ts +1 -1
  25. package/dist/controllers/index.d.ts.map +1 -1
  26. package/dist/controllers/types.d.ts +2 -12
  27. package/dist/controllers/types.d.ts.map +1 -1
  28. package/dist/event/events.d.ts +5 -59
  29. package/dist/event/events.d.ts.map +1 -1
  30. package/dist/event/events.js +1 -6
  31. package/dist/event/events.js.map +1 -1
  32. package/dist/model/CompositionModel.js +1 -2
  33. package/dist/model/CompositionModel.js.map +1 -1
  34. package/dist/orchestrator/CompositionPlanner.d.ts.map +1 -1
  35. package/dist/orchestrator/CompositionPlanner.js +1 -0
  36. package/dist/orchestrator/CompositionPlanner.js.map +1 -1
  37. package/dist/orchestrator/Orchestrator.d.ts.map +1 -1
  38. package/dist/orchestrator/Orchestrator.js +3 -13
  39. package/dist/orchestrator/Orchestrator.js.map +1 -1
  40. package/dist/orchestrator/VideoClipSession.d.ts.map +1 -1
  41. package/dist/orchestrator/VideoClipSession.js +4 -5
  42. package/dist/orchestrator/VideoClipSession.js.map +1 -1
  43. package/dist/orchestrator/types.d.ts +1 -1
  44. package/dist/orchestrator/types.d.ts.map +1 -1
  45. package/dist/stages/compose/GlobalAudioSession.d.ts.map +1 -1
  46. package/dist/stages/compose/GlobalAudioSession.js +3 -2
  47. package/dist/stages/compose/GlobalAudioSession.js.map +1 -1
  48. package/dist/stages/compose/VideoComposer.d.ts.map +1 -1
  49. package/dist/stages/compose/types.d.ts +3 -1
  50. package/dist/stages/compose/types.d.ts.map +1 -1
  51. package/dist/stages/decode/AudioChunkDecoder.d.ts.map +1 -1
  52. package/dist/stages/decode/VideoChunkDecoder.d.ts +0 -1
  53. package/dist/stages/decode/VideoChunkDecoder.d.ts.map +1 -1
  54. package/dist/stages/demux/MP4Demuxer.d.ts +2 -1
  55. package/dist/stages/demux/MP4Demuxer.d.ts.map +1 -1
  56. package/dist/stages/load/EventHandlers.d.ts +2 -11
  57. package/dist/stages/load/EventHandlers.d.ts.map +1 -1
  58. package/dist/stages/load/EventHandlers.js +1 -24
  59. package/dist/stages/load/EventHandlers.js.map +1 -1
  60. package/dist/stages/load/ResourceLoader.d.ts.map +1 -1
  61. package/dist/stages/load/ResourceLoader.js +11 -13
  62. package/dist/stages/load/ResourceLoader.js.map +1 -1
  63. package/dist/stages/load/TaskManager.d.ts +1 -1
  64. package/dist/stages/load/TaskManager.d.ts.map +1 -1
  65. package/dist/stages/load/TaskManager.js +3 -2
  66. package/dist/stages/load/TaskManager.js.map +1 -1
  67. package/dist/stages/load/types.d.ts +2 -0
  68. package/dist/stages/load/types.d.ts.map +1 -1
  69. package/dist/utils/time-utils.d.ts +3 -2
  70. package/dist/utils/time-utils.d.ts.map +1 -1
  71. package/dist/utils/time-utils.js +2 -1
  72. package/dist/utils/time-utils.js.map +1 -1
  73. package/dist/vite-plugin.d.ts +19 -0
  74. package/dist/vite-plugin.d.ts.map +1 -0
  75. package/dist/vite-plugin.js +145 -0
  76. package/dist/vite-plugin.js.map +1 -0
  77. package/dist/worker/WorkerPool.d.ts +7 -4
  78. package/dist/worker/WorkerPool.d.ts.map +1 -1
  79. package/dist/worker/WorkerPool.js +29 -18
  80. package/dist/worker/WorkerPool.js.map +1 -1
  81. package/dist/{stages/demux → workers}/MP4Demuxer.js +17 -15
  82. package/dist/workers/MP4Demuxer.js.map +1 -0
  83. package/dist/workers/WorkerChannel.js +486 -0
  84. package/dist/workers/WorkerChannel.js.map +1 -0
  85. package/dist/workers/mp4box.all.js +7049 -0
  86. package/dist/workers/mp4box.all.js.map +1 -0
  87. package/dist/workers/stages/compose/audio-compose.worker.js +1063 -0
  88. package/dist/workers/stages/compose/audio-compose.worker.js.map +1 -0
  89. package/dist/workers/stages/compose/video-compose.worker.js +1209 -0
  90. package/dist/workers/stages/compose/video-compose.worker.js.map +1 -0
  91. package/dist/{stages → workers/stages}/decode/decode.worker.js +401 -20
  92. package/dist/workers/stages/decode/decode.worker.js.map +1 -0
  93. package/dist/{stages → workers/stages}/demux/audio-demux.worker.js +184 -4
  94. package/dist/workers/stages/demux/audio-demux.worker.js.map +1 -0
  95. package/dist/{stages → workers/stages}/demux/video-demux.worker.js +7 -30
  96. package/dist/workers/stages/demux/video-demux.worker.js.map +1 -0
  97. package/dist/{stages → workers/stages}/encode/encode.worker.js +238 -5
  98. package/dist/workers/stages/encode/encode.worker.js.map +1 -0
  99. package/dist/{stages/mux/MP4Muxer.js → workers/stages/mux/mux.worker.js} +244 -5
  100. package/dist/workers/stages/mux/mux.worker.js.map +1 -0
  101. package/package.json +27 -21
  102. package/dist/model/types.js +0 -5
  103. package/dist/model/types.js.map +0 -1
  104. package/dist/plugins/BackpressureMonitor.js +0 -62
  105. package/dist/plugins/BackpressureMonitor.js.map +0 -1
  106. package/dist/stages/compose/AudioDucker.js +0 -161
  107. package/dist/stages/compose/AudioDucker.js.map +0 -1
  108. package/dist/stages/compose/AudioMixer.js +0 -373
  109. package/dist/stages/compose/AudioMixer.js.map +0 -1
  110. package/dist/stages/compose/FilterProcessor.js +0 -226
  111. package/dist/stages/compose/FilterProcessor.js.map +0 -1
  112. package/dist/stages/compose/LayerRenderer.js +0 -215
  113. package/dist/stages/compose/LayerRenderer.js.map +0 -1
  114. package/dist/stages/compose/TransitionProcessor.js +0 -189
  115. package/dist/stages/compose/TransitionProcessor.js.map +0 -1
  116. package/dist/stages/compose/VideoComposer.js +0 -186
  117. package/dist/stages/compose/VideoComposer.js.map +0 -1
  118. package/dist/stages/compose/audio-compose.worker.d.ts +0 -79
  119. package/dist/stages/compose/audio-compose.worker.d.ts.map +0 -1
  120. package/dist/stages/compose/audio-compose.worker.js +0 -541
  121. package/dist/stages/compose/audio-compose.worker.js.map +0 -1
  122. package/dist/stages/compose/video-compose.worker.d.ts +0 -60
  123. package/dist/stages/compose/video-compose.worker.d.ts.map +0 -1
  124. package/dist/stages/compose/video-compose.worker.js +0 -369
  125. package/dist/stages/compose/video-compose.worker.js.map +0 -1
  126. package/dist/stages/decode/AudioChunkDecoder.js +0 -83
  127. package/dist/stages/decode/AudioChunkDecoder.js.map +0 -1
  128. package/dist/stages/decode/BaseDecoder.js +0 -130
  129. package/dist/stages/decode/BaseDecoder.js.map +0 -1
  130. package/dist/stages/decode/VideoChunkDecoder.js +0 -209
  131. package/dist/stages/decode/VideoChunkDecoder.js.map +0 -1
  132. package/dist/stages/decode/decode.worker.d.ts +0 -70
  133. package/dist/stages/decode/decode.worker.d.ts.map +0 -1
  134. package/dist/stages/decode/decode.worker.js.map +0 -1
  135. package/dist/stages/demux/MP3FrameParser.js +0 -186
  136. package/dist/stages/demux/MP3FrameParser.js.map +0 -1
  137. package/dist/stages/demux/MP4Demuxer.js.map +0 -1
  138. package/dist/stages/demux/audio-demux.worker.d.ts +0 -51
  139. package/dist/stages/demux/audio-demux.worker.d.ts.map +0 -1
  140. package/dist/stages/demux/audio-demux.worker.js.map +0 -1
  141. package/dist/stages/demux/video-demux.worker.d.ts +0 -48
  142. package/dist/stages/demux/video-demux.worker.d.ts.map +0 -1
  143. package/dist/stages/demux/video-demux.worker.js.map +0 -1
  144. package/dist/stages/encode/AudioChunkEncoder.js +0 -37
  145. package/dist/stages/encode/AudioChunkEncoder.js.map +0 -1
  146. package/dist/stages/encode/BaseEncoder.js +0 -164
  147. package/dist/stages/encode/BaseEncoder.js.map +0 -1
  148. package/dist/stages/encode/VideoChunkEncoder.js +0 -50
  149. package/dist/stages/encode/VideoChunkEncoder.js.map +0 -1
  150. package/dist/stages/encode/encode.worker.d.ts +0 -3
  151. package/dist/stages/encode/encode.worker.d.ts.map +0 -1
  152. package/dist/stages/encode/encode.worker.js.map +0 -1
  153. package/dist/stages/mux/MP4Muxer.js.map +0 -1
  154. package/dist/stages/mux/mux.worker.d.ts +0 -65
  155. package/dist/stages/mux/mux.worker.d.ts.map +0 -1
  156. package/dist/stages/mux/mux.worker.js +0 -219
  157. package/dist/stages/mux/mux.worker.js.map +0 -1
  158. package/dist/stages/mux/utils.js +0 -34
  159. package/dist/stages/mux/utils.js.map +0 -1
@@ -0,0 +1,1063 @@
1
+ import { W as WorkerChannel, a as WorkerMessageType, b as WorkerState } from "../../WorkerChannel.js";
2
+ class AudioMixer {
3
+ config;
4
+ tracksMap = /* @__PURE__ */ new Map();
5
+ constructor(config) {
6
+ this.config = config;
7
+ }
8
+ getConfig() {
9
+ return { ...this.config };
10
+ }
11
+ updateConfig(update) {
12
+ this.config = { ...this.config, ...update };
13
+ }
14
+ get tracks() {
15
+ return Array.from(this.tracksMap.values());
16
+ }
17
+ createMixStream(ducker) {
18
+ return new TransformStream(
19
+ {
20
+ transform: async (request, controller) => {
21
+ try {
22
+ const frameCount = this.getFrameCount(request.durationUs);
23
+ if (ducker && request.duckingConfig?.enabled && frameCount > 0) {
24
+ const envelope = await ducker.generateDuckingEnvelope(request.tracks, frameCount);
25
+ for (const track of request.tracks) {
26
+ if (request.duckingConfig.targetTracks.includes(track.trackId)) {
27
+ track.duckingEnvelope = ducker.applyEnvelopeToVolume(1, envelope);
28
+ }
29
+ }
30
+ }
31
+ const result = await this.mixTracks(request, frameCount);
32
+ controller.enqueue(result);
33
+ } catch (error) {
34
+ controller.error(error);
35
+ }
36
+ }
37
+ },
38
+ {
39
+ highWaterMark: 2,
40
+ size: () => 1
41
+ }
42
+ );
43
+ }
44
+ async mixTracks(request, precomputedFrameCount) {
45
+ const tracks = request.tracks ?? [];
46
+ const frameCount = precomputedFrameCount ?? this.getFrameCount(request.durationUs);
47
+ const requestedChannelCount = this.config.numberOfChannels ?? 0;
48
+ const inferredChannelCount = tracks.reduce((max, track) => {
49
+ const trackChannels = track?.numberOfChannels ?? track?.audioData?.numberOfChannels ?? this.config.numberOfChannels ?? 0;
50
+ return trackChannels > max ? trackChannels : max;
51
+ }, 0);
52
+ const channelCount = requestedChannelCount > 0 ? requestedChannelCount : Math.max(inferredChannelCount, 1);
53
+ const outputChannels = Array.from({ length: channelCount }, () => {
54
+ return new Float32Array(frameCount);
55
+ });
56
+ for (const track of tracks) {
57
+ if (!track) {
58
+ continue;
59
+ }
60
+ const resolvedAudioData = track.audioData;
61
+ if (!resolvedAudioData) {
62
+ continue;
63
+ }
64
+ this.mixTrackIntoOutput(
65
+ outputChannels,
66
+ {
67
+ ...track,
68
+ audioData: resolvedAudioData,
69
+ numberOfChannels: track.numberOfChannels ?? resolvedAudioData.numberOfChannels ?? this.config.numberOfChannels,
70
+ sampleRate: track.sampleRate ?? resolvedAudioData.sampleRate ?? this.config.sampleRate
71
+ },
72
+ request.timeUs,
73
+ frameCount
74
+ );
75
+ }
76
+ const { peakLevel, rmsLevel } = this.limitAndMeasure(outputChannels);
77
+ const audioData = this.createAudioData(outputChannels, request.timeUs);
78
+ return {
79
+ audioData,
80
+ timeUs: request.timeUs,
81
+ durationUs: request.durationUs,
82
+ peakLevel,
83
+ rmsLevel
84
+ };
85
+ }
86
+ addTrack(track) {
87
+ this.tracksMap.set(track.id, track);
88
+ }
89
+ removeTrack(trackId) {
90
+ this.tracksMap.delete(trackId);
91
+ }
92
+ updateTrack(trackId, patch) {
93
+ const track = this.tracksMap.get(trackId);
94
+ if (!track) {
95
+ return;
96
+ }
97
+ const { config, ...rest } = patch;
98
+ if (config) {
99
+ Object.assign(track.config, config);
100
+ }
101
+ Object.assign(track, rest);
102
+ }
103
+ mixTrackIntoOutput(outputChannels, track, mixStartUs, totalFrameCount) {
104
+ if (totalFrameCount === 0) {
105
+ track.audioData.close();
106
+ return;
107
+ }
108
+ if (track.sampleRate !== this.config.sampleRate) {
109
+ track.audioData.close();
110
+ throw new Error("AudioMixer: sample rate mismatch");
111
+ }
112
+ const trackChannelCount = track.audioData.numberOfChannels ?? track.numberOfChannels ?? 0;
113
+ if (trackChannelCount === 0) {
114
+ track.audioData.close();
115
+ return;
116
+ }
117
+ const trackChannels = this.extractChannels(track.audioData);
118
+ if (trackChannels.length === 0) {
119
+ track.audioData.close();
120
+ return;
121
+ }
122
+ const trackFrameCount = track.audioData.numberOfFrames;
123
+ if (trackFrameCount === 0) {
124
+ track.audioData.close();
125
+ return;
126
+ }
127
+ const timestampUs = track.audioData.timestamp ?? mixStartUs;
128
+ const deltaUs = timestampUs - mixStartUs;
129
+ let outputOffsetFrames = Math.round(deltaUs / 1e6 * this.config.sampleRate);
130
+ let sourceOffsetFrames = 0;
131
+ if (outputOffsetFrames < 0) {
132
+ sourceOffsetFrames = Math.min(trackFrameCount, -outputOffsetFrames);
133
+ outputOffsetFrames = 0;
134
+ }
135
+ if (outputOffsetFrames >= totalFrameCount) {
136
+ track.audioData.close();
137
+ return;
138
+ }
139
+ const availableFrames = Math.min(
140
+ trackFrameCount - sourceOffsetFrames,
141
+ totalFrameCount - outputOffsetFrames
142
+ );
143
+ if (availableFrames <= 0) {
144
+ track.audioData.close();
145
+ return;
146
+ }
147
+ const gains = this.buildGainEnvelope(
148
+ track,
149
+ availableFrames,
150
+ outputOffsetFrames,
151
+ sourceOffsetFrames,
152
+ trackFrameCount
153
+ );
154
+ const destinationChannelCount = outputChannels.length;
155
+ const sourceChannelCount = trackChannels.length;
156
+ for (let channelIndex = 0; channelIndex < destinationChannelCount; channelIndex++) {
157
+ const destination = outputChannels[channelIndex];
158
+ const source = trackChannels[channelIndex] ?? trackChannels[sourceChannelCount - 1];
159
+ if (!destination || !source) continue;
160
+ for (let frameIndex = 0; frameIndex < availableFrames; frameIndex++) {
161
+ const sample = source[sourceOffsetFrames + frameIndex] ?? 0;
162
+ const gain = gains[frameIndex] ?? 0;
163
+ destination[outputOffsetFrames + frameIndex] = (destination[outputOffsetFrames + frameIndex] ?? 0) + sample * gain;
164
+ }
165
+ }
166
+ track.audioData.close();
167
+ }
168
+ buildGainEnvelope(track, length, outputOffsetFrames, sourceOffsetFrames, trackFrameCount) {
169
+ const gains = new Float32Array(length);
170
+ const baseVolume = typeof track.config.volume === "number" ? track.config.volume : 1;
171
+ gains.fill(baseVolume);
172
+ const fadeInSamples = this.getFadeSampleCount(track.config.fadeIn);
173
+ const fadeOutSamples = this.getFadeSampleCount(track.config.fadeOut);
174
+ const clipDurationSamples = this.getClipSampleCount(track.config.durationUs) || trackFrameCount;
175
+ const trackStartFrame = this.computeTrackStartFrame(track);
176
+ for (let i = 0; i < length; i++) {
177
+ const envelopeIndex = outputOffsetFrames + i;
178
+ const absoluteFrame = trackStartFrame + sourceOffsetFrames + i;
179
+ let gain = baseVolume;
180
+ if (fadeInSamples > 0 && absoluteFrame < fadeInSamples) {
181
+ const progress = Math.min(1, absoluteFrame / fadeInSamples);
182
+ gain *= this.getCurveValue(progress, track.config.fadeIn?.curve);
183
+ }
184
+ if (fadeOutSamples > 0 && clipDurationSamples > 0) {
185
+ const fadeStart = Math.max(0, clipDurationSamples - fadeOutSamples);
186
+ if (absoluteFrame >= fadeStart) {
187
+ const progress = Math.min(1, (absoluteFrame - fadeStart) / fadeOutSamples);
188
+ const remaining = Math.max(0, 1 - progress);
189
+ gain *= this.getCurveValue(remaining, track.config.fadeOut?.curve);
190
+ }
191
+ }
192
+ if (track.duckingEnvelope && envelopeIndex < track.duckingEnvelope.length && envelopeIndex >= 0) {
193
+ gain *= track.duckingEnvelope[envelopeIndex] ?? 1;
194
+ }
195
+ gains[i] = gain;
196
+ }
197
+ return gains;
198
+ }
199
+ extractChannels(audioData) {
200
+ const configuredChannels = this.config.numberOfChannels ?? 0;
201
+ const channelCount = audioData.numberOfChannels ?? configuredChannels;
202
+ const frameCount = audioData.numberOfFrames;
203
+ const format = audioData.format ?? "f32";
204
+ if (!channelCount || !frameCount) {
205
+ return [];
206
+ }
207
+ const toFloat = (value) => value / 32768;
208
+ const zeroChannels = () => Array.from(
209
+ { length: configuredChannels || channelCount },
210
+ () => new Float32Array(frameCount)
211
+ );
212
+ if (format === "f32") {
213
+ const interleaved = new Float32Array(frameCount * channelCount);
214
+ audioData.copyTo(interleaved, { format: "f32", planeIndex: 0 });
215
+ const channels2 = zeroChannels();
216
+ for (let frame = 0; frame < frameCount; frame++) {
217
+ const offset = frame * channelCount;
218
+ for (let channel = 0; channel < channels2.length; channel++) {
219
+ const channelArray = channels2[channel];
220
+ if (!channelArray) continue;
221
+ const sourceChannel = channel < channelCount ? channel : channelCount - 1;
222
+ channelArray[frame] = interleaved[offset + sourceChannel] ?? 0;
223
+ }
224
+ }
225
+ return channels2;
226
+ }
227
+ if (format === "s16") {
228
+ const interleaved = new Int16Array(frameCount * channelCount);
229
+ audioData.copyTo(interleaved, { format: "s16", planeIndex: 0 });
230
+ const channels2 = zeroChannels();
231
+ for (let frame = 0; frame < frameCount; frame++) {
232
+ const offset = frame * channelCount;
233
+ for (let channel = 0; channel < channels2.length; channel++) {
234
+ const channelArray = channels2[channel];
235
+ if (!channelArray) continue;
236
+ const sourceChannel = channel < channelCount ? channel : channelCount - 1;
237
+ channelArray[frame] = toFloat(interleaved[offset + sourceChannel] ?? 0);
238
+ }
239
+ }
240
+ return channels2;
241
+ }
242
+ if (format === "f32-planar") {
243
+ const channels2 = zeroChannels();
244
+ for (let channel = 0; channel < channels2.length; channel++) {
245
+ const channelArray = channels2[channel];
246
+ if (!channelArray) continue;
247
+ const sourceChannel = channel < channelCount ? channel : channelCount - 1;
248
+ audioData.copyTo(channelArray, { planeIndex: sourceChannel, format: "f32-planar" });
249
+ }
250
+ return channels2;
251
+ }
252
+ if (format === "s16-planar") {
253
+ const tmp = new Int16Array(frameCount);
254
+ const channels2 = zeroChannels();
255
+ for (let channel = 0; channel < channels2.length; channel++) {
256
+ const channelArray = channels2[channel];
257
+ if (!channelArray) continue;
258
+ const sourceChannel = channel < channelCount ? channel : channelCount - 1;
259
+ audioData.copyTo(tmp, { planeIndex: sourceChannel, format: "s16-planar" });
260
+ for (let i = 0; i < frameCount; i++) {
261
+ channelArray[i] = toFloat(tmp[i] ?? 0);
262
+ }
263
+ }
264
+ return channels2;
265
+ }
266
+ const channels = zeroChannels();
267
+ for (let channel = 0; channel < channels.length; channel++) {
268
+ const channelArray = channels[channel];
269
+ if (!channelArray) continue;
270
+ const sourceChannel = channel < channelCount ? channel : channelCount - 1;
271
+ audioData.copyTo(channelArray, { planeIndex: sourceChannel });
272
+ }
273
+ return channels;
274
+ }
275
+ limitAndMeasure(channels) {
276
+ let peak = 0;
277
+ let sumSquares = 0;
278
+ let samples = 0;
279
+ for (const channel of channels) {
280
+ for (let i = 0; i < channel.length; i++) {
281
+ let sample = channel[i] ?? 0;
282
+ if (sample > 1) {
283
+ sample = 1;
284
+ } else if (sample < -1) {
285
+ sample = -1;
286
+ }
287
+ channel[i] = sample;
288
+ const absSample = Math.abs(sample);
289
+ if (absSample > peak) {
290
+ peak = absSample;
291
+ }
292
+ sumSquares += sample * sample;
293
+ samples++;
294
+ }
295
+ }
296
+ const rmsLevel = samples > 0 ? Math.sqrt(sumSquares / samples) : 0;
297
+ return {
298
+ peakLevel: peak,
299
+ rmsLevel
300
+ };
301
+ }
302
+ createAudioData(channels, timestampUs) {
303
+ const configuredChannels = this.config.numberOfChannels ?? 0;
304
+ const inferredChannels = channels.length;
305
+ const numberOfChannels = (inferredChannels > 0 ? inferredChannels : configuredChannels) || 1;
306
+ const numberOfFrames = channels[0]?.length ?? 0;
307
+ if (numberOfFrames === 0) {
308
+ return new AudioData({
309
+ format: "f32",
310
+ sampleRate: this.config.sampleRate,
311
+ numberOfFrames: 0,
312
+ numberOfChannels,
313
+ timestamp: timestampUs,
314
+ data: new Float32Array(0)
315
+ });
316
+ }
317
+ const interleaved = new Float32Array(numberOfFrames * numberOfChannels);
318
+ for (let frame = 0; frame < numberOfFrames; frame++) {
319
+ for (let channel = 0; channel < numberOfChannels; channel++) {
320
+ const sourceChannel = channels[channel] ?? channels[channels.length - 1];
321
+ interleaved[frame * numberOfChannels + channel] = sourceChannel?.[frame] ?? 0;
322
+ }
323
+ }
324
+ return new AudioData({
325
+ format: "f32",
326
+ sampleRate: this.config.sampleRate,
327
+ numberOfFrames,
328
+ numberOfChannels,
329
+ timestamp: timestampUs,
330
+ data: interleaved
331
+ });
332
+ }
333
+ getFrameCount(durationUs) {
334
+ if (durationUs <= 0) {
335
+ return 0;
336
+ }
337
+ return Math.ceil(durationUs / 1e6 * this.config.sampleRate);
338
+ }
339
+ getFadeSampleCount(fade) {
340
+ if (!fade || fade.durationUs <= 0) {
341
+ return 0;
342
+ }
343
+ return Math.round(fade.durationUs / 1e6 * this.config.sampleRate);
344
+ }
345
+ getClipSampleCount(durationUs) {
346
+ if (!durationUs || durationUs <= 0) {
347
+ return 0;
348
+ }
349
+ return Math.round(durationUs / 1e6 * this.config.sampleRate);
350
+ }
351
+ computeTrackStartFrame(track) {
352
+ const audioTimestamp = track.audioData.timestamp ?? track.config.startTimeUs;
353
+ const relativeUs = audioTimestamp - track.config.startTimeUs;
354
+ const relativeFrames = Math.round(relativeUs / 1e6 * this.config.sampleRate);
355
+ return relativeFrames > 0 ? relativeFrames : 0;
356
+ }
357
+ getCurveValue(progress, curve = "linear") {
358
+ const clamped = Math.min(Math.max(progress, 0), 1);
359
+ switch (curve) {
360
+ case "exponential":
361
+ return clamped * clamped;
362
+ case "logarithmic":
363
+ return Math.log10(clamped * 9 + 1);
364
+ case "cosine":
365
+ return (1 - Math.cos(clamped * Math.PI)) / 2;
366
+ default:
367
+ return clamped;
368
+ }
369
+ }
370
+ }
371
+ class AudioDucker {
372
+ config = null;
373
+ sampleRate;
374
+ constructor(sampleRate) {
375
+ this.sampleRate = sampleRate;
376
+ }
377
+ configure(config) {
378
+ this.config = config;
379
+ }
380
+ /**
381
+ * Analyze trigger tracks (voice) and generate ducking envelope
382
+ * Returns gain values (0-1) to apply to target tracks (BGM)
383
+ */
384
+ async generateDuckingEnvelope(tracks, frameCount) {
385
+ if (!this.config?.enabled) {
386
+ return new Float32Array(frameCount).fill(1);
387
+ }
388
+ const envelope = new Float32Array(frameCount);
389
+ envelope.fill(1);
390
+ const triggerTracks = tracks.filter((t) => this.config.triggerTracks.includes(t.trackId));
391
+ if (triggerTracks.length === 0) {
392
+ return envelope;
393
+ }
394
+ for (const track of triggerTracks) {
395
+ const voiceActivity = await this.detectVoiceActivity(track.audioData);
396
+ this.applyDuckingToEnvelope(envelope, voiceActivity);
397
+ }
398
+ return envelope;
399
+ }
400
+ /**
401
+ * Voice Activity Detection (VAD)
402
+ * Simple energy-based detection with smoothing
403
+ * More sophisticated implementations could use:
404
+ * - Zero-crossing rate (ZCR) for speech/music discrimination
405
+ * - Spectral centroid for voice frequency detection
406
+ * - Machine learning models for robust VAD
407
+ */
408
+ async detectVoiceActivity(audioData) {
409
+ const frameCount = audioData.numberOfFrames;
410
+ const activity = new Float32Array(frameCount);
411
+ const monoData = new Float32Array(frameCount);
412
+ const channelData = new Float32Array(frameCount);
413
+ for (let ch = 0; ch < audioData.numberOfChannels; ch++) {
414
+ audioData.copyTo(channelData, {
415
+ planeIndex: ch,
416
+ format: "f32-planar"
417
+ });
418
+ for (let i = 0; i < frameCount; i++) {
419
+ if (monoData && channelData) {
420
+ monoData[i] = (monoData[i] || 0) + (channelData[i] || 0) / audioData.numberOfChannels;
421
+ }
422
+ }
423
+ }
424
+ const windowSize = Math.floor(this.sampleRate * 0.02);
425
+ const hopSize = Math.floor(windowSize / 2);
426
+ for (let i = 0; i < frameCount; i += hopSize) {
427
+ const end = Math.min(i + windowSize, frameCount);
428
+ let energy = 0;
429
+ for (let j = i; j < end; j++) {
430
+ if (monoData && monoData[j] !== void 0) {
431
+ const sample = monoData[j];
432
+ if (sample !== void 0) {
433
+ energy += sample * sample;
434
+ }
435
+ }
436
+ }
437
+ energy = Math.sqrt(energy / (end - i));
438
+ const threshold = 0.01;
439
+ const isVoice = energy > threshold;
440
+ for (let j = i; j < end; j++) {
441
+ activity[j] = isVoice ? 1 : 0;
442
+ }
443
+ }
444
+ return this.smoothActivityDetection(activity);
445
+ }
446
+ /**
447
+ * Smooth voice activity detection to avoid choppy ducking
448
+ * Uses a simple moving average filter
449
+ */
450
+ smoothActivityDetection(activity) {
451
+ const smoothed = new Float32Array(activity.length);
452
+ const smoothWindow = Math.floor(this.sampleRate * 0.05);
453
+ for (let i = 0; i < activity.length; i++) {
454
+ let sum = 0;
455
+ let count = 0;
456
+ for (let j = Math.max(0, i - smoothWindow); j <= Math.min(activity.length - 1, i + smoothWindow); j++) {
457
+ if (activity && activity[j] !== void 0) {
458
+ const val = activity[j];
459
+ if (val !== void 0) {
460
+ sum += val;
461
+ }
462
+ }
463
+ count++;
464
+ }
465
+ smoothed[i] = sum / count;
466
+ }
467
+ return smoothed;
468
+ }
469
+ /**
470
+ * Apply ducking based on voice activity
471
+ * Implements attack/release envelope shaping
472
+ */
473
+ applyDuckingToEnvelope(envelope, voiceActivity) {
474
+ if (!this.config) return;
475
+ const duckingLevel = 1 - this.config.duckingLevel;
476
+ const attackSamples = Math.floor(this.config.attackTimeMs / 1e3 * this.sampleRate);
477
+ const releaseSamples = Math.floor(this.config.releaseTimeMs / 1e3 * this.sampleRate);
478
+ const lookAheadSamples = this.config.lookAheadMs ? Math.floor(this.config.lookAheadMs / 1e3 * this.sampleRate) : 0;
479
+ let currentGain = 1;
480
+ let releaseCounter = 0;
481
+ for (let i = 0; i < envelope.length; i++) {
482
+ const lookAheadIndex = Math.min(i + lookAheadSamples, voiceActivity.length - 1);
483
+ const activity = voiceActivity[lookAheadIndex];
484
+ if (activity !== void 0 && activity > 0.5) {
485
+ if (currentGain > duckingLevel) {
486
+ currentGain = Math.max(duckingLevel, currentGain - (1 - duckingLevel) / attackSamples);
487
+ } else {
488
+ currentGain = duckingLevel;
489
+ }
490
+ releaseCounter = 0;
491
+ } else if (currentGain < 1) {
492
+ releaseCounter++;
493
+ if (releaseCounter > releaseSamples * 0.1) {
494
+ currentGain = Math.min(1, currentGain + (1 - duckingLevel) / releaseSamples);
495
+ }
496
+ }
497
+ envelope[i] = Math.min(envelope[i] || 1, currentGain);
498
+ }
499
+ }
500
+ /**
501
+ * Apply ducking envelope to audio buffer
502
+ * This modulates the volume over time according to the envelope
503
+ */
504
+ applyEnvelopeToVolume(baseVolume, envelope) {
505
+ const result = new Float32Array(envelope.length);
506
+ for (let i = 0; i < envelope.length; i++) {
507
+ result[i] = baseVolume * (envelope[i] || 1);
508
+ }
509
+ return result;
510
+ }
511
+ /**
512
+ * Calculate dynamic range to avoid over-compression
513
+ * Returns the difference between peak and RMS levels in dB
514
+ */
515
+ calculateDynamicRange(envelope) {
516
+ let peak = 0;
517
+ let sumSquares = 0;
518
+ for (const value of envelope) {
519
+ peak = Math.max(peak, value);
520
+ sumSquares += value * value;
521
+ }
522
+ const rms = Math.sqrt(sumSquares / envelope.length);
523
+ const peakDb = 20 * Math.log10(peak);
524
+ const rmsDb = 20 * Math.log10(rms);
525
+ return peakDb - rmsDb;
526
+ }
527
+ }
528
+ class AudioComposeWorker {
529
+ channel;
530
+ mixer = null;
531
+ ducker = null;
532
+ mixStream = null;
533
+ // Connections to other workers
534
+ decoderPort = null;
535
+ encoderPort = null;
536
+ // Track buffer map
537
+ trackBuffers = /* @__PURE__ */ new Map();
538
+ trackQueueWaiters = /* @__PURE__ */ new Map();
539
+ encoderStreamAttached = false;
540
+ mixWindowUs = 4e4;
541
+ // 40ms window
542
+ maxQueuedSegments = 8;
543
+ mixing = false;
544
+ constructor() {
545
+ this.channel = new WorkerChannel(self, {
546
+ name: "AudioComposeWorker",
547
+ timeout: 3e4
548
+ });
549
+ this.setupHandlers();
550
+ }
551
+ setupHandlers() {
552
+ this.channel.registerHandler("configure", this.handleConfigure.bind(this));
553
+ this.channel.registerHandler("connect", this.handleConnect.bind(this));
554
+ this.channel.registerHandler("add_track", this.handleAddTrack.bind(this));
555
+ this.channel.registerHandler("remove_track", this.handleRemoveTrack.bind(this));
556
+ this.channel.registerHandler("update_track", this.handleUpdateTrack.bind(this));
557
+ this.channel.registerHandler("configure_ducking", this.handleConfigureDucking.bind(this));
558
+ this.channel.registerHandler("get_stats", this.handleGetStats.bind(this));
559
+ this.channel.registerHandler(WorkerMessageType.Dispose, this.handleDispose.bind(this));
560
+ }
561
+ /** Unified connect handler mapping for stream pipeline */
562
+ async handleConnect(payload) {
563
+ if (payload.direction === "upstream") {
564
+ this.decoderPort = payload.port;
565
+ const decoderChannel = new WorkerChannel(this.decoderPort, {
566
+ name: "AudioCompose-Decoder",
567
+ timeout: 3e4
568
+ });
569
+ decoderChannel.registerHandler("audio_track:add", this.handleAddTrack.bind(this));
570
+ decoderChannel.registerHandler("audio_track:remove", this.handleRemoveTrack.bind(this));
571
+ decoderChannel.registerHandler("audio_track:update", this.handleUpdateTrack.bind(this));
572
+ decoderChannel.receiveStream(this.handleReceiveStream.bind(this));
573
+ return { success: true };
574
+ }
575
+ if (payload.direction === "downstream") {
576
+ this.encoderPort = payload.port;
577
+ return { success: true };
578
+ }
579
+ return { success: true };
580
+ }
581
/**
 * Handle an incoming audio stream from the decoder side.
 * Negotiates the mixer format from stream metadata (adopting a differing
 * sample rate, only ever widening channel count), lazily attaches the
 * encoder output stream, then buffers the track's frames and kicks the mix loop.
 * @param stream - ReadableStream of AudioData frames
 * @param metadata - stream descriptor; must carry streamType === "audio"
 */
async handleReceiveStream(stream, metadata) {
  // Ignore non-audio streams and anything arriving before configure(initial=true).
  if (metadata?.streamType !== "audio" || !this.mixStream || !this.mixer) {
    return;
  }
  const update = {};
  const currentConfig = this.mixer.getConfig();
  // Adopt the incoming sample rate whenever it differs from the mixer's.
  if (typeof metadata?.sampleRate === "number" && metadata.sampleRate > 0) {
    if (!currentConfig.sampleRate || currentConfig.sampleRate !== metadata.sampleRate) {
      update.sampleRate = metadata.sampleRate;
    }
  }
  // Channel count only widens (a mono input never downgrades a stereo mix).
  if (typeof metadata?.numberOfChannels === "number" && metadata.numberOfChannels > 0) {
    if (!currentConfig.numberOfChannels || metadata.numberOfChannels > currentConfig.numberOfChannels) {
      update.numberOfChannels = metadata.numberOfChannels;
    }
  }
  if (Object.keys(update).length > 0) {
    this.mixer.updateConfig(update);
  }
  // Re-read the (possibly updated) config so outgoing metadata reflects it.
  const mixerConfig = this.mixer.getConfig();
  const streamMetadata = {
    ...metadata,
    streamType: "audio",
    sampleRate: mixerConfig.sampleRate,
    numberOfChannels: mixerConfig.numberOfChannels
  };
  // Idempotent: no-ops once the encoder stream is already attached.
  await this.attachEncodeStream(streamMetadata);
  const trackId = metadata?.trackId ?? metadata?.clipId;
  if (!trackId) {
    console.warn("[AudioComposeWorker] Missing track identifier in audio stream metadata");
    await stream.cancel();
    return;
  }
  await this.bufferTrackStream(trackId, stream, streamMetadata);
  this.scheduleMix();
}
617
/**
 * Configure audio composer
 * @param payload.config - Audio composition configuration
 * @param payload.initial - If true, initialize worker state; otherwise just update config
 * @throws {{code: string, message: string}} plain error object per the
 *   channel's RPC convention (NOT_INITIALIZED / CONFIG_ERROR)
 */
async handleConfigure(payload) {
  const { config, initial = false } = payload;
  try {
    if (initial) {
      // Build the whole mixing pipeline: mixer -> ducker -> mix stream.
      this.channel.state = WorkerState.Ready;
      this.mixer = new AudioMixer(config);
      this.ducker = new AudioDucker(config.sampleRate);
      this.mixStream = this.mixer.createMixStream(this.ducker);
      this.channel.notify("configured", {
        sampleRate: config.sampleRate,
        numberOfChannels: config.numberOfChannels
      });
    } else {
      // NOTE(review): this path only validates that the worker is initialized —
      // `config` is never applied here, despite the docstring's "just update
      // config". Confirm whether a mixer.updateConfig(config) call is intended.
      if (!this.mixer || !this.ducker) {
        throw {
          code: "NOT_INITIALIZED",
          message: "Audio composer not initialized. Call configure with initial=true first"
        };
      }
    }
    return { success: true };
  } catch (error) {
    // Re-wrap so callers always see a { code, message } shape.
    throw {
      code: error.code || "CONFIG_ERROR",
      message: error.message
    };
  }
}
650
+ /**
651
+ * Connect to decoder worker to receive audio streams
652
+ */
653
+ /**
654
+ * Add an audio track
655
+ */
656
+ handleAddTrack(payload) {
657
+ if (!this.mixer) {
658
+ throw {
659
+ code: "NOT_CONFIGURED",
660
+ message: "Mixer not configured"
661
+ };
662
+ }
663
+ const config = this.cloneTrackConfig(payload.config);
664
+ const track = {
665
+ id: payload.trackId,
666
+ clipId: payload.clipId,
667
+ type: payload.type ?? "other",
668
+ config
669
+ };
670
+ this.mixer.addTrack(track);
671
+ this.trackBuffers.set(payload.trackId, {
672
+ clipId: payload.clipId,
673
+ queue: [],
674
+ ended: false,
675
+ config,
676
+ type: payload.type ?? "other"
677
+ });
678
+ this.channel.notify("track_added", {
679
+ trackId: track.id,
680
+ trackType: track.type
681
+ });
682
+ return { success: true };
683
+ }
684
+ /**
685
+ * Remove an audio track
686
+ */
687
+ handleRemoveTrack(payload) {
688
+ if (!this.mixer) {
689
+ throw {
690
+ code: "NOT_CONFIGURED",
691
+ message: "Mixer not configured"
692
+ };
693
+ }
694
+ this.mixer.removeTrack(payload.trackId);
695
+ this.disposeTrackBuffer(payload.trackId);
696
+ this.channel.notify("track_removed", {
697
+ trackId: payload.trackId
698
+ });
699
+ return { success: true };
700
+ }
701
+ /**
702
+ * Update track configuration
703
+ */
704
+ handleUpdateTrack(payload) {
705
+ if (!this.mixer) {
706
+ throw {
707
+ code: "NOT_CONFIGURED",
708
+ message: "Mixer not configured"
709
+ };
710
+ }
711
+ this.mixer.updateTrack(payload.trackId, payload.config);
712
+ const buffer = this.trackBuffers.get(payload.trackId);
713
+ if (buffer) {
714
+ if (payload.config.type) {
715
+ buffer.type = payload.config.type;
716
+ }
717
+ this.applyTrackConfigPatch(buffer.config, payload.config);
718
+ }
719
+ this.channel.notify("track_updated", {
720
+ trackId: payload.trackId
721
+ });
722
+ return { success: true };
723
+ }
724
+ /**
725
+ * Configure audio ducking
726
+ */
727
+ handleConfigureDucking(config) {
728
+ if (!this.ducker) {
729
+ throw {
730
+ code: "NOT_CONFIGURED",
731
+ message: "Ducker not configured"
732
+ };
733
+ }
734
+ this.ducker.configure(config);
735
+ this.channel.notify("ducking_configured", {
736
+ enabled: config.enabled
737
+ });
738
+ return { success: true };
739
+ }
740
+ /**
741
+ * Get mixer statistics
742
+ */
743
+ async handleGetStats() {
744
+ if (!this.mixer) {
745
+ return { state: this.channel.state };
746
+ }
747
+ return {
748
+ tracks: this.mixer.tracks,
749
+ ducking: this.ducker ? {
750
+ configured: this.ducker !== null
751
+ } : null,
752
+ state: this.channel.state
753
+ };
754
+ }
755
+ /**
756
+ * Dispose worker and cleanup resources
757
+ */
758
+ async handleDispose() {
759
+ this.mixer = null;
760
+ this.ducker = null;
761
+ this.mixStream = null;
762
+ this.decoderPort?.close();
763
+ this.decoderPort = null;
764
+ this.encoderPort?.close();
765
+ this.encoderPort = null;
766
+ this.channel.state = WorkerState.Disposed;
767
+ return { success: true };
768
+ }
769
+ async attachEncodeStream(metadata) {
770
+ if (!this.mixStream || !this.encoderPort || this.encoderStreamAttached || !this.mixer) {
771
+ return;
772
+ }
773
+ const encoderChannel = new WorkerChannel(this.encoderPort, {
774
+ name: "AudioCompose-Encoder",
775
+ timeout: 3e4
776
+ });
777
+ const mixerConfig = this.mixer?.getConfig();
778
+ const streamMetadata = {
779
+ ...metadata,
780
+ streamType: "audio",
781
+ sampleRate: mixerConfig?.sampleRate ?? metadata?.sampleRate,
782
+ numberOfChannels: mixerConfig?.numberOfChannels ?? metadata?.numberOfChannels
783
+ };
784
+ const [encoderResultStream, previewResultStream] = this.mixStream.readable.tee();
785
+ const createAudioDataStream = (source) => {
786
+ return new ReadableStream({
787
+ start: (controller) => {
788
+ const reader = source.getReader();
789
+ const pump = async () => {
790
+ const { done, value } = await reader.read();
791
+ if (done) {
792
+ reader.releaseLock();
793
+ controller.close();
794
+ return;
795
+ }
796
+ try {
797
+ controller.enqueue(value.audioData);
798
+ } catch (error) {
799
+ controller.error(error);
800
+ reader.releaseLock();
801
+ return;
802
+ }
803
+ await pump();
804
+ };
805
+ pump().catch((error) => {
806
+ reader.releaseLock();
807
+ controller.error(error);
808
+ });
809
+ }
810
+ });
811
+ };
812
+ const encoderStream = createAudioDataStream(encoderResultStream);
813
+ const previewStream = createAudioDataStream(previewResultStream);
814
+ await encoderChannel.sendStream(encoderStream, streamMetadata);
815
+ this.channel.sendStream(previewStream, streamMetadata);
816
+ this.encoderStreamAttached = true;
817
+ await this.scheduleMix();
818
+ }
819
/**
 * Pump a track's AudioData stream into its bounded in-memory queue.
 * Applies back-pressure via waitForQueueSpace once maxQueuedSegments is
 * reached. Runs detached: the returned promise resolves after setup, not
 * after the stream ends.
 * @param trackId - buffer key; the stream is cancelled if the track is unknown
 * @param metadata - used for the sample-rate fallback when frames lack duration
 */
async bufferTrackStream(trackId, stream, metadata) {
  const buffer = this.trackBuffers.get(trackId);
  if (!buffer) {
    await stream.cancel();
    return;
  }
  const reader = stream.getReader();
  const process = async () => {
    while (true) {
      // Block before reading so at most maxQueuedSegments are held per track.
      if (buffer.queue.length >= this.maxQueuedSegments) {
        await this.waitForQueueSpace(trackId);
      }
      const { done, value } = await reader.read();
      if (done) {
        buffer.ended = true;
        reader.releaseLock();
        return;
      }
      buffer.queue.push({
        audioData: value,
        timestampUs: value.timestamp ?? 0,
        // Fallback duration: frame count over sample rate (48 kHz default), in µs.
        durationUs: value.duration ?? Math.round(value.numberOfFrames / (metadata.sampleRate ?? 48e3) * 1e6)
      });
      this.scheduleMix();
    }
  };
  // Detached pump: errors mark the track as ended instead of propagating.
  process().catch((error) => {
    buffer.ended = true;
    reader.releaseLock();
    console.error("[AudioComposeWorker] Track stream error:", error);
  });
}
851
/**
 * Drive one mix iteration: pick the next time window, build a mix request
 * from buffered segments, and write it into the mix stream.
 * Guarded by `this.mixing` so only one iteration runs at a time; re-arms
 * itself via microtask while any track still has buffered audio.
 */
async scheduleMix() {
  // Bail out unless the pipeline is fully wired and no mix is in flight.
  if (this.mixing || !this.mixStream || !this.encoderStreamAttached || !this.mixer) {
    return;
  }
  const window = this.computeNextWindow();
  if (!window) {
    return;
  }
  this.mixing = true;
  try {
    const request = this.buildMixRequest(window.timeUs, window.durationUs);
    if (!request) {
      return;
    }
    const writer = this.mixStream.writable.getWriter();
    try {
      await writer.write(request);
    } finally {
      // Always release so the next iteration can re-acquire the writer.
      writer.releaseLock();
    }
  } catch (error) {
    console.error("[AudioComposeWorker] Failed to enqueue mix request:", error);
  } finally {
    this.mixing = false;
    // Keep draining without growing the call stack.
    if (this.hasBufferedAudio()) {
      queueMicrotask(() => {
        void this.scheduleMix();
      });
    }
  }
}
882
+ computeNextWindow() {
883
+ let earliest = null;
884
+ for (const buffer of this.trackBuffers.values()) {
885
+ if (buffer.queue.length === 0) {
886
+ continue;
887
+ }
888
+ const firstFrame = buffer.queue[0];
889
+ if (!firstFrame) {
890
+ continue;
891
+ }
892
+ const ts = firstFrame.timestampUs;
893
+ if (earliest === null || ts < earliest) {
894
+ earliest = ts;
895
+ }
896
+ }
897
+ if (earliest === null) {
898
+ return null;
899
+ }
900
+ return {
901
+ timeUs: earliest,
902
+ durationUs: this.mixWindowUs
903
+ };
904
+ }
905
+ buildMixRequest(timeUs, _durationUs) {
906
+ if (!this.mixer) {
907
+ return null;
908
+ }
909
+ const tracks = [];
910
+ let resolvedDurationUs = this.mixWindowUs;
911
+ for (const [trackId, buffer] of this.trackBuffers.entries()) {
912
+ const segment = this.consumeSegment(trackId, buffer);
913
+ if (!segment) {
914
+ continue;
915
+ }
916
+ tracks.push({
917
+ trackId,
918
+ clipId: buffer.clipId,
919
+ audioData: segment.audioData,
920
+ config: segment.config,
921
+ type: buffer.type,
922
+ sampleRate: this.mixer.config.sampleRate,
923
+ numberOfChannels: this.mixer.config.numberOfChannels
924
+ });
925
+ resolvedDurationUs = Math.min(resolvedDurationUs, segment.durationUs ?? this.mixWindowUs);
926
+ }
927
+ if (tracks.length === 0) {
928
+ return null;
929
+ }
930
+ return {
931
+ tracks,
932
+ timeUs,
933
+ durationUs: resolvedDurationUs
934
+ };
935
+ }
936
+ consumeSegment(trackId, buffer) {
937
+ if (buffer.queue.length === 0) {
938
+ return null;
939
+ }
940
+ const head = buffer.queue[0];
941
+ if (!head) {
942
+ return null;
943
+ }
944
+ const queueItem = buffer.queue.shift();
945
+ if (!queueItem) {
946
+ return null;
947
+ }
948
+ this.resolveQueueWaiter(trackId);
949
+ return {
950
+ audioData: queueItem.audioData,
951
+ config: buffer.config,
952
+ sampleRate: this.mixer.config.sampleRate,
953
+ numberOfChannels: this.mixer.config.numberOfChannels,
954
+ durationUs: queueItem.durationUs
955
+ };
956
+ }
957
+ hasBufferedAudio() {
958
+ for (const buffer of this.trackBuffers.values()) {
959
+ if (buffer.queue.length > 0) {
960
+ return true;
961
+ }
962
+ }
963
+ return false;
964
+ }
965
+ cloneTrackConfig(config) {
966
+ return {
967
+ startTimeUs: config.startTimeUs,
968
+ durationUs: config.durationUs,
969
+ volume: config.volume,
970
+ fadeIn: config.fadeIn ? { ...config.fadeIn } : void 0,
971
+ fadeOut: config.fadeOut ? { ...config.fadeOut } : void 0,
972
+ effects: config.effects ? config.effects.map((effect) => ({
973
+ type: effect.type,
974
+ params: effect.params ?? {}
975
+ })) : void 0,
976
+ duckingTag: config.duckingTag
977
+ };
978
+ }
979
+ applyTrackConfigPatch(target, patch) {
980
+ if (patch.fadeIn) {
981
+ target.fadeIn = { ...patch.fadeIn };
982
+ } else if (patch.fadeIn === null) {
983
+ target.fadeIn = void 0;
984
+ }
985
+ if (patch.fadeOut) {
986
+ target.fadeOut = { ...patch.fadeOut };
987
+ } else if (patch.fadeOut === null) {
988
+ target.fadeOut = void 0;
989
+ }
990
+ if (patch.effects) {
991
+ target.effects = patch.effects.map((effect) => ({
992
+ type: effect.type,
993
+ params: effect.params ?? {}
994
+ }));
995
+ }
996
+ if (typeof patch.volume === "number") {
997
+ target.volume = patch.volume;
998
+ }
999
+ if (patch.startTimeUs !== void 0) {
1000
+ target.startTimeUs = patch.startTimeUs;
1001
+ }
1002
+ if (patch.durationUs !== void 0) {
1003
+ target.durationUs = patch.durationUs;
1004
+ }
1005
+ if (patch.duckingTag !== void 0) {
1006
+ target.duckingTag = patch.duckingTag;
1007
+ }
1008
+ }
1009
+ async waitForQueueSpace(trackId) {
1010
+ const existing = this.trackQueueWaiters.get(trackId);
1011
+ if (existing && existing.length === 0) {
1012
+ this.trackQueueWaiters.delete(trackId);
1013
+ }
1014
+ await new Promise((resolve) => {
1015
+ const waiters = this.trackQueueWaiters.get(trackId);
1016
+ if (waiters) {
1017
+ waiters.push(resolve);
1018
+ } else {
1019
+ this.trackQueueWaiters.set(trackId, [resolve]);
1020
+ }
1021
+ });
1022
+ }
1023
+ resolveQueueWaiter(trackId) {
1024
+ const waiters = this.trackQueueWaiters.get(trackId);
1025
+ if (!waiters || waiters.length === 0) {
1026
+ return;
1027
+ }
1028
+ const resolve = waiters.shift();
1029
+ resolve?.();
1030
+ if (!waiters.length) {
1031
+ this.trackQueueWaiters.delete(trackId);
1032
+ }
1033
+ }
1034
+ disposeTrackBuffer(trackId) {
1035
+ const buffer = this.trackBuffers.get(trackId);
1036
+ if (!buffer) {
1037
+ return;
1038
+ }
1039
+ for (const item of buffer.queue) {
1040
+ try {
1041
+ item.audioData.close();
1042
+ } catch (error) {
1043
+ console.warn("[AudioComposeWorker] Failed to close AudioData on disposal", error);
1044
+ }
1045
+ }
1046
+ this.trackBuffers.delete(trackId);
1047
+ const waiters = this.trackQueueWaiters.get(trackId);
1048
+ if (waiters) {
1049
+ waiters.forEach((resolve) => resolve());
1050
+ this.trackQueueWaiters.delete(trackId);
1051
+ }
1052
+ }
1053
+ }
1054
+ const worker = new AudioComposeWorker();
1055
+ self.addEventListener("beforeunload", () => {
1056
+ worker["handleDispose"]();
1057
+ });
1058
+ const audioCompose_worker = null;
1059
+ export {
1060
+ AudioComposeWorker,
1061
+ audioCompose_worker as default
1062
+ };
1063
+ //# sourceMappingURL=audio-compose.worker.js.map