@meframe/core 0.0.2 → 0.0.3

This diff shows the changes between publicly released versions of the package as published to a supported registry. It is provided for informational purposes only.
Files changed (137)
  1. package/dist/Meframe.d.ts.map +1 -1
  2. package/dist/Meframe.js +2 -1
  3. package/dist/Meframe.js.map +1 -1
  4. package/dist/config/defaults.d.ts.map +1 -1
  5. package/dist/config/defaults.js +2 -1
  6. package/dist/config/defaults.js.map +1 -1
  7. package/dist/config/types.d.ts +3 -0
  8. package/dist/config/types.d.ts.map +1 -1
  9. package/dist/orchestrator/Orchestrator.d.ts.map +1 -1
  10. package/dist/orchestrator/Orchestrator.js +2 -1
  11. package/dist/orchestrator/Orchestrator.js.map +1 -1
  12. package/dist/orchestrator/types.d.ts +1 -0
  13. package/dist/orchestrator/types.d.ts.map +1 -1
  14. package/dist/stages/compose/types.d.ts +2 -1
  15. package/dist/stages/compose/types.d.ts.map +1 -1
  16. package/dist/stages/demux/MP4Demuxer.d.ts +0 -1
  17. package/dist/stages/demux/MP4Demuxer.d.ts.map +1 -1
  18. package/dist/utils/time-utils.d.ts +3 -2
  19. package/dist/utils/time-utils.d.ts.map +1 -1
  20. package/dist/utils/time-utils.js +2 -1
  21. package/dist/utils/time-utils.js.map +1 -1
  22. package/dist/vite-plugin.d.ts +5 -3
  23. package/dist/vite-plugin.d.ts.map +1 -1
  24. package/dist/vite-plugin.js +109 -52
  25. package/dist/vite-plugin.js.map +1 -1
  26. package/dist/worker/WorkerPool.d.ts +7 -0
  27. package/dist/worker/WorkerPool.d.ts.map +1 -1
  28. package/dist/worker/WorkerPool.js +29 -5
  29. package/dist/worker/WorkerPool.js.map +1 -1
  30. package/dist/{stages/demux → workers}/MP4Demuxer.js +4 -13
  31. package/dist/workers/MP4Demuxer.js.map +1 -0
  32. package/dist/workers/WorkerChannel.js +486 -0
  33. package/dist/workers/WorkerChannel.js.map +1 -0
  34. package/dist/{assets/video-demux.worker-D019I7GQ.js → workers/mp4box.all.js} +4 -912
  35. package/dist/workers/mp4box.all.js.map +1 -0
  36. package/dist/{assets/audio-compose.worker-nGVvHD5Q.js → workers/stages/compose/audio-compose.worker.js} +7 -481
  37. package/dist/workers/stages/compose/audio-compose.worker.js.map +1 -0
  38. package/dist/{assets/video-compose.worker-DPzsC21d.js → workers/stages/compose/video-compose.worker.js} +7 -481
  39. package/dist/workers/stages/compose/video-compose.worker.js.map +1 -0
  40. package/dist/{assets/decode.worker-DpWHsc7R.js → workers/stages/decode/decode.worker.js} +7 -481
  41. package/dist/workers/stages/decode/decode.worker.js.map +1 -0
  42. package/dist/{stages → workers/stages}/demux/audio-demux.worker.js +184 -4
  43. package/dist/workers/stages/demux/audio-demux.worker.js.map +1 -0
  44. package/dist/{stages → workers/stages}/demux/video-demux.worker.js +2 -3
  45. package/dist/workers/stages/demux/video-demux.worker.js.map +1 -0
  46. package/dist/{stages → workers/stages}/encode/encode.worker.js +238 -4
  47. package/dist/workers/stages/encode/encode.worker.js.map +1 -0
  48. package/dist/{stages/mux/MP4Muxer.js → workers/stages/mux/mux.worker.js} +244 -5
  49. package/dist/workers/stages/mux/mux.worker.js.map +1 -0
  50. package/package.json +21 -21
  51. package/dist/assets/audio-compose.worker-nGVvHD5Q.js.map +0 -1
  52. package/dist/assets/audio-demux.worker-xwWBtbAe.js +0 -8299
  53. package/dist/assets/audio-demux.worker-xwWBtbAe.js.map +0 -1
  54. package/dist/assets/decode.worker-DpWHsc7R.js.map +0 -1
  55. package/dist/assets/encode.worker-nfOb3kw6.js +0 -1026
  56. package/dist/assets/encode.worker-nfOb3kw6.js.map +0 -1
  57. package/dist/assets/mux.worker-uEMQY066.js +0 -8019
  58. package/dist/assets/mux.worker-uEMQY066.js.map +0 -1
  59. package/dist/assets/video-compose.worker-DPzsC21d.js.map +0 -1
  60. package/dist/assets/video-demux.worker-D019I7GQ.js.map +0 -1
  61. package/dist/model/types.js +0 -5
  62. package/dist/model/types.js.map +0 -1
  63. package/dist/plugins/BackpressureMonitor.js +0 -62
  64. package/dist/plugins/BackpressureMonitor.js.map +0 -1
  65. package/dist/stages/compose/AudioDucker.js +0 -161
  66. package/dist/stages/compose/AudioDucker.js.map +0 -1
  67. package/dist/stages/compose/AudioMixer.js +0 -373
  68. package/dist/stages/compose/AudioMixer.js.map +0 -1
  69. package/dist/stages/compose/FilterProcessor.js +0 -226
  70. package/dist/stages/compose/FilterProcessor.js.map +0 -1
  71. package/dist/stages/compose/LayerRenderer.js +0 -215
  72. package/dist/stages/compose/LayerRenderer.js.map +0 -1
  73. package/dist/stages/compose/TransitionProcessor.js +0 -189
  74. package/dist/stages/compose/TransitionProcessor.js.map +0 -1
  75. package/dist/stages/compose/VideoComposer.js +0 -186
  76. package/dist/stages/compose/VideoComposer.js.map +0 -1
  77. package/dist/stages/compose/audio-compose.worker.d.ts +0 -79
  78. package/dist/stages/compose/audio-compose.worker.d.ts.map +0 -1
  79. package/dist/stages/compose/audio-compose.worker.js +0 -540
  80. package/dist/stages/compose/audio-compose.worker.js.map +0 -1
  81. package/dist/stages/compose/audio-compose.worker2.js +0 -5
  82. package/dist/stages/compose/audio-compose.worker2.js.map +0 -1
  83. package/dist/stages/compose/video-compose.worker.d.ts +0 -60
  84. package/dist/stages/compose/video-compose.worker.d.ts.map +0 -1
  85. package/dist/stages/compose/video-compose.worker.js +0 -379
  86. package/dist/stages/compose/video-compose.worker.js.map +0 -1
  87. package/dist/stages/compose/video-compose.worker2.js +0 -5
  88. package/dist/stages/compose/video-compose.worker2.js.map +0 -1
  89. package/dist/stages/decode/AudioChunkDecoder.js +0 -82
  90. package/dist/stages/decode/AudioChunkDecoder.js.map +0 -1
  91. package/dist/stages/decode/BaseDecoder.js +0 -130
  92. package/dist/stages/decode/BaseDecoder.js.map +0 -1
  93. package/dist/stages/decode/VideoChunkDecoder.js +0 -199
  94. package/dist/stages/decode/VideoChunkDecoder.js.map +0 -1
  95. package/dist/stages/decode/decode.worker.d.ts +0 -70
  96. package/dist/stages/decode/decode.worker.d.ts.map +0 -1
  97. package/dist/stages/decode/decode.worker.js +0 -423
  98. package/dist/stages/decode/decode.worker.js.map +0 -1
  99. package/dist/stages/decode/decode.worker2.js +0 -5
  100. package/dist/stages/decode/decode.worker2.js.map +0 -1
  101. package/dist/stages/demux/MP3FrameParser.js +0 -186
  102. package/dist/stages/demux/MP3FrameParser.js.map +0 -1
  103. package/dist/stages/demux/MP4Demuxer.js.map +0 -1
  104. package/dist/stages/demux/audio-demux.worker.d.ts +0 -51
  105. package/dist/stages/demux/audio-demux.worker.d.ts.map +0 -1
  106. package/dist/stages/demux/audio-demux.worker.js.map +0 -1
  107. package/dist/stages/demux/audio-demux.worker2.js +0 -5
  108. package/dist/stages/demux/audio-demux.worker2.js.map +0 -1
  109. package/dist/stages/demux/video-demux.worker.d.ts +0 -51
  110. package/dist/stages/demux/video-demux.worker.d.ts.map +0 -1
  111. package/dist/stages/demux/video-demux.worker.js.map +0 -1
  112. package/dist/stages/demux/video-demux.worker2.js +0 -5
  113. package/dist/stages/demux/video-demux.worker2.js.map +0 -1
  114. package/dist/stages/encode/AudioChunkEncoder.js +0 -37
  115. package/dist/stages/encode/AudioChunkEncoder.js.map +0 -1
  116. package/dist/stages/encode/BaseEncoder.js +0 -164
  117. package/dist/stages/encode/BaseEncoder.js.map +0 -1
  118. package/dist/stages/encode/VideoChunkEncoder.js +0 -50
  119. package/dist/stages/encode/VideoChunkEncoder.js.map +0 -1
  120. package/dist/stages/encode/encode.worker.d.ts +0 -3
  121. package/dist/stages/encode/encode.worker.d.ts.map +0 -1
  122. package/dist/stages/encode/encode.worker.js.map +0 -1
  123. package/dist/stages/encode/encode.worker2.js +0 -5
  124. package/dist/stages/encode/encode.worker2.js.map +0 -1
  125. package/dist/stages/mux/MP4Muxer.js.map +0 -1
  126. package/dist/stages/mux/mux.worker.d.ts +0 -65
  127. package/dist/stages/mux/mux.worker.d.ts.map +0 -1
  128. package/dist/stages/mux/mux.worker.js +0 -219
  129. package/dist/stages/mux/mux.worker.js.map +0 -1
  130. package/dist/stages/mux/mux.worker2.js +0 -5
  131. package/dist/stages/mux/mux.worker2.js.map +0 -1
  132. package/dist/stages/mux/utils.js +0 -34
  133. package/dist/stages/mux/utils.js.map +0 -1
  134. package/dist/worker/worker-registry.d.ts +0 -12
  135. package/dist/worker/worker-registry.d.ts.map +0 -1
  136. package/dist/worker/worker-registry.js +0 -20
  137. package/dist/worker/worker-registry.js.map +0 -1
package/dist/model/types.js
@@ -1,5 +0,0 @@
- const MICROSECONDS_PER_SECOND = 1e6;
- export {
- MICROSECONDS_PER_SECOND
- };
- //# sourceMappingURL=types.js.map
package/dist/model/types.js.map
@@ -1 +0,0 @@
- {"version":3,"file":"types.js","sources":["../../src/model/types.ts"],"sourcesContent":["// All time values in microseconds (µs)\nexport type TimeUs = number; // 1 second = 1_000_000 µs\n\n// Helper constants\nexport const MICROSECONDS_PER_SECOND = 1_000_000;\nexport const MICROSECONDS_PER_MILLISECOND = 1_000;\n\n// ────── Root Object ──────\nexport interface CompositionModelData {\n version: '1.0';\n fps: 24 | 25 | 30 | 60;\n durationUs: TimeUs;\n\n tracks: Track[];\n resources: Record<string, Resource>;\n\n renderConfig?: RenderConfig;\n\n ext?: Record<string, unknown>;\n}\n\nexport interface RenderConfig {\n width: number;\n height: number;\n backgroundColor?: string;\n}\n\n// ────── Track ──────\nexport interface Track {\n id: string;\n kind: 'video' | 'audio' | 'caption' | 'fx';\n clips: Clip[];\n\n effects?: Effect[];\n duckingRules?: DuckingRule[];\n}\n\n// ────── Clip ──────\nexport interface Clip {\n id: string;\n resourceId: string;\n startUs: TimeUs;\n durationUs: TimeUs;\n trackId?: string;\n trackKind?: 'video' | 'audio' | 'caption' | 'fx';\n\n trimStartUs?: TimeUs;\n trimEndUs?: TimeUs;\n\n effects?: Effect[];\n attachments?: Attachment[];\n\n transitionIn?: Transition;\n transitionOut?: Transition;\n}\n\n// ────── Resource ──────\nexport interface Resource {\n id: string;\n type: 'video' | 'image' | 'audio' | 'json' | string;\n uri: string;\n metadata?: Record<string, unknown>;\n clipIds?: string[];\n // Runtime state maintained by engine\n state?: 'pending' | 'loading' | 'ready' | 'error';\n}\n\n// ────── Common Structures ──────\nexport interface Effect {\n id: string;\n effectType: 'filter' | 'lut' | 'animation' | string;\n params?: Record<string, unknown>;\n}\n\nexport interface Transition {\n id: string;\n transitionType: 'fade' | 'wipe' | 'slide' | string;\n durationUs: TimeUs;\n curve?: 'linear' | 'ease-in' | 'ease-out' | string;\n params?: Record<string, unknown>;\n}\n\nexport interface Attachment {\n id: string;\n kind: 'caption' | 'sticker' | 'mask' | string;\n startUs: TimeUs;\n durationUs: TimeUs;\n data: Record<string, unknown>;\n}\n\nexport interface DuckingRule {\n targetTrackKind: 'voice' | 'audio' | string;\n ratio: number;\n attackMs: number;\n releaseMs: number;\n}\n\n// ────── Patch System ──────\nexport interface CompositionPatch {\n operations: PatchOperation[];\n metadata?: {\n timestamp: number;\n source?: string;\n version?: string;\n };\n}\n\nexport type PatchOperation =\n | TrackOperation\n | ClipOperation\n | ResourceOperation\n | AttachmentOperation\n | TransitionOperation\n | EffectOperation\n | RenderConfigOperation;\n\n// Track operations\nexport interface TrackOperation {\n type: 'addTrack' | 'updateTrack' | 'removeTrack';\n trackId?: string;\n track?: Partial<Track>;\n}\n\n// Clip operations\nexport interface ClipOperation {\n type: 'addClip' | 'updateClip' | 'removeClip' | 'moveClip';\n trackId: string;\n clipId?: string;\n clip?: Partial<Clip>;\n targetTrackId?: string;\n targetStartUs?: TimeUs;\n}\n\n// Resource operations\nexport interface ResourceOperation {\n type: 'addResource' | 'updateResource' | 'removeResource';\n resourceId: string;\n resource?: Partial<Resource>;\n}\n\n// Attachment operations\nexport interface AttachmentOperation {\n type: 'addAttachment' | 'updateAttachment' | 'removeAttachment';\n trackId: string;\n clipId: string;\n attachmentId?: string;\n attachment?: Partial<Attachment>;\n}\n\n// Transition operations\nexport interface TransitionOperation {\n type: 'addTransition' | 'updateTransition' | 
'removeTransition';\n trackId: string;\n clipId: string;\n position: 'in' | 'out';\n transition?: Partial<Transition>;\n}\n\n// Render config operations\nexport interface RenderConfigOperation {\n type: 'updateRenderConfig';\n renderConfig?: Partial<RenderConfig>;\n}\n\n// Effect operations\nexport interface EffectOperation {\n type: 'addEffect' | 'updateEffect' | 'removeEffect';\n targetType: 'track' | 'clip';\n targetId: string;\n effectId?: string;\n effect?: Partial<Effect>;\n}\n\n// ────── Dirty Range ──────\nexport interface DirtyRange {\n trackId: string;\n startUs: TimeUs;\n endUs: TimeUs;\n reason: string;\n}\n\n// ────── Validation ──────\nexport interface ValidationError {\n path: string;\n message: string;\n value: any;\n}\n"],"names":[],"mappings":"AAIO,MAAM,0BAA0B;"}
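
The removed model/types.js only carried the MICROSECONDS_PER_SECOND constant at runtime; the source map above still contains the full TypeScript composition-model types. A minimal sketch of a CompositionModelData value built against those interfaces (the import path and all IDs/URIs here are illustrative assumptions):

import type { CompositionModelData } from '@meframe/core'; // assumed export path

const model: CompositionModelData = {
  version: '1.0',
  fps: 30,
  durationUs: 5_000_000, // 5 s expressed in microseconds
  tracks: [
    {
      id: 'track-1',
      kind: 'video',
      clips: [
        { id: 'clip-1', resourceId: 'res-1', startUs: 0, durationUs: 5_000_000 },
      ],
    },
  ],
  resources: {
    'res-1': { id: 'res-1', type: 'video', uri: 'https://example.com/input.mp4' },
  },
  renderConfig: { width: 1920, height: 1080 },
};
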
package/dist/plugins/BackpressureMonitor.js
@@ -1,62 +0,0 @@
- class BackpressureMonitor {
- metrics = /* @__PURE__ */ new Map();
- /**
- * Update metrics for a stage
- */
- updateMetrics(stage, desiredSize, queueSize = 0) {
- const isPaused = desiredSize <= 0;
- this.metrics.set(stage, {
- desiredSize,
- queueSize,
- isPaused,
- lastUpdate: Date.now()
- });
- }
- /**
- * Get current metrics snapshot
- */
- getSnapshot() {
- const now = Date.now();
- const snapshot = {};
- for (const [stage, metrics] of this.metrics) {
- snapshot[stage] = {
- ...metrics,
- age: now - metrics.lastUpdate
- };
- }
- return snapshot;
- }
- /**
- * Check if any stage is experiencing backpressure
- */
- hasBackpressure() {
- for (const metrics of this.metrics.values()) {
- if (metrics.isPaused) {
- return true;
- }
- }
- return false;
- }
- /**
- * Get stages currently experiencing backpressure
- */
- getBottlenecks() {
- const bottlenecks = [];
- for (const [stage, metrics] of this.metrics) {
- if (metrics.isPaused) {
- bottlenecks.push(stage);
- }
- }
- return bottlenecks;
- }
- /**
- * Clear all metrics
- */
- clear() {
- this.metrics.clear();
- }
- }
- export {
- BackpressureMonitor
- };
- //# sourceMappingURL=BackpressureMonitor.js.map
package/dist/plugins/BackpressureMonitor.js.map
@@ -1 +0,0 @@
- {"version":3,"file":"BackpressureMonitor.js","sources":["../../src/plugins/BackpressureMonitor.ts"],"sourcesContent":["/**\n * Monitor and report backpressure status across pipeline stages\n * This is a runtime monitoring tool used by plugins\n */\nexport class BackpressureMonitor {\n private metrics = new Map<\n string,\n {\n desiredSize: number;\n queueSize: number;\n isPaused: boolean;\n lastUpdate: number;\n }\n >();\n\n /**\n * Update metrics for a stage\n */\n updateMetrics(stage: string, desiredSize: number, queueSize: number = 0): void {\n const isPaused = desiredSize <= 0;\n this.metrics.set(stage, {\n desiredSize,\n queueSize,\n isPaused,\n lastUpdate: Date.now(),\n });\n }\n\n /**\n * Get current metrics snapshot\n */\n getSnapshot(): Record<\n string,\n {\n desiredSize: number;\n queueSize: number;\n isPaused: boolean;\n age: number;\n }\n > {\n const now = Date.now();\n const snapshot: Record<string, any> = {};\n\n for (const [stage, metrics] of this.metrics) {\n snapshot[stage] = {\n ...metrics,\n age: now - metrics.lastUpdate,\n };\n }\n\n return snapshot;\n }\n\n /**\n * Check if any stage is experiencing backpressure\n */\n hasBackpressure(): boolean {\n for (const metrics of this.metrics.values()) {\n if (metrics.isPaused) {\n return true;\n }\n }\n return false;\n }\n\n /**\n * Get stages currently experiencing backpressure\n */\n getBottlenecks(): string[] {\n const bottlenecks: string[] = [];\n for (const [stage, metrics] of this.metrics) {\n if (metrics.isPaused) {\n bottlenecks.push(stage);\n }\n }\n return bottlenecks;\n }\n\n /**\n * Clear all metrics\n */\n clear(): void {\n this.metrics.clear();\n }\n}\n"],"names":[],"mappings":"AAIO,MAAM,oBAAoB;AAAA,EACvB,8BAAc,IAAA;AAAA;AAAA;AAAA;AAAA,EAatB,cAAc,OAAe,aAAqB,YAAoB,GAAS;AAC7E,UAAM,WAAW,eAAe;AAChC,SAAK,QAAQ,IAAI,OAAO;AAAA,MACtB;AAAA,MACA;AAAA,MACA;AAAA,MACA,YAAY,KAAK,IAAA;AAAA,IAAI,CACtB;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKA,cAQE;AACA,UAAM,MAAM,KAAK,IAAA;AACjB,UAAM,WAAgC,CAAA;AAEtC,eAAW,CAAC,OAAO,OAAO,KAAK,KAAK,SAAS;AAC3C,eAAS,KAAK,IAAI;AAAA,QAChB,GAAG;AAAA,QACH,KAAK,MAAM,QAAQ;AAAA,MAAA;AAAA,IAEvB;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,kBAA2B;AACzB,eAAW,WAAW,KAAK,QAAQ,OAAA,GAAU;AAC3C,UAAI,QAAQ,UAAU;AACpB,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,iBAA2B;AACzB,UAAM,cAAwB,CAAA;AAC9B,eAAW,CAAC,OAAO,OAAO,KAAK,KAAK,SAAS;AAC3C,UAAI,QAAQ,UAAU;AACpB,oBAAY,KAAK,KAAK;AAAA,MACxB;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,QAAc;AACZ,SAAK,QAAQ,MAAA;AAAA,EACf;AACF;"}
package/dist/stages/compose/AudioDucker.js
@@ -1,161 +0,0 @@
- class AudioDucker {
- config = null;
- sampleRate;
- constructor(sampleRate) {
- this.sampleRate = sampleRate;
- }
- configure(config) {
- this.config = config;
- }
- /**
- * Analyze trigger tracks (voice) and generate ducking envelope
- * Returns gain values (0-1) to apply to target tracks (BGM)
- */
- async generateDuckingEnvelope(tracks, frameCount) {
- if (!this.config?.enabled) {
- return new Float32Array(frameCount).fill(1);
- }
- const envelope = new Float32Array(frameCount);
- envelope.fill(1);
- const triggerTracks = tracks.filter((t) => this.config.triggerTracks.includes(t.trackId));
- if (triggerTracks.length === 0) {
- return envelope;
- }
- for (const track of triggerTracks) {
- const voiceActivity = await this.detectVoiceActivity(track.audioData);
- this.applyDuckingToEnvelope(envelope, voiceActivity);
- }
- return envelope;
- }
- /**
- * Voice Activity Detection (VAD)
- * Simple energy-based detection with smoothing
- * More sophisticated implementations could use:
- * - Zero-crossing rate (ZCR) for speech/music discrimination
- * - Spectral centroid for voice frequency detection
- * - Machine learning models for robust VAD
- */
- async detectVoiceActivity(audioData) {
- const frameCount = audioData.numberOfFrames;
- const activity = new Float32Array(frameCount);
- const monoData = new Float32Array(frameCount);
- const channelData = new Float32Array(frameCount);
- for (let ch = 0; ch < audioData.numberOfChannels; ch++) {
- audioData.copyTo(channelData, {
- planeIndex: ch,
- format: "f32-planar"
- });
- for (let i = 0; i < frameCount; i++) {
- if (monoData && channelData) {
- monoData[i] = (monoData[i] || 0) + (channelData[i] || 0) / audioData.numberOfChannels;
- }
- }
- }
- const windowSize = Math.floor(this.sampleRate * 0.02);
- const hopSize = Math.floor(windowSize / 2);
- for (let i = 0; i < frameCount; i += hopSize) {
- const end = Math.min(i + windowSize, frameCount);
- let energy = 0;
- for (let j = i; j < end; j++) {
- if (monoData && monoData[j] !== void 0) {
- const sample = monoData[j];
- if (sample !== void 0) {
- energy += sample * sample;
- }
- }
- }
- energy = Math.sqrt(energy / (end - i));
- const threshold = 0.01;
- const isVoice = energy > threshold;
- for (let j = i; j < end; j++) {
- activity[j] = isVoice ? 1 : 0;
- }
- }
- return this.smoothActivityDetection(activity);
- }
- /**
- * Smooth voice activity detection to avoid choppy ducking
- * Uses a simple moving average filter
- */
- smoothActivityDetection(activity) {
- const smoothed = new Float32Array(activity.length);
- const smoothWindow = Math.floor(this.sampleRate * 0.05);
- for (let i = 0; i < activity.length; i++) {
- let sum = 0;
- let count = 0;
- for (let j = Math.max(0, i - smoothWindow); j <= Math.min(activity.length - 1, i + smoothWindow); j++) {
- if (activity && activity[j] !== void 0) {
- const val = activity[j];
- if (val !== void 0) {
- sum += val;
- }
- }
- count++;
- }
- smoothed[i] = sum / count;
- }
- return smoothed;
- }
- /**
- * Apply ducking based on voice activity
- * Implements attack/release envelope shaping
- */
- applyDuckingToEnvelope(envelope, voiceActivity) {
- if (!this.config) return;
- const duckingLevel = 1 - this.config.duckingLevel;
- const attackSamples = Math.floor(this.config.attackTimeMs / 1e3 * this.sampleRate);
- const releaseSamples = Math.floor(this.config.releaseTimeMs / 1e3 * this.sampleRate);
- const lookAheadSamples = this.config.lookAheadMs ? Math.floor(this.config.lookAheadMs / 1e3 * this.sampleRate) : 0;
- let currentGain = 1;
- let releaseCounter = 0;
- for (let i = 0; i < envelope.length; i++) {
- const lookAheadIndex = Math.min(i + lookAheadSamples, voiceActivity.length - 1);
- const activity = voiceActivity[lookAheadIndex];
- if (activity !== void 0 && activity > 0.5) {
- if (currentGain > duckingLevel) {
- currentGain = Math.max(duckingLevel, currentGain - (1 - duckingLevel) / attackSamples);
- } else {
- currentGain = duckingLevel;
- }
- releaseCounter = 0;
- } else if (currentGain < 1) {
- releaseCounter++;
- if (releaseCounter > releaseSamples * 0.1) {
- currentGain = Math.min(1, currentGain + (1 - duckingLevel) / releaseSamples);
- }
- }
- envelope[i] = Math.min(envelope[i] || 1, currentGain);
- }
- }
- /**
- * Apply ducking envelope to audio buffer
- * This modulates the volume over time according to the envelope
- */
- applyEnvelopeToVolume(baseVolume, envelope) {
- const result = new Float32Array(envelope.length);
- for (let i = 0; i < envelope.length; i++) {
- result[i] = baseVolume * (envelope[i] || 1);
- }
- return result;
- }
- /**
- * Calculate dynamic range to avoid over-compression
- * Returns the difference between peak and RMS levels in dB
- */
- calculateDynamicRange(envelope) {
- let peak = 0;
- let sumSquares = 0;
- for (const value of envelope) {
- peak = Math.max(peak, value);
- sumSquares += value * value;
- }
- const rms = Math.sqrt(sumSquares / envelope.length);
- const peakDb = 20 * Math.log10(peak);
- const rmsDb = 20 * Math.log10(rms);
- return peakDb - rmsDb;
- }
- }
- export {
- AudioDucker
- };
- //# sourceMappingURL=AudioDucker.js.map
package/dist/stages/compose/AudioDucker.js.map
@@ -1 +0,0 @@
- {"version":3,"file":"AudioDucker.js","sources":["../../../src/stages/compose/AudioDucker.ts"],"sourcesContent":["import type { DuckingConfig, MixRequest } from './types';\n\n/**\n * AudioDucker - Automatic volume ducking for background music\n *\n * Ducking: Audio engineering technique where the volume of one audio source\n * is automatically reduced when another audio source is present.\n *\n * Common use case: Reduce background music volume when voice/narration plays\n * to improve speech intelligibility without completely muting the music.\n *\n * Key parameters:\n * - Threshold: Level at which ducking triggers\n * - Ratio: How much to reduce volume (e.g., 3:1 means reduce by 1/3)\n * - Attack: How quickly volume reduces (typically 10-50ms)\n * - Release: How quickly volume returns to normal (typically 100-500ms)\n * - Hold: Time to maintain ducking after trigger ends\n */\nexport class AudioDucker {\n private config: DuckingConfig | null = null;\n private sampleRate: number;\n\n constructor(sampleRate: number) {\n this.sampleRate = sampleRate;\n }\n\n configure(config: DuckingConfig): void {\n this.config = config;\n }\n\n /**\n * Analyze trigger tracks (voice) and generate ducking envelope\n * Returns gain values (0-1) to apply to target tracks (BGM)\n */\n async generateDuckingEnvelope(\n tracks: MixRequest['tracks'],\n frameCount: number\n ): Promise<Float32Array> {\n if (!this.config?.enabled) {\n return new Float32Array(frameCount).fill(1.0);\n }\n\n // Initialize envelope with no ducking (gain = 1.0)\n const envelope = new Float32Array(frameCount);\n envelope.fill(1.0);\n\n // Find trigger tracks (typically voice/narration)\n const triggerTracks = tracks.filter((t) => this.config!.triggerTracks.includes(t.trackId));\n\n if (triggerTracks.length === 0) {\n return envelope;\n }\n\n // Analyze each trigger track for voice activity\n for (const track of triggerTracks) {\n const voiceActivity = await this.detectVoiceActivity(track.audioData);\n this.applyDuckingToEnvelope(envelope, voiceActivity);\n }\n\n return envelope;\n }\n\n /**\n * Voice Activity Detection (VAD)\n * Simple energy-based detection with smoothing\n * More sophisticated implementations could use:\n * - Zero-crossing rate (ZCR) for speech/music discrimination\n * - Spectral centroid for voice frequency detection\n * - Machine learning models for robust VAD\n */\n private async detectVoiceActivity(audioData: AudioData): Promise<Float32Array> {\n const frameCount = audioData.numberOfFrames;\n const activity = new Float32Array(frameCount);\n\n // Convert to mono for analysis\n const monoData = new Float32Array(frameCount);\n const channelData = new Float32Array(frameCount);\n\n for (let ch = 0; ch < audioData.numberOfChannels; ch++) {\n audioData.copyTo(channelData, {\n planeIndex: ch,\n format: 'f32-planar' as const,\n });\n\n for (let i = 0; i < frameCount; i++) {\n if (monoData && channelData) {\n monoData[i] = (monoData[i] || 0) + (channelData[i] || 0) / audioData.numberOfChannels;\n }\n }\n }\n\n // Energy calculation with windowing\n // Window size: 20ms is typical for speech analysis\n const windowSize = Math.floor(this.sampleRate * 0.02);\n const hopSize = Math.floor(windowSize / 2); // 50% overlap\n\n for (let i = 0; i < frameCount; i += hopSize) {\n const end = Math.min(i + windowSize, frameCount);\n\n // Calculate RMS energy in window\n let energy = 0;\n for (let j = i; j < end; j++) {\n if (monoData && monoData[j] !== undefined) {\n const sample = monoData[j];\n if (sample !== undefined) {\n energy += 
sample * sample;\n }\n }\n }\n energy = Math.sqrt(energy / (end - i));\n\n // Simple threshold-based VAD\n // Typical speech energy threshold: -40dB to -30dB\n const threshold = 0.01; // Approximately -40dB\n const isVoice = energy > threshold;\n\n // Fill activity array for this window\n for (let j = i; j < end; j++) {\n activity[j] = isVoice ? 1.0 : 0.0;\n }\n }\n\n // Smooth activity detection to avoid rapid changes\n return this.smoothActivityDetection(activity);\n }\n\n /**\n * Smooth voice activity detection to avoid choppy ducking\n * Uses a simple moving average filter\n */\n private smoothActivityDetection(activity: Float32Array): Float32Array {\n const smoothed = new Float32Array(activity.length);\n const smoothWindow = Math.floor(this.sampleRate * 0.05); // 50ms smoothing\n\n for (let i = 0; i < activity.length; i++) {\n let sum = 0;\n let count = 0;\n\n for (\n let j = Math.max(0, i - smoothWindow);\n j <= Math.min(activity.length - 1, i + smoothWindow);\n j++\n ) {\n if (activity && activity[j] !== undefined) {\n const val = activity[j];\n if (val !== undefined) {\n sum += val;\n }\n }\n count++;\n }\n\n smoothed[i] = sum / count;\n }\n\n return smoothed;\n }\n\n /**\n * Apply ducking based on voice activity\n * Implements attack/release envelope shaping\n */\n private applyDuckingToEnvelope(envelope: Float32Array, voiceActivity: Float32Array): void {\n if (!this.config) return;\n\n const duckingLevel = 1.0 - this.config.duckingLevel;\n const attackSamples = Math.floor((this.config.attackTimeMs / 1000) * this.sampleRate);\n const releaseSamples = Math.floor((this.config.releaseTimeMs / 1000) * this.sampleRate);\n const lookAheadSamples = this.config.lookAheadMs\n ? Math.floor((this.config.lookAheadMs / 1000) * this.sampleRate)\n : 0;\n\n let currentGain = 1.0;\n let releaseCounter = 0;\n\n for (let i = 0; i < envelope.length; i++) {\n // Look ahead for upcoming voice activity\n const lookAheadIndex = Math.min(i + lookAheadSamples, voiceActivity.length - 1);\n const activity = voiceActivity[lookAheadIndex];\n\n if (activity !== undefined && activity > 0.5) {\n // Voice detected - apply ducking with attack curve\n if (currentGain > duckingLevel) {\n // Attack phase - reduce gain\n currentGain = Math.max(duckingLevel, currentGain - (1.0 - duckingLevel) / attackSamples);\n } else {\n currentGain = duckingLevel;\n }\n releaseCounter = 0;\n } else if (currentGain < 1.0) {\n // No voice - apply release curve\n releaseCounter++;\n if (releaseCounter > releaseSamples * 0.1) {\n // Small hold time\n currentGain = Math.min(1.0, currentGain + (1.0 - duckingLevel) / releaseSamples);\n }\n }\n\n // Apply the calculated gain\n envelope[i] = Math.min(envelope[i] || 1, currentGain);\n }\n }\n\n /**\n * Apply ducking envelope to audio buffer\n * This modulates the volume over time according to the envelope\n */\n applyEnvelopeToVolume(baseVolume: number, envelope: Float32Array): Float32Array {\n const result = new Float32Array(envelope.length);\n for (let i = 0; i < envelope.length; i++) {\n result[i] = baseVolume * (envelope[i] || 1);\n }\n return result;\n }\n\n /**\n * Calculate dynamic range to avoid over-compression\n * Returns the difference between peak and RMS levels in dB\n */\n calculateDynamicRange(envelope: Float32Array): number {\n let peak = 0;\n let sumSquares = 0;\n\n for (const value of envelope) {\n peak = Math.max(peak, value);\n sumSquares += value * value;\n }\n\n const rms = Math.sqrt(sumSquares / envelope.length);\n\n // Convert to dB (20 * log10(ratio))\n const peakDb = 
20 * Math.log10(peak);\n const rmsDb = 20 * Math.log10(rms);\n\n return peakDb - rmsDb;\n }\n}\n"],"names":[],"mappings":"AAkBO,MAAM,YAAY;AAAA,EACf,SAA+B;AAAA,EAC/B;AAAA,EAER,YAAY,YAAoB;AAC9B,SAAK,aAAa;AAAA,EACpB;AAAA,EAEA,UAAU,QAA6B;AACrC,SAAK,SAAS;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,wBACJ,QACA,YACuB;AACvB,QAAI,CAAC,KAAK,QAAQ,SAAS;AACzB,aAAO,IAAI,aAAa,UAAU,EAAE,KAAK,CAAG;AAAA,IAC9C;AAGA,UAAM,WAAW,IAAI,aAAa,UAAU;AAC5C,aAAS,KAAK,CAAG;AAGjB,UAAM,gBAAgB,OAAO,OAAO,CAAC,MAAM,KAAK,OAAQ,cAAc,SAAS,EAAE,OAAO,CAAC;AAEzF,QAAI,cAAc,WAAW,GAAG;AAC9B,aAAO;AAAA,IACT;AAGA,eAAW,SAAS,eAAe;AACjC,YAAM,gBAAgB,MAAM,KAAK,oBAAoB,MAAM,SAAS;AACpE,WAAK,uBAAuB,UAAU,aAAa;AAAA,IACrD;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAc,oBAAoB,WAA6C;AAC7E,UAAM,aAAa,UAAU;AAC7B,UAAM,WAAW,IAAI,aAAa,UAAU;AAG5C,UAAM,WAAW,IAAI,aAAa,UAAU;AAC5C,UAAM,cAAc,IAAI,aAAa,UAAU;AAE/C,aAAS,KAAK,GAAG,KAAK,UAAU,kBAAkB,MAAM;AACtD,gBAAU,OAAO,aAAa;AAAA,QAC5B,YAAY;AAAA,QACZ,QAAQ;AAAA,MAAA,CACT;AAED,eAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,YAAI,YAAY,aAAa;AAC3B,mBAAS,CAAC,KAAK,SAAS,CAAC,KAAK,MAAM,YAAY,CAAC,KAAK,KAAK,UAAU;AAAA,QACvE;AAAA,MACF;AAAA,IACF;AAIA,UAAM,aAAa,KAAK,MAAM,KAAK,aAAa,IAAI;AACpD,UAAM,UAAU,KAAK,MAAM,aAAa,CAAC;AAEzC,aAAS,IAAI,GAAG,IAAI,YAAY,KAAK,SAAS;AAC5C,YAAM,MAAM,KAAK,IAAI,IAAI,YAAY,UAAU;AAG/C,UAAI,SAAS;AACb,eAAS,IAAI,GAAG,IAAI,KAAK,KAAK;AAC5B,YAAI,YAAY,SAAS,CAAC,MAAM,QAAW;AACzC,gBAAM,SAAS,SAAS,CAAC;AACzB,cAAI,WAAW,QAAW;AACxB,sBAAU,SAAS;AAAA,UACrB;AAAA,QACF;AAAA,MACF;AACA,eAAS,KAAK,KAAK,UAAU,MAAM,EAAE;AAIrC,YAAM,YAAY;AAClB,YAAM,UAAU,SAAS;AAGzB,eAAS,IAAI,GAAG,IAAI,KAAK,KAAK;AAC5B,iBAAS,CAAC,IAAI,UAAU,IAAM;AAAA,MAChC;AAAA,IACF;AAGA,WAAO,KAAK,wBAAwB,QAAQ;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,wBAAwB,UAAsC;AACpE,UAAM,WAAW,IAAI,aAAa,SAAS,MAAM;AACjD,UAAM,eAAe,KAAK,MAAM,KAAK,aAAa,IAAI;AAEtD,aAAS,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;AACxC,UAAI,MAAM;AACV,UAAI,QAAQ;AAEZ,eACM,IAAI,KAAK,IAAI,GAAG,IAAI,YAAY,GACpC,KAAK,KAAK,IAAI,SAAS,SAAS,GAAG,IAAI,YAAY,GACnD,KACA;AACA,YAAI,YAAY,SAAS,CAAC,MAAM,QAAW;AACzC,gBAAM,MAAM,SAAS,CAAC;AACtB,cAAI,QAAQ,QAAW;AACrB,mBAAO;AAAA,UACT;AAAA,QACF;AACA;AAAA,MACF;AAEA,eAAS,CAAC,IAAI,MAAM;AAAA,IACtB;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,uBAAuB,UAAwB,eAAmC;AACxF,QAAI,CAAC,KAAK,OAAQ;AAElB,UAAM,eAAe,IAAM,KAAK,OAAO;AACvC,UAAM,gBAAgB,KAAK,MAAO,KAAK,OAAO,eAAe,MAAQ,KAAK,UAAU;AACpF,UAAM,iBAAiB,KAAK,MAAO,KAAK,OAAO,gBAAgB,MAAQ,KAAK,UAAU;AACtF,UAAM,mBAAmB,KAAK,OAAO,cACjC,KAAK,MAAO,KAAK,OAAO,cAAc,MAAQ,KAAK,UAAU,IAC7D;AAEJ,QAAI,cAAc;AAClB,QAAI,iBAAiB;AAErB,aAAS,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;AAExC,YAAM,iBAAiB,KAAK,IAAI,IAAI,kBAAkB,cAAc,SAAS,CAAC;AAC9E,YAAM,WAAW,cAAc,cAAc;AAE7C,UAAI,aAAa,UAAa,WAAW,KAAK;AAE5C,YAAI,cAAc,cAAc;AAE9B,wBAAc,KAAK,IAAI,cAAc,eAAe,IAAM,gBAAgB,aAAa;AAAA,QACzF,OAAO;AACL,wBAAc;AAAA,QAChB;AACA,yBAAiB;AAAA,MACnB,WAAW,cAAc,GAAK;AAE5B;AACA,YAAI,iBAAiB,iBAAiB,KAAK;AAEzC,wBAAc,KAAK,IAAI,GAAK,eAAe,IAAM,gBAAgB,cAAc;AAAA,QACjF;AAAA,MACF;AAGA,eAAS,CAAC,IAAI,KAAK,IAAI,SAAS,CAAC,KAAK,GAAG,WAAW;AAAA,IACtD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,sBAAsB,YAAoB,UAAsC;AAC9E,UAAM,SAAS,IAAI,aAAa,SAAS,MAAM;AAC/C,aAAS,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;AACxC,aAAO,CAAC,IAAI,cAAc,SAAS,CAAC,KAAK;AAAA,IAC3C;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,sBAAsB,UAAgC;AACpD,QAAI,OAAO;AACX,QAAI,aAAa;AAEjB,eAAW,SAAS,UAAU;AAC5B,aAAO,KAAK,IAAI,MAAM,KAAK;AAC3B,oBAAc,QAAQ;AAAA,IACxB;AAEA,UAAM,MAAM,KAAK,KAAK,aAAa,SAAS,MAAM;AAGlD,UAAM,SAAS,KAAK,KAAK,MAAM,IAAI;AACnC,UAAM,QAAQ,KAAK,KAAK,MAAM,GAAG;AAEjC,WAAO,SAAS;AAAA,EAClB;AACF;"}
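
The removed AudioDucker derived a per-sample gain envelope from energy-based voice-activity detection, with attack/release shaping. A hedged sketch of how it could be driven, assuming the class from the old build is in scope, a DuckingConfig shaped like the fields the source reads (enabled, triggerTracks, duckingLevel, attackTimeMs, releaseTimeMs, lookAheadMs), and caller-supplied tracks:

// Illustrative only: config values and the track shape are assumptions, not documented API.
async function duckBackgroundMusic(
  tracks: { trackId: string; audioData: AudioData }[], // WebCodecs AudioData per track
  frameCount: number
) {
  const ducker = new AudioDucker(48_000);
  ducker.configure({
    enabled: true,
    triggerTracks: ['voice'],  // activity on these tracks triggers ducking
    duckingLevel: 0.7,         // per the source, BGM gain drops toward 1 - 0.7 = 0.3
    attackTimeMs: 20,
    releaseTimeMs: 300,
  });
  const envelope = await ducker.generateDuckingEnvelope(tracks, frameCount);
  // Per-sample gains to apply to the ducked (BGM) track.
  return ducker.applyEnvelopeToVolume(1, envelope);
}
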
package/dist/stages/compose/AudioMixer.js
@@ -1,373 +0,0 @@
- class AudioMixer {
- config;
- tracksMap = /* @__PURE__ */ new Map();
- constructor(config) {
- this.config = config;
- }
- getConfig() {
- return { ...this.config };
- }
- updateConfig(update) {
- this.config = { ...this.config, ...update };
- }
- get tracks() {
- return Array.from(this.tracksMap.values());
- }
- createMixStream(ducker) {
- return new TransformStream(
- {
- transform: async (request, controller) => {
- try {
- const frameCount = this.getFrameCount(request.durationUs);
- if (ducker && request.duckingConfig?.enabled && frameCount > 0) {
- const envelope = await ducker.generateDuckingEnvelope(request.tracks, frameCount);
- for (const track of request.tracks) {
- if (request.duckingConfig.targetTracks.includes(track.trackId)) {
- track.duckingEnvelope = ducker.applyEnvelopeToVolume(1, envelope);
- }
- }
- }
- const result = await this.mixTracks(request, frameCount);
- controller.enqueue(result);
- } catch (error) {
- controller.error(error);
- }
- }
- },
- {
- highWaterMark: 2,
- size: () => 1
- }
- );
- }
- async mixTracks(request, precomputedFrameCount) {
- const tracks = request.tracks ?? [];
- const frameCount = precomputedFrameCount ?? this.getFrameCount(request.durationUs);
- const requestedChannelCount = this.config.numberOfChannels ?? 0;
- const inferredChannelCount = tracks.reduce((max, track) => {
- const trackChannels = track?.numberOfChannels ?? track?.audioData?.numberOfChannels ?? this.config.numberOfChannels ?? 0;
- return trackChannels > max ? trackChannels : max;
- }, 0);
- const channelCount = requestedChannelCount > 0 ? requestedChannelCount : Math.max(inferredChannelCount, 1);
- const outputChannels = Array.from({ length: channelCount }, () => {
- return new Float32Array(frameCount);
- });
- for (const track of tracks) {
- if (!track) {
- continue;
- }
- const resolvedAudioData = track.audioData;
- if (!resolvedAudioData) {
- continue;
- }
- this.mixTrackIntoOutput(
- outputChannels,
- {
- ...track,
- audioData: resolvedAudioData,
- numberOfChannels: track.numberOfChannels ?? resolvedAudioData.numberOfChannels ?? this.config.numberOfChannels,
- sampleRate: track.sampleRate ?? resolvedAudioData.sampleRate ?? this.config.sampleRate
- },
- request.timeUs,
- frameCount
- );
- }
- const { peakLevel, rmsLevel } = this.limitAndMeasure(outputChannels);
- const audioData = this.createAudioData(outputChannels, request.timeUs);
- return {
- audioData,
- timeUs: request.timeUs,
- durationUs: request.durationUs,
- peakLevel,
- rmsLevel
- };
- }
- addTrack(track) {
- this.tracksMap.set(track.id, track);
- }
- removeTrack(trackId) {
- this.tracksMap.delete(trackId);
- }
- updateTrack(trackId, patch) {
- const track = this.tracksMap.get(trackId);
- if (!track) {
- return;
- }
- const { config, ...rest } = patch;
- if (config) {
- Object.assign(track.config, config);
- }
- Object.assign(track, rest);
- }
- mixTrackIntoOutput(outputChannels, track, mixStartUs, totalFrameCount) {
- if (totalFrameCount === 0) {
- track.audioData.close();
- return;
- }
- if (track.sampleRate !== this.config.sampleRate) {
- track.audioData.close();
- throw new Error("AudioMixer: sample rate mismatch");
- }
- const trackChannelCount = track.audioData.numberOfChannels ?? track.numberOfChannels ?? 0;
- if (trackChannelCount === 0) {
- track.audioData.close();
- return;
- }
- const trackChannels = this.extractChannels(track.audioData);
- if (trackChannels.length === 0) {
- track.audioData.close();
- return;
- }
- const trackFrameCount = track.audioData.numberOfFrames;
- if (trackFrameCount === 0) {
- track.audioData.close();
- return;
- }
- const timestampUs = track.audioData.timestamp ?? mixStartUs;
- const deltaUs = timestampUs - mixStartUs;
- let outputOffsetFrames = Math.round(deltaUs / 1e6 * this.config.sampleRate);
- let sourceOffsetFrames = 0;
- if (outputOffsetFrames < 0) {
- sourceOffsetFrames = Math.min(trackFrameCount, -outputOffsetFrames);
- outputOffsetFrames = 0;
- }
- if (outputOffsetFrames >= totalFrameCount) {
- track.audioData.close();
- return;
- }
- const availableFrames = Math.min(
- trackFrameCount - sourceOffsetFrames,
- totalFrameCount - outputOffsetFrames
- );
- if (availableFrames <= 0) {
- track.audioData.close();
- return;
- }
- const gains = this.buildGainEnvelope(
- track,
- availableFrames,
- outputOffsetFrames,
- sourceOffsetFrames,
- trackFrameCount
- );
- const destinationChannelCount = outputChannels.length;
- const sourceChannelCount = trackChannels.length;
- for (let channelIndex = 0; channelIndex < destinationChannelCount; channelIndex++) {
- const destination = outputChannels[channelIndex];
- const source = trackChannels[channelIndex] ?? trackChannels[sourceChannelCount - 1];
- if (!destination || !source) continue;
- for (let frameIndex = 0; frameIndex < availableFrames; frameIndex++) {
- const sample = source[sourceOffsetFrames + frameIndex] ?? 0;
- const gain = gains[frameIndex] ?? 0;
- destination[outputOffsetFrames + frameIndex] = (destination[outputOffsetFrames + frameIndex] ?? 0) + sample * gain;
- }
- }
- track.audioData.close();
- }
- buildGainEnvelope(track, length, outputOffsetFrames, sourceOffsetFrames, trackFrameCount) {
- const gains = new Float32Array(length);
- const baseVolume = typeof track.config.volume === "number" ? track.config.volume : 1;
- gains.fill(baseVolume);
- const fadeInSamples = this.getFadeSampleCount(track.config.fadeIn);
- const fadeOutSamples = this.getFadeSampleCount(track.config.fadeOut);
- const clipDurationSamples = this.getClipSampleCount(track.config.durationUs) || trackFrameCount;
- const trackStartFrame = this.computeTrackStartFrame(track);
- for (let i = 0; i < length; i++) {
- const envelopeIndex = outputOffsetFrames + i;
- const absoluteFrame = trackStartFrame + sourceOffsetFrames + i;
- let gain = baseVolume;
- if (fadeInSamples > 0 && absoluteFrame < fadeInSamples) {
- const progress = Math.min(1, absoluteFrame / fadeInSamples);
- gain *= this.getCurveValue(progress, track.config.fadeIn?.curve);
- }
- if (fadeOutSamples > 0 && clipDurationSamples > 0) {
- const fadeStart = Math.max(0, clipDurationSamples - fadeOutSamples);
- if (absoluteFrame >= fadeStart) {
- const progress = Math.min(1, (absoluteFrame - fadeStart) / fadeOutSamples);
- const remaining = Math.max(0, 1 - progress);
- gain *= this.getCurveValue(remaining, track.config.fadeOut?.curve);
- }
- }
- if (track.duckingEnvelope && envelopeIndex < track.duckingEnvelope.length && envelopeIndex >= 0) {
- gain *= track.duckingEnvelope[envelopeIndex] ?? 1;
- }
- gains[i] = gain;
- }
- return gains;
- }
- extractChannels(audioData) {
- const configuredChannels = this.config.numberOfChannels ?? 0;
- const channelCount = audioData.numberOfChannels ?? configuredChannels;
- const frameCount = audioData.numberOfFrames;
- const format = audioData.format ?? "f32";
- if (!channelCount || !frameCount) {
- return [];
- }
- const toFloat = (value) => value / 32768;
- const zeroChannels = () => Array.from(
- { length: configuredChannels || channelCount },
- () => new Float32Array(frameCount)
- );
- if (format === "f32") {
- const interleaved = new Float32Array(frameCount * channelCount);
- audioData.copyTo(interleaved, { format: "f32", planeIndex: 0 });
- const channels2 = zeroChannels();
- for (let frame = 0; frame < frameCount; frame++) {
- const offset = frame * channelCount;
- for (let channel = 0; channel < channels2.length; channel++) {
- const channelArray = channels2[channel];
- if (!channelArray) continue;
- const sourceChannel = channel < channelCount ? channel : channelCount - 1;
- channelArray[frame] = interleaved[offset + sourceChannel] ?? 0;
- }
- }
- return channels2;
- }
- if (format === "s16") {
- const interleaved = new Int16Array(frameCount * channelCount);
- audioData.copyTo(interleaved, { format: "s16", planeIndex: 0 });
- const channels2 = zeroChannels();
- for (let frame = 0; frame < frameCount; frame++) {
- const offset = frame * channelCount;
- for (let channel = 0; channel < channels2.length; channel++) {
- const channelArray = channels2[channel];
- if (!channelArray) continue;
- const sourceChannel = channel < channelCount ? channel : channelCount - 1;
- channelArray[frame] = toFloat(interleaved[offset + sourceChannel] ?? 0);
- }
- }
- return channels2;
- }
- if (format === "f32-planar") {
- const channels2 = zeroChannels();
- for (let channel = 0; channel < channels2.length; channel++) {
- const channelArray = channels2[channel];
- if (!channelArray) continue;
- const sourceChannel = channel < channelCount ? channel : channelCount - 1;
- audioData.copyTo(channelArray, { planeIndex: sourceChannel, format: "f32-planar" });
- }
- return channels2;
- }
- if (format === "s16-planar") {
- const tmp = new Int16Array(frameCount);
- const channels2 = zeroChannels();
- for (let channel = 0; channel < channels2.length; channel++) {
- const channelArray = channels2[channel];
- if (!channelArray) continue;
- const sourceChannel = channel < channelCount ? channel : channelCount - 1;
- audioData.copyTo(tmp, { planeIndex: sourceChannel, format: "s16-planar" });
- for (let i = 0; i < frameCount; i++) {
- channelArray[i] = toFloat(tmp[i] ?? 0);
- }
- }
- return channels2;
- }
- const channels = zeroChannels();
- for (let channel = 0; channel < channels.length; channel++) {
- const channelArray = channels[channel];
- if (!channelArray) continue;
- const sourceChannel = channel < channelCount ? channel : channelCount - 1;
- audioData.copyTo(channelArray, { planeIndex: sourceChannel });
- }
- return channels;
- }
- limitAndMeasure(channels) {
- let peak = 0;
- let sumSquares = 0;
- let samples = 0;
- for (const channel of channels) {
- for (let i = 0; i < channel.length; i++) {
- let sample = channel[i] ?? 0;
- if (sample > 1) {
- sample = 1;
- } else if (sample < -1) {
- sample = -1;
- }
- channel[i] = sample;
- const absSample = Math.abs(sample);
- if (absSample > peak) {
- peak = absSample;
- }
- sumSquares += sample * sample;
- samples++;
- }
- }
- const rmsLevel = samples > 0 ? Math.sqrt(sumSquares / samples) : 0;
- return {
- peakLevel: peak,
- rmsLevel
- };
- }
- createAudioData(channels, timestampUs) {
- const configuredChannels = this.config.numberOfChannels ?? 0;
- const inferredChannels = channels.length;
- const numberOfChannels = (inferredChannels > 0 ? inferredChannels : configuredChannels) || 1;
- const numberOfFrames = channels[0]?.length ?? 0;
- if (numberOfFrames === 0) {
- return new AudioData({
- format: "f32",
- sampleRate: this.config.sampleRate,
- numberOfFrames: 0,
- numberOfChannels,
- timestamp: timestampUs,
- data: new Float32Array(0)
- });
- }
- const interleaved = new Float32Array(numberOfFrames * numberOfChannels);
- for (let frame = 0; frame < numberOfFrames; frame++) {
- for (let channel = 0; channel < numberOfChannels; channel++) {
- const sourceChannel = channels[channel] ?? channels[channels.length - 1];
- interleaved[frame * numberOfChannels + channel] = sourceChannel?.[frame] ?? 0;
- }
- }
- return new AudioData({
- format: "f32",
- sampleRate: this.config.sampleRate,
- numberOfFrames,
- numberOfChannels,
- timestamp: timestampUs,
- data: interleaved
- });
- }
- getFrameCount(durationUs) {
- if (durationUs <= 0) {
- return 0;
- }
- return Math.ceil(durationUs / 1e6 * this.config.sampleRate);
- }
- getFadeSampleCount(fade) {
- if (!fade || fade.durationUs <= 0) {
- return 0;
- }
- return Math.round(fade.durationUs / 1e6 * this.config.sampleRate);
- }
- getClipSampleCount(durationUs) {
- if (!durationUs || durationUs <= 0) {
- return 0;
- }
- return Math.round(durationUs / 1e6 * this.config.sampleRate);
- }
- computeTrackStartFrame(track) {
- const audioTimestamp = track.audioData.timestamp ?? track.config.startTimeUs;
- const relativeUs = audioTimestamp - track.config.startTimeUs;
- const relativeFrames = Math.round(relativeUs / 1e6 * this.config.sampleRate);
- return relativeFrames > 0 ? relativeFrames : 0;
- }
- getCurveValue(progress, curve = "linear") {
- const clamped = Math.min(Math.max(progress, 0), 1);
- switch (curve) {
- case "exponential":
- return clamped * clamped;
- case "logarithmic":
- return Math.log10(clamped * 9 + 1);
- case "cosine":
- return (1 - Math.cos(clamped * Math.PI)) / 2;
- default:
- return clamped;
- }
- }
- }
- export {
- AudioMixer
- };
- //# sourceMappingURL=AudioMixer.js.map
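
The removed AudioMixer mixed per-track WebCodecs AudioData into a single interleaved f32 AudioData, applying volume, fades, and ducking envelopes before limiting and measuring peak/RMS. A sketch of a single mixTracks call, assuming the class from the old build is in scope; the MixRequest and per-track config shapes are inferred from the source above, not documented:

// Sketch only: track fields and config shape are assumptions based on how the removed code reads them.
const mixer = new AudioMixer({ sampleRate: 48_000, numberOfChannels: 2 });

async function mixOneWindow(audioData: AudioData) {
  // Resolves to { audioData, timeUs, durationUs, peakLevel, rmsLevel } per the removed source.
  return mixer.mixTracks({
    timeUs: 0,
    durationUs: 1_000_000, // mix a 1 s window
    tracks: [
      {
        trackId: 'bgm',
        audioData,                               // WebCodecs AudioData at 48 kHz
        sampleRate: 48_000,
        numberOfChannels: 2,
        config: { volume: 0.8, startTimeUs: 0 }, // per-track gain and timeline offset
      },
    ],
  });
}
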