@twick/browser-render 0.15.6 → 0.15.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,30 +1,35 @@
 {
   "name": "@twick/browser-render",
-  "version": "0.15.6",
+  "version": "0.15.7",
   "license": "https://github.com/ncounterspecialist/twick/blob/main/LICENSE.md",
   "description": "Browser-native video rendering for Twick using WebCodecs API",
-  "main": "./dist/index.cjs",
+  "main": "./dist/index.js",
   "module": "./dist/index.mjs",
   "types": "./dist/index.d.ts",
   "exports": {
     ".": {
       "types": "./dist/index.d.ts",
       "import": "./dist/index.mjs",
-      "require": "./dist/index.cjs"
+      "require": "./dist/index.js"
     }
   },
+  "files": [
+    "dist",
+    "public"
+  ],
   "scripts": {
-    "build": "tsup",
+    "build": "tsup && node scripts/copy-wasm.js",
     "dev": "tsup --watch",
     "test:browser": "tsx src/test-browser-render.ts",
-    "clean": "rimraf dist"
+    "clean": "rimraf dist",
+    "docs": "typedoc"
   },
   "publishConfig": {
     "access": "public"
   },
   "dependencies": {
-    "@twick/core": "^0.15.6",
-    "@twick/visualizer": "0.15.6",
+    "@twick/core": "^0.15.7",
+    "@twick/visualizer": "0.15.7",
     "mp4-wasm": "^1.0.6",
     "mp4box": "^0.5.2",
     "@ffmpeg/ffmpeg": "^0.12.10",
@@ -45,7 +50,9 @@
     "rimraf": "^5.0.5",
     "tsup": "^8.0.0",
     "tsx": "^4.7.0",
-    "typescript": "5.4.2"
+    "typescript": "5.4.2",
+    "typedoc": "^0.25.8",
+    "typedoc-plugin-markdown": "^3.17.1"
   },
   "engines": {
     "node": ">=20.0.0"
Binary file
@@ -1,217 +0,0 @@
-# Audio Implementation Guide
-
-## Overview
-
-Browser-based audio processing for Twick video rendering, matching the server's FFmpeg implementation.
-
-## Architecture
-
-### 1. **Audio Processor** (`src/audio/audio-processor.ts`)
-- Mirrors the server's `generate-audio.ts` logic
-- Uses the Web Audio API instead of FFmpeg
-- Handles:
-  - Asset tracking across frames
-  - Audio extraction from video/audio elements
-  - Playback rate adjustment
-  - Volume control
-  - Audio trimming
-  - Multi-track mixing
-
-### 2. **Service Worker** (`public/audio-worker.js`)
-- Caches media assets for offline processing
-- Handles background audio extraction
-- Reduces network requests during rendering
-
-### 3. **Audio/Video Muxer** (`src/audio/audio-video-muxer.ts`)
-- Combines video and audio tracks
-- Two approaches:
-  - **mp4box.js**: lightweight, browser-native
-  - **FFmpeg.wasm**: more robust, ~30 MB bundle
-
-## Implementation Steps
-
-### Step 1: Install Dependencies
-
-```bash
-# For the mp4box.js approach (recommended)
-npm install mp4box
-
-# OR for the FFmpeg.wasm approach (more features, larger bundle)
-npm install @ffmpeg/ffmpeg @ffmpeg/util
-```
-
-### Step 2: Register the Service Worker
-
-```typescript
-// In your app's initialization
-if ('serviceWorker' in navigator) {
-  navigator.serviceWorker.register('/audio-worker.js')
-    .then(reg => console.log('Audio worker registered'))
-    .catch(err => console.error('Audio worker failed:', err));
-}
-```
-
-### Step 3: Enable Audio in the Browser Renderer
-
-Update `browser-renderer.ts`:
-
-```typescript
-import { BrowserAudioProcessor, getAssetPlacement } from './audio/audio-processor';
-import { muxAudioVideo } from './audio/audio-video-muxer';
-
-// In BrowserWasmExporter.generateAudio():
-public async generateAudio(
-  assets: any[][],
-  startFrame: number,
-  endFrame: number,
-): Promise<ArrayBuffer | null> {
-  const processor = new BrowserAudioProcessor();
-  const assetPlacements = getAssetPlacement(assets);
-
-  const processedBuffers: AudioBuffer[] = [];
-  for (const asset of assetPlacements) {
-    if (asset.volume > 0 && asset.playbackRate > 0) {
-      const buffer = await processor.processAudioAsset(
-        asset,
-        this.settings.fps || 30,
-        endFrame - startFrame
-      );
-      processedBuffers.push(buffer);
-    }
-  }
-
-  const mixedBuffer = processor.mixAudioBuffers(processedBuffers);
-  const wavData = processor.audioBufferToWav(mixedBuffer);
-
-  await processor.close();
-  return wavData;
-}
-```
-
-### Step 4: Collect Audio Assets During Rendering
-
-In `renderTwickVideoInBrowser()`:
-
-```typescript
-// Track media assets for each frame
-const mediaAssets: AssetInfo[][] = [];
-
-for (let frame = 0; frame < totalFrames; frame++) {
-  // ... existing rendering code ...
-
-  // Collect media assets from the current scene
-  const currentAssets = (renderer as any).playback.currentScene.getMediaAssets?.() || [];
-  mediaAssets.push(currentAssets);
-}
-
-// Generate audio after video rendering
-const audioData = await exporter.generateAudio(mediaAssets, 0, totalFrames);
-
-// Mux audio and video
-if (audioData) {
-  const finalBlob = await muxAudioVideo({
-    videoBlob,
-    audioBuffer: audioData,
-    fps,
-    width,
-    height
-  });
-  return finalBlob;
-}
-```
-
-## API Parity with the Server
-
-| Feature          | Server (FFmpeg)       | Browser (Web Audio)     | Status           |
-|------------------|-----------------------|-------------------------|------------------|
-| Asset tracking   | `getAssetPlacement()` | `getAssetPlacement()`   | ✅ Ready         |
-| Audio extraction | FFmpeg decode         | `decodeAudioData()`     | ✅ Ready         |
-| Playback rate    | `atempo` filter       | Sample interpolation    | ✅ Ready         |
-| Volume           | `volume` filter       | Gain multiplication     | ✅ Ready         |
-| Trimming         | `atrim` filter        | Sample slicing          | ✅ Ready         |
-| Mixing           | `amix` filter         | Buffer mixing           | ✅ Ready         |
-| WAV encoding     | FFmpeg encode         | Manual WAV encoding     | ✅ Ready         |
-| Muxing           | FFmpeg merge          | mp4box.js / FFmpeg.wasm | ⚠️ Needs library |
-
-## Performance Considerations
-
-### Memory Usage
-- The Web Audio API decodes entire audio files into memory
-- Large video files can cause memory issues
-- Consider chunked processing for long videos
-
-### Processing Time
-- Audio processing adds 20-50% to render time
-- Service worker caching helps with repeated renders
-- Consider showing separate progress for video and audio
-
-### Browser Limits
-- Chrome: ~2 GB audio buffer limit
-- Safari: stricter memory limits
-- Firefox: better memory management, but slower
-
-## Example Usage
-
-```typescript
-import { useBrowserRenderer } from '@twick/browser-render';
-
-function VideoRenderer() {
-  const { render, progress } = useBrowserRenderer({
-    width: 1920,
-    height: 1080,
-    fps: 30,
-    includeAudio: true, // Enable audio processing
-  });
-
-  const handleRender = async () => {
-    const videoBlob = await render({
-      input: {
-        properties: { width: 1920, height: 1080, fps: 30 },
-        tracks: [
-          {
-            type: 'element',
-            elements: [
-              {
-                type: 'video',
-                src: 'https://example.com/video.mp4',
-                // Audio will be automatically extracted and included
-              }
-            ]
-          }
-        ]
-      }
-    });
-  };
-
-  return <button onClick={handleRender}>Render with Audio</button>;
-}
-```
-
-## Troubleshooting
-
-### No Audio in Output
-1. Check that `includeAudio: true` is set
-2. Verify the service worker is registered
-3. Check the browser console for Web Audio API errors
-4. Ensure video sources have audio tracks
-
-### Memory Errors
-1. Reduce video quality/resolution
-2. Process shorter segments
-3. Clear the service worker cache
-4. Use FFmpeg.wasm with streaming
-
-### Performance Issues
-1. Use service worker caching
-2. Reduce the number of audio tracks
-3. Lower the audio sample rate (default: 48 kHz)
-4. Consider server-side rendering for production
-
-## Future Enhancements
-
-- [ ] Streaming audio processing (chunks)
-- [ ] Web Workers for parallel processing
-- [ ] Real-time audio preview
-- [ ] Audio effects (reverb, EQ, etc.)
-- [ ] WASM-based audio processing for better performance
-- [ ] Support for more audio formats
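
The deleted guide registers `public/audio-worker.js`, but the worker's source does not appear anywhere in this diff. A minimal, hypothetical sketch of the cache-first media handler it describes (the cache name and request filtering are assumptions):

```js
// public/audio-worker.js (hypothetical sketch; the published worker is not shown in this diff)
const CACHE_NAME = 'twick-audio-assets-v1';

self.addEventListener('fetch', (event) => {
  const req = event.request;
  // Only intercept media requests; let everything else go straight to the network.
  if (req.destination !== 'audio' && req.destination !== 'video') return;

  event.respondWith(
    caches.open(CACHE_NAME).then(async (cache) => {
      const cached = await cache.match(req);
      if (cached) return cached; // serve repeat renders from cache
      const response = await fetch(req);
      if (response.ok) cache.put(req, response.clone());
      return response;
    })
  );
});
```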
package/package.json.bak DELETED
@@ -1,53 +0,0 @@
-{
-  "name": "@twick/browser-render",
-  "version": "0.15.6",
-  "license": "https://github.com/ncounterspecialist/twick/blob/main/LICENSE.md",
-  "description": "Browser-native video rendering for Twick using WebCodecs API",
-  "main": "./dist/index.cjs",
-  "module": "./dist/index.mjs",
-  "types": "./dist/index.d.ts",
-  "exports": {
-    ".": {
-      "types": "./dist/index.d.ts",
-      "import": "./dist/index.mjs",
-      "require": "./dist/index.cjs"
-    }
-  },
-  "scripts": {
-    "build": "tsup",
-    "dev": "tsup --watch",
-    "test:browser": "tsx src/test-browser-render.ts",
-    "clean": "rimraf dist"
-  },
-  "publishConfig": {
-    "access": "public"
-  },
-  "dependencies": {
-    "@twick/core": "^0.15.6",
-    "@twick/visualizer": "0.15.6",
-    "mp4-wasm": "^1.0.6",
-    "mp4box": "^0.5.2",
-    "@ffmpeg/ffmpeg": "^0.12.10",
-    "@ffmpeg/util": "^0.12.1",
-    "@ffmpeg/core": "^0.12.6"
-  },
-  "peerDependencies": {
-    "react": ">=17.0.0"
-  },
-  "peerDependenciesMeta": {
-    "react": {
-      "optional": true
-    }
-  },
-  "devDependencies": {
-    "@types/node": "^20.10.0",
-    "@types/react": "^18.2.0",
-    "rimraf": "^5.0.5",
-    "tsup": "^8.0.0",
-    "tsx": "^4.7.0",
-    "typescript": "5.4.2"
-  },
-  "engines": {
-    "node": ">=20.0.0"
-  }
-}
package/src/audio/audio-processor.ts DELETED
@@ -1,239 +0,0 @@
-/**
- * Browser-based audio processing using the Web Audio API
- * Mirrors the server's FFmpeg audio generation logic
- */
-
-export interface MediaAsset {
-  key: string;
-  src: string;
-  type: 'video' | 'audio';
-  startInVideo: number;
-  endInVideo: number;
-  duration: number;
-  playbackRate: number;
-  volume: number;
-  trimLeftInSeconds: number;
-  durationInSeconds: number;
-}
-
-export interface AssetInfo {
-  key: string;
-  src: string;
-  type: 'video' | 'audio';
-  currentTime: number;
-  playbackRate: number;
-  volume: number;
-}
-
-/**
- * Get asset placement from frames (similar to the server's getAssetPlacement)
- */
-export function getAssetPlacement(frames: AssetInfo[][]): MediaAsset[] {
-  const assets: MediaAsset[] = [];
-  const assetTimeMap = new Map<string, { start: number; end: number }>();
-
-  for (let frame = 0; frame < frames.length; frame++) {
-    for (const asset of frames[frame]) {
-      if (!assetTimeMap.has(asset.key)) {
-        assetTimeMap.set(asset.key, {
-          start: asset.currentTime,
-          end: asset.currentTime,
-        });
-        assets.push({
-          key: asset.key,
-          src: asset.src,
-          type: asset.type,
-          startInVideo: frame,
-          endInVideo: frame,
-          duration: 0,
-          durationInSeconds: 0,
-          playbackRate: asset.playbackRate,
-          volume: asset.volume,
-          trimLeftInSeconds: asset.currentTime,
-        });
-      } else {
-        const timeInfo = assetTimeMap.get(asset.key);
-        if (timeInfo) {
-          timeInfo.end = asset.currentTime;
-        }
-        const existingAsset = assets.find(a => a.key === asset.key);
-        if (existingAsset) {
-          existingAsset.endInVideo = frame;
-        }
-      }
-    }
-  }
-
-  // Calculate durations
-  assets.forEach(asset => {
-    const timeInfo = assetTimeMap.get(asset.key);
-    if (timeInfo) {
-      asset.durationInSeconds = (timeInfo.end - timeInfo.start) / asset.playbackRate;
-    }
-    asset.duration = asset.endInVideo - asset.startInVideo + 1;
-  });
-
-  return assets;
-}
-
-/**
- * Audio processor using the Web Audio API
- */
-export class BrowserAudioProcessor {
-  private audioContext: AudioContext;
-
-  constructor(private sampleRate: number = 48000) {
-    this.audioContext = new AudioContext({ sampleRate });
-  }
-
-  /**
-   * Fetch and decode audio from a media source
-   */
-  async fetchAndDecodeAudio(src: string): Promise<AudioBuffer> {
-    const response = await fetch(src);
-    const arrayBuffer = await response.arrayBuffer();
-    return await this.audioContext.decodeAudioData(arrayBuffer);
-  }
-
-  /**
-   * Process an audio asset with playback rate, volume, and timing
-   */
-  async processAudioAsset(
-    asset: MediaAsset,
-    fps: number,
-    totalFrames: number
-  ): Promise<AudioBuffer> {
-    const audioBuffer = await this.fetchAndDecodeAudio(asset.src);
-
-    const duration = totalFrames / fps;
-    const outputLength = Math.ceil(duration * this.sampleRate);
-    const outputBuffer = this.audioContext.createBuffer(
-      2, // stereo
-      outputLength,
-      this.sampleRate
-    );
-
-    // Calculate timing
-    const startTime = asset.startInVideo / fps;
-    const trimLeft = asset.trimLeftInSeconds / asset.playbackRate;
-    const trimRight = trimLeft + asset.durationInSeconds;
-
-    // Process each channel
-    for (let channel = 0; channel < 2; channel++) {
-      const inputData = audioBuffer.getChannelData(Math.min(channel, audioBuffer.numberOfChannels - 1));
-      const outputData = outputBuffer.getChannelData(channel);
-
-      // Calculate sample positions
-      const startSample = Math.floor(startTime * this.sampleRate);
-      const trimLeftSample = Math.floor(trimLeft * this.sampleRate);
-      const trimRightSample = Math.floor(trimRight * this.sampleRate);
-
-      // Copy and process samples
-      for (let i = 0; i < outputData.length; i++) {
-        const outputTime = i / this.sampleRate;
-        const assetTime = outputTime - startTime;
-
-        if (assetTime < 0 || assetTime >= asset.durationInSeconds) {
-          outputData[i] = 0; // Silence
-        } else {
-          // Apply playback rate
-          const inputSample = Math.floor(trimLeftSample + assetTime * asset.playbackRate * this.sampleRate);
-          if (inputSample >= 0 && inputSample < inputData.length) {
-            outputData[i] = inputData[inputSample] * asset.volume;
-          } else {
-            outputData[i] = 0;
-          }
-        }
-      }
-    }
-
-    return outputBuffer;
-  }
-
-  /**
-   * Mix multiple audio buffers
-   */
-  mixAudioBuffers(buffers: AudioBuffer[]): AudioBuffer {
-    if (buffers.length === 0) {
-      return this.audioContext.createBuffer(2, 1, this.sampleRate);
-    }
-
-    const maxLength = Math.max(...buffers.map(b => b.length));
-    const mixedBuffer = this.audioContext.createBuffer(2, maxLength, this.sampleRate);
-
-    for (let channel = 0; channel < 2; channel++) {
-      const mixedData = mixedBuffer.getChannelData(channel);
-
-      buffers.forEach(buffer => {
-        const channelData = buffer.getChannelData(Math.min(channel, buffer.numberOfChannels - 1));
-        for (let i = 0; i < channelData.length; i++) {
-          mixedData[i] = (mixedData[i] || 0) + channelData[i] / buffers.length;
-        }
-      });
-    }
-
-    return mixedBuffer;
-  }
-
-  /**
-   * Convert an AudioBuffer to WAV format
-   */
-  audioBufferToWav(buffer: AudioBuffer): ArrayBuffer {
-    const numberOfChannels = buffer.numberOfChannels;
-    const sampleRate = buffer.sampleRate;
-    const format = 1; // PCM
-    const bitDepth = 16;
-
-    const bytesPerSample = bitDepth / 8;
-    const blockAlign = numberOfChannels * bytesPerSample;
-
-    const data = new Float32Array(buffer.length * numberOfChannels);
-    for (let channel = 0; channel < numberOfChannels; channel++) {
-      const channelData = buffer.getChannelData(channel);
-      for (let i = 0; i < buffer.length; i++) {
-        data[i * numberOfChannels + channel] = channelData[i];
-      }
-    }
-
-    const dataLength = data.length * bytesPerSample;
-    const headerLength = 44;
-    const wav = new ArrayBuffer(headerLength + dataLength);
-    const view = new DataView(wav);
-
-    // Write WAV header
-    const writeString = (offset: number, string: string) => {
-      for (let i = 0; i < string.length; i++) {
-        view.setUint8(offset + i, string.charCodeAt(i));
-      }
-    };
-
-    writeString(0, 'RIFF');
-    view.setUint32(4, 36 + dataLength, true);
-    writeString(8, 'WAVE');
-    writeString(12, 'fmt ');
-    view.setUint32(16, 16, true); // fmt chunk size
-    view.setUint16(20, format, true);
-    view.setUint16(22, numberOfChannels, true);
-    view.setUint32(24, sampleRate, true);
-    view.setUint32(28, sampleRate * blockAlign, true);
-    view.setUint16(32, blockAlign, true);
-    view.setUint16(34, bitDepth, true);
-    writeString(36, 'data');
-    view.setUint32(40, dataLength, true);
-
-    // Write audio data
-    const volume = 0.8;
-    let offset = 44;
-    for (let i = 0; i < data.length; i++) {
-      const sample = Math.max(-1, Math.min(1, data[i]));
-      view.setInt16(offset, sample < 0 ? sample * 0x8000 : sample * 0x7FFF, true);
-      offset += 2;
-    }
-
-    return wav;
-  }
-
-  async close() {
-    await this.audioContext.close();
-  }
-}
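
The nearest-neighbor resampling in `processAudioAsset` above is easiest to follow with a worked example; the values below are made up for illustration and show exactly what the deleted code computes:

```typescript
// Mirrors the time mapping in processAudioAsset:
//   trimLeft    = trimLeftInSeconds / playbackRate
//   inputSample = floor(trimLeft * sampleRate + assetTime * playbackRate * sampleRate)
const sampleRate = 48000;
const playbackRate = 2.0;      // the source plays at double speed
const trimLeftInSeconds = 1.0; // the asset starts 1 s into the source

const trimLeft = trimLeftInSeconds / playbackRate; // 0.5 s
const assetTime = 1.0;                             // 1 s into the rendered output
const inputSample = Math.floor(
  trimLeft * sampleRate + assetTime * playbackRate * sampleRate
);
console.log(inputSample / sampleRate); // 2.5, i.e. the sample read 2.5 s into the decoded source
```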
package/src/audio/audio-video-muxer.ts DELETED
@@ -1,79 +0,0 @@
-/**
- * Browser-based audio/video muxing using FFmpeg.wasm (main thread)
- * Loads core files from the local public/ffmpeg directory
- */
-
-export interface MuxerOptions {
-  videoBlob: Blob;
-  audioBuffer: ArrayBuffer;
-  fps: number;
-  width: number;
-  height: number;
-}
-
-/**
- * Mux audio and video using FFmpeg.wasm in the main thread
- * Core files are loaded from /ffmpeg/ (no CDN, no CORS issues)
- */
-export async function muxAudioVideo(options: MuxerOptions): Promise<Blob> {
-  try {
-    console.log('🎬 Starting FFmpeg.wasm muxing (main thread)...');
-
-    // Import from installed packages (bundled by Vite)
-    const { FFmpeg } = await import('@ffmpeg/ffmpeg');
-    const { fetchFile, toBlobURL } = await import('@ffmpeg/util');
-
-    const ffmpeg = new FFmpeg();
-
-    ffmpeg.on('log', ({ message }) => {
-      console.log('[FFmpeg]', message);
-    });
-
-    ffmpeg.on('progress', ({ progress }) => {
-      console.log(`[FFmpeg] Progress: ${(progress * 100).toFixed(1)}%`);
-    });
-
-    console.log('[FFmpeg] Loading core from /ffmpeg/...');
-
-    // Load from LOCAL files in public/ffmpeg (no CDN!)
-    // Note: FFmpeg 0.12.x embeds the worker in core.js; no separate workerURL is needed
-    await ffmpeg.load({
-      coreURL: await toBlobURL('/ffmpeg/ffmpeg-core.js', 'text/javascript'),
-      wasmURL: await toBlobURL('/ffmpeg/ffmpeg-core.wasm', 'application/wasm'),
-    });
-
-    console.log('✅ FFmpeg.wasm loaded');
-
-    // Write input files
-    console.log('[FFmpeg] Writing input files...');
-    await ffmpeg.writeFile('video.mp4', await fetchFile(options.videoBlob));
-    await ffmpeg.writeFile('audio.wav', new Uint8Array(options.audioBuffer));
-
-    console.log('[FFmpeg] Muxing audio and video...');
-
-    // Mux video and audio
-    await ffmpeg.exec([
-      '-i', 'video.mp4',
-      '-i', 'audio.wav',
-      '-c:v', 'copy',
-      '-c:a', 'aac',
-      '-b:a', '192k',
-      '-shortest',
-      'output.mp4'
-    ]);
-
-    console.log('[FFmpeg] Reading output file...');
-
-    // Read output
-    const data = await ffmpeg.readFile('output.mp4');
-    const muxedBlob = new Blob([data], { type: 'video/mp4' });
-
-    console.log(`✅ Muxed video with audio: ${(muxedBlob.size / 1024 / 1024).toFixed(2)} MB`);
-
-    return muxedBlob;
-  } catch (error) {
-    console.error('❌ FFmpeg.wasm muxing failed:', error);
-    console.warn('⚠️ Returning video-only');
-    return options.videoBlob;
-  }
-}
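
For completeness, a hedged usage sketch of the deleted muxer, assuming a `videoBlob` produced by the WebCodecs export path and a WAV `ArrayBuffer` from `BrowserAudioProcessor.audioBufferToWav(...)` (both placeholders here):

```typescript
import { muxAudioVideo } from './audio/audio-video-muxer'; // path as given in the deleted guide

declare const videoBlob: Blob;      // assumed: output of the WebCodecs exporter
declare const wavData: ArrayBuffer; // assumed: output of audioBufferToWav()

const finalBlob = await muxAudioVideo({
  videoBlob,
  audioBuffer: wavData,
  fps: 30,
  width: 1920,
  height: 1080,
});

// Note the fallback above: on any FFmpeg.wasm failure the function logs the error
// and resolves with the original video-only blob, so callers always get a playable MP4.
const url = URL.createObjectURL(finalBlob);
```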