@vidtreo/recorder 1.1.0 → 1.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -14,9 +14,10 @@ export {};
14
14
 
15
15
  export {};
16
16
 
17
- export {};
17
+ export declare function waitForCondition(condition: () => boolean | Promise<boolean>, maxIterations?: number): Promise<void>;
18
+ export declare function waitForValue<T>(getValue: () => T | Promise<T>, predicate: (value: T) => boolean, maxIterations?: number): Promise<T>;
18
19
 
19
- export {};
20
+ export declare function wait(_ms: number): Promise<void>;
20
21
 
21
22
  export {};
22
23
 
@@ -34,964 +35,1349 @@ export {};
34
35
 
35
36
  export {};
36
37
 
37
- export declare function wait(_ms: number): Promise<void>;
38
-
39
- export declare function waitForCondition(condition: () => boolean | Promise<boolean>, maxIterations?: number): Promise<void>;
40
- export declare function waitForValue<T>(getValue: () => T | Promise<T>, predicate: (value: T) => boolean, maxIterations?: number): Promise<T>;
41
-
42
- export declare function getBrowserName(): string;
43
- export declare function validateBrowserSupport(): void;
38
+ export {};
44
39
 
45
- export type BrowserGuardErrorCode = "browser.unsupported";
46
- export type BrowserGuardError = Error & {
47
- code: BrowserGuardErrorCode;
48
- };
40
+ export {};
49
41
 
50
- import type { TranscodeConfig } from "../transcode/transcode-types";
51
- export declare class ConfigManager {
52
- private configService;
53
- private currentConfig;
54
- private configFetched;
55
- private configReady;
56
- initialize(apiKey: string | null, backendUrl: string | null): Promise<void>;
57
- fetchConfig(): Promise<void>;
58
- getConfig(): Promise<TranscodeConfig>;
59
- isConfigReady(): boolean;
60
- clearCache(): void;
61
- }
42
+ export {};
62
43
 
63
- import type { TranscodeConfig } from "../transcode/transcode-types";
64
- export type BackendPreset = "sd" | "hd" | "fhd" | "4k";
65
- export declare const DEFAULT_AUDIO_BITRATE = 128000;
66
- export type BackendConfigResponse = {
67
- presetEncoding: BackendPreset;
68
- outputFormat?: "mp4" | "webm" | "mkv" | "mov";
69
- watermark?: {
70
- url: string;
71
- opacity?: number;
72
- position: string;
73
- };
74
- };
75
- export declare const PRESET_VIDEO_BITRATE_MAP: Record<BackendPreset, number>;
76
- export declare const RESOLUTION_MAP: Record<BackendPreset, {
77
- width: number;
78
- height: number;
79
- }>;
80
- export declare const MOBILE_RESOLUTION_MAP: Record<BackendPreset, {
81
- width: number;
82
- height: number;
83
- }>;
84
- export declare const DEFAULT_BACKEND_URL = "https://core.vidtreo.com";
85
- export declare const DEFAULT_TRANSCODE_CONFIG: Readonly<TranscodeConfig>;
86
- export declare function getDefaultConfigForFormat(format: TranscodeConfig["format"]): TranscodeConfig;
44
+ export {};
87
45
 
88
- import type { TranscodeConfig } from "../transcode/transcode-types";
89
- /**
90
- * Options for mapping backend preset to transcoding configuration.
91
- */
92
- export type MapPresetOptions = {
93
- preset: BackendPreset;
94
- outputFormat?: TranscodeConfig["format"];
95
- watermark?: BackendConfigResponse["watermark"];
96
- isMobile?: boolean;
97
- };
98
- export declare function mapPresetToConfig(options: MapPresetOptions): Promise<TranscodeConfig>;
46
+ export {};
99
47
 
100
- import type { TranscodeConfig } from "../transcode/transcode-types";
101
- export type ConfigServiceOptions = {
48
+ export type VidtreoRecorderConfig = {
102
49
  apiKey: string;
103
- backendUrl: string;
104
- cacheTimeout?: number;
105
- };
106
- export declare class ConfigService {
107
- private readonly cacheTimeout;
108
- private readonly options;
109
- private cachedConfig;
110
- private cacheTimestamp;
111
- private fetchPromise;
112
- private lastFetchSucceeded;
113
- private constructor();
114
- static getInstance(options: ConfigServiceOptions): ConfigService;
115
- fetchConfig(): Promise<TranscodeConfig>;
116
- isConfigReady(): boolean;
117
- clearCache(): void;
118
- static clearAllInstances(): void;
119
- getCurrentConfig(): TranscodeConfig;
120
- private fetchConfigFromBackend;
121
- }
122
-
123
- import type { CameraStreamManager } from "../stream/stream";
124
- import { StreamProcessor } from "../stream/stream-processor";
125
- export declare class RecordingManager {
126
- private recordingState;
127
- private countdownDuration;
128
- private countdownRemaining;
129
- private countdownTimeoutId;
130
- private countdownIntervalId;
131
- private countdownStartTime;
132
- private isPaused;
133
- private maxRecordingTime;
134
- private maxTimeTimer;
135
- private recordingStartTime;
136
- private maxTimeRemaining;
137
- private recordingSeconds;
138
- private recordingIntervalId;
139
- private pauseStartTime;
140
- private totalPausedTime;
141
- private readonly streamManager;
142
- private readonly callbacks;
143
- private streamProcessor;
144
- private originalCameraStream;
145
- private enableTabVisibilityOverlay;
146
- private tabVisibilityOverlayText;
147
- constructor(streamManager: CameraStreamManager, callbacks: RecordingCallbacks);
148
- setCountdownDuration(duration: number): void;
149
- setMaxRecordingTime(maxTime: number | null): void;
150
- setTabVisibilityOverlayConfig(enabled: boolean, text: string | undefined): void;
151
- getRecordingState(): RecordingState;
152
- isPausedState(): boolean;
153
- getRecordingSeconds(): number;
154
- getStreamProcessor(): StreamProcessor | null;
155
- updateSourceType(isScreenCapture: boolean): void;
156
- setOriginalCameraStream(stream: MediaStream | null): void;
157
- getOriginalCameraStream(): MediaStream | null;
158
- startRecording(): Promise<void>;
159
- private startCountdown;
160
- private doStartRecording;
161
- stopRecording(): Promise<Blob>;
162
- pauseRecording(): void;
163
- resumeRecording(): void;
164
- cancelCountdown(): void;
165
- cleanup(): void;
166
- private resetRecordingState;
167
- private resetPauseState;
168
- private updatePausedDuration;
169
- private startRecordingTimer;
170
- private startMaxTimeTimer;
171
- private clearTimer;
172
- private handleError;
173
- }
174
-
175
- import type { AudioLevelCallbacks } from "../audio/types";
176
- import type { DeviceCallbacks } from "../device/types";
177
- import type { UploadCallbacks as StorageUploadCallbacks } from "../storage/types";
178
- import type { TranscodeConfig } from "../transcode/transcode-types";
179
- import type { UploadCallbacks } from "../upload/types";
180
- export type RecordingState = "idle" | "countdown" | "recording";
181
- export type SourceType = "camera" | "screen";
182
- export type RecordingCallbacks = {
183
- onStateChange: (state: RecordingState) => void;
184
- onCountdownUpdate: (state: RecordingState, remaining: number) => void;
185
- onTimerUpdate: (formatted: string) => void;
186
- onError: (error: Error) => void;
187
- onRecordingComplete: (blob: Blob) => void;
188
- onClearUploadStatus: () => void;
189
- onStopAudioTracking: () => void;
190
- onGetConfig: () => Promise<TranscodeConfig>;
191
- };
192
- export type RecorderConfig = {
193
- apiKey?: string | null;
194
- backendUrl?: string | null;
195
- demo?: boolean;
196
- countdownDuration?: number;
197
- maxRecordingTime?: number | null;
198
- userMetadata?: Record<string, unknown>;
50
+ apiUrl?: string;
199
51
  enableSourceSwitching?: boolean;
200
52
  enableMute?: boolean;
201
53
  enablePause?: boolean;
202
54
  enableDeviceChange?: boolean;
55
+ maxRecordingTime?: number;
56
+ countdownDuration?: number;
57
+ userMetadata?: Record<string, unknown>;
203
58
  enableTabVisibilityOverlay?: boolean;
204
59
  tabVisibilityOverlayText?: string;
205
- nativeCamera?: boolean;
60
+ onUploadComplete?: (result: {
61
+ recordingId: string;
62
+ uploadUrl: string;
63
+ }) => void;
64
+ onUploadProgress?: (progress: number) => void;
65
+ onUploadError?: (error: Error) => void;
66
+ onRecordingStart?: () => void;
67
+ onRecordingStop?: () => void;
68
+ onError?: (error: Error) => void;
206
69
  };
207
- export type RecorderCallbacks = {
208
- recording?: Partial<RecordingCallbacks>;
209
- audioLevel?: AudioLevelCallbacks;
210
- device?: DeviceCallbacks;
211
- upload?: UploadCallbacks;
212
- storage?: StorageUploadCallbacks;
213
- sourceSwitch?: {
214
- onSourceChange?: (sourceType: SourceType) => Promise<void>;
215
- onPreviewUpdate?: (stream: MediaStream) => Promise<void>;
216
- onError?: (error: Error) => void;
217
- onTransitionStart?: (message: string) => void;
218
- onTransitionEnd?: () => void;
219
- };
220
- stream?: {
221
- onStreamStart?: (stream: MediaStream) => void;
222
- onStreamStop?: () => void;
223
- onError?: (error: Error) => void;
224
- };
225
- onStorageCleanupError?: (error: string) => void;
70
+ export type RecordingStartOptions = {
71
+ video?: boolean | CameraConstraints;
72
+ audio?: boolean | MediaTrackConstraints;
226
73
  };
227
-
228
- import type { AudioLevelCallbacks } from "../audio/types";
229
- import { DeviceManager } from "../device/device-manager";
230
- import { CameraStreamManager } from "../stream/stream";
231
- import { VideoUploadService } from "../upload/upload-service";
232
- export declare class RecorderController {
233
- private readonly streamManager;
234
- private readonly configManager;
235
- private readonly storageManager;
236
- private readonly deviceManager;
237
- private readonly audioLevelAnalyzer;
238
- private readonly recordingManager;
239
- private readonly sourceSwitchManager;
240
- private readonly uploadService;
241
- private readonly uploadCallbacks;
242
- private readonly callbacks;
243
- private uploadQueueManager;
244
- private telemetryClient;
245
- private uploadMetadataById;
74
+ export type RecordingStopResult = {
75
+ recordingId: string;
76
+ uploadUrl: string;
77
+ blob: Blob;
78
+ };
79
+ export declare class VidtreoRecorder {
80
+ private readonly controller;
81
+ private readonly config;
246
82
  private isInitialized;
247
- private isDemo;
248
- private enableTabVisibilityOverlay;
249
- private tabVisibilityOverlayText;
250
- constructor(callbacks?: RecorderCallbacks);
251
- initialize(config: RecorderConfig): Promise<void>;
252
- startStream(): Promise<void>;
253
- stopStream(): Promise<void>;
254
- switchVideoDevice(deviceId: string | null): Promise<MediaStream>;
255
- switchAudioDevice(deviceId: string | null): Promise<MediaStream>;
256
- startRecording(): Promise<void>;
257
- stopRecording(): Promise<Blob>;
258
- getTabVisibilityOverlayConfig(): {
259
- enabled: boolean;
260
- text: string | undefined;
261
- };
262
- pauseRecording(): void;
263
- resumeRecording(): void;
83
+ constructor(config: VidtreoRecorderConfig);
84
+ initialize(): Promise<void>;
85
+ startPreview(sourceType?: SourceType): Promise<MediaStream>;
86
+ startRecording(_options?: RecordingStartOptions, sourceType?: SourceType): Promise<void>;
264
87
  switchSource(sourceType: SourceType): Promise<void>;
265
- setCameraDevice(deviceId: string | null): void;
266
- setMicDevice(deviceId: string | null): void;
267
- getAvailableDevices(): Promise<import("../..").AvailableDevices>;
88
+ stopRecording(): Promise<RecordingStopResult>;
89
+ getAvailableDevices(): Promise<import(".").AvailableDevices>;
268
90
  muteAudio(): void;
269
91
  unmuteAudio(): void;
270
92
  toggleMute(): void;
271
- getIsMuted(): boolean;
272
- startAudioLevelTracking(stream: MediaStream, callbacks?: AudioLevelCallbacks): Promise<void>;
273
- stopAudioLevelTracking(): void;
274
- getAudioLevel(): number;
275
- uploadVideo(blob: Blob, apiKey: string, backendUrl: string, metadata: Record<string, unknown>): Promise<void>;
93
+ isMuted(): boolean;
94
+ pauseRecording(): void;
95
+ resumeRecording(): void;
96
+ isPaused(): boolean;
97
+ getRecordingState(): import(".").RecordingState;
276
98
  getStream(): MediaStream | null;
277
- isConfigReady(): boolean;
278
- ensureConfigReady(): Promise<void>;
279
99
  cleanup(): void;
280
- getRecordingState(): RecordingState;
281
- isPaused(): boolean;
282
- getCurrentSourceType(): SourceType;
283
- getOriginalCameraStream(): MediaStream | null;
284
- getStreamManager(): CameraStreamManager;
285
- getAudioStreamForAnalysis(): MediaStream | null;
286
- getDeviceManager(): DeviceManager;
287
- getConfig(): Promise<import("../..").TranscodeConfig>;
288
- getUploadService(): VideoUploadService | null;
289
- isRecording(): boolean;
290
- isActive(): boolean;
291
- private sendTelemetryEvent;
292
- private executeTelemetryAction;
293
- private executeTelemetryActionWithResult;
294
- private setUploadMetadata;
295
- private clearUploadMetadata;
296
- private createRecordingCallbacks;
297
- private createSourceSwitchCallbacks;
100
+ private ensureInitialized;
298
101
  }
299
102
 
300
- export declare class StreamManager {
301
- private mediaStream;
302
- private state;
303
- private readonly eventListeners;
304
- private readonly streamConfig;
305
- private selectedAudioDeviceId;
306
- private selectedVideoDeviceId;
307
- constructor(streamConfig?: Partial<StreamConfig>);
308
- getState(): StreamState;
309
- getStream(): MediaStream | null;
310
- getAudioStreamForAnalysis(): MediaStream | null;
311
- isActive(): boolean;
312
- on<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): () => void;
313
- off<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): void;
314
- once<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): () => void;
315
- emit<T extends keyof StreamEventMap>(event: T, data: StreamEventMap[T]): void;
316
- setState(newState: StreamState): void;
317
- setAudioDevice(deviceId: string | null): void;
318
- setVideoDevice(deviceId: string | null): void;
319
- getAudioDevice(): string | null;
320
- getVideoDevice(): string | null;
321
- getAvailableDevices(): Promise<{
322
- audioinput: MediaDeviceInfo[];
323
- videoinput: MediaDeviceInfo[];
103
+ export type UploadResult = {
104
+ id: string;
105
+ uploadUrl: string | null;
106
+ };
107
+ export type UploadCallbacks = {
108
+ onProgress: (progress: number) => void;
109
+ onSuccess: (result: UploadResult) => void;
110
+ onError: (error: Error) => void;
111
+ onClearStatus: () => void;
112
+ };
113
+
114
+ export type VideoUploadOptions = {
115
+ apiKey: string;
116
+ backendUrl: string;
117
+ filename?: string;
118
+ metadata?: Record<string, unknown>;
119
+ userMetadata?: Record<string, unknown>;
120
+ onProgress?: (progress: number) => void;
121
+ };
122
+ export type VideoUploadResult = {
123
+ id: string;
124
+ publicId: string;
125
+ filename: string;
126
+ fileSize: number;
127
+ mimeType: string;
128
+ duration: number | null;
129
+ status: string;
130
+ uploadUrl: string | null;
131
+ createdAt: string;
132
+ };
133
+ export declare class VideoUploadService {
134
+ uploadVideo(blob: Blob, options: VideoUploadOptions): Promise<VideoUploadResult>;
135
+ private uploadVideoFile;
136
+ private parseSuccessResponse;
137
+ private parseErrorResponse;
138
+ private safeParseJsonFromXhr;
139
+ }
140
+
141
+ import type { PendingUpload, VideoStorageService } from "../storage/video-storage";
142
+ type UploadCallbacks = {
143
+ onUploadProgress?: (id: string, progress: number) => void;
144
+ onUploadComplete?: (id: string, result: VideoUploadResult) => void;
145
+ onUploadError?: (id: string, error: Error) => void;
146
+ };
147
+ export declare class UploadQueueManager {
148
+ private readonly storageService;
149
+ private readonly uploadService;
150
+ private readonly processingIntervalId;
151
+ private readonly networkOnlineHandler;
152
+ private isProcessing;
153
+ private retryTimeoutId;
154
+ private callbacks;
155
+ constructor(storageService: VideoStorageService, uploadService: VideoUploadService);
156
+ destroy(): void;
157
+ setCallbacks(callbacks: UploadCallbacks): void;
158
+ queueUpload(upload: Omit<PendingUpload, "id" | "createdAt" | "updatedAt" | "status" | "retryCount">): Promise<string>;
159
+ processQueue(): Promise<void>;
160
+ getPendingUploads(): Promise<PendingUpload[]>;
161
+ getStats(): Promise<{
162
+ pending: number;
163
+ uploading: number;
164
+ failed: number;
165
+ total: number;
324
166
  }>;
325
- private buildDeviceConstraints;
326
- private buildVideoConstraints;
327
- private buildAudioConstraints;
328
- startStream(): Promise<MediaStream>;
329
- stopStream(): void;
330
- private stopStreamTracks;
331
- private isTrackLive;
332
- private tryReplaceTrack;
333
- private recreateStreamWithNewTrack;
334
- private switchDeviceTrack;
335
- switchVideoDevice(deviceId: string | null): Promise<MediaStream>;
336
- switchAudioDevice(deviceId: string | null): Promise<MediaStream>;
337
- setMediaStream(stream: MediaStream): void;
338
- setAudioTracksEnabled(enabled: boolean): void;
167
+ private getOldestUpload;
168
+ private getOldestFailedUpload;
169
+ private processUpload;
170
+ private calculateRetryDelay;
171
+ private scheduleRetry;
172
+ private clearTimer;
173
+ }
174
+ export {};
175
+
176
+ export declare class TelemetryClient {
177
+ private readonly config;
178
+ private readonly dependencies;
179
+ private readonly installationId;
180
+ private pendingEvents;
181
+ private flushTimeoutId;
182
+ private throttledEventTimestamps;
183
+ constructor(config: TelemetryClientConfig, dependencies: TelemetryClientDependencies);
184
+ triggerTelemetryEvent(event: TelemetryEventInput): void;
185
+ private enqueueEvent;
186
+ private scheduleFlush;
187
+ private flushQueue;
188
+ private clearFlushTimer;
189
+ private buildRequestPayload;
190
+ private shouldSkipEvent;
191
+ private markEventTracking;
192
+ private updateNumberMap;
193
+ private getOneTimeCacheKey;
194
+ private isOneTimeEvent;
195
+ private isThrottledEvent;
196
+ private buildBaseProperties;
197
+ private mergeProperties;
198
+ private buildFingerprint;
199
+ private buildContext;
200
+ private buildError;
201
+ private getBrowserName;
202
+ private getDeviceMemory;
203
+ private sendPayload;
204
+ private buildTelemetryEndpoint;
205
+ }
206
+
207
+ export declare function createBrowserDependencies(): TelemetryClientDependencies;
208
+ export declare function createTelemetryClient(apiKey: string, backendUrl: string, options?: {
209
+ endpoint?: string;
210
+ sessionId?: string;
211
+ userId?: string;
212
+ environmentId?: string;
213
+ appVersion?: string;
214
+ release?: string;
215
+ pageUrl?: string;
216
+ referrerUrl?: string;
217
+ sdkLocation?: string;
218
+ clientLocation?: string;
219
+ }): TelemetryClient;
220
+
221
+ export {};
222
+
223
+ export type TelemetryEventCategory = "lifecycle" | "interaction" | "performance" | "error";
224
+ export type TelemetryEventName = "sdk.init.started" | "sdk.init.succeeded" | "sdk.init.failed" | "preview.start.succeeded" | "preview.start.failed" | "recording.start.requested" | "recording.start.succeeded" | "recording.start.failed" | "recording.stop.requested" | "recording.stop.succeeded" | "recording.stop.failed" | "upload.started" | "upload.succeeded" | "upload.failed" | "source.switch.requested" | "source.switch.succeeded" | "source.switch.failed" | "stream.error";
225
+ export type TelemetryEventInput = {
226
+ name: TelemetryEventName;
227
+ properties?: Record<string, unknown>;
228
+ error?: unknown;
229
+ };
230
+ export type TelemetryErrorDto = {
231
+ message: string;
232
+ code?: string;
233
+ stack?: string;
234
+ };
235
+ export type TelemetryFingerprintDto = {
236
+ userAgent?: string;
237
+ language?: string;
238
+ platform?: string;
239
+ hardwareConcurrency?: number;
240
+ deviceMemory?: number;
241
+ };
242
+ export type TelemetryContextDto = {
243
+ sessionId?: string;
244
+ userId?: string;
245
+ environmentId?: string;
246
+ appVersion?: string;
247
+ release?: string;
248
+ pageUrl?: string;
249
+ referrerUrl?: string;
250
+ sdkLocation?: string;
251
+ clientLocation?: string;
252
+ };
253
+ export type TelemetryEventDto = {
254
+ event: TelemetryEventName;
255
+ category: TelemetryEventCategory;
256
+ timestamp: number;
257
+ installationId: string;
258
+ fingerprint: TelemetryFingerprintDto;
259
+ sdkVersion: string;
260
+ context?: TelemetryContextDto;
261
+ properties?: Record<string, unknown>;
262
+ error?: TelemetryErrorDto;
263
+ };
264
+ export type SendTelemetryRequestDto = {
265
+ events: TelemetryEventDto[];
266
+ };
267
+ export type TelemetryClientConfig = {
268
+ apiKey: string;
269
+ backendUrl: string;
270
+ endpoint?: string;
271
+ sessionId?: string;
272
+ userId?: string;
273
+ environmentId?: string;
274
+ appVersion?: string;
275
+ release?: string;
276
+ pageUrl?: string;
277
+ referrerUrl?: string;
278
+ sdkLocation?: string;
279
+ clientLocation?: string;
280
+ };
281
+ export type TelemetryNavigator = Navigator & {
282
+ deviceMemory?: number;
283
+ };
284
+ export type TelemetryClientDependencies = {
285
+ fetchFunction: typeof fetch;
286
+ cryptoProvider: Crypto | null;
287
+ storageProvider: Storage | null;
288
+ navigatorProvider: TelemetryNavigator | null;
289
+ locationProvider: Location | null;
290
+ documentProvider: Document | null;
291
+ nowProvider: () => number;
292
+ randomProvider: () => number;
293
+ setTimeoutFunction: (callback: () => void, delay: number) => ReturnType<typeof setTimeout>;
294
+ clearTimeoutFunction: (timeoutId: ReturnType<typeof setTimeout>) => void;
295
+ };
296
+
297
+ export declare const SDK_VERSION: string;
298
+
299
+ export type BrowserGuardErrorCode = "browser.unsupported";
300
+ export type BrowserGuardResolutionStage = "policy" | "feature-preflight";
301
+ export type BrowserGuardError = Error & {
302
+ code: BrowserGuardErrorCode;
303
+ browserName?: string;
304
+ browserVersion?: string;
305
+ missingCapabilities?: string[];
306
+ resolutionStage?: BrowserGuardResolutionStage;
307
+ };
308
+
309
+ export type BrowserInfo = {
310
+ name: string;
311
+ version: string;
312
+ normalizedName: string;
313
+ };
314
+ type BrowserUnsupportedErrorOptions = {
315
+ browserInfo?: BrowserInfo;
316
+ missingCapabilities?: string[];
317
+ resolutionStage?: BrowserGuardResolutionStage;
318
+ };
319
+ export declare function getBrowserInfo(): BrowserInfo;
320
+ export declare function getBrowserName(): string;
321
+ export declare function createBrowserUnsupportedError(options?: BrowserUnsupportedErrorOptions): BrowserGuardError;
322
+ export declare function validateBrowserSupport(): void;
323
+ export {};
324
+
325
+ export type AvailableDevices = {
326
+ audioinput: MediaDeviceInfo[];
327
+ videoinput: MediaDeviceInfo[];
328
+ };
329
+ export type DeviceCallbacks = {
330
+ onDevicesChanged: (devices: AvailableDevices) => void;
331
+ onDeviceSelected: (type: "camera" | "mic", deviceId: string | null) => void;
332
+ };
333
+
334
+ import type { CameraStreamManager } from "../stream/stream";
335
+ export declare class DeviceManager {
336
+ private readonly streamManager;
337
+ private readonly callbacks?;
338
+ private availableDevices;
339
+ private selectedCameraDeviceId;
340
+ private selectedMicDeviceId;
341
+ constructor(streamManager: CameraStreamManager, callbacks?: DeviceCallbacks);
342
+ getAvailableDevices(): Promise<AvailableDevices>;
343
+ setCameraDevice(deviceId: string | null): void;
344
+ setMicDevice(deviceId: string | null): void;
345
+ getSelectedCameraDeviceId(): string | null;
346
+ getSelectedMicDeviceId(): string | null;
347
+ getAvailableDevicesList(): AvailableDevices;
348
+ }
349
+
350
+ export type NativeCameraFile = {
351
+ file: File;
352
+ previewUrl: string;
353
+ duration: number;
354
+ validated: boolean;
355
+ };
356
+ export type FileValidationResult = {
357
+ valid: boolean;
358
+ error?: string;
359
+ };
360
+ export type NativeCameraConfig = {
361
+ maxFileSize?: number;
362
+ maxDuration?: number;
363
+ allowedFormats?: string[];
364
+ };
365
+
366
+ export declare function validateFile(file: File, config?: {
367
+ maxFileSize?: number;
368
+ maxRecordingTime?: number | null;
369
+ allowedFormats?: string[];
370
+ }): Promise<FileValidationResult>;
371
+
372
+ import type { RecordingStopResult } from "../../vidtreo-recorder";
373
+ import type { ConfigService } from "../config/config-service";
374
+ import type { VideoUploadService } from "../upload/upload-service";
375
+ export type NativeCameraHandlerConfig = {
376
+ apiKey?: string | null;
377
+ backendUrl?: string | null;
378
+ maxRecordingTime?: number | null;
379
+ maxFileSize?: number;
380
+ userMetadata?: Record<string, unknown>;
381
+ };
382
+ export declare class NativeCameraHandler {
383
+ private pendingFile;
384
+ private readonly configService;
385
+ private readonly uploadService;
386
+ private readonly config;
387
+ constructor(config: NativeCameraHandlerConfig, configService: ConfigService | null, uploadService: VideoUploadService);
388
+ handleFileSelection(file: File): Promise<NativeCameraFile>;
389
+ processAndUpload(onTranscodeProgress: (progress: number) => void, onUploadProgress: (progress: number) => void): Promise<RecordingStopResult>;
390
+ cancel(): void;
391
+ preloadConfig(): Promise<void>;
392
+ }
393
+
394
+ export declare function extractLastFrame(file: File, timeoutMs?: number): Promise<Blob>;
395
+
396
+ import type { TranscodeConfig } from "../transcode/transcode-types";
397
+ export declare class ConfigManager {
398
+ private configService;
399
+ private currentConfig;
400
+ private configFetched;
401
+ private configReady;
402
+ initialize(apiKey: string | null, backendUrl: string | null): Promise<void>;
403
+ fetchConfig(): Promise<void>;
404
+ getConfig(): Promise<TranscodeConfig>;
405
+ isConfigReady(): boolean;
406
+ clearCache(): void;
407
+ }
408
+
409
+ import type { TranscodeConfig } from "../transcode/transcode-types";
410
+ export type ConfigServiceOptions = {
411
+ apiKey: string;
412
+ backendUrl: string;
413
+ cacheTimeout?: number;
414
+ };
415
+ export declare class ConfigService {
416
+ private readonly cacheTimeout;
417
+ private readonly options;
418
+ private cachedConfig;
419
+ private cacheTimestamp;
420
+ private fetchPromise;
421
+ private lastFetchSucceeded;
422
+ private constructor();
423
+ static getInstance(options: ConfigServiceOptions): ConfigService;
424
+ fetchConfig(): Promise<TranscodeConfig>;
425
+ isConfigReady(): boolean;
426
+ clearCache(): void;
427
+ static clearAllInstances(): void;
428
+ getCurrentConfig(): TranscodeConfig;
429
+ private fetchConfigFromBackend;
430
+ }
431
+
432
+ import type { TranscodeConfig } from "../transcode/transcode-types";
433
+ export type BackendPreset = "sd" | "hd" | "fhd" | "4k";
434
+ export declare const DEFAULT_AUDIO_BITRATE = 128000;
435
+ export type BackendConfigResponse = {
436
+ presetEncoding: BackendPreset;
437
+ outputFormat?: "mp4" | "webm" | "mkv" | "mov";
438
+ watermark?: {
439
+ url: string;
440
+ opacity?: number;
441
+ position: string;
442
+ };
443
+ };
444
+ export declare const PRESET_VIDEO_BITRATE_MAP: Record<BackendPreset, number>;
445
+ export declare const RESOLUTION_MAP: Record<BackendPreset, {
446
+ width: number;
447
+ height: number;
448
+ }>;
449
+ export declare const MOBILE_RESOLUTION_MAP: Record<BackendPreset, {
450
+ width: number;
451
+ height: number;
452
+ }>;
453
+ export declare const DEFAULT_BACKEND_URL = "https://core.vidtreo.com";
454
+ export declare const DEFAULT_TRANSCODE_CONFIG: Readonly<TranscodeConfig>;
455
+ export declare function getDefaultConfigForFormat(format: TranscodeConfig["format"]): TranscodeConfig;
456
+
457
+ import type { TranscodeConfig } from "../transcode/transcode-types";
458
+ /**
459
+ * Options for mapping backend preset to transcoding configuration.
460
+ */
461
+ export type MapPresetOptions = {
462
+ preset: BackendPreset;
463
+ outputFormat?: TranscodeConfig["format"];
464
+ watermark?: BackendConfigResponse["watermark"];
465
+ isMobile?: boolean;
466
+ };
467
+ export declare function mapPresetToConfig(options: MapPresetOptions): Promise<TranscodeConfig>;
468
+
469
+ export type AudioLevelCallbacks = {
470
+ onLevelUpdate: (level: number, isMuted: boolean) => void;
471
+ };
472
+
473
+ export declare const AUDIO_WORKLET_PROCESSOR_NAME = "vidtreo-audio-worklet";
474
+ export declare const AUDIO_WORKLET_MESSAGE_TYPE_AUDIO_CHUNK = "audioChunk";
475
+ export declare const AUDIO_WORKLET_MESSAGE_TYPE_SET_MUTED = "setMuted";
476
+ export declare const AUDIO_WORKLET_MESSAGE_TYPE_SET_PAUSED = "setPaused";
477
+ export declare const AUDIO_WORKLET_MESSAGE_TYPE_SHUTDOWN = "shutdown";
478
+ export declare const audioWorkletProcessorCode = "\nconst AUDIO_WORKLET_MESSAGE_TYPE_AUDIO_CHUNK = \"audioChunk\";\nconst AUDIO_WORKLET_MESSAGE_TYPE_SET_MUTED = \"setMuted\";\nconst AUDIO_WORKLET_MESSAGE_TYPE_SET_PAUSED = \"setPaused\";\nconst AUDIO_WORKLET_MESSAGE_TYPE_SHUTDOWN = \"shutdown\";\n\nclass VidtreoAudioWorkletProcessor extends AudioWorkletProcessor {\n constructor() {\n super();\n this.isMuted = false;\n this.isPaused = false;\n this.processedFrames = 0;\n this.port.onmessage = this.handleMessage.bind(this);\n }\n\n handleMessage(event) {\n const data = event.data;\n if (!data || typeof data !== \"object\") {\n return;\n }\n\n if (data.type === AUDIO_WORKLET_MESSAGE_TYPE_SET_MUTED) {\n this.isMuted = data.isMuted === true;\n return;\n }\n\n if (data.type === AUDIO_WORKLET_MESSAGE_TYPE_SET_PAUSED) {\n this.isPaused = data.isPaused === true;\n return;\n }\n\n if (data.type === AUDIO_WORKLET_MESSAGE_TYPE_SHUTDOWN) {\n this.isPaused = true;\n }\n }\n\n process(inputs) {\n if (this.isPaused) {\n return true;\n }\n\n const inputGroup = inputs[0];\n if (!inputGroup || inputGroup.length === 0) {\n return true;\n }\n\n const firstChannel = inputGroup[0];\n if (!firstChannel || firstChannel.length === 0) {\n return true;\n }\n\n const frames = firstChannel.length;\n const numberOfChannels = inputGroup.length;\n const totalSamples = frames * numberOfChannels;\n const data = new Float32Array(totalSamples);\n\n let channelIndex = 0;\n while (channelIndex < numberOfChannels) {\n const channelData = inputGroup[channelIndex];\n if (channelData && channelData.length === frames) {\n data.set(channelData, channelIndex * frames);\n }\n channelIndex += 1;\n }\n\n if (this.isMuted) {\n data.fill(0);\n }\n\n const timestamp = this.processedFrames / sampleRate;\n this.processedFrames += frames;\n\n this.port.postMessage(\n {\n type: AUDIO_WORKLET_MESSAGE_TYPE_AUDIO_CHUNK,\n data,\n frames,\n numberOfChannels,\n sampleRate,\n timestamp,\n },\n [data.buffer]\n );\n\n return 
true;\n }\n}\n\nregisterProcessor(\"vidtreo-audio-worklet\", VidtreoAudioWorkletProcessor);\n";
479
+
480
+ import { WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR } from "../processor/worker/types";
481
+ export type AudioWorkletConfig = {
482
+ sampleRate: number;
483
+ numberOfChannels: number;
484
+ format: typeof WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR;
485
+ };
486
+ export type AudioWorkletChunk = {
487
+ data: Float32Array;
488
+ frames: number;
489
+ numberOfChannels: number;
490
+ sampleRate: number;
491
+ timestamp: number;
492
+ };
493
+ export declare class AudioWorkletController {
494
+ private audioContext;
495
+ private audioWorkletNode;
496
+ private audioSourceNode;
497
+ private audioDestinationNode;
498
+ private audioConfig;
499
+ private onAudioChunk;
500
+ private isMuted;
501
+ private isPaused;
502
+ private isProcessing;
503
+ private hasAudioWorkletUrlLease;
504
+ initialize(audioStream: MediaStream, onAudioChunk: (chunk: AudioWorkletChunk) => void): Promise<AudioWorkletConfig>;
505
+ startProcessing(): Promise<void>;
506
+ setMuted(isMuted: boolean): void;
507
+ setPaused(isPaused: boolean): void;
508
+ close(): void;
509
+ getConfig(): AudioWorkletConfig | null;
510
+ private handleWorkletMessage;
511
+ private parseAudioBuffer;
512
+ private getChannelCount;
513
+ private cleanupAfterInitializeFailure;
514
+ private normalizeInitializationError;
515
+ private disconnectAudioNodes;
516
+ private closeAudioContext;
517
+ private releaseAudioWorkletUrlLease;
518
+ }
519
+
520
+ export declare class AudioLevelAnalyzer {
521
+ private audioContext;
522
+ private analyser;
523
+ private audioLevelIntervalId;
524
+ private audioLevel;
525
+ private getMutedState;
526
+ private currentStream;
527
+ startTracking(stream: MediaStream, callbacks: AudioLevelCallbacks, getMutedState?: () => boolean): void;
528
+ stopTracking(): void;
529
+ getAudioLevel(): number;
530
+ private calculateAudioLevel;
531
+ private checkMutedState;
532
+ }
533
+
534
+ export declare function getAudioContextClass(): typeof AudioContext | null;
535
+
536
+ export type BrowserErrorLinkContent = {
537
+ prefix: string;
538
+ linkText: string | null;
539
+ linkHref: string | null;
540
+ linkTarget: string | null;
541
+ suffix: string;
542
+ };
543
+ export declare function getBrowserErrorText(errorCode: string | null, browserName: string | null, browserVersion: string | null, translations: {
544
+ browserUnsupported: string;
545
+ browserUnsupportedDynamic: string;
546
+ browserUnsupportedSafari: string;
547
+ browserUnsupportedFirefox: string;
548
+ }): string;
549
+ export declare function parseBrowserErrorLinkContent(text: string): BrowserErrorLinkContent;
550
+
551
+ export declare function isScreenCaptureStream(stream: MediaStream): boolean;
552
+
553
+ export declare const FILE_SIZE_UNITS: readonly ["Bytes", "KB", "MB", "GB"];
554
+ export declare const FILE_SIZE_BASE = 1024;
555
+ export declare function formatFileSize(bytes: number): string;
556
+ export declare function formatTime(totalSeconds: number): string;
557
+
558
+ export declare function calculateBarColor(position: number): string;
559
+
560
+ export declare function extractErrorMessage(error: unknown): string;
561
+
562
+ export type VisibilityInterval = {
563
+ start: number;
564
+ end: number;
565
+ };
566
+ export declare class TabVisibilityTracker {
567
+ private recordingStartTime;
568
+ private totalPausedTime;
569
+ private pauseStartTime;
570
+ private intervals;
571
+ private currentIntervalStart;
572
+ private isTracking;
573
+ private readonly visibilityChangeHandler;
574
+ private readonly blurHandler;
575
+ private readonly focusHandler;
576
+ constructor();
577
+ start(recordingStartTime: number): void;
578
+ pause(): void;
579
+ resume(): void;
580
+ getIntervals(): VisibilityInterval[];
581
+ reset(): void;
582
+ cleanup(): void;
583
+ private checkInitialState;
584
+ private handleVisibilityChange;
585
+ private handleBlur;
586
+ private handleFocus;
587
+ private startInterval;
588
+ private endCurrentIntervalIfActive;
589
+ private normalizeTimestamp;
590
+ }
591
+
592
+ export {};
593
+
594
+ declare const ANSI_COLORS: {
595
+ readonly reset: "\u001B[0m";
596
+ readonly bright: "\u001B[1m";
597
+ readonly dim: "\u001B[2m";
598
+ readonly red: "\u001B[31m";
599
+ readonly green: "\u001B[32m";
600
+ readonly yellow: "\u001B[33m";
601
+ readonly blue: "\u001B[34m";
602
+ readonly magenta: "\u001B[35m";
603
+ readonly cyan: "\u001B[36m";
604
+ readonly white: "\u001B[37m";
605
+ readonly gray: "\u001B[90m";
606
+ };
607
+ export declare const logger: {
608
+ readonly log: (message: string, ...args: unknown[]) => void;
609
+ readonly info: (message: string, ...args: unknown[]) => void;
610
+ readonly warn: (message: string, ...args: unknown[]) => void;
611
+ readonly error: (message: string, ...args: unknown[]) => void;
612
+ readonly debug: (message: string, ...args: unknown[]) => void;
613
+ readonly group: (label: string, color?: keyof typeof ANSI_COLORS) => void;
614
+ readonly groupEnd: () => void;
615
+ };
616
+ export {};
617
+
618
+ export declare function isMobileDevice(): boolean;
619
+
620
+ export type SharedObjectUrlStoreDependencies = {
621
+ createBlob: () => Blob;
622
+ createObjectUrl: (blob: Blob) => string;
623
+ revokeObjectUrl: (url: string) => void;
624
+ };
625
+ export type SharedObjectUrlStore = {
626
+ acquire: () => string;
627
+ release: () => void;
628
+ };
629
+ export declare function createSharedObjectUrlStore(dependencies: SharedObjectUrlStoreDependencies): SharedObjectUrlStore;
630
+
631
+ export declare function extractVideoDuration(file: File | Blob): Promise<number>;
632
+
633
+ export declare function requireNonNull<T>(value: T | null | undefined, message: string): T;
634
+ export declare function requireDefined<T>(value: T | undefined, message: string): T;
635
+ export declare function requireActive(isActive: boolean, componentName: string): void;
636
+ export declare function requireInitialized<T>(value: T | null | undefined, componentName: string): T;
637
+ export declare function requireStream(stream: MediaStream | null, message?: string): MediaStream;
638
+ export declare function requireProcessor<T>(processor: T | null, componentName?: string): T;
639
+ /**
640
+ * Validates that mediaDevices API is available.
641
+ * Throws a descriptive error if not available (e.g., on HTTP instead of HTTPS).
642
+ */
643
+ export declare function requireMediaDevices(): MediaDevices;
644
+
645
+ export declare function isMobileDevice(): boolean;
646
+
647
+ export {};
648
+
649
+ import type { UploadResult } from "../upload/types";
650
+ export type UploadCallbacks = {
651
+ onUploadProgress: (id: string, progress: number) => void;
652
+ onUploadComplete: (id: string, result: UploadResult) => void;
653
+ onUploadError: (id: string, error: Error) => void;
654
+ };
655
+
656
+ export declare class StorageManager {
657
+ private storageService;
658
+ private cleanupIntervalId;
659
+ initialize(onCleanupError: (error: string) => void): Promise<void>;
660
+ private setupCleanupInterval;
661
+ performCleanup(): Promise<void>;
662
+ getStorageService(): VideoStorageService | null;
339
663
  destroy(): void;
340
664
  }
341
665
 
342
- import type { TranscodeConfig } from "../transcode/transcode-types";
343
- export declare class StreamProcessor {
344
- private currentVideoStream;
345
- private onSourceChange?;
346
- private onBufferUpdate?;
347
- private onError?;
348
- private readonly workerProcessor;
349
- constructor();
350
- startProcessing(stream: MediaStream, config: TranscodeConfig, overlayConfig?: {
351
- enabled: boolean;
352
- text: string;
353
- recordingStartTime?: number;
354
- }): Promise<void>;
355
- updateTabVisibility(isHidden: boolean, timestamp: number): void;
356
- updateSourceType(isScreenCapture: boolean): void;
357
- pause(): void;
358
- resume(): void;
359
- isPausedState(): boolean;
360
- finalize(): Promise<StreamProcessorResult>;
361
- toggleMute(): void;
362
- isMutedState(): boolean;
363
- getClonedAudioTrack(): MediaStreamTrack | null;
364
- getAudioStreamForAnalysis(): MediaStream | null;
365
- switchVideoSource(newStream: MediaStream): Promise<void>;
366
- getCurrentVideoSource(): MediaStream | null;
367
- getBufferSize(): number;
368
- setOnMuteStateChange(callback: (muted: boolean) => void): void;
369
- setOnSourceChange(callback: (stream: MediaStream) => void): void;
370
- setOnBufferUpdate(callback: (size: number, formatted: string) => void): void;
371
- setOnError(callback: (error: Error) => void): void;
372
- cancel(): Promise<void>;
666
+ export {};
667
+
668
+ export type StorageQuota = {
669
+ usage: number;
670
+ quota: number;
671
+ available: number;
672
+ percentage: number;
673
+ };
674
+ export declare class QuotaManager {
675
+ getQuota(): Promise<StorageQuota>;
676
+ hasSpaceFor(sizeInBytes: number): Promise<boolean>;
677
+ requestPersistentStorage(): Promise<boolean>;
678
+ isPersistent(): Promise<boolean>;
679
+ formatBytes(bytes: number): string;
680
+ shouldWarn(threshold?: number): Promise<boolean>;
681
+ isCritical(threshold?: number): Promise<boolean>;
682
+ private checkThreshold;
373
683
  }
374
684
 
375
- export type CameraConstraints = {
376
- width?: number | {
377
- ideal?: number;
378
- min?: number;
379
- max?: number;
380
- };
381
- height?: number | {
382
- ideal?: number;
383
- min?: number;
384
- max?: number;
385
- };
386
- frameRate?: number | {
387
- ideal?: number;
388
- min?: number;
389
- max?: number;
390
- };
685
+ export type PendingUpload = {
686
+ id: string;
687
+ blob: Blob;
688
+ apiKey: string;
689
+ backendUrl: string;
690
+ filename: string;
691
+ duration?: number;
692
+ metadata?: Record<string, unknown>;
693
+ userMetadata?: Record<string, unknown>;
694
+ status: "pending" | "uploading" | "failed" | "completed";
695
+ retryCount: number;
696
+ lastError?: string;
697
+ createdAt: number;
698
+ updatedAt: number;
391
699
  };
392
- export type StreamConfig = {
393
- video: boolean | CameraConstraints;
394
- audio: boolean | MediaTrackConstraints;
700
+ export declare class VideoStorageService {
701
+ private db;
702
+ private readonly databaseFactory;
703
+ constructor(databaseFactory?: IDBFactory);
704
+ init(): Promise<void>;
705
+ private openDatabase;
706
+ private createOpenRequest;
707
+ private initializeStoreSchema;
708
+ private validateRequiredSchema;
709
+ isInitialized(): boolean;
710
+ savePendingUpload(upload: Omit<PendingUpload, "id" | "createdAt" | "updatedAt" | "status" | "retryCount">): Promise<string>;
711
+ getPendingUploads(status?: PendingUpload["status"]): Promise<PendingUpload[]>;
712
+ updateUploadStatus(id: string, updates: Partial<PendingUpload>): Promise<void>;
713
+ deleteUpload(id: string): Promise<void>;
714
+ cleanupPermanentlyFailedUploads(retentionHours?: number): Promise<number>;
715
+ getTotalStorageSize(): Promise<number>;
716
+ private generateUploadId;
717
+ private executeTransaction;
718
+ }
719
+
720
+ import type { AudioLevelCallbacks } from "../audio/types";
721
+ import type { DeviceCallbacks } from "../device/types";
722
+ import type { UploadCallbacks as StorageUploadCallbacks } from "../storage/types";
723
+ import type { TranscodeConfig } from "../transcode/transcode-types";
724
+ import type { UploadCallbacks } from "../upload/types";
725
+ export type RecordingState = "idle" | "countdown" | "recording";
726
+ export type SourceType = "camera" | "screen";
727
+ export type RecordingCallbacks = {
728
+ onStateChange: (state: RecordingState) => void;
729
+ onCountdownUpdate: (state: RecordingState, remaining: number) => void;
730
+ onTimerUpdate: (formatted: string) => void;
731
+ onError: (error: Error) => void;
732
+ onRecordingComplete: (blob: Blob) => void;
733
+ onClearUploadStatus: () => void;
734
+ onStopAudioTracking: () => void;
735
+ onGetConfig: () => Promise<TranscodeConfig>;
395
736
  };
396
- export type RecordingOptions = {
397
- mimeType?: string;
398
- videoBitsPerSecond?: number;
399
- audioBitsPerSecond?: number;
400
- bitsPerSecond?: number;
737
+ export type RecorderConfig = {
738
+ apiKey?: string | null;
739
+ backendUrl?: string | null;
740
+ demo?: boolean;
741
+ countdownDuration?: number;
742
+ maxRecordingTime?: number | null;
743
+ userMetadata?: Record<string, unknown>;
744
+ enableSourceSwitching?: boolean;
745
+ enableMute?: boolean;
746
+ enablePause?: boolean;
747
+ enableDeviceChange?: boolean;
748
+ enableTabVisibilityOverlay?: boolean;
749
+ tabVisibilityOverlayText?: string;
750
+ nativeCamera?: boolean;
401
751
  };
402
- export type StreamState = "idle" | "starting" | "active" | "recording" | "stopping" | "error";
403
- export type StreamEventMap = {
404
- statechange: {
405
- state: StreamState;
406
- previousState: StreamState;
407
- };
408
- streamstart: {
409
- stream: MediaStream;
410
- };
411
- streamstop: undefined;
412
- recordingstart: {
413
- recorder: null;
414
- };
415
- recordingstop: {
416
- blob: Blob;
417
- mimeType: string;
418
- };
419
- recordingdata: {
420
- data: Blob;
421
- };
422
- error: {
423
- error: Error;
424
- };
425
- recordingtimeupdate: {
426
- elapsed: number;
427
- formatted: string;
428
- };
429
- recordingbufferupdate: {
430
- size: number;
431
- formatted: string;
432
- };
433
- audiomutetoggle: {
434
- muted: boolean;
752
+ export type RecorderCallbacks = {
753
+ recording?: Partial<RecordingCallbacks>;
754
+ audioLevel?: AudioLevelCallbacks;
755
+ device?: DeviceCallbacks;
756
+ upload?: UploadCallbacks;
757
+ storage?: StorageUploadCallbacks;
758
+ sourceSwitch?: {
759
+ onSourceChange?: (sourceType: SourceType) => Promise<void>;
760
+ onPreviewUpdate?: (stream: MediaStream) => Promise<void>;
761
+ onError?: (error: Error) => void;
762
+ onTransitionStart?: (message: string) => void;
763
+ onTransitionEnd?: () => void;
435
764
  };
436
- videosourcechange: {
437
- stream: MediaStream;
765
+ stream?: {
766
+ onStreamStart?: (stream: MediaStream) => void;
767
+ onStreamStop?: () => void;
768
+ onError?: (error: Error) => void;
438
769
  };
439
- };
440
- export type StreamEventListener<T extends keyof StreamEventMap> = (data: StreamEventMap[T]) => void;
441
- export type StreamProcessorResult = {
442
- blob: Blob;
443
- totalSize: number;
770
+ onStorageCleanupError?: (error: string) => void;
444
771
  };
445
772
 
446
- export declare const DEFAULT_CAMERA_CONSTRAINTS: Readonly<CameraConstraints>;
447
- export declare const DEFAULT_STREAM_CONFIG: Readonly<StreamConfig>;
448
- export declare const DEFAULT_RECORDING_OPTIONS: Readonly<RecordingOptions>;
773
+ export type UploadTelemetryMetadata = {
774
+ filename: string;
775
+ duration: number;
776
+ sourceType: SourceType;
777
+ };
778
+ export declare class UploadMetadataManager {
779
+ private metadataById;
780
+ getMetadata(uploadId: string): UploadTelemetryMetadata | undefined;
781
+ setMetadata(uploadId: string, metadata: UploadTelemetryMetadata): void;
782
+ clearMetadata(uploadId: string): void;
783
+ }
449
784
 
785
+ import type { AudioLevelCallbacks } from "../audio/types";
786
+ import { DeviceManager } from "../device/device-manager";
787
+ import type { AvailableDevices } from "../device/types";
788
+ import { CameraStreamManager } from "../stream/stream";
450
789
  import type { TranscodeConfig } from "../transcode/transcode-types";
451
- export declare class CameraStreamManager {
790
+ import { VideoUploadService } from "../upload/upload-service";
791
+ export declare class RecorderController {
452
792
  private readonly streamManager;
453
- private readonly recordingState;
454
- constructor(streamConfig?: Partial<StreamConfig>);
455
- getState(): StreamState;
456
- getStream(): MediaStream | null;
457
- getAudioStreamForAnalysis(): MediaStream | null;
458
- isRecording(): boolean;
459
- isActive(): boolean;
460
- on<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): () => void;
461
- off<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): void;
462
- once<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): () => void;
463
- setAudioDevice(deviceId: string | null): void;
464
- setVideoDevice(deviceId: string | null): void;
465
- getAudioDevice(): string | null;
466
- getVideoDevice(): string | null;
467
- getAvailableDevices(): Promise<{
468
- audioinput: MediaDeviceInfo[];
469
- videoinput: MediaDeviceInfo[];
470
- }>;
471
- startStream(): Promise<MediaStream>;
472
- stopStream(): void;
793
+ private readonly configManager;
794
+ private readonly storageManager;
795
+ private readonly deviceManager;
796
+ private readonly audioLevelAnalyzer;
797
+ private readonly recordingManager;
798
+ private readonly sourceSwitchManager;
799
+ private readonly uploadService;
800
+ private readonly uploadCallbacks;
801
+ private readonly callbacks;
802
+ private readonly telemetryManager;
803
+ private readonly uploadMetadataManager;
804
+ private uploadQueueManager;
805
+ private isInitialized;
806
+ private isDemo;
807
+ private enableTabVisibilityOverlay;
808
+ private tabVisibilityOverlayText;
809
+ constructor(callbacks?: RecorderCallbacks);
810
+ initialize(config: RecorderConfig): Promise<void>;
811
+ startStream(): Promise<void>;
812
+ stopStream(): Promise<void>;
473
813
  switchVideoDevice(deviceId: string | null): Promise<MediaStream>;
474
814
  switchAudioDevice(deviceId: string | null): Promise<MediaStream>;
475
- startRecording(processor: StreamProcessor, config: TranscodeConfig, enableTabVisibilityOverlay?: boolean, tabVisibilityOverlayText?: string): Promise<void>;
476
- stopRecording(): Promise<{
477
- blob: Blob;
478
- tabVisibilityIntervals: Array<{
479
- start: number;
480
- end: number;
481
- }>;
482
- }>;
815
+ startRecording(): Promise<void>;
816
+ stopRecording(): Promise<Blob>;
817
+ getTabVisibilityOverlayConfig(): {
818
+ enabled: boolean;
819
+ text: string | undefined;
820
+ };
483
821
  pauseRecording(): void;
484
822
  resumeRecording(): void;
485
- toggleMute(): void;
823
+ switchSource(sourceType: SourceType): Promise<void>;
824
+ setCameraDevice(deviceId: string | null): void;
825
+ setMicDevice(deviceId: string | null): void;
826
+ getAvailableDevices(): Promise<AvailableDevices>;
486
827
  muteAudio(): void;
487
828
  unmuteAudio(): void;
488
- isMuted(): boolean;
489
- switchVideoSource(newStream: MediaStream): Promise<void>;
490
- setMediaStream(stream: MediaStream): void;
491
- getCurrentVideoSource(): MediaStream;
492
- destroy(): void;
829
+ toggleMute(): void;
830
+ getIsMuted(): boolean;
831
+ startAudioLevelTracking(stream: MediaStream, callbacks?: AudioLevelCallbacks): Promise<void>;
832
+ stopAudioLevelTracking(): void;
833
+ getAudioLevel(): number;
834
+ uploadVideo(blob: Blob, apiKey: string, backendUrl: string, metadata: Record<string, unknown>): Promise<void>;
835
+ getStream(): MediaStream | null;
836
+ isConfigReady(): boolean;
837
+ ensureConfigReady(): Promise<void>;
838
+ cleanup(): void;
839
+ getRecordingState(): RecordingState;
840
+ isPaused(): boolean;
841
+ getCurrentSourceType(): SourceType;
842
+ getOriginalCameraStream(): MediaStream | null;
843
+ getStreamManager(): CameraStreamManager;
844
+ getAudioStreamForAnalysis(): MediaStream | null;
845
+ getDeviceManager(): DeviceManager;
846
+ getConfig(): Promise<TranscodeConfig>;
847
+ getUploadService(): VideoUploadService | null;
848
+ isRecording(): boolean;
849
+ isActive(): boolean;
850
+ private initializeConfig;
851
+ private applyRecordingConfig;
852
+ private initializeStorage;
853
+ private validateRecorderSupport;
493
854
  }
494
855
 
495
- import type { TranscodeConfig } from "../transcode/transcode-types";
496
- import { type VisibilityInterval } from "../utils/tab-visibility-tracker";
497
- export type StopRecordingResult = {
498
- blob: Blob;
499
- tabVisibilityIntervals: VisibilityInterval[];
500
- };
501
- export declare class StreamRecordingState {
856
+ import type { CameraStreamManager } from "../stream/stream";
857
+ import { StreamProcessor } from "../stream/stream-processor";
858
+ export declare class RecordingManager {
859
+ private recordingState;
860
+ private countdownDuration;
861
+ private countdownRemaining;
862
+ private countdownTimeoutId;
863
+ private countdownIntervalId;
864
+ private countdownStartTime;
865
+ private isPaused;
866
+ private maxRecordingTime;
867
+ private maxTimeTimer;
502
868
  private recordingStartTime;
503
- private recordingTimer;
869
+ private maxTimeRemaining;
870
+ private recordingSeconds;
871
+ private recordingIntervalId;
504
872
  private pauseStartTime;
505
873
  private totalPausedTime;
506
- private streamProcessor;
507
- private bufferSizeUpdateInterval;
508
- private tabVisibilityTracker;
509
- private visibilityChangeHandler;
510
- private blurHandler;
511
- private focusHandler;
512
874
  private readonly streamManager;
513
- constructor(streamManager: StreamManager);
514
- isRecording(): boolean;
875
+ private readonly callbacks;
876
+ private streamProcessor;
877
+ private originalCameraStream;
878
+ private enableTabVisibilityOverlay;
879
+ private tabVisibilityOverlayText;
880
+ constructor(streamManager: CameraStreamManager, callbacks: RecordingCallbacks);
881
+ setCountdownDuration(duration: number): void;
882
+ setMaxRecordingTime(maxTime: number | null): void;
883
+ setTabVisibilityOverlayConfig(enabled: boolean, text: string | undefined): void;
884
+ getRecordingState(): RecordingState;
885
+ isPausedState(): boolean;
886
+ getRecordingSeconds(): number;
515
887
  getStreamProcessor(): StreamProcessor | null;
516
- getAudioStreamForAnalysis(): MediaStream | null;
517
- startRecording(processor: StreamProcessor, config: TranscodeConfig, enableTabVisibilityOverlay?: boolean, tabVisibilityOverlayText?: string): Promise<void>;
518
- stopRecording(): Promise<StopRecordingResult>;
888
+ updateSourceType(isScreenCapture: boolean): void;
889
+ setOriginalCameraStream(stream: MediaStream | null): void;
890
+ getOriginalCameraStream(): MediaStream | null;
891
+ startRecording(): Promise<void>;
892
+ private startCountdown;
893
+ private doStartRecording;
894
+ stopRecording(): Promise<Blob>;
519
895
  pauseRecording(): void;
520
896
  resumeRecording(): void;
521
- toggleMute(): void;
522
- muteAudio(): void;
523
- unmuteAudio(): void;
524
- isMuted(): boolean;
525
- switchVideoSource(newStream: MediaStream): Promise<void>;
526
- getCurrentVideoSource(): MediaStream;
527
- private formatTimeElapsed;
528
- private startRecordingTimer;
529
- private clearRecordingTimer;
530
- private clearBufferSizeInterval;
897
+ cancelCountdown(): void;
898
+ cleanup(): void;
531
899
  private resetRecordingState;
532
900
  private resetPauseState;
533
- private setupVisibilityUpdates;
534
- private cleanupVisibilityUpdates;
535
- destroy(): void;
901
+ private updatePausedDuration;
902
+ private startRecordingTimer;
903
+ private startMaxTimeTimer;
904
+ private clearTimer;
905
+ private handleError;
536
906
  }
537
907
 
538
- import type { SourceType } from "../recording/types";
539
- export type SourceSwitchCallbacks = {
540
- onSourceChange?: (sourceType: SourceType) => Promise<void>;
541
- onPreviewUpdate?: (stream: MediaStream) => Promise<void>;
542
- onError?: (error: Error) => void;
543
- onTransitionStart?: (message: string) => void;
544
- onTransitionEnd?: () => void;
545
- onScreenSelectionStart?: () => void;
546
- onScreenSelectionEnd?: () => void;
547
- getSelectedCameraDeviceId?: () => string | null;
548
- getSelectedMicDeviceId?: () => string | null;
908
+ import type { TelemetryClient } from "../telemetry/telemetry-client";
909
+ import type { TelemetryEventName } from "../telemetry/telemetry-types";
910
+ export type TelemetryActionConfiguration = {
911
+ requestedEvent?: TelemetryEventName;
912
+ succeededEvent: TelemetryEventName;
913
+ failedEvent: TelemetryEventName;
914
+ action: () => Promise<void>;
915
+ properties?: Record<string, unknown>;
549
916
  };
550
- export declare class SourceSwitchManager {
551
- private currentSourceType;
552
- private originalCameraStream;
553
- private originalCameraConstraints;
554
- private screenShareStream;
555
- private screenShareTrackEndHandler;
556
- private readonly streamManager;
557
- private callbacks;
558
- constructor(streamManager: CameraStreamManager, callbacks?: SourceSwitchCallbacks);
559
- getCurrentSourceType(): SourceType;
560
- getOriginalCameraStream(): MediaStream | null;
561
- private stopLiveTracks;
562
- private stopStreamTracks;
563
- private stopStreamVideoTracks;
564
- private isTrackLive;
565
- private areTracksLive;
566
- private storeOriginalCameraConstraints;
567
- private storeOriginalCameraStream;
568
- private createError;
569
- private waitForTracksToEnd;
570
- private combineScreenShareWithOriginalAudio;
571
- private handleScreenSelectionError;
572
- private isPermissionDeniedError;
573
- private processScreenShareStream;
574
- switchToScreenCapture(): Promise<MediaStream | null>;
575
- private setupScreenShareTrackHandler;
576
- removeScreenShareTrackHandler(stream: MediaStream | null): void;
577
- private canReuseStream;
578
- private canReuseOriginalStream;
579
- private canReuseManagerStream;
580
- private getSelectedCameraDeviceId;
581
- private getSelectedMicDeviceId;
582
- private buildVideoConstraints;
583
- private buildAudioConstraints;
584
- private validateTrack;
585
- private createCameraStreamWithOriginalAudio;
586
- private createCameraStreamWithNewAudio;
587
- private createNewCameraStreamForRecording;
588
- getCameraStream(): Promise<MediaStream>;
589
- switchToCamera(): Promise<void>;
590
- private notifyTransitionStart;
591
- private notifyTransitionEnd;
592
- private stopScreenShareStreamTracks;
593
- private stopDisplayTracks;
594
- private handleScreenShareStop;
595
- private applyCameraStream;
596
- toggleSource(): Promise<void>;
597
- private switchToScreen;
598
- private handleToggleError;
599
- handleRecordingStop(): Promise<void>;
600
- cleanup(): void;
601
- setCallbacks(callbacks: SourceSwitchCallbacks): void;
917
+ export type TelemetryActionWithResultConfiguration<Result> = {
918
+ requestedEvent?: TelemetryEventName;
919
+ succeededEvent: TelemetryEventName;
920
+ failedEvent: TelemetryEventName;
921
+ action: () => Promise<Result>;
922
+ properties?: Record<string, unknown>;
923
+ };
924
+ export type TelemetryManagerDependencies = {
925
+ createTelemetryClient: (apiKey: string, backendUrl: string) => TelemetryClient;
926
+ };
927
+ export declare class TelemetryManager {
928
+ private client;
929
+ private readonly createTelemetryClient;
930
+ constructor(dependencies: TelemetryManagerDependencies);
931
+ initialize(apiKey: string | null, backendUrl: string | null): void;
932
+ sendEvent(name: TelemetryEventName, properties?: Record<string, unknown>, error?: unknown): void;
933
+ executeAction(configuration: TelemetryActionConfiguration): Promise<void>;
934
+ executeActionWithResult<Result>(configuration: TelemetryActionWithResultConfiguration<Result>): Promise<Result>;
602
935
  }
603
936
 
604
- export declare const FORMAT_DEFAULT_CODECS: Record<OutputFormat, AudioCodec>;
605
- export declare function getDefaultAudioCodecForFormat(format: OutputFormat): AudioCodec;
606
- export declare function getAudioCodecForFormat(format: OutputFormat, overrideCodec?: AudioCodec): AudioCodec;
937
+ import type { SourceSwitchCallbacks } from "../stream/source-switch-manager";
938
+ import type { UploadCallbacks } from "../upload/types";
939
+ export type RecordingCallbackDependencies = {
940
+ stopAudioTracking: () => void;
941
+ getConfig: RecordingCallbacks["onGetConfig"];
942
+ };
943
+ export type SourceSwitchCallbackDependencies = {
944
+ isRecording: () => boolean;
945
+ updateSourceType: (isScreenCapture: boolean) => void;
946
+ getSelectedCameraDeviceId: () => string | null;
947
+ getSelectedMicDeviceId: () => string | null;
948
+ };
949
+ export declare function resolveUploadCallbacks(callbacks: RecorderCallbacks): UploadCallbacks;
950
+ export declare function resolveStorageCleanupErrorCallback(callbacks: RecorderCallbacks): (error: string) => void;
951
+ export declare function createRecordingCallbacks(callbacks: RecorderCallbacks, dependencies: RecordingCallbackDependencies): RecordingCallbacks;
952
+ export declare function createSourceSwitchCallbacks(callbacks: RecorderCallbacks, dependencies: SourceSwitchCallbackDependencies): SourceSwitchCallbacks;
953
+
954
+ export declare function transcodeVideo(input: TranscodeInput, config?: Partial<TranscodeConfig>, onProgress?: (progress: number) => void): Promise<TranscodeResult>;
955
+ export declare function transcodeVideoForNativeCamera(file: File, config?: Partial<TranscodeConfig>, onProgress?: (progress: number) => void): Promise<TranscodeResult>;
607
956
 
608
957
  import type { Quality, VideoCodec } from "mediabunny";
609
- export type OutputFormat = "mp4" | "mkv" | "mov" | "webm";
610
- export type AudioCodec = "aac" | "opus";
611
- export type WatermarkPosition = "top-left" | "top-right" | "bottom-left" | "bottom-right" | "center";
612
- /**
613
- * Watermark configuration for video recordings
614
- *
615
- * Performance & File Size Impact:
616
- * - Watermarks are burned into video frames, increasing file size
617
- * - Watermark size: 5% of video width (optimized for visibility vs file size)
618
- * - File size increase depends on opacity:
619
- * - opacity: 1.0 (opaque) → ~2-5% larger files (recommended)
620
- * - opacity: 0.5 (semi-transparent) → ~5-15% larger files
621
- * - opacity: 0.3 or lower → ~10-20% larger files
622
- *
623
- * Best Practices:
624
- * - Use fully opaque watermarks (opacity: 1.0) for production
625
- * - Use simple logos (PNG with transparency) rather than complex images
626
- * - Prefer solid colors over gradients for better compression
627
- * - Consider server-side watermarking for maximum efficiency
628
- */
629
- export type WatermarkConfig = {
630
- /** URL or data URI of the watermark image. Supports PNG, JPG, WebP. */
631
- url: string;
632
- /**
633
- * Opacity level (0.0 to 1.0). Default: 1.0
634
- * WARNING: Lower opacity significantly increases file size due to reduced compression efficiency.
635
- */
636
- opacity?: number;
637
- /** Position of the watermark on the video frame */
638
- position: WatermarkPosition;
958
+ export type { AudioCodec, OutputFormat, WatermarkConfig, WatermarkPosition, } from "../processor/types";
959
+ import type { AudioCodec, OutputFormat, WatermarkConfig } from "../processor/types";
960
+ export type TranscodeConfig = {
961
+ format: OutputFormat;
962
+ fps?: number;
963
+ width?: number;
964
+ height?: number;
965
+ bitrate?: number | Quality;
966
+ codec?: VideoCodec;
967
+ audioCodec?: AudioCodec;
968
+ audioBitrate?: number;
969
+ tabVisibilityIntervals?: Array<{
970
+ start: number;
971
+ end: number;
972
+ }>;
973
+ tabVisibilityOverlayText?: string;
974
+ watermark?: WatermarkConfig;
975
+ };
976
+ export type TranscodeInput = Blob | File | string;
977
+ export type TranscodeResult = {
978
+ buffer: ArrayBuffer;
979
+ blob: Blob;
980
+ };
981
+
982
+ export type CameraConstraints = {
983
+ width?: number | {
984
+ ideal?: number;
985
+ min?: number;
986
+ max?: number;
987
+ };
988
+ height?: number | {
989
+ ideal?: number;
990
+ min?: number;
991
+ max?: number;
992
+ };
993
+ frameRate?: number | {
994
+ ideal?: number;
995
+ min?: number;
996
+ max?: number;
997
+ };
998
+ };
999
+ export type StreamConfig = {
1000
+ video: boolean | CameraConstraints;
1001
+ audio: boolean | MediaTrackConstraints;
1002
+ };
1003
+ export type RecordingOptions = {
1004
+ mimeType?: string;
1005
+ videoBitsPerSecond?: number;
1006
+ audioBitsPerSecond?: number;
1007
+ bitsPerSecond?: number;
639
1008
  };
640
- export type TranscodeConfig = {
641
- format: OutputFormat;
642
- fps?: number;
643
- width?: number;
644
- height?: number;
645
- bitrate?: number | Quality;
646
- codec?: VideoCodec;
647
- audioCodec?: AudioCodec;
648
- audioBitrate?: number;
649
- watermark?: WatermarkConfig;
1009
+ export type StreamState = "idle" | "starting" | "active" | "recording" | "stopping" | "error";
1010
+ export type StreamEventMap = {
1011
+ statechange: {
1012
+ state: StreamState;
1013
+ previousState: StreamState;
1014
+ };
1015
+ streamstart: {
1016
+ stream: MediaStream;
1017
+ };
1018
+ streamstop: undefined;
1019
+ recordingstart: {
1020
+ recorder: null;
1021
+ };
1022
+ recordingstop: {
1023
+ blob: Blob;
1024
+ mimeType: string;
1025
+ };
1026
+ recordingdata: {
1027
+ data: Blob;
1028
+ };
1029
+ error: {
1030
+ error: Error;
1031
+ };
1032
+ recordingtimeupdate: {
1033
+ elapsed: number;
1034
+ formatted: string;
1035
+ };
1036
+ recordingbufferupdate: {
1037
+ size: number;
1038
+ formatted: string;
1039
+ };
1040
+ audiomutetoggle: {
1041
+ muted: boolean;
1042
+ };
1043
+ videosourcechange: {
1044
+ stream: MediaStream;
1045
+ };
650
1046
  };
651
- export type TranscodeInput = Blob | File | string;
652
- export type TranscodeResult = {
653
- buffer: ArrayBuffer;
1047
+ export type StreamEventListener<T extends keyof StreamEventMap> = (data: StreamEventMap[T]) => void;
1048
+ export type StreamProcessorResult = {
654
1049
  blob: Blob;
1050
+ totalSize: number;
655
1051
  };
656
1052
 
657
- import type { Quality, VideoCodec } from "mediabunny";
658
- export declare function detectBestCodec(width: number | undefined, height: number | undefined, bitrate: number | Quality | undefined): Promise<VideoCodec>;
659
- export declare function detectBestAudioCodec(bitrate?: number | Quality): Promise<AudioCodec>;
1053
+ export type ConstraintsBuilderDependencies = {
1054
+ originalCameraConstraints: MediaTrackConstraints | null;
1055
+ getSelectedCameraDeviceId: () => string | null;
1056
+ };
1057
+ export declare function buildVideoConstraints(cameraDeviceId: string | null, dependencies: ConstraintsBuilderDependencies): MediaTrackConstraints;
1058
+ export declare function buildAudioConstraints(micDeviceId: string | null): MediaTrackConstraints | boolean;
660
1059
 
661
- import type { StreamProcessorResult } from "../stream/types";
662
- export declare class WorkerProcessor {
663
- private worker;
664
- private chunks;
665
- private totalSize;
666
- private isActive;
1060
+ export declare function stopLiveTracks(tracks: MediaStreamTrack[]): void;
1061
+ export declare function stopStreamTracks(stream: MediaStream): void;
1062
+ export declare function stopStreamVideoTracks(stream: MediaStream): void;
1063
+ export declare function isTrackLive(track: MediaStreamTrack | undefined): boolean;
1064
+ export declare function areTracksLive(videoTrack: MediaStreamTrack | undefined, audioTrack: MediaStreamTrack | undefined): boolean;
1065
+ export declare function validateTrack(track: MediaStreamTrack | undefined, trackType: string, stream: MediaStream): void;
1066
+
1067
+ import type { TranscodeConfig } from "../transcode/transcode-types";
1068
+ export declare class CameraStreamManager {
1069
+ private readonly streamManager;
1070
+ private readonly recordingState;
1071
+ constructor(streamConfig?: Partial<StreamConfig>);
1072
+ getState(): StreamState;
1073
+ getStream(): MediaStream | null;
1074
+ getAudioStreamForAnalysis(): MediaStream | null;
1075
+ isRecording(): boolean;
1076
+ isActive(): boolean;
1077
+ on<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): () => void;
1078
+ off<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): void;
1079
+ once<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): () => void;
1080
+ setAudioDevice(deviceId: string | null): void;
1081
+ setVideoDevice(deviceId: string | null): void;
1082
+ getAudioDevice(): string | null;
1083
+ getVideoDevice(): string | null;
1084
+ getAvailableDevices(): Promise<{
1085
+ audioinput: MediaDeviceInfo[];
1086
+ videoinput: MediaDeviceInfo[];
1087
+ }>;
1088
+ startStream(): Promise<MediaStream>;
1089
+ stopStream(): void;
1090
+ switchVideoDevice(deviceId: string | null): Promise<MediaStream>;
1091
+ switchAudioDevice(deviceId: string | null): Promise<MediaStream>;
1092
+ startRecording(processor: StreamProcessor, config: TranscodeConfig, enableTabVisibilityOverlay?: boolean, tabVisibilityOverlayText?: string): Promise<void>;
1093
+ stopRecording(): Promise<{
1094
+ blob: Blob;
1095
+ tabVisibilityIntervals: Array<{
1096
+ start: number;
1097
+ end: number;
1098
+ }>;
1099
+ }>;
1100
+ pauseRecording(): void;
1101
+ resumeRecording(): void;
1102
+ toggleMute(): void;
1103
+ muteAudio(): void;
1104
+ unmuteAudio(): void;
1105
+ isMuted(): boolean;
1106
+ switchVideoSource(newStream: MediaStream): Promise<void>;
1107
+ setMediaStream(stream: MediaStream): void;
1108
+ getCurrentVideoSource(): MediaStream;
1109
+ destroy(): void;
1110
+ }
1111
+
1112
+ import { WorkerProcessor } from "../processor/worker-processor";
1113
+ import type { TranscodeConfig } from "../transcode/transcode-types";
1114
+ type StreamProcessorDependencies = {
1115
+ workerProcessorFactory?: () => WorkerProcessor;
1116
+ };
1117
+ export declare class StreamProcessor {
1118
+ private currentVideoStream;
1119
+ private onSourceChange?;
667
1120
  private onBufferUpdate?;
668
1121
  private onError?;
669
- private onMuteStateChange?;
670
- private videoTrackClone;
671
- private audioTrackClone;
672
- private isMuted;
673
- private currentVideoTrack;
674
- private isPaused;
675
- private overlayConfig;
676
- constructor();
677
- private setupWorker;
678
- private readyPromiseResolve;
679
- private handleWorkerMessage;
680
- private handleWorkerError;
1122
+ private readonly workerProcessor;
1123
+ constructor(dependencies?: StreamProcessorDependencies);
681
1124
  startProcessing(stream: MediaStream, config: TranscodeConfig, overlayConfig?: {
682
1125
  enabled: boolean;
683
1126
  text: string;
684
1127
  recordingStartTime?: number;
685
1128
  }): Promise<void>;
686
- pause(): void;
687
- resume(): void;
688
- private isWorkerActive;
689
- toggleMute(): void;
690
- switchVideoSource(newStream: MediaStream): Promise<void>;
691
- finalize(): Promise<StreamProcessorResult>;
692
- private createBlobFromChunks;
693
- cancel(): Promise<void>;
694
- getBufferSize(): number;
695
- getMutedState(): boolean;
696
1129
  updateTabVisibility(isHidden: boolean, timestamp: number): void;
697
1130
  updateSourceType(isScreenCapture: boolean): void;
1131
+ pause(): void;
1132
+ resume(): void;
698
1133
  isPausedState(): boolean;
1134
+ finalize(): Promise<StreamProcessorResult>;
1135
+ toggleMute(): void;
1136
+ isMutedState(): boolean;
699
1137
  getClonedAudioTrack(): MediaStreamTrack | null;
700
1138
  getAudioStreamForAnalysis(): MediaStream | null;
701
- setOnBufferUpdate(callback: (size: number, formatted: string) => void): void;
702
- setOnError(callback: (error: Error) => void): void;
1139
+ switchVideoSource(newStream: MediaStream): Promise<void>;
1140
+ getCurrentVideoSource(): MediaStream | null;
1141
+ getBufferSize(): number;
703
1142
  setOnMuteStateChange(callback: (muted: boolean) => void): void;
704
- private cloneVideoTrack;
705
- private cloneAudioTrack;
706
- private stopCurrentVideoTrack;
707
- cleanup(): void;
708
- static isSupported(): boolean;
709
- }
710
-
711
- export declare const workerCode = "// ../../node_modules/mediabunny/dist/modules/src/misc.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nfunction assert(x) {\n if (!x) {\n throw new Error(\"Assertion failed.\");\n }\n}\nvar last = (arr) => {\n return arr && arr[arr.length - 1];\n};\nvar isU32 = (value) => {\n return value >= 0 && value < 2 ** 32;\n};\n\nclass Bitstream {\n constructor(bytes) {\n this.bytes = bytes;\n this.pos = 0;\n }\n seekToByte(byteOffset) {\n this.pos = 8 * byteOffset;\n }\n readBit() {\n const byteIndex = Math.floor(this.pos / 8);\n const byte = this.bytes[byteIndex] ?? 0;\n const bitIndex = 7 - (this.pos & 7);\n const bit = (byte & 1 << bitIndex) >> bitIndex;\n this.pos++;\n return bit;\n }\n readBits(n) {\n if (n === 1) {\n return this.readBit();\n }\n let result = 0;\n for (let i = 0;i < n; i++) {\n result <<= 1;\n result |= this.readBit();\n }\n return result;\n }\n writeBits(n, value) {\n const end = this.pos + n;\n for (let i = this.pos;i < end; i++) {\n const byteIndex = Math.floor(i / 8);\n let byte = this.bytes[byteIndex];\n const bitIndex = 7 - (i & 7);\n byte &= ~(1 << bitIndex);\n byte |= (value & 1 << end - i - 1) >> end - i - 1 << bitIndex;\n this.bytes[byteIndex] = byte;\n }\n this.pos = end;\n }\n readAlignedByte() {\n if (this.pos % 8 !== 0) {\n throw new Error(\"Bitstream is not byte-aligned.\");\n }\n const byteIndex = this.pos / 8;\n const byte = this.bytes[byteIndex] ?? 
0;\n this.pos += 8;\n return byte;\n }\n skipBits(n) {\n this.pos += n;\n }\n getBitsLeft() {\n return this.bytes.length * 8 - this.pos;\n }\n clone() {\n const clone = new Bitstream(this.bytes);\n clone.pos = this.pos;\n return clone;\n }\n}\nvar readExpGolomb = (bitstream) => {\n let leadingZeroBits = 0;\n while (bitstream.readBits(1) === 0 && leadingZeroBits < 32) {\n leadingZeroBits++;\n }\n if (leadingZeroBits >= 32) {\n throw new Error(\"Invalid exponential-Golomb code.\");\n }\n const result = (1 << leadingZeroBits) - 1 + bitstream.readBits(leadingZeroBits);\n return result;\n};\nvar readSignedExpGolomb = (bitstream) => {\n const codeNum = readExpGolomb(bitstream);\n return (codeNum & 1) === 0 ? -(codeNum >> 1) : codeNum + 1 >> 1;\n};\nvar toUint8Array = (source) => {\n if (source.constructor === Uint8Array) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new Uint8Array(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new Uint8Array(source);\n }\n};\nvar toDataView = (source) => {\n if (source.constructor === DataView) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new DataView(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new DataView(source);\n }\n};\nvar textEncoder = /* @__PURE__ */ new TextEncoder;\nvar COLOR_PRIMARIES_MAP = {\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n bt2020: 9,\n smpte432: 12\n};\nvar TRANSFER_CHARACTERISTICS_MAP = {\n bt709: 1,\n smpte170m: 6,\n linear: 8,\n \"iec61966-2-1\": 13,\n pq: 16,\n hlg: 18\n};\nvar MATRIX_COEFFICIENTS_MAP = {\n rgb: 0,\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n \"bt2020-ncl\": 9\n};\nvar colorSpaceIsComplete = (colorSpace) => {\n return !!colorSpace && !!colorSpace.primaries && !!colorSpace.transfer && !!colorSpace.matrix && colorSpace.fullRange !== undefined;\n};\nvar isAllowSharedBufferSource = (x) => {\n return x instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && x instanceof 
SharedArrayBuffer || ArrayBuffer.isView(x);\n};\n\nclass AsyncMutex {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n async acquire() {\n let resolver;\n const nextPromise = new Promise((resolve) => {\n resolver = resolve;\n });\n const currentPromiseAlias = this.currentPromise;\n this.currentPromise = nextPromise;\n await currentPromiseAlias;\n return resolver;\n }\n}\nvar promiseWithResolvers = () => {\n let resolve;\n let reject;\n const promise = new Promise((res, rej) => {\n resolve = res;\n reject = rej;\n });\n return { promise, resolve, reject };\n};\nvar assertNever = (x) => {\n throw new Error(`Unexpected value: ${x}`);\n};\nvar setUint24 = (view, byteOffset, value, littleEndian) => {\n value = value >>> 0;\n value = value & 16777215;\n if (littleEndian) {\n view.setUint8(byteOffset, value & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value >>> 16 & 255);\n } else {\n view.setUint8(byteOffset, value >>> 16 & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value & 255);\n }\n};\nvar setInt24 = (view, byteOffset, value, littleEndian) => {\n value = clamp(value, -8388608, 8388607);\n if (value < 0) {\n value = value + 16777216 & 16777215;\n }\n setUint24(view, byteOffset, value, littleEndian);\n};\nvar clamp = (value, min, max) => {\n return Math.max(min, Math.min(max, value));\n};\nvar UNDETERMINED_LANGUAGE = \"und\";\nvar ISO_639_2_REGEX = /^[a-z]{3}$/;\nvar isIso639Dash2LanguageCode = (x) => {\n return ISO_639_2_REGEX.test(x);\n};\nvar SECOND_TO_MICROSECOND_FACTOR = 1e6 * (1 + Number.EPSILON);\nvar computeRationalApproximation = (x, maxDenominator) => {\n const sign = x < 0 ? 
-1 : 1;\n x = Math.abs(x);\n let prevNumerator = 0, prevDenominator = 1;\n let currNumerator = 1, currDenominator = 0;\n let remainder = x;\n while (true) {\n const integer = Math.floor(remainder);\n const nextNumerator = integer * currNumerator + prevNumerator;\n const nextDenominator = integer * currDenominator + prevDenominator;\n if (nextDenominator > maxDenominator) {\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n }\n prevNumerator = currNumerator;\n prevDenominator = currDenominator;\n currNumerator = nextNumerator;\n currDenominator = nextDenominator;\n remainder = 1 / (remainder - integer);\n if (!isFinite(remainder)) {\n break;\n }\n }\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n};\n\nclass CallSerializer {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n call(fn) {\n return this.currentPromise = this.currentPromise.then(fn);\n }\n}\nvar isWebKitCache = null;\nvar isWebKit = () => {\n if (isWebKitCache !== null) {\n return isWebKitCache;\n }\n return isWebKitCache = !!(typeof navigator !== \"undefined\" && (navigator.vendor?.match(/apple/i) || /AppleWebKit/.test(navigator.userAgent) && !/Chrome/.test(navigator.userAgent) || /\\b(iPad|iPhone|iPod)\\b/.test(navigator.userAgent)));\n};\nvar isFirefoxCache = null;\nvar isFirefox = () => {\n if (isFirefoxCache !== null) {\n return isFirefoxCache;\n }\n return isFirefoxCache = typeof navigator !== \"undefined\" && navigator.userAgent?.includes(\"Firefox\");\n};\nvar keyValueIterator = function* (object) {\n for (const key in object) {\n const value = object[key];\n if (value === undefined) {\n continue;\n }\n yield { key, value };\n }\n};\nvar polyfillSymbolDispose = () => {\n Symbol.dispose ??= Symbol(\"Symbol.dispose\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/metadata.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla 
Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass RichImageData {\n constructor(data, mimeType) {\n this.data = data;\n this.mimeType = mimeType;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType must be a string.\");\n }\n }\n}\n\nclass AttachedFile {\n constructor(data, mimeType, name, description) {\n this.data = data;\n this.mimeType = mimeType;\n this.name = name;\n this.description = description;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (mimeType !== undefined && typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType, when provided, must be a string.\");\n }\n if (name !== undefined && typeof name !== \"string\") {\n throw new TypeError(\"name, when provided, must be a string.\");\n }\n if (description !== undefined && typeof description !== \"string\") {\n throw new TypeError(\"description, when provided, must be a string.\");\n }\n }\n}\nvar validateMetadataTags = (tags) => {\n if (!tags || typeof tags !== \"object\") {\n throw new TypeError(\"tags must be an object.\");\n }\n if (tags.title !== undefined && typeof tags.title !== \"string\") {\n throw new TypeError(\"tags.title, when provided, must be a string.\");\n }\n if (tags.description !== undefined && typeof tags.description !== \"string\") {\n throw new TypeError(\"tags.description, when provided, must be a string.\");\n }\n if (tags.artist !== undefined && typeof tags.artist !== \"string\") {\n throw new TypeError(\"tags.artist, when provided, must be a string.\");\n }\n if (tags.album !== undefined && typeof tags.album !== \"string\") {\n throw new TypeError(\"tags.album, when provided, must be a string.\");\n }\n if (tags.albumArtist !== undefined && typeof tags.albumArtist !== \"string\") {\n throw 
new TypeError(\"tags.albumArtist, when provided, must be a string.\");\n }\n if (tags.trackNumber !== undefined && (!Number.isInteger(tags.trackNumber) || tags.trackNumber <= 0)) {\n throw new TypeError(\"tags.trackNumber, when provided, must be a positive integer.\");\n }\n if (tags.tracksTotal !== undefined && (!Number.isInteger(tags.tracksTotal) || tags.tracksTotal <= 0)) {\n throw new TypeError(\"tags.tracksTotal, when provided, must be a positive integer.\");\n }\n if (tags.discNumber !== undefined && (!Number.isInteger(tags.discNumber) || tags.discNumber <= 0)) {\n throw new TypeError(\"tags.discNumber, when provided, must be a positive integer.\");\n }\n if (tags.discsTotal !== undefined && (!Number.isInteger(tags.discsTotal) || tags.discsTotal <= 0)) {\n throw new TypeError(\"tags.discsTotal, when provided, must be a positive integer.\");\n }\n if (tags.genre !== undefined && typeof tags.genre !== \"string\") {\n throw new TypeError(\"tags.genre, when provided, must be a string.\");\n }\n if (tags.date !== undefined && (!(tags.date instanceof Date) || Number.isNaN(tags.date.getTime()))) {\n throw new TypeError(\"tags.date, when provided, must be a valid Date.\");\n }\n if (tags.lyrics !== undefined && typeof tags.lyrics !== \"string\") {\n throw new TypeError(\"tags.lyrics, when provided, must be a string.\");\n }\n if (tags.images !== undefined) {\n if (!Array.isArray(tags.images)) {\n throw new TypeError(\"tags.images, when provided, must be an array.\");\n }\n for (const image of tags.images) {\n if (!image || typeof image !== \"object\") {\n throw new TypeError(\"Each image in tags.images must be an object.\");\n }\n if (!(image.data instanceof Uint8Array)) {\n throw new TypeError(\"Each image.data must be a Uint8Array.\");\n }\n if (typeof image.mimeType !== \"string\") {\n throw new TypeError(\"Each image.mimeType must be a string.\");\n }\n if (![\"coverFront\", \"coverBack\", \"unknown\"].includes(image.kind)) {\n throw new TypeError(\"Each 
image.kind must be 'coverFront', 'coverBack', or 'unknown'.\");\n }\n }\n }\n if (tags.comment !== undefined && typeof tags.comment !== \"string\") {\n throw new TypeError(\"tags.comment, when provided, must be a string.\");\n }\n if (tags.raw !== undefined) {\n if (!tags.raw || typeof tags.raw !== \"object\") {\n throw new TypeError(\"tags.raw, when provided, must be an object.\");\n }\n for (const value of Object.values(tags.raw)) {\n if (value !== null && typeof value !== \"string\" && !(value instanceof Uint8Array) && !(value instanceof RichImageData) && !(value instanceof AttachedFile)) {\n throw new TypeError(\"Each value in tags.raw must be a string, Uint8Array, RichImageData, AttachedFile, or null.\");\n }\n }\n }\n};\nvar validateTrackDisposition = (disposition) => {\n if (!disposition || typeof disposition !== \"object\") {\n throw new TypeError(\"disposition must be an object.\");\n }\n if (disposition.default !== undefined && typeof disposition.default !== \"boolean\") {\n throw new TypeError(\"disposition.default must be a boolean.\");\n }\n if (disposition.forced !== undefined && typeof disposition.forced !== \"boolean\") {\n throw new TypeError(\"disposition.forced must be a boolean.\");\n }\n if (disposition.original !== undefined && typeof disposition.original !== \"boolean\") {\n throw new TypeError(\"disposition.original must be a boolean.\");\n }\n if (disposition.commentary !== undefined && typeof disposition.commentary !== \"boolean\") {\n throw new TypeError(\"disposition.commentary must be a boolean.\");\n }\n if (disposition.hearingImpaired !== undefined && typeof disposition.hearingImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.hearingImpaired must be a boolean.\");\n }\n if (disposition.visuallyImpaired !== undefined && typeof disposition.visuallyImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.visuallyImpaired must be a boolean.\");\n }\n};\n\n// 
../../node_modules/mediabunny/dist/modules/src/codec.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar VIDEO_CODECS = [\n \"avc\",\n \"hevc\",\n \"vp9\",\n \"av1\",\n \"vp8\"\n];\nvar PCM_AUDIO_CODECS = [\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n \"pcm-u8\",\n \"pcm-s8\",\n \"ulaw\",\n \"alaw\"\n];\nvar NON_PCM_AUDIO_CODECS = [\n \"aac\",\n \"opus\",\n \"mp3\",\n \"vorbis\",\n \"flac\"\n];\nvar AUDIO_CODECS = [\n ...NON_PCM_AUDIO_CODECS,\n ...PCM_AUDIO_CODECS\n];\nvar SUBTITLE_CODECS = [\n \"webvtt\"\n];\nvar AVC_LEVEL_TABLE = [\n { maxMacroblocks: 99, maxBitrate: 64000, level: 10 },\n { maxMacroblocks: 396, maxBitrate: 192000, level: 11 },\n { maxMacroblocks: 396, maxBitrate: 384000, level: 12 },\n { maxMacroblocks: 396, maxBitrate: 768000, level: 13 },\n { maxMacroblocks: 396, maxBitrate: 2000000, level: 20 },\n { maxMacroblocks: 792, maxBitrate: 4000000, level: 21 },\n { maxMacroblocks: 1620, maxBitrate: 4000000, level: 22 },\n { maxMacroblocks: 1620, maxBitrate: 1e7, level: 30 },\n { maxMacroblocks: 3600, maxBitrate: 14000000, level: 31 },\n { maxMacroblocks: 5120, maxBitrate: 20000000, level: 32 },\n { maxMacroblocks: 8192, maxBitrate: 20000000, level: 40 },\n { maxMacroblocks: 8192, maxBitrate: 50000000, level: 41 },\n { maxMacroblocks: 8704, maxBitrate: 50000000, level: 42 },\n { maxMacroblocks: 22080, maxBitrate: 135000000, level: 50 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, level: 51 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, level: 52 },\n { maxMacroblocks: 139264, maxBitrate: 240000000, level: 60 },\n { maxMacroblocks: 139264, maxBitrate: 480000000, level: 61 },\n { maxMacroblocks: 
139264, maxBitrate: 800000000, level: 62 }\n];\nvar HEVC_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 128000, tier: \"L\", level: 30 },\n { maxPictureSize: 122880, maxBitrate: 1500000, tier: \"L\", level: 60 },\n { maxPictureSize: 245760, maxBitrate: 3000000, tier: \"L\", level: 63 },\n { maxPictureSize: 552960, maxBitrate: 6000000, tier: \"L\", level: 90 },\n { maxPictureSize: 983040, maxBitrate: 1e7, tier: \"L\", level: 93 },\n { maxPictureSize: 2228224, maxBitrate: 12000000, tier: \"L\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, tier: \"H\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 20000000, tier: \"L\", level: 123 },\n { maxPictureSize: 2228224, maxBitrate: 50000000, tier: \"H\", level: 123 },\n { maxPictureSize: 8912896, maxBitrate: 25000000, tier: \"L\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"L\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"L\", level: 156 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 156 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"L\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 120000000, tier: \"L\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"L\", level: 186 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 186 }\n];\nvar VP9_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 200000, level: 10 },\n { maxPictureSize: 73728, maxBitrate: 800000, level: 11 },\n { maxPictureSize: 122880, maxBitrate: 1800000, level: 20 },\n { maxPictureSize: 245760, maxBitrate: 3600000, level: 21 },\n { maxPictureSize: 552960, 
maxBitrate: 7200000, level: 30 },\n { maxPictureSize: 983040, maxBitrate: 12000000, level: 31 },\n { maxPictureSize: 2228224, maxBitrate: 18000000, level: 40 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, level: 41 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, level: 50 },\n { maxPictureSize: 8912896, maxBitrate: 120000000, level: 51 },\n { maxPictureSize: 8912896, maxBitrate: 180000000, level: 52 },\n { maxPictureSize: 35651584, maxBitrate: 180000000, level: 60 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, level: 61 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, level: 62 }\n];\nvar AV1_LEVEL_TABLE = [\n { maxPictureSize: 147456, maxBitrate: 1500000, tier: \"M\", level: 0 },\n { maxPictureSize: 278784, maxBitrate: 3000000, tier: \"M\", level: 1 },\n { maxPictureSize: 665856, maxBitrate: 6000000, tier: \"M\", level: 4 },\n { maxPictureSize: 1065024, maxBitrate: 1e7, tier: \"M\", level: 5 },\n { maxPictureSize: 2359296, maxBitrate: 12000000, tier: \"M\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 30000000, tier: \"H\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 20000000, tier: \"M\", level: 9 },\n { maxPictureSize: 2359296, maxBitrate: 50000000, tier: \"H\", level: 9 },\n { maxPictureSize: 8912896, maxBitrate: 30000000, tier: \"M\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"M\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"M\", level: 14 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 14 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: 
\"H\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 1e8, tier: \"M\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 19 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 19 }\n];\nvar buildVideoCodecString = (codec, width, height, bitrate) => {\n if (codec === \"avc\") {\n const profileIndication = 100;\n const totalMacroblocks = Math.ceil(width / 16) * Math.ceil(height / 16);\n const levelInfo = AVC_LEVEL_TABLE.find((level) => totalMacroblocks <= level.maxMacroblocks && bitrate <= level.maxBitrate) ?? last(AVC_LEVEL_TABLE);\n const levelIndication = levelInfo ? levelInfo.level : 0;\n const hexProfileIndication = profileIndication.toString(16).padStart(2, \"0\");\n const hexProfileCompatibility = \"00\";\n const hexLevelIndication = levelIndication.toString(16).padStart(2, \"0\");\n return `avc1.${hexProfileIndication}${hexProfileCompatibility}${hexLevelIndication}`;\n } else if (codec === \"hevc\") {\n const profilePrefix = \"\";\n const profileIdc = 1;\n const compatibilityFlags = \"6\";\n const pictureSize = width * height;\n const levelInfo = HEVC_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? last(HEVC_LEVEL_TABLE);\n const constraintFlags = \"B0\";\n return \"hev1.\" + `${profilePrefix}${profileIdc}.` + `${compatibilityFlags}.` + `${levelInfo.tier}${levelInfo.level}.` + `${constraintFlags}`;\n } else if (codec === \"vp8\") {\n return \"vp8\";\n } else if (codec === \"vp9\") {\n const profile = \"00\";\n const pictureSize = width * height;\n const levelInfo = VP9_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? 
last(VP9_LEVEL_TABLE);\n const bitDepth = \"08\";\n return `vp09.${profile}.${levelInfo.level.toString().padStart(2, \"0\")}.${bitDepth}`;\n } else if (codec === \"av1\") {\n const profile = 0;\n const pictureSize = width * height;\n const levelInfo = AV1_LEVEL_TABLE.find((level2) => pictureSize <= level2.maxPictureSize && bitrate <= level2.maxBitrate) ?? last(AV1_LEVEL_TABLE);\n const level = levelInfo.level.toString().padStart(2, \"0\");\n const bitDepth = \"08\";\n return `av01.${profile}.${level}${levelInfo.tier}.${bitDepth}`;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar generateAv1CodecConfigurationFromCodecString = (codecString) => {\n const parts = codecString.split(\".\");\n const marker = 1;\n const version = 1;\n const firstByte = (marker << 7) + version;\n const profile = Number(parts[1]);\n const levelAndTier = parts[2];\n const level = Number(levelAndTier.slice(0, -1));\n const secondByte = (profile << 5) + level;\n const tier = levelAndTier.slice(-1) === \"H\" ? 1 : 0;\n const bitDepth = Number(parts[3]);\n const highBitDepth = bitDepth === 8 ? 0 : 1;\n const twelveBit = 0;\n const monochrome = parts[4] ? Number(parts[4]) : 0;\n const chromaSubsamplingX = parts[5] ? Number(parts[5][0]) : 1;\n const chromaSubsamplingY = parts[5] ? Number(parts[5][1]) : 1;\n const chromaSamplePosition = parts[5] ? 
Number(parts[5][2]) : 0;\n const thirdByte = (tier << 7) + (highBitDepth << 6) + (twelveBit << 5) + (monochrome << 4) + (chromaSubsamplingX << 3) + (chromaSubsamplingY << 2) + chromaSamplePosition;\n const initialPresentationDelayPresent = 0;\n const fourthByte = initialPresentationDelayPresent;\n return [firstByte, secondByte, thirdByte, fourthByte];\n};\nvar buildAudioCodecString = (codec, numberOfChannels, sampleRate) => {\n if (codec === \"aac\") {\n if (numberOfChannels >= 2 && sampleRate <= 24000) {\n return \"mp4a.40.29\";\n }\n if (sampleRate <= 24000) {\n return \"mp4a.40.5\";\n }\n return \"mp4a.40.2\";\n } else if (codec === \"mp3\") {\n return \"mp3\";\n } else if (codec === \"opus\") {\n return \"opus\";\n } else if (codec === \"vorbis\") {\n return \"vorbis\";\n } else if (codec === \"flac\") {\n return \"flac\";\n } else if (PCM_AUDIO_CODECS.includes(codec)) {\n return codec;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar aacFrequencyTable = [\n 96000,\n 88200,\n 64000,\n 48000,\n 44100,\n 32000,\n 24000,\n 22050,\n 16000,\n 12000,\n 11025,\n 8000,\n 7350\n];\nvar aacChannelMap = [-1, 1, 2, 3, 4, 5, 6, 8];\nvar parseAacAudioSpecificConfig = (bytes) => {\n if (!bytes || bytes.byteLength < 2) {\n throw new TypeError(\"AAC description must be at least 2 bytes long.\");\n }\n const bitstream = new Bitstream(bytes);\n let objectType = bitstream.readBits(5);\n if (objectType === 31) {\n objectType = 32 + bitstream.readBits(6);\n }\n const frequencyIndex = bitstream.readBits(4);\n let sampleRate = null;\n if (frequencyIndex === 15) {\n sampleRate = bitstream.readBits(24);\n } else {\n if (frequencyIndex < aacFrequencyTable.length) {\n sampleRate = aacFrequencyTable[frequencyIndex];\n }\n }\n const channelConfiguration = bitstream.readBits(4);\n let numberOfChannels = null;\n if (channelConfiguration >= 1 && channelConfiguration <= 7) {\n numberOfChannels = aacChannelMap[channelConfiguration];\n }\n return {\n objectType,\n 
frequencyIndex,\n sampleRate,\n channelConfiguration,\n numberOfChannels\n };\n};\nvar buildAacAudioSpecificConfig = (config) => {\n let frequencyIndex = aacFrequencyTable.indexOf(config.sampleRate);\n let customSampleRate = null;\n if (frequencyIndex === -1) {\n frequencyIndex = 15;\n customSampleRate = config.sampleRate;\n }\n const channelConfiguration = aacChannelMap.indexOf(config.numberOfChannels);\n if (channelConfiguration === -1) {\n throw new TypeError(`Unsupported number of channels: ${config.numberOfChannels}`);\n }\n let bitCount = 5 + 4 + 4;\n if (config.objectType >= 32) {\n bitCount += 6;\n }\n if (frequencyIndex === 15) {\n bitCount += 24;\n }\n const byteCount = Math.ceil(bitCount / 8);\n const bytes = new Uint8Array(byteCount);\n const bitstream = new Bitstream(bytes);\n if (config.objectType < 32) {\n bitstream.writeBits(5, config.objectType);\n } else {\n bitstream.writeBits(5, 31);\n bitstream.writeBits(6, config.objectType - 32);\n }\n bitstream.writeBits(4, frequencyIndex);\n if (frequencyIndex === 15) {\n bitstream.writeBits(24, customSampleRate);\n }\n bitstream.writeBits(4, channelConfiguration);\n return bytes;\n};\nvar PCM_CODEC_REGEX = /^pcm-([usf])(\\d+)+(be)?$/;\nvar parsePcmCodec = (codec) => {\n assert(PCM_AUDIO_CODECS.includes(codec));\n if (codec === \"ulaw\") {\n return { dataType: \"ulaw\", sampleSize: 1, littleEndian: true, silentValue: 255 };\n } else if (codec === \"alaw\") {\n return { dataType: \"alaw\", sampleSize: 1, littleEndian: true, silentValue: 213 };\n }\n const match = PCM_CODEC_REGEX.exec(codec);\n assert(match);\n let dataType;\n if (match[1] === \"u\") {\n dataType = \"unsigned\";\n } else if (match[1] === \"s\") {\n dataType = \"signed\";\n } else {\n dataType = \"float\";\n }\n const sampleSize = Number(match[2]) / 8;\n const littleEndian = match[3] !== \"be\";\n const silentValue = codec === \"pcm-u8\" ? 
2 ** 7 : 0;\n return { dataType, sampleSize, littleEndian, silentValue };\n};\nvar inferCodecFromCodecString = (codecString) => {\n if (codecString.startsWith(\"avc1\") || codecString.startsWith(\"avc3\")) {\n return \"avc\";\n } else if (codecString.startsWith(\"hev1\") || codecString.startsWith(\"hvc1\")) {\n return \"hevc\";\n } else if (codecString === \"vp8\") {\n return \"vp8\";\n } else if (codecString.startsWith(\"vp09\")) {\n return \"vp9\";\n } else if (codecString.startsWith(\"av01\")) {\n return \"av1\";\n }\n if (codecString.startsWith(\"mp4a.40\") || codecString === \"mp4a.67\") {\n return \"aac\";\n } else if (codecString === \"mp3\" || codecString === \"mp4a.69\" || codecString === \"mp4a.6B\" || codecString === \"mp4a.6b\") {\n return \"mp3\";\n } else if (codecString === \"opus\") {\n return \"opus\";\n } else if (codecString === \"vorbis\") {\n return \"vorbis\";\n } else if (codecString === \"flac\") {\n return \"flac\";\n } else if (codecString === \"ulaw\") {\n return \"ulaw\";\n } else if (codecString === \"alaw\") {\n return \"alaw\";\n } else if (PCM_CODEC_REGEX.test(codecString)) {\n return codecString;\n }\n if (codecString === \"webvtt\") {\n return \"webvtt\";\n }\n return null;\n};\nvar getVideoEncoderConfigExtension = (codec) => {\n if (codec === \"avc\") {\n return {\n avc: {\n format: \"avc\"\n }\n };\n } else if (codec === \"hevc\") {\n return {\n hevc: {\n format: \"hevc\"\n }\n };\n }\n return {};\n};\nvar getAudioEncoderConfigExtension = (codec) => {\n if (codec === \"aac\") {\n return {\n aac: {\n format: \"aac\"\n }\n };\n } else if (codec === \"opus\") {\n return {\n opus: {\n format: \"opus\"\n }\n };\n }\n return {};\n};\nvar VALID_VIDEO_CODEC_STRING_PREFIXES = [\"avc1\", \"avc3\", \"hev1\", \"hvc1\", \"vp8\", \"vp09\", \"av01\"];\nvar AVC_CODEC_STRING_REGEX = /^(avc1|avc3)\\.[0-9a-fA-F]{6}$/;\nvar HEVC_CODEC_STRING_REGEX = /^(hev1|hvc1)\\.(?:[ABC]?\\d+)\\.[0-9a-fA-F]{1,8}\\.[LH]\\d+(?:\\.[0-9a-fA-F]{1,2}){0,6}$/;\nvar 
VP9_CODEC_STRING_REGEX = /^vp09(?:\\.\\d{2}){3}(?:(?:\\.\\d{2}){5})?$/;\nvar AV1_CODEC_STRING_REGEX = /^av01\\.\\d\\.\\d{2}[MH]\\.\\d{2}(?:\\.\\d\\.\\d{3}\\.\\d{2}\\.\\d{2}\\.\\d{2}\\.\\d)?$/;\nvar validateVideoChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Video chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Video chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Video chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_VIDEO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string must be a valid video codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if (!Number.isInteger(metadata.decoderConfig.codedWidth) || metadata.decoderConfig.codedWidth <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedWidth (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.codedHeight) || metadata.decoderConfig.codedHeight <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedHeight (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Video chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.colorSpace !== undefined) {\n const { colorSpace 
} = metadata.decoderConfig;\n if (typeof colorSpace !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace, when provided, must be an object.\");\n }\n const primariesValues = Object.keys(COLOR_PRIMARIES_MAP);\n if (colorSpace.primaries != null && !primariesValues.includes(colorSpace.primaries)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace primaries, when defined, must be one of` + ` ${primariesValues.join(\", \")}.`);\n }\n const transferValues = Object.keys(TRANSFER_CHARACTERISTICS_MAP);\n if (colorSpace.transfer != null && !transferValues.includes(colorSpace.transfer)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace transfer, when defined, must be one of` + ` ${transferValues.join(\", \")}.`);\n }\n const matrixValues = Object.keys(MATRIX_COEFFICIENTS_MAP);\n if (colorSpace.matrix != null && !matrixValues.includes(colorSpace.matrix)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace matrix, when defined, must be one of` + ` ${matrixValues.join(\", \")}.`);\n }\n if (colorSpace.fullRange != null && typeof colorSpace.fullRange !== \"boolean\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace fullRange, when defined, must be a boolean.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"avc1\") || metadata.decoderConfig.codec.startsWith(\"avc3\")) {\n if (!AVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AVC must be a valid AVC codec string as\" + \" specified in Section 3.4 of RFC 6381.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"hev1\") || metadata.decoderConfig.codec.startsWith(\"hvc1\")) {\n if (!HEVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for HEVC must be a valid HEVC codec string 
as\" + \" specified in Section E.3 of ISO 14496-15.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp8\")) {\n if (metadata.decoderConfig.codec !== \"vp8\") {\n throw new TypeError('Video chunk metadata decoder configuration codec string for VP8 must be \"vp8\".');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp09\")) {\n if (!VP9_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for VP9 must be a valid VP9 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://www.webmproject.org/vp9/mp4/.');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"av01\")) {\n if (!AV1_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AV1 must be a valid AV1 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://aomediacodec.github.io/av1-isobmff/.');\n }\n }\n};\nvar VALID_AUDIO_CODEC_STRING_PREFIXES = [\"mp4a\", \"mp3\", \"opus\", \"vorbis\", \"flac\", \"ulaw\", \"alaw\", \"pcm\"];\nvar validateAudioChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Audio chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Audio chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Audio chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_AUDIO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Audio chunk metadata decoder 
configuration codec string must be a valid audio codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if (!Number.isInteger(metadata.decoderConfig.sampleRate) || metadata.decoderConfig.sampleRate <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid sampleRate (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.numberOfChannels) || metadata.decoderConfig.numberOfChannels <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid numberOfChannels (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"mp4a\") && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n const validStrings = [\"mp4a.40.2\", \"mp4a.40.02\", \"mp4a.40.5\", \"mp4a.40.05\", \"mp4a.40.29\", \"mp4a.67\"];\n if (!validStrings.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for AAC must be a valid AAC codec string as\" + \" specified in https://www.w3.org/TR/webcodecs-aac-codec-registration/.\");\n }\n if (!metadata.decoderConfig.description) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for AAC must include a description, which is expected to be\" + \" an AudioSpecificConfig as specified in ISO 14496-3.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"mp3\") || metadata.decoderConfig.codec.startsWith(\"mp4a\")) {\n if (metadata.decoderConfig.codec !== \"mp3\" && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && 
metadata.decoderConfig.codec !== \"mp4a.6b\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for MP3 must be \"mp3\", \"mp4a.69\" or' + ' \"mp4a.6B\".');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"opus\")) {\n if (metadata.decoderConfig.codec !== \"opus\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Opus must be \"opus\".');\n }\n if (metadata.decoderConfig.description && metadata.decoderConfig.description.byteLength < 18) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when specified, is expected to be an\" + \" Identification Header as specified in Section 5.1 of RFC 7845.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vorbis\")) {\n if (metadata.decoderConfig.codec !== \"vorbis\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Vorbis must be \"vorbis\".');\n }\n if (!metadata.decoderConfig.description) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for Vorbis must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-vorbis-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"flac\")) {\n if (metadata.decoderConfig.codec !== \"flac\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for FLAC must be \"flac\".');\n }\n const minDescriptionSize = 4 + 4 + 34;\n if (!metadata.decoderConfig.description || metadata.decoderConfig.description.byteLength < minDescriptionSize) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for FLAC must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-flac-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"pcm\") || metadata.decoderConfig.codec.startsWith(\"ulaw\") || 
metadata.decoderConfig.codec.startsWith(\"alaw\")) {\n if (!PCM_AUDIO_CODECS.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for PCM must be one of the supported PCM\" + ` codecs (${PCM_AUDIO_CODECS.join(\", \")}).`);\n }\n }\n};\nvar validateSubtitleMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Subtitle metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Subtitle metadata must be an object.\");\n }\n if (!metadata.config) {\n throw new TypeError(\"Subtitle metadata must include a config object.\");\n }\n if (typeof metadata.config !== \"object\") {\n throw new TypeError(\"Subtitle metadata config must be an object.\");\n }\n if (typeof metadata.config.description !== \"string\") {\n throw new TypeError(\"Subtitle metadata config description must be a string.\");\n }\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/muxer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Muxer {\n constructor(output) {\n this.mutex = new AsyncMutex;\n this.firstMediaStreamTimestamp = null;\n this.trackTimestampInfo = new WeakMap;\n this.output = output;\n }\n onTrackClose(track) {}\n validateAndNormalizeTimestamp(track, timestampInSeconds, isKeyPacket) {\n timestampInSeconds += track.source._timestampOffset;\n let timestampInfo = this.trackTimestampInfo.get(track);\n if (!timestampInfo) {\n if (!isKeyPacket) {\n throw new Error(\"First packet must be a key packet.\");\n }\n timestampInfo = {\n maxTimestamp: timestampInSeconds,\n maxTimestampBeforeLastKeyPacket: timestampInSeconds\n };\n this.trackTimestampInfo.set(track, timestampInfo);\n }\n if (timestampInSeconds < 0) {\n throw new Error(`Timestamps must be non-negative (got ${timestampInSeconds}s).`);\n }\n if (isKeyPacket) {\n timestampInfo.maxTimestampBeforeLastKeyPacket = timestampInfo.maxTimestamp;\n }\n if (timestampInSeconds < timestampInfo.maxTimestampBeforeLastKeyPacket) {\n throw new Error(`Timestamps cannot be smaller than the largest timestamp of the previous GOP (a GOP begins with a key` + ` packet and ends right before the next key packet). Got ${timestampInSeconds}s, but largest` + ` timestamp is ${timestampInfo.maxTimestampBeforeLastKeyPacket}s.`);\n }\n timestampInfo.maxTimestamp = Math.max(timestampInfo.maxTimestamp, timestampInSeconds);\n return timestampInSeconds;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/codec-data.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar AvcNalUnitType;\n(function(AvcNalUnitType2) {\n AvcNalUnitType2[AvcNalUnitType2[\"IDR\"] = 5] = \"IDR\";\n AvcNalUnitType2[AvcNalUnitType2[\"SEI\"] = 6] = \"SEI\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS\"] = 7] = \"SPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"PPS\"] = 8] = \"PPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS_EXT\"] = 13] = \"SPS_EXT\";\n})(AvcNalUnitType || (AvcNalUnitType = {}));\nvar HevcNalUnitType;\n(function(HevcNalUnitType2) {\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_N\"] = 8] = \"RASL_N\";\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_R\"] = 9] = \"RASL_R\";\n HevcNalUnitType2[HevcNalUnitType2[\"BLA_W_LP\"] = 16] = \"BLA_W_LP\";\n HevcNalUnitType2[HevcNalUnitType2[\"RSV_IRAP_VCL23\"] = 23] = \"RSV_IRAP_VCL23\";\n HevcNalUnitType2[HevcNalUnitType2[\"VPS_NUT\"] = 32] = \"VPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SPS_NUT\"] = 33] = \"SPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PPS_NUT\"] = 34] = \"PPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PREFIX_SEI_NUT\"] = 39] = \"PREFIX_SEI_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SUFFIX_SEI_NUT\"] = 40] = \"SUFFIX_SEI_NUT\";\n})(HevcNalUnitType || (HevcNalUnitType = {}));\nvar findNalUnitsInAnnexB = (packetData) => {\n const nalUnits = [];\n let i = 0;\n while (i < packetData.length) {\n let startCodePos = -1;\n let startCodeLength = 0;\n for (let j = i;j < packetData.length - 3; j++) {\n if (packetData[j] === 0 && packetData[j + 1] === 0 && packetData[j + 2] === 1) {\n startCodePos = j;\n startCodeLength = 3;\n break;\n }\n if (j < packetData.length - 4 && packetData[j] === 0 && packetData[j + 1] === 0 && packetData[j + 2] === 0 && packetData[j + 3] === 1) {\n startCodePos = j;\n startCodeLength = 4;\n break;\n }\n }\n if (startCodePos === -1) {\n break;\n }\n if (i > 0 && startCodePos > i) {\n const nalData = packetData.subarray(i, startCodePos);\n if (nalData.length 
> 0) {\n nalUnits.push(nalData);\n }\n }\n i = startCodePos + startCodeLength;\n }\n if (i < packetData.length) {\n const nalData = packetData.subarray(i);\n if (nalData.length > 0) {\n nalUnits.push(nalData);\n }\n }\n return nalUnits;\n};\nvar removeEmulationPreventionBytes = (data) => {\n const result = [];\n const len = data.length;\n for (let i = 0;i < len; i++) {\n if (i + 2 < len && data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 3) {\n result.push(0, 0);\n i += 2;\n } else {\n result.push(data[i]);\n }\n }\n return new Uint8Array(result);\n};\nvar ANNEX_B_START_CODE = new Uint8Array([0, 0, 0, 1]);\nvar concatNalUnitsInLengthPrefixed = (nalUnits, lengthSize) => {\n const totalLength = nalUnits.reduce((a, b) => a + lengthSize + b.byteLength, 0);\n const result = new Uint8Array(totalLength);\n let offset = 0;\n for (const nalUnit of nalUnits) {\n const dataView = new DataView(result.buffer, result.byteOffset, result.byteLength);\n switch (lengthSize) {\n case 1:\n dataView.setUint8(offset, nalUnit.byteLength);\n break;\n case 2:\n dataView.setUint16(offset, nalUnit.byteLength, false);\n break;\n case 3:\n setUint24(dataView, offset, nalUnit.byteLength, false);\n break;\n case 4:\n dataView.setUint32(offset, nalUnit.byteLength, false);\n break;\n }\n offset += lengthSize;\n result.set(nalUnit, offset);\n offset += nalUnit.byteLength;\n }\n return result;\n};\nvar extractNalUnitTypeForAvc = (data) => {\n return data[0] & 31;\n};\nvar extractAvcDecoderConfigurationRecord = (packetData) => {\n try {\n const nalUnits = findNalUnitsInAnnexB(packetData);\n const spsUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.SPS);\n const ppsUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.PPS);\n const spsExtUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.SPS_EXT);\n if (spsUnits.length === 0) {\n return null;\n }\n if (ppsUnits.length === 0) {\n return null;\n }\n 
const spsData = spsUnits[0];\n const spsInfo = parseAvcSps(spsData);\n assert(spsInfo !== null);\n const hasExtendedData = spsInfo.profileIdc === 100 || spsInfo.profileIdc === 110 || spsInfo.profileIdc === 122 || spsInfo.profileIdc === 144;\n return {\n configurationVersion: 1,\n avcProfileIndication: spsInfo.profileIdc,\n profileCompatibility: spsInfo.constraintFlags,\n avcLevelIndication: spsInfo.levelIdc,\n lengthSizeMinusOne: 3,\n sequenceParameterSets: spsUnits,\n pictureParameterSets: ppsUnits,\n chromaFormat: hasExtendedData ? spsInfo.chromaFormatIdc : null,\n bitDepthLumaMinus8: hasExtendedData ? spsInfo.bitDepthLumaMinus8 : null,\n bitDepthChromaMinus8: hasExtendedData ? spsInfo.bitDepthChromaMinus8 : null,\n sequenceParameterSetExt: hasExtendedData ? spsExtUnits : null\n };\n } catch (error) {\n console.error(\"Error building AVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar serializeAvcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push(record.avcProfileIndication);\n bytes.push(record.profileCompatibility);\n bytes.push(record.avcLevelIndication);\n bytes.push(252 | record.lengthSizeMinusOne & 3);\n bytes.push(224 | record.sequenceParameterSets.length & 31);\n for (const sps of record.sequenceParameterSets) {\n const length = sps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(sps[i]);\n }\n }\n bytes.push(record.pictureParameterSets.length);\n for (const pps of record.pictureParameterSets) {\n const length = pps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(pps[i]);\n }\n }\n if (record.avcProfileIndication === 100 || record.avcProfileIndication === 110 || record.avcProfileIndication === 122 || record.avcProfileIndication === 144) {\n assert(record.chromaFormat !== null);\n assert(record.bitDepthLumaMinus8 !== null);\n 
assert(record.bitDepthChromaMinus8 !== null);\n assert(record.sequenceParameterSetExt !== null);\n bytes.push(252 | record.chromaFormat & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.sequenceParameterSetExt.length);\n for (const spsExt of record.sequenceParameterSetExt) {\n const length = spsExt.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(spsExt[i]);\n }\n }\n }\n return new Uint8Array(bytes);\n};\nvar parseAvcSps = (sps) => {\n try {\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(1);\n bitstream.skipBits(2);\n const nalUnitType = bitstream.readBits(5);\n if (nalUnitType !== 7) {\n return null;\n }\n const profileIdc = bitstream.readAlignedByte();\n const constraintFlags = bitstream.readAlignedByte();\n const levelIdc = bitstream.readAlignedByte();\n readExpGolomb(bitstream);\n let chromaFormatIdc = null;\n let bitDepthLumaMinus8 = null;\n let bitDepthChromaMinus8 = null;\n if (profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244 || profileIdc === 44 || profileIdc === 83 || profileIdc === 86 || profileIdc === 118 || profileIdc === 128) {\n chromaFormatIdc = readExpGolomb(bitstream);\n if (chromaFormatIdc === 3) {\n bitstream.skipBits(1);\n }\n bitDepthLumaMinus8 = readExpGolomb(bitstream);\n bitDepthChromaMinus8 = readExpGolomb(bitstream);\n bitstream.skipBits(1);\n const seqScalingMatrixPresentFlag = bitstream.readBits(1);\n if (seqScalingMatrixPresentFlag) {\n for (let i = 0;i < (chromaFormatIdc !== 3 ? 8 : 12); i++) {\n const seqScalingListPresentFlag = bitstream.readBits(1);\n if (seqScalingListPresentFlag) {\n const sizeOfScalingList = i < 6 ? 
16 : 64;\n let lastScale = 8;\n let nextScale = 8;\n for (let j = 0;j < sizeOfScalingList; j++) {\n if (nextScale !== 0) {\n const deltaScale = readSignedExpGolomb(bitstream);\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? lastScale : nextScale;\n }\n }\n }\n }\n }\n readExpGolomb(bitstream);\n const picOrderCntType = readExpGolomb(bitstream);\n if (picOrderCntType === 0) {\n readExpGolomb(bitstream);\n } else if (picOrderCntType === 1) {\n bitstream.skipBits(1);\n readSignedExpGolomb(bitstream);\n readSignedExpGolomb(bitstream);\n const numRefFramesInPicOrderCntCycle = readExpGolomb(bitstream);\n for (let i = 0;i < numRefFramesInPicOrderCntCycle; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n const frameMbsOnlyFlag = bitstream.readBits(1);\n return {\n profileIdc,\n constraintFlags,\n levelIdc,\n frameMbsOnlyFlag,\n chromaFormatIdc,\n bitDepthLumaMinus8,\n bitDepthChromaMinus8\n };\n } catch (error) {\n console.error(\"Error parsing AVC SPS:\", error);\n return null;\n }\n};\nvar extractNalUnitTypeForHevc = (data) => {\n return data[0] >> 1 & 63;\n};\nvar extractHevcDecoderConfigurationRecord = (packetData) => {\n try {\n const nalUnits = findNalUnitsInAnnexB(packetData);\n const vpsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.VPS_NUT);\n const spsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.SPS_NUT);\n const ppsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.PPS_NUT);\n const seiUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.PREFIX_SEI_NUT || extractNalUnitTypeForHevc(unit) === HevcNalUnitType.SUFFIX_SEI_NUT);\n if (spsUnits.length === 0 || ppsUnits.length === 0)\n return null;\n const sps = spsUnits[0];\n const bitstream = new 
Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(16);\n bitstream.readBits(4);\n const sps_max_sub_layers_minus1 = bitstream.readBits(3);\n const sps_temporal_id_nesting_flag = bitstream.readBits(1);\n const { general_profile_space, general_tier_flag, general_profile_idc, general_profile_compatibility_flags, general_constraint_indicator_flags, general_level_idc } = parseProfileTierLevel(bitstream, sps_max_sub_layers_minus1);\n readExpGolomb(bitstream);\n const chroma_format_idc = readExpGolomb(bitstream);\n if (chroma_format_idc === 3)\n bitstream.skipBits(1);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n const bit_depth_luma_minus8 = readExpGolomb(bitstream);\n const bit_depth_chroma_minus8 = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n const sps_sub_layer_ordering_info_present_flag = bitstream.readBits(1);\n const maxNum = sps_sub_layer_ordering_info_present_flag ? 
0 : sps_max_sub_layers_minus1;\n for (let i = maxNum;i <= sps_max_sub_layers_minus1; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n if (bitstream.readBits(1)) {\n skipScalingListData(bitstream);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n if (bitstream.readBits(1)) {\n bitstream.skipBits(4);\n bitstream.skipBits(4);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n const num_short_term_ref_pic_sets = readExpGolomb(bitstream);\n skipAllStRefPicSets(bitstream, num_short_term_ref_pic_sets);\n if (bitstream.readBits(1)) {\n const num_long_term_ref_pics_sps = readExpGolomb(bitstream);\n for (let i = 0;i < num_long_term_ref_pics_sps; i++) {\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n let min_spatial_segmentation_idc = 0;\n if (bitstream.readBits(1)) {\n min_spatial_segmentation_idc = parseVuiForMinSpatialSegmentationIdc(bitstream, sps_max_sub_layers_minus1);\n }\n let parallelismType = 0;\n if (ppsUnits.length > 0) {\n const pps = ppsUnits[0];\n const ppsBitstream = new Bitstream(removeEmulationPreventionBytes(pps));\n ppsBitstream.skipBits(16);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(3);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n if (ppsBitstream.readBits(1)) {\n readExpGolomb(ppsBitstream);\n }\n readSignedExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n 
ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n const tiles_enabled_flag = ppsBitstream.readBits(1);\n const entropy_coding_sync_enabled_flag = ppsBitstream.readBits(1);\n if (!tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 0;\n else if (tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 2;\n else if (!tiles_enabled_flag && entropy_coding_sync_enabled_flag)\n parallelismType = 3;\n else\n parallelismType = 0;\n }\n const arrays = [\n ...vpsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.VPS_NUT,\n nalUnits: vpsUnits\n }\n ] : [],\n ...spsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.SPS_NUT,\n nalUnits: spsUnits\n }\n ] : [],\n ...ppsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.PPS_NUT,\n nalUnits: ppsUnits\n }\n ] : [],\n ...seiUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: extractNalUnitTypeForHevc(seiUnits[0]),\n nalUnits: seiUnits\n }\n ] : []\n ];\n const record = {\n configurationVersion: 1,\n generalProfileSpace: general_profile_space,\n generalTierFlag: general_tier_flag,\n generalProfileIdc: general_profile_idc,\n generalProfileCompatibilityFlags: general_profile_compatibility_flags,\n generalConstraintIndicatorFlags: general_constraint_indicator_flags,\n generalLevelIdc: general_level_idc,\n minSpatialSegmentationIdc: min_spatial_segmentation_idc,\n parallelismType,\n chromaFormatIdc: chroma_format_idc,\n bitDepthLumaMinus8: bit_depth_luma_minus8,\n bitDepthChromaMinus8: bit_depth_chroma_minus8,\n avgFrameRate: 0,\n constantFrameRate: 0,\n numTemporalLayers: sps_max_sub_layers_minus1 + 1,\n temporalIdNested: sps_temporal_id_nesting_flag,\n lengthSizeMinusOne: 3,\n arrays\n };\n return record;\n } catch (error) {\n console.error(\"Error building HEVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar parseProfileTierLevel = (bitstream, maxNumSubLayersMinus1) => {\n const 
general_profile_space = bitstream.readBits(2);\n const general_tier_flag = bitstream.readBits(1);\n const general_profile_idc = bitstream.readBits(5);\n let general_profile_compatibility_flags = 0;\n for (let i = 0;i < 32; i++) {\n general_profile_compatibility_flags = general_profile_compatibility_flags << 1 | bitstream.readBits(1);\n }\n const general_constraint_indicator_flags = new Uint8Array(6);\n for (let i = 0;i < 6; i++) {\n general_constraint_indicator_flags[i] = bitstream.readBits(8);\n }\n const general_level_idc = bitstream.readBits(8);\n const sub_layer_profile_present_flag = [];\n const sub_layer_level_present_flag = [];\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n sub_layer_profile_present_flag.push(bitstream.readBits(1));\n sub_layer_level_present_flag.push(bitstream.readBits(1));\n }\n if (maxNumSubLayersMinus1 > 0) {\n for (let i = maxNumSubLayersMinus1;i < 8; i++) {\n bitstream.skipBits(2);\n }\n }\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n if (sub_layer_profile_present_flag[i])\n bitstream.skipBits(88);\n if (sub_layer_level_present_flag[i])\n bitstream.skipBits(8);\n }\n return {\n general_profile_space,\n general_tier_flag,\n general_profile_idc,\n general_profile_compatibility_flags,\n general_constraint_indicator_flags,\n general_level_idc\n };\n};\nvar skipScalingListData = (bitstream) => {\n for (let sizeId = 0;sizeId < 4; sizeId++) {\n for (let matrixId = 0;matrixId < (sizeId === 3 ? 
2 : 6); matrixId++) {\n const scaling_list_pred_mode_flag = bitstream.readBits(1);\n if (!scaling_list_pred_mode_flag) {\n readExpGolomb(bitstream);\n } else {\n const coefNum = Math.min(64, 1 << 4 + (sizeId << 1));\n if (sizeId > 1) {\n readSignedExpGolomb(bitstream);\n }\n for (let i = 0;i < coefNum; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n }\n }\n};\nvar skipAllStRefPicSets = (bitstream, num_short_term_ref_pic_sets) => {\n const NumDeltaPocs = [];\n for (let stRpsIdx = 0;stRpsIdx < num_short_term_ref_pic_sets; stRpsIdx++) {\n NumDeltaPocs[stRpsIdx] = skipStRefPicSet(bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs);\n }\n};\nvar skipStRefPicSet = (bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs) => {\n let NumDeltaPocsThis = 0;\n let inter_ref_pic_set_prediction_flag = 0;\n let RefRpsIdx = 0;\n if (stRpsIdx !== 0) {\n inter_ref_pic_set_prediction_flag = bitstream.readBits(1);\n }\n if (inter_ref_pic_set_prediction_flag) {\n if (stRpsIdx === num_short_term_ref_pic_sets) {\n const delta_idx_minus1 = readExpGolomb(bitstream);\n RefRpsIdx = stRpsIdx - (delta_idx_minus1 + 1);\n } else {\n RefRpsIdx = stRpsIdx - 1;\n }\n bitstream.readBits(1);\n readExpGolomb(bitstream);\n const numDelta = NumDeltaPocs[RefRpsIdx] ?? 
0;\n for (let j = 0;j <= numDelta; j++) {\n const used_by_curr_pic_flag = bitstream.readBits(1);\n if (!used_by_curr_pic_flag) {\n bitstream.readBits(1);\n }\n }\n NumDeltaPocsThis = NumDeltaPocs[RefRpsIdx];\n } else {\n const num_negative_pics = readExpGolomb(bitstream);\n const num_positive_pics = readExpGolomb(bitstream);\n for (let i = 0;i < num_negative_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n for (let i = 0;i < num_positive_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n NumDeltaPocsThis = num_negative_pics + num_positive_pics;\n }\n return NumDeltaPocsThis;\n};\nvar parseVuiForMinSpatialSegmentationIdc = (bitstream, sps_max_sub_layers_minus1) => {\n if (bitstream.readBits(1)) {\n const aspect_ratio_idc = bitstream.readBits(8);\n if (aspect_ratio_idc === 255) {\n bitstream.readBits(16);\n bitstream.readBits(16);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(3);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n bitstream.readBits(8);\n bitstream.readBits(8);\n bitstream.readBits(8);\n }\n }\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(32);\n bitstream.readBits(32);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n skipHrdParameters(bitstream, true, sps_max_sub_layers_minus1);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n const min_spatial_segmentation_idc = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n 
return min_spatial_segmentation_idc;\n }\n return 0;\n};\nvar skipHrdParameters = (bitstream, commonInfPresentFlag, maxNumSubLayersMinus1) => {\n let nal_hrd_parameters_present_flag = false;\n let vcl_hrd_parameters_present_flag = false;\n let sub_pic_hrd_params_present_flag = false;\n if (commonInfPresentFlag) {\n nal_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n vcl_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) {\n sub_pic_hrd_params_present_flag = bitstream.readBits(1) === 1;\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(8);\n bitstream.readBits(5);\n bitstream.readBits(1);\n bitstream.readBits(5);\n }\n bitstream.readBits(4);\n bitstream.readBits(4);\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(4);\n }\n bitstream.readBits(5);\n bitstream.readBits(5);\n bitstream.readBits(5);\n }\n }\n for (let i = 0;i <= maxNumSubLayersMinus1; i++) {\n const fixed_pic_rate_general_flag = bitstream.readBits(1) === 1;\n let fixed_pic_rate_within_cvs_flag = true;\n if (!fixed_pic_rate_general_flag) {\n fixed_pic_rate_within_cvs_flag = bitstream.readBits(1) === 1;\n }\n let low_delay_hrd_flag = false;\n if (fixed_pic_rate_within_cvs_flag) {\n readExpGolomb(bitstream);\n } else {\n low_delay_hrd_flag = bitstream.readBits(1) === 1;\n }\n let CpbCnt = 1;\n if (!low_delay_hrd_flag) {\n const cpb_cnt_minus1 = readExpGolomb(bitstream);\n CpbCnt = cpb_cnt_minus1 + 1;\n }\n if (nal_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n if (vcl_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n }\n};\nvar skipSubLayerHrdParameters = (bitstream, CpbCnt, sub_pic_hrd_params_present_flag) => {\n for (let i = 0;i < CpbCnt; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (sub_pic_hrd_params_present_flag) {\n 
readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n }\n};\nvar serializeHevcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push((record.generalProfileSpace & 3) << 6 | (record.generalTierFlag & 1) << 5 | record.generalProfileIdc & 31);\n bytes.push(record.generalProfileCompatibilityFlags >>> 24 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 16 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 8 & 255);\n bytes.push(record.generalProfileCompatibilityFlags & 255);\n bytes.push(...record.generalConstraintIndicatorFlags);\n bytes.push(record.generalLevelIdc & 255);\n bytes.push(240 | record.minSpatialSegmentationIdc >> 8 & 15);\n bytes.push(record.minSpatialSegmentationIdc & 255);\n bytes.push(252 | record.parallelismType & 3);\n bytes.push(252 | record.chromaFormatIdc & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.avgFrameRate >> 8 & 255);\n bytes.push(record.avgFrameRate & 255);\n bytes.push((record.constantFrameRate & 3) << 6 | (record.numTemporalLayers & 7) << 3 | (record.temporalIdNested & 1) << 2 | record.lengthSizeMinusOne & 3);\n bytes.push(record.arrays.length & 255);\n for (const arr of record.arrays) {\n bytes.push((arr.arrayCompleteness & 1) << 7 | 0 << 6 | arr.nalUnitType & 63);\n bytes.push(arr.nalUnits.length >> 8 & 255);\n bytes.push(arr.nalUnits.length & 255);\n for (const nal of arr.nalUnits) {\n bytes.push(nal.length >> 8 & 255);\n bytes.push(nal.length & 255);\n for (let i = 0;i < nal.length; i++) {\n bytes.push(nal[i]);\n }\n }\n }\n return new Uint8Array(bytes);\n};\nvar parseOpusIdentificationHeader = (bytes) => {\n const view = toDataView(bytes);\n const outputChannelCount = view.getUint8(9);\n const preSkip = view.getUint16(10, true);\n const inputSampleRate = view.getUint32(12, true);\n const outputGain = view.getInt16(16, true);\n 
const channelMappingFamily = view.getUint8(18);\n let channelMappingTable = null;\n if (channelMappingFamily) {\n channelMappingTable = bytes.subarray(19, 19 + 2 + outputChannelCount);\n }\n return {\n outputChannelCount,\n preSkip,\n inputSampleRate,\n outputGain,\n channelMappingFamily,\n channelMappingTable\n };\n};\nvar FlacBlockType;\n(function(FlacBlockType2) {\n FlacBlockType2[FlacBlockType2[\"STREAMINFO\"] = 0] = \"STREAMINFO\";\n FlacBlockType2[FlacBlockType2[\"VORBIS_COMMENT\"] = 4] = \"VORBIS_COMMENT\";\n FlacBlockType2[FlacBlockType2[\"PICTURE\"] = 6] = \"PICTURE\";\n})(FlacBlockType || (FlacBlockType = {}));\n\n// ../../node_modules/mediabunny/dist/modules/src/custom-coder.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar customVideoEncoders = [];\nvar customAudioEncoders = [];\n\n// ../../node_modules/mediabunny/dist/modules/src/packet.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar PLACEHOLDER_DATA = /* @__PURE__ */ new Uint8Array(0);\n\nclass EncodedPacket {\n constructor(data, type, timestamp, duration, sequenceNumber = -1, byteLength, sideData) {\n this.data = data;\n this.type = type;\n this.timestamp = timestamp;\n this.duration = duration;\n this.sequenceNumber = sequenceNumber;\n if (data === PLACEHOLDER_DATA && byteLength === undefined) {\n throw new Error(\"Internal error: byteLength must be explicitly provided when constructing metadata-only packets.\");\n }\n if (byteLength === undefined) {\n byteLength = data.byteLength;\n }\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (type !== \"key\" && type !== \"delta\") {\n throw new TypeError('type must be either \"key\" or \"delta\".');\n }\n if (!Number.isFinite(timestamp)) {\n throw new TypeError(\"timestamp must be a number.\");\n }\n if (!Number.isFinite(duration) || duration < 0) {\n throw new TypeError(\"duration must be a non-negative number.\");\n }\n if (!Number.isFinite(sequenceNumber)) {\n throw new TypeError(\"sequenceNumber must be a number.\");\n }\n if (!Number.isInteger(byteLength) || byteLength < 0) {\n throw new TypeError(\"byteLength must be a non-negative integer.\");\n }\n if (sideData !== undefined && (typeof sideData !== \"object\" || !sideData)) {\n throw new TypeError(\"sideData, when provided, must be an object.\");\n }\n if (sideData?.alpha !== undefined && !(sideData.alpha instanceof Uint8Array)) {\n throw new TypeError(\"sideData.alpha, when provided, must be a Uint8Array.\");\n }\n if (sideData?.alphaByteLength !== undefined && (!Number.isInteger(sideData.alphaByteLength) || sideData.alphaByteLength < 0)) {\n throw new TypeError(\"sideData.alphaByteLength, when provided, must be a non-negative integer.\");\n }\n this.byteLength = byteLength;\n this.sideData = sideData ?? 
{};\n if (this.sideData.alpha && this.sideData.alphaByteLength === undefined) {\n this.sideData.alphaByteLength = this.sideData.alpha.byteLength;\n }\n }\n get isMetadataOnly() {\n return this.data === PLACEHOLDER_DATA;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n toEncodedVideoChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n alphaToEncodedVideoChunk(type = this.type) {\n if (!this.sideData.alpha) {\n throw new TypeError(\"This packet does not contain alpha side data.\");\n }\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.sideData.alpha,\n type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n toEncodedAudioChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to an audio chunk.\");\n }\n if (typeof EncodedAudioChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedAudioChunk.\");\n }\n return new EncodedAudioChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n static fromEncodedChunk(chunk, sideData) {\n if (!(chunk instanceof EncodedVideoChunk || chunk instanceof EncodedAudioChunk)) {\n throw 
new TypeError(\"chunk must be an EncodedVideoChunk or EncodedAudioChunk.\");\n }\n const data = new Uint8Array(chunk.byteLength);\n chunk.copyTo(data);\n return new EncodedPacket(data, chunk.type, chunk.timestamp / 1e6, (chunk.duration ?? 0) / 1e6, undefined, undefined, sideData);\n }\n clone(options) {\n if (options !== undefined && (typeof options !== \"object\" || options === null)) {\n throw new TypeError(\"options, when provided, must be an object.\");\n }\n if (options?.timestamp !== undefined && !Number.isFinite(options.timestamp)) {\n throw new TypeError(\"options.timestamp, when provided, must be a number.\");\n }\n if (options?.duration !== undefined && !Number.isFinite(options.duration)) {\n throw new TypeError(\"options.duration, when provided, must be a number.\");\n }\n return new EncodedPacket(this.data, this.type, options?.timestamp ?? this.timestamp, options?.duration ?? this.duration, this.sequenceNumber, this.byteLength);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/pcm.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar toUlaw = (s16) => {\n const MULAW_MAX = 8191;\n const MULAW_BIAS = 33;\n let number = s16;\n let mask = 4096;\n let sign = 0;\n let position = 12;\n let lsb = 0;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n number += MULAW_BIAS;\n if (number > MULAW_MAX) {\n number = MULAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> position - 4 & 15;\n return ~(sign | position - 5 << 4 | lsb) & 255;\n};\nvar toAlaw = (s16) => {\n const ALAW_MAX = 4095;\n let mask = 2048;\n let sign = 0;\n let position = 11;\n let lsb = 0;\n let number = s16;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n if (number > ALAW_MAX) {\n number = ALAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> (position === 4 ? 1 : position - 4) & 15;\n return (sign | position - 4 << 4 | lsb) ^ 85;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/sample.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\npolyfillSymbolDispose();\nvar lastVideoGcErrorLog = -Infinity;\nvar lastAudioGcErrorLog = -Infinity;\nvar finalizationRegistry = null;\nif (typeof FinalizationRegistry !== \"undefined\") {\n finalizationRegistry = new FinalizationRegistry((value) => {\n const now = Date.now();\n if (value.type === \"video\") {\n if (now - lastVideoGcErrorLog >= 1000) {\n console.error(`A VideoSample was garbage collected without first being closed. 
For proper resource management,` + ` make sure to call close() on all your VideoSamples as soon as you're done using them.`);\n lastVideoGcErrorLog = now;\n }\n if (typeof VideoFrame !== \"undefined\" && value.data instanceof VideoFrame) {\n value.data.close();\n }\n } else {\n if (now - lastAudioGcErrorLog >= 1000) {\n console.error(`An AudioSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your AudioSamples as soon as you're done using them.`);\n lastAudioGcErrorLog = now;\n }\n if (typeof AudioData !== \"undefined\" && value.data instanceof AudioData) {\n value.data.close();\n }\n }\n });\n}\nvar VIDEO_SAMPLE_PIXEL_FORMATS = [\n \"I420\",\n \"I420P10\",\n \"I420P12\",\n \"I420A\",\n \"I420AP10\",\n \"I420AP12\",\n \"I422\",\n \"I422P10\",\n \"I422P12\",\n \"I422A\",\n \"I422AP10\",\n \"I422AP12\",\n \"I444\",\n \"I444P10\",\n \"I444P12\",\n \"I444A\",\n \"I444AP10\",\n \"I444AP12\",\n \"NV12\",\n \"RGBA\",\n \"RGBX\",\n \"BGRA\",\n \"BGRX\"\n];\nvar VIDEO_SAMPLE_PIXEL_FORMATS_SET = new Set(VIDEO_SAMPLE_PIXEL_FORMATS);\n\nclass VideoSample {\n get displayWidth() {\n return this.rotation % 180 === 0 ? this.codedWidth : this.codedHeight;\n }\n get displayHeight() {\n return this.rotation % 180 === 0 ? 
this.codedHeight : this.codedWidth;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n get hasAlpha() {\n return this.format && this.format.includes(\"A\");\n }\n constructor(data, init) {\n this._closed = false;\n if (data instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && data instanceof SharedArrayBuffer || ArrayBuffer.isView(data)) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.format === undefined || !VIDEO_SAMPLE_PIXEL_FORMATS_SET.has(init.format)) {\n throw new TypeError(\"init.format must be one of: \" + VIDEO_SAMPLE_PIXEL_FORMATS.join(\", \"));\n }\n if (!Number.isInteger(init.codedWidth) || init.codedWidth <= 0) {\n throw new TypeError(\"init.codedWidth must be a positive integer.\");\n }\n if (!Number.isInteger(init.codedHeight) || init.codedHeight <= 0) {\n throw new TypeError(\"init.codedHeight must be a positive integer.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = toUint8Array(data).slice();\n this._layout = init.layout ?? createDefaultPlaneLayout(init.format, init.codedWidth, init.codedHeight);\n this.format = init.format;\n this.codedWidth = init.codedWidth;\n this.codedHeight = init.codedHeight;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoSampleColorSpace(init.colorSpace);\n } else if (typeof VideoFrame !== \"undefined\" && data instanceof VideoFrame) {\n if (init?.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (init?.timestamp !== undefined && !Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp, when provided, must be a number.\");\n }\n if (init?.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = data;\n this._layout = null;\n this.format = data.format;\n this.codedWidth = data.displayWidth;\n this.codedHeight = data.displayHeight;\n this.rotation = init?.rotation ?? 0;\n this.timestamp = init?.timestamp ?? data.timestamp / 1e6;\n this.duration = init?.duration ?? (data.duration ?? 0) / 1e6;\n this.colorSpace = new VideoSampleColorSpace(data.colorSpace);\n } else if (typeof HTMLImageElement !== \"undefined\" && data instanceof HTMLImageElement || typeof SVGImageElement !== \"undefined\" && data instanceof SVGImageElement || typeof ImageBitmap !== \"undefined\" && data instanceof ImageBitmap || typeof HTMLVideoElement !== \"undefined\" && data instanceof HTMLVideoElement || typeof HTMLCanvasElement !== \"undefined\" && data instanceof HTMLCanvasElement || typeof OffscreenCanvas !== \"undefined\" && data instanceof OffscreenCanvas) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || 
init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n if (typeof VideoFrame !== \"undefined\") {\n return new VideoSample(new VideoFrame(data, {\n timestamp: Math.trunc(init.timestamp * SECOND_TO_MICROSECOND_FACTOR),\n duration: Math.trunc((init.duration ?? 0) * SECOND_TO_MICROSECOND_FACTOR) || undefined\n }), init);\n }\n let width = 0;\n let height = 0;\n if (\"naturalWidth\" in data) {\n width = data.naturalWidth;\n height = data.naturalHeight;\n } else if (\"videoWidth\" in data) {\n width = data.videoWidth;\n height = data.videoHeight;\n } else if (\"width\" in data) {\n width = Number(data.width);\n height = Number(data.height);\n }\n if (!width || !height) {\n throw new TypeError(\"Could not determine dimensions.\");\n }\n const canvas = new OffscreenCanvas(width, height);\n const context = canvas.getContext(\"2d\", {\n alpha: isFirefox(),\n willReadFrequently: true\n });\n assert(context);\n context.drawImage(data, 0, 0);\n this._data = canvas;\n this._layout = null;\n this.format = \"RGBX\";\n this.codedWidth = width;\n this.codedHeight = height;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoSampleColorSpace({\n matrix: \"rgb\",\n primaries: \"bt709\",\n transfer: \"iec61966-2-1\",\n fullRange: true\n });\n } else {\n throw new TypeError(\"Invalid data type: Must be a BufferSource or CanvasImageSource.\");\n }\n finalizationRegistry?.register(this, { type: \"video\", data: this._data }, this);\n }\n clone() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return new VideoSample(this._data.clone(), {\n timestamp: this.timestamp,\n duration: this.duration,\n rotation: this.rotation\n });\n } else if (this._data instanceof Uint8Array) {\n assert(this._layout);\n return new VideoSample(this._data, {\n format: this.format,\n layout: this._layout,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n } else {\n return new VideoSample(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isVideoFrame(this._data)) {\n this._data.close();\n } else {\n this._data = null;\n }\n this._closed = true;\n }\n allocationSize(options = {}) {\n validateVideoFrameCopyToOptions(options);\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n if (this.format === null) {\n throw new Error(\"Cannot get allocation size when format is null. 
Sorry!\");\n }\n assert(this._data !== null);\n if (!isVideoFrame(this._data)) {\n if (options.colorSpace || options.format && options.format !== this.format || options.layout || options.rect) {\n const videoFrame = this.toVideoFrame();\n const size = videoFrame.allocationSize(options);\n videoFrame.close();\n return size;\n }\n }\n if (isVideoFrame(this._data)) {\n return this._data.allocationSize(options);\n } else if (this._data instanceof Uint8Array) {\n return this._data.byteLength;\n } else {\n return this.codedWidth * this.codedHeight * 4;\n }\n }\n async copyTo(destination, options = {}) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n validateVideoFrameCopyToOptions(options);\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n if (this.format === null) {\n throw new Error(\"Cannot copy video sample data when format is null. Sorry!\");\n }\n assert(this._data !== null);\n if (!isVideoFrame(this._data)) {\n if (options.colorSpace || options.format && options.format !== this.format || options.layout || options.rect) {\n const videoFrame = this.toVideoFrame();\n const layout = await videoFrame.copyTo(destination, options);\n videoFrame.close();\n return layout;\n }\n }\n if (isVideoFrame(this._data)) {\n return this._data.copyTo(destination, options);\n } else if (this._data instanceof Uint8Array) {\n assert(this._layout);\n const dest = toUint8Array(destination);\n dest.set(this._data);\n return this._layout;\n } else {\n const canvas = this._data;\n const context = canvas.getContext(\"2d\");\n assert(context);\n const imageData = context.getImageData(0, 0, this.codedWidth, this.codedHeight);\n const dest = toUint8Array(destination);\n dest.set(imageData.data);\n return [{\n offset: 0,\n stride: 4 * this.codedWidth\n }];\n }\n }\n toVideoFrame() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== 
null);\n if (isVideoFrame(this._data)) {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n } else if (this._data instanceof Uint8Array) {\n return new VideoFrame(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined,\n colorSpace: this.colorSpace\n });\n } else {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n }\n }\n draw(context, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) {\n let sx = 0;\n let sy = 0;\n let sWidth = this.displayWidth;\n let sHeight = this.displayHeight;\n let dx = 0;\n let dy = 0;\n let dWidth = this.displayWidth;\n let dHeight = this.displayHeight;\n if (arg5 !== undefined) {\n sx = arg1;\n sy = arg2;\n sWidth = arg3;\n sHeight = arg4;\n dx = arg5;\n dy = arg6;\n if (arg7 !== undefined) {\n dWidth = arg7;\n dHeight = arg8;\n } else {\n dWidth = sWidth;\n dHeight = sHeight;\n }\n } else {\n dx = arg1;\n dy = arg2;\n if (arg3 !== undefined) {\n dWidth = arg3;\n dHeight = arg4;\n }\n }\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!Number.isFinite(sx)) {\n throw new TypeError(\"sx must be a number.\");\n }\n if (!Number.isFinite(sy)) {\n throw new TypeError(\"sy must be a number.\");\n }\n if (!Number.isFinite(sWidth) || sWidth < 0) {\n throw new TypeError(\"sWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(sHeight) || sHeight < 0) {\n throw new TypeError(\"sHeight must be a non-negative number.\");\n }\n if 
(!Number.isFinite(dx)) {\n throw new TypeError(\"dx must be a number.\");\n }\n if (!Number.isFinite(dy)) {\n throw new TypeError(\"dy must be a number.\");\n }\n if (!Number.isFinite(dWidth) || dWidth < 0) {\n throw new TypeError(\"dWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(dHeight) || dHeight < 0) {\n throw new TypeError(\"dHeight must be a non-negative number.\");\n }\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n ({ sx, sy, sWidth, sHeight } = this._rotateSourceRegion(sx, sy, sWidth, sHeight, this.rotation));\n const source = this.toCanvasImageSource();\n context.save();\n const centerX = dx + dWidth / 2;\n const centerY = dy + dHeight / 2;\n context.translate(centerX, centerY);\n context.rotate(this.rotation * Math.PI / 180);\n const aspectRatioChange = this.rotation % 180 === 0 ? 1 : dWidth / dHeight;\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.drawImage(source, sx, sy, sWidth, sHeight, -dWidth / 2, -dHeight / 2, dWidth, dHeight);\n context.restore();\n }\n drawWithFit(context, options) {\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (![\"fill\", \"contain\", \"cover\"].includes(options.fit)) {\n throw new TypeError(\"options.fit must be 'fill', 'contain', or 'cover'.\");\n }\n if (options.rotation !== undefined && ![0, 90, 180, 270].includes(options.rotation)) {\n throw new TypeError(\"options.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (options.crop !== undefined) {\n validateCropRectangle(options.crop, \"options.\");\n }\n const canvasWidth = 
context.canvas.width;\n const canvasHeight = context.canvas.height;\n const rotation = options.rotation ?? this.rotation;\n const [rotatedWidth, rotatedHeight] = rotation % 180 === 0 ? [this.codedWidth, this.codedHeight] : [this.codedHeight, this.codedWidth];\n if (options.crop) {\n clampCropRectangle(options.crop, rotatedWidth, rotatedHeight);\n }\n let dx;\n let dy;\n let newWidth;\n let newHeight;\n const { sx, sy, sWidth, sHeight } = this._rotateSourceRegion(options.crop?.left ?? 0, options.crop?.top ?? 0, options.crop?.width ?? rotatedWidth, options.crop?.height ?? rotatedHeight, rotation);\n if (options.fit === \"fill\") {\n dx = 0;\n dy = 0;\n newWidth = canvasWidth;\n newHeight = canvasHeight;\n } else {\n const [sampleWidth, sampleHeight] = options.crop ? [options.crop.width, options.crop.height] : [rotatedWidth, rotatedHeight];\n const scale = options.fit === \"contain\" ? Math.min(canvasWidth / sampleWidth, canvasHeight / sampleHeight) : Math.max(canvasWidth / sampleWidth, canvasHeight / sampleHeight);\n newWidth = sampleWidth * scale;\n newHeight = sampleHeight * scale;\n dx = (canvasWidth - newWidth) / 2;\n dy = (canvasHeight - newHeight) / 2;\n }\n context.save();\n const aspectRatioChange = rotation % 180 === 0 ? 
1 : newWidth / newHeight;\n context.translate(canvasWidth / 2, canvasHeight / 2);\n context.rotate(rotation * Math.PI / 180);\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.translate(-canvasWidth / 2, -canvasHeight / 2);\n context.drawImage(this.toCanvasImageSource(), sx, sy, sWidth, sHeight, dx, dy, newWidth, newHeight);\n context.restore();\n }\n _rotateSourceRegion(sx, sy, sWidth, sHeight, rotation) {\n if (rotation === 90) {\n [sx, sy, sWidth, sHeight] = [\n sy,\n this.codedHeight - sx - sWidth,\n sHeight,\n sWidth\n ];\n } else if (rotation === 180) {\n [sx, sy] = [\n this.codedWidth - sx - sWidth,\n this.codedHeight - sy - sHeight\n ];\n } else if (rotation === 270) {\n [sx, sy, sWidth, sHeight] = [\n this.codedWidth - sy - sHeight,\n sx,\n sHeight,\n sWidth\n ];\n }\n return { sx, sy, sWidth, sHeight };\n }\n toCanvasImageSource() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (this._data instanceof Uint8Array) {\n const videoFrame = this.toVideoFrame();\n queueMicrotask(() => videoFrame.close());\n return videoFrame;\n } else {\n return this._data;\n }\n }\n setRotation(newRotation) {\n if (![0, 90, 180, 270].includes(newRotation)) {\n throw new TypeError(\"newRotation must be 0, 90, 180, or 270.\");\n }\n this.rotation = newRotation;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n setDuration(newDuration) {\n if (!Number.isFinite(newDuration) || newDuration < 0) {\n throw new TypeError(\"newDuration must be a non-negative number.\");\n }\n this.duration = newDuration;\n }\n [Symbol.dispose]() {\n this.close();\n }\n}\n\nclass VideoSampleColorSpace {\n constructor(init) {\n this.primaries = init?.primaries ?? null;\n this.transfer = init?.transfer ?? null;\n this.matrix = init?.matrix ?? null;\n this.fullRange = init?.fullRange ?? 
null;\n }\n toJSON() {\n return {\n primaries: this.primaries,\n transfer: this.transfer,\n matrix: this.matrix,\n fullRange: this.fullRange\n };\n }\n}\nvar isVideoFrame = (x) => {\n return typeof VideoFrame !== \"undefined\" && x instanceof VideoFrame;\n};\nvar clampCropRectangle = (crop, outerWidth, outerHeight) => {\n crop.left = Math.min(crop.left, outerWidth);\n crop.top = Math.min(crop.top, outerHeight);\n crop.width = Math.min(crop.width, outerWidth - crop.left);\n crop.height = Math.min(crop.height, outerHeight - crop.top);\n assert(crop.width >= 0);\n assert(crop.height >= 0);\n};\nvar validateCropRectangle = (crop, prefix) => {\n if (!crop || typeof crop !== \"object\") {\n throw new TypeError(prefix + \"crop, when provided, must be an object.\");\n }\n if (!Number.isInteger(crop.left) || crop.left < 0) {\n throw new TypeError(prefix + \"crop.left must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.top) || crop.top < 0) {\n throw new TypeError(prefix + \"crop.top must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.width) || crop.width < 0) {\n throw new TypeError(prefix + \"crop.width must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.height) || crop.height < 0) {\n throw new TypeError(prefix + \"crop.height must be a non-negative integer.\");\n }\n};\nvar validateVideoFrameCopyToOptions = (options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.colorSpace !== undefined && ![\"display-p3\", \"srgb\"].includes(options.colorSpace)) {\n throw new TypeError(\"options.colorSpace, when provided, must be 'display-p3' or 'srgb'.\");\n }\n if (options.format !== undefined && typeof options.format !== \"string\") {\n throw new TypeError(\"options.format, when provided, must be a string.\");\n }\n if (options.layout !== undefined) {\n if (!Array.isArray(options.layout)) {\n throw new TypeError(\"options.layout, when provided, must be 
an array.\");\n }\n for (const plane of options.layout) {\n if (!plane || typeof plane !== \"object\") {\n throw new TypeError(\"Each entry in options.layout must be an object.\");\n }\n if (!Number.isInteger(plane.offset) || plane.offset < 0) {\n throw new TypeError(\"plane.offset must be a non-negative integer.\");\n }\n if (!Number.isInteger(plane.stride) || plane.stride < 0) {\n throw new TypeError(\"plane.stride must be a non-negative integer.\");\n }\n }\n }\n if (options.rect !== undefined) {\n if (!options.rect || typeof options.rect !== \"object\") {\n throw new TypeError(\"options.rect, when provided, must be an object.\");\n }\n if (options.rect.x !== undefined && (!Number.isInteger(options.rect.x) || options.rect.x < 0)) {\n throw new TypeError(\"options.rect.x, when provided, must be a non-negative integer.\");\n }\n if (options.rect.y !== undefined && (!Number.isInteger(options.rect.y) || options.rect.y < 0)) {\n throw new TypeError(\"options.rect.y, when provided, must be a non-negative integer.\");\n }\n if (options.rect.width !== undefined && (!Number.isInteger(options.rect.width) || options.rect.width < 0)) {\n throw new TypeError(\"options.rect.width, when provided, must be a non-negative integer.\");\n }\n if (options.rect.height !== undefined && (!Number.isInteger(options.rect.height) || options.rect.height < 0)) {\n throw new TypeError(\"options.rect.height, when provided, must be a non-negative integer.\");\n }\n }\n};\nvar createDefaultPlaneLayout = (format, codedWidth, codedHeight) => {\n const planes = getPlaneConfigs(format);\n const layouts = [];\n let currentOffset = 0;\n for (const plane of planes) {\n const planeWidth = Math.ceil(codedWidth / plane.widthDivisor);\n const planeHeight = Math.ceil(codedHeight / plane.heightDivisor);\n const stride = planeWidth * plane.sampleBytes;\n const planeSize = stride * planeHeight;\n layouts.push({\n offset: currentOffset,\n stride\n });\n currentOffset += planeSize;\n }\n return layouts;\n};\nvar 
getPlaneConfigs = (format) => {\n const yuv = (yBytes, uvBytes, subX, subY, hasAlpha) => {\n const configs = [\n { sampleBytes: yBytes, widthDivisor: 1, heightDivisor: 1 },\n { sampleBytes: uvBytes, widthDivisor: subX, heightDivisor: subY },\n { sampleBytes: uvBytes, widthDivisor: subX, heightDivisor: subY }\n ];\n if (hasAlpha) {\n configs.push({ sampleBytes: yBytes, widthDivisor: 1, heightDivisor: 1 });\n }\n return configs;\n };\n switch (format) {\n case \"I420\":\n return yuv(1, 1, 2, 2, false);\n case \"I420P10\":\n case \"I420P12\":\n return yuv(2, 2, 2, 2, false);\n case \"I420A\":\n return yuv(1, 1, 2, 2, true);\n case \"I420AP10\":\n case \"I420AP12\":\n return yuv(2, 2, 2, 2, true);\n case \"I422\":\n return yuv(1, 1, 2, 1, false);\n case \"I422P10\":\n case \"I422P12\":\n return yuv(2, 2, 2, 1, false);\n case \"I422A\":\n return yuv(1, 1, 2, 1, true);\n case \"I422AP10\":\n case \"I422AP12\":\n return yuv(2, 2, 2, 1, true);\n case \"I444\":\n return yuv(1, 1, 1, 1, false);\n case \"I444P10\":\n case \"I444P12\":\n return yuv(2, 2, 1, 1, false);\n case \"I444A\":\n return yuv(1, 1, 1, 1, true);\n case \"I444AP10\":\n case \"I444AP12\":\n return yuv(2, 2, 1, 1, true);\n case \"NV12\":\n return [\n { sampleBytes: 1, widthDivisor: 1, heightDivisor: 1 },\n { sampleBytes: 2, widthDivisor: 2, heightDivisor: 2 }\n ];\n case \"RGBA\":\n case \"RGBX\":\n case \"BGRA\":\n case \"BGRX\":\n return [\n { sampleBytes: 4, widthDivisor: 1, heightDivisor: 1 }\n ];\n default:\n assertNever(format);\n assert(false);\n }\n};\n// NOTE(review): the set of WebCodecs AudioSampleFormat strings accepted by AudioSample below.\nvar AUDIO_SAMPLE_FORMATS = new Set([\"f32\", \"f32-planar\", \"s16\", \"s16-planar\", \"s32\", \"s32-planar\", \"u8\", \"u8-planar\"]);\n\n// Wraps either a WebCodecs AudioData or a raw sample buffer. Timestamps/durations are kept in\n// seconds here, while AudioData uses microseconds (hence the / 1e6 in the constructor and the\n// SECOND_TO_MICROSECOND_FACTOR in the getters).\nclass AudioSample {\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n constructor(init) {\n this._closed = false;\n if (isAudioData(init)) {\n if (init.format 
=== null) {\n throw new TypeError(\"AudioData with null format is not supported.\");\n }\n this._data = init;\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = init.numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp / 1e6;\n this.duration = init.numberOfFrames / init.sampleRate;\n } else {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"Invalid AudioDataInit: must be an object.\");\n }\n if (!AUDIO_SAMPLE_FORMATS.has(init.format)) {\n throw new TypeError(\"Invalid AudioDataInit: invalid format.\");\n }\n if (!Number.isFinite(init.sampleRate) || init.sampleRate <= 0) {\n throw new TypeError(\"Invalid AudioDataInit: sampleRate must be > 0.\");\n }\n if (!Number.isInteger(init.numberOfChannels) || init.numberOfChannels === 0) {\n throw new TypeError(\"Invalid AudioDataInit: numberOfChannels must be an integer > 0.\");\n }\n if (!Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n const numberOfFrames = init.data.byteLength / (getBytesPerSample(init.format) * init.numberOfChannels);\n if (!Number.isInteger(numberOfFrames)) {\n throw new TypeError(\"Invalid AudioDataInit: data size is not a multiple of frame size.\");\n }\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp;\n this.duration = numberOfFrames / init.sampleRate;\n let dataBuffer;\n if (init.data instanceof ArrayBuffer) {\n dataBuffer = new Uint8Array(init.data);\n } else if (ArrayBuffer.isView(init.data)) {\n dataBuffer = new Uint8Array(init.data.buffer, init.data.byteOffset, init.data.byteLength);\n } else {\n throw new TypeError(\"Invalid AudioDataInit: data is not a BufferSource.\");\n }\n const expectedSize = this.numberOfFrames * this.numberOfChannels * getBytesPerSample(this.format);\n if 
(dataBuffer.byteLength < expectedSize) {\n throw new TypeError(\"Invalid AudioDataInit: insufficient data size.\");\n }\n this._data = dataBuffer;\n }\n finalizationRegistry?.register(this, { type: \"audio\", data: this._data }, this);\n }\n // Returns the byte size a copyTo() call with the same options would need; performs the same\n // argument validation as copyTo().\n allocationSize(options) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const destFormat = options.format ?? this.format;\n const frameOffset = options.frameOffset ?? 0;\n if (frameOffset >= this.numberOfFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = options.frameCount !== undefined ? options.frameCount : this.numberOfFrames - frameOffset;\n if (copyFrameCount > this.numberOfFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const bytesPerSample = getBytesPerSample(destFormat);\n const isPlanar = formatIsPlanar(destFormat);\n if (isPlanar && options.planeIndex >= this.numberOfChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!isPlanar && options.planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const elementCount = isPlanar ? 
copyFrameCount : copyFrameCount * this.numberOfChannels;\n return elementCount * bytesPerSample;\n }\n // Copies (and, when requested, format-converts) samples into destination. Delegates to\n // AudioData.copyTo when backed by an AudioData, except on the WebKit workaround path below.\n copyTo(destination, options) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const { planeIndex, format, frameCount: optFrameCount, frameOffset: optFrameOffset } = options;\n const srcFormat = this.format;\n const destFormat = format ?? this.format;\n if (!destFormat)\n throw new Error(\"Destination format not determined\");\n const numFrames = this.numberOfFrames;\n const numChannels = this.numberOfChannels;\n const frameOffset = optFrameOffset ?? 0;\n if (frameOffset >= numFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = optFrameCount !== undefined ? 
optFrameCount : numFrames - frameOffset;\n if (copyFrameCount > numFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const destBytesPerSample = getBytesPerSample(destFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar && planeIndex >= numChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!destIsPlanar && planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const destElementCount = destIsPlanar ? copyFrameCount : copyFrameCount * numChannels;\n const requiredSize = destElementCount * destBytesPerSample;\n if (destination.byteLength < requiredSize) {\n throw new RangeError(\"Destination buffer is too small\");\n }\n const destView = toDataView(destination);\n const writeFn = getWriteFunction(destFormat);\n if (isAudioData(this._data)) {\n if (isWebKit() && numChannels > 2 && destFormat !== srcFormat) {\n doAudioDataCopyToWebKitWorkaround(this._data, destView, srcFormat, destFormat, numChannels, planeIndex, frameOffset, copyFrameCount);\n } else {\n this._data.copyTo(destination, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: destFormat\n });\n }\n } else {\n const uint8Data = this._data;\n const srcView = toDataView(uint8Data);\n const readFn = getReadFunction(srcFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n for (let i = 0;i < copyFrameCount; i++) {\n if (destIsPlanar) {\n const destOffset = i * destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (planeIndex * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + planeIndex) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n } else {\n for (let ch = 0;ch < numChannels; ch++) {\n const destIndex = i * numChannels + ch;\n const destOffset = destIndex * 
destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (ch * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + ch) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n }\n }\n }\n }\n }\n clone() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n const sample = new AudioSample(this._data.clone());\n sample.setTimestamp(this.timestamp);\n return sample;\n } else {\n return new AudioSample({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.timestamp,\n data: this._data\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isAudioData(this._data)) {\n this._data.close();\n } else {\n this._data = new Uint8Array(0);\n }\n this._closed = true;\n }\n // Materializes a WebCodecs AudioData. Re-copies the data when our (seconds-based) timestamp\n // no longer matches the backing AudioData's microsecond timestamp (e.g. after setTimestamp).\n toAudioData() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n if (this._data.timestamp === this.microsecondTimestamp) {\n return this._data.clone();\n } else {\n if (formatIsPlanar(this.format)) {\n const size = this.allocationSize({ planeIndex: 0, format: this.format });\n const data = new ArrayBuffer(size * this.numberOfChannels);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(new Uint8Array(data, i * size, size), { planeIndex: i, format: this.format });\n }\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n } else {\n const data = new ArrayBuffer(this.allocationSize({ planeIndex: 0, format: this.format }));\n this.copyTo(data, { planeIndex: 0, format: this.format });\n return new AudioData({\n format: this.format,\n 
sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n }\n }\n } else {\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data: this._data.buffer instanceof ArrayBuffer ? this._data.buffer : this._data.slice()\n });\n }\n }\n toAudioBuffer() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const audioBuffer = new AudioBuffer({\n numberOfChannels: this.numberOfChannels,\n length: this.numberOfFrames,\n sampleRate: this.sampleRate\n });\n const dataBytes = new Float32Array(this.allocationSize({ planeIndex: 0, format: \"f32-planar\" }) / 4);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(dataBytes, { planeIndex: i, format: \"f32-planar\" });\n audioBuffer.copyToChannel(dataBytes, i);\n }\n return audioBuffer;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n [Symbol.dispose]() {\n this.close();\n }\n // Generator variant of fromAudioBuffer: chunks an AudioBuffer into f32-planar AudioSamples of\n // at most MAX_FLOAT_COUNT (48000 * 5) floats each.\n static *_fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n 
audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n yield new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n }\n static fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n const result = [];\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n const audioSample = new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n result.push(audioSample);\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n return result;\n }\n}\n// Byte width of one sample for each AudioSampleFormat.\nvar getBytesPerSample = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return 1;\n case \"s16\":\n case \"s16-planar\":\n return 2;\n case \"s32\":\n case \"s32-planar\":\n return 4;\n case \"f32\":\n case \"f32-planar\":\n return 4;\n default:\n throw new Error(\"Unknown AudioSampleFormat\");\n }\n};\nvar 
formatIsPlanar = (format) => {\n switch (format) {\n case \"u8-planar\":\n case \"s16-planar\":\n case \"s32-planar\":\n case \"f32-planar\":\n return true;\n default:\n return false;\n }\n};\n// Readers normalize each sample to a float in roughly [-1, 1]; writers perform the inverse\n// with clamping. All integer formats are little-endian.\nvar getReadFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset) => (view.getUint8(offset) - 128) / 128;\n case \"s16\":\n case \"s16-planar\":\n return (view, offset) => view.getInt16(offset, true) / 32768;\n case \"s32\":\n case \"s32-planar\":\n return (view, offset) => view.getInt32(offset, true) / 2147483648;\n case \"f32\":\n case \"f32-planar\":\n return (view, offset) => view.getFloat32(offset, true);\n }\n};\nvar getWriteFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset, value) => view.setUint8(offset, clamp((value + 1) * 127.5, 0, 255));\n case \"s16\":\n case \"s16-planar\":\n return (view, offset, value) => view.setInt16(offset, clamp(Math.round(value * 32767), -32768, 32767), true);\n case \"s32\":\n case \"s32-planar\":\n return (view, offset, value) => view.setInt32(offset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), true);\n case \"f32\":\n case \"f32-planar\":\n return (view, offset, value) => view.setFloat32(offset, value, true);\n }\n};\nvar isAudioData = (x) => {\n return typeof AudioData !== \"undefined\" && x instanceof AudioData;\n};\n// NOTE(review): manual copy path used when WebKit's AudioData.copyTo is not trusted to convert\n// formats with more than 2 channels (see the isWebKit() guard at the call site): copy in the\n// source format, then convert sample-by-sample.\nvar doAudioDataCopyToWebKitWorkaround = (audioData, destView, srcFormat, destFormat, numChannels, planeIndex, frameOffset, copyFrameCount) => {\n const readFn = getReadFunction(srcFormat);\n const writeFn = getWriteFunction(destFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const destBytesPerSample = getBytesPerSample(destFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar) {\n if (srcIsPlanar) {\n const data = new ArrayBuffer(copyFrameCount * srcBytesPerSample);\n const dataView = toDataView(data);\n 
audioData.copyTo(data, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = i * srcBytesPerSample;\n const destOffset = i * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n } else {\n const data = new ArrayBuffer(copyFrameCount * numChannels * srcBytesPerSample);\n const dataView = toDataView(data);\n audioData.copyTo(data, {\n planeIndex: 0,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = (i * numChannels + planeIndex) * srcBytesPerSample;\n const destOffset = i * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n if (srcIsPlanar) {\n const planeSize = copyFrameCount * srcBytesPerSample;\n const data = new ArrayBuffer(planeSize);\n const dataView = toDataView(data);\n for (let ch = 0;ch < numChannels; ch++) {\n audioData.copyTo(data, {\n planeIndex: ch,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = i * srcBytesPerSample;\n const destOffset = (i * numChannels + ch) * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n const data = new ArrayBuffer(copyFrameCount * numChannels * srcBytesPerSample);\n const dataView = toDataView(data);\n audioData.copyTo(data, {\n planeIndex: 0,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n for (let ch = 0;ch < numChannels; ch++) {\n const idx = i * numChannels + ch;\n const srcOffset = idx * srcBytesPerSample;\n const destOffset = idx * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n }\n }\n};\n\n// 
../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-misc.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n// Builds an RFC 6381-style MIME string (e.g. video/mp4; codecs=\"...\") from track info.\nvar buildIsobmffMimeType = (info) => {\n const base = info.hasVideo ? \"video/\" : info.hasAudio ? \"audio/\" : \"application/\";\n let string = base + (info.isQuickTime ? \"quicktime\" : \"mp4\");\n if (info.codecStrings.length > 0) {\n const uniqueCodecMimeTypes = [...new Set(info.codecStrings)];\n string += `; codecs=\"${uniqueCodecMimeTypes.join(\", \")}\"`;\n }\n return string;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-reader.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_BOX_HEADER_SIZE = 8;\nvar MAX_BOX_HEADER_SIZE = 16;\n\n// ../../node_modules/mediabunny/dist/modules/src/subtitles.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar inlineTimestampRegex = /<(?:(\\d{2}):)?(\\d{2}):(\\d{2}).(\\d{3})>/g;\nvar formatSubtitleTimestamp = (timestamp) => {\n const hours = Math.floor(timestamp / (60 * 60 * 1000));\n const minutes = Math.floor(timestamp % (60 * 60 * 1000) / (60 * 1000));\n const seconds = Math.floor(timestamp % (60 * 1000) / 1000);\n const milliseconds = timestamp % 1000;\n return hours.toString().padStart(2, \"0\") + \":\" + minutes.toString().padStart(2, \"0\") + \":\" + seconds.toString().padStart(2, \"0\") + \".\" + milliseconds.toString().padStart(3, \"0\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-boxes.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\n// Serializes ISOBMFF box trees to a writer. Each box's start offset is remembered in 'offsets'\n// so patchBox() can later rewrite a box in place (e.g. once its final size is known).\nclass IsobmffBoxWriter {\n constructor(writer) {\n this.writer = writer;\n this.helper = new Uint8Array(8);\n this.helperView = new DataView(this.helper.buffer);\n this.offsets = new WeakMap;\n }\n writeU32(value) {\n this.helperView.setUint32(0, value, false);\n this.writer.write(this.helper.subarray(0, 4));\n }\n writeU64(value) {\n this.helperView.setUint32(0, Math.floor(value / 2 ** 32), false);\n this.helperView.setUint32(4, value, false);\n this.writer.write(this.helper.subarray(0, 8));\n }\n writeAscii(text) {\n for (let i = 0;i < text.length; i++) {\n this.helperView.setUint8(i % 8, text.charCodeAt(i));\n if (i % 8 === 7)\n this.writer.write(this.helper);\n }\n if (text.length % 8 !== 0) {\n this.writer.write(this.helper.subarray(0, text.length % 8));\n }\n }\n writeBox(box) {\n this.offsets.set(box, this.writer.getPos());\n if (box.contents && !box.children) {\n this.writeBoxHeader(box, box.size ?? 
box.contents.byteLength + 8);\n this.writer.write(box.contents);\n } else {\n const startPos = this.writer.getPos();\n this.writeBoxHeader(box, 0);\n if (box.contents)\n this.writer.write(box.contents);\n if (box.children) {\n for (const child of box.children)\n if (child)\n this.writeBox(child);\n }\n const endPos = this.writer.getPos();\n const size = box.size ?? endPos - startPos;\n this.writer.seek(startPos);\n this.writeBoxHeader(box, size);\n this.writer.seek(endPos);\n }\n }\n writeBoxHeader(box, size) {\n this.writeU32(box.largeSize ? 1 : size);\n this.writeAscii(box.type);\n if (box.largeSize)\n this.writeU64(size);\n }\n measureBoxHeader(box) {\n return 8 + (box.largeSize ? 8 : 0);\n }\n patchBox(box) {\n const boxOffset = this.offsets.get(box);\n assert(boxOffset !== undefined);\n const endPos = this.writer.getPos();\n this.writer.seek(boxOffset);\n this.writeBox(box);\n this.writer.seek(endPos);\n }\n measureBox(box) {\n if (box.contents && !box.children) {\n const headerSize = this.measureBoxHeader(box);\n return headerSize + box.contents.byteLength;\n } else {\n let result = this.measureBoxHeader(box);\n if (box.contents)\n result += box.contents.byteLength;\n if (box.children) {\n for (const child of box.children)\n if (child)\n result += this.measureBox(child);\n }\n return result;\n }\n }\n}\n// Shared big-endian serialization scratch buffer; the u8/u16/.../fixed_* helpers below all write\n// through it, so they are not reentrant.\nvar bytes = /* @__PURE__ */ new Uint8Array(8);\nvar view = /* @__PURE__ */ new DataView(bytes.buffer);\nvar u8 = (value) => {\n return [(value % 256 + 256) % 256];\n};\nvar u16 = (value) => {\n view.setUint16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar i16 = (value) => {\n view.setInt16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar u24 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[1], bytes[2], bytes[3]];\n};\nvar u32 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar i32 = (value) => {\n view.setInt32(0, value, false);\n return [bytes[0], bytes[1], 
bytes[2], bytes[3]];\n};\nvar u64 = (value) => {\n view.setUint32(0, Math.floor(value / 2 ** 32), false);\n view.setUint32(4, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7]];\n};\nvar fixed_8_8 = (value) => {\n view.setInt16(0, 2 ** 8 * value, false);\n return [bytes[0], bytes[1]];\n};\nvar fixed_16_16 = (value) => {\n view.setInt32(0, 2 ** 16 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar fixed_2_30 = (value) => {\n view.setInt32(0, 2 ** 30 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar variableUnsignedInt = (value, byteLength) => {\n const bytes2 = [];\n let remaining = value;\n do {\n let byte = remaining & 127;\n remaining >>= 7;\n if (bytes2.length > 0) {\n byte |= 128;\n }\n bytes2.push(byte);\n if (byteLength !== undefined) {\n byteLength--;\n }\n } while (remaining > 0 || byteLength);\n return bytes2.reverse();\n};\nvar ascii = (text, nullTerminated = false) => {\n const bytes2 = Array(text.length).fill(null).map((_, i) => text.charCodeAt(i));\n if (nullTerminated)\n bytes2.push(0);\n return bytes2;\n};\nvar lastPresentedSample = (samples) => {\n let result = null;\n for (const sample of samples) {\n if (!result || sample.timestamp > result.timestamp) {\n result = sample;\n }\n }\n return result;\n};\nvar rotationMatrix = (rotationInDegrees) => {\n const theta = rotationInDegrees * (Math.PI / 180);\n const cosTheta = Math.round(Math.cos(theta));\n const sinTheta = Math.round(Math.sin(theta));\n return [\n cosTheta,\n sinTheta,\n 0,\n -sinTheta,\n cosTheta,\n 0,\n 0,\n 0,\n 1\n ];\n};\nvar IDENTITY_MATRIX = /* @__PURE__ */ rotationMatrix(0);\nvar matrixToBytes = (matrix) => {\n return [\n fixed_16_16(matrix[0]),\n fixed_16_16(matrix[1]),\n fixed_2_30(matrix[2]),\n fixed_16_16(matrix[3]),\n fixed_16_16(matrix[4]),\n fixed_2_30(matrix[5]),\n fixed_16_16(matrix[6]),\n fixed_16_16(matrix[7]),\n fixed_2_30(matrix[8])\n ];\n};\nvar box = (type, 
contents, children) => ({\n type,\n contents: contents && new Uint8Array(contents.flat(10)),\n children\n});\nvar fullBox = (type, version, flags, contents, children) => box(type, [u8(version), u24(flags), contents ?? []], children);\nvar ftyp = (details) => {\n const minorVersion = 512;\n if (details.isQuickTime) {\n return box(\"ftyp\", [\n ascii(\"qt \"),\n u32(minorVersion),\n ascii(\"qt \")\n ]);\n }\n if (details.fragmented) {\n return box(\"ftyp\", [\n ascii(\"iso5\"),\n u32(minorVersion),\n ascii(\"iso5\"),\n ascii(\"iso6\"),\n ascii(\"mp41\")\n ]);\n }\n return box(\"ftyp\", [\n ascii(\"isom\"),\n u32(minorVersion),\n ascii(\"isom\"),\n details.holdsAvc ? ascii(\"avc1\") : [],\n ascii(\"mp41\")\n ]);\n};\nvar mdat = (reserveLargeSize) => ({ type: \"mdat\", largeSize: reserveLargeSize });\nvar free = (size) => ({ type: \"free\", size });\n// moov: one 'trak' per track; 'mvex' is only emitted for fragmented files.\nvar moov = (muxer) => box(\"moov\", undefined, [\n mvhd(muxer.creationTime, muxer.trackDatas),\n ...muxer.trackDatas.map((x) => trak(x, muxer.creationTime)),\n muxer.isFragmented ? mvex(muxer.trackDatas) : null,\n udta(muxer)\n]);\n// Movie header; switches to 64-bit (version 1) fields when creationTime or duration overflows u32.\nvar mvhd = (creationTime, trackDatas) => {\n const duration = intoTimescale(Math.max(0, ...trackDatas.filter((x) => x.samples.length > 0).map((x) => {\n const lastSample = lastPresentedSample(x.samples);\n return lastSample.timestamp + lastSample.duration;\n })), GLOBAL_TIMESCALE);\n const nextTrackId = Math.max(0, ...trackDatas.map((x) => x.track.id)) + 1;\n const needsU64 = !isU32(creationTime) || !isU32(duration);\n const u32OrU64 = needsU64 ? 
u64 : u32;\n return fullBox(\"mvhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(GLOBAL_TIMESCALE),\n u32OrU64(duration),\n fixed_16_16(1),\n fixed_8_8(1),\n Array(10).fill(0),\n matrixToBytes(IDENTITY_MATRIX),\n Array(24).fill(0),\n u32(nextTrackId)\n ]);\n};\nvar trak = (trackData, creationTime) => {\n const trackMetadata = getTrackMetadata(trackData);\n return box(\"trak\", undefined, [\n tkhd(trackData, creationTime),\n mdia(trackData, creationTime),\n trackMetadata.name !== undefined ? box(\"udta\", undefined, [\n box(\"name\", [\n ...textEncoder.encode(trackMetadata.name)\n ])\n ]) : null\n ]);\n};\nvar tkhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const durationInGlobalTimescale = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, GLOBAL_TIMESCALE);\n const needsU64 = !isU32(creationTime) || !isU32(durationInGlobalTimescale);\n const u32OrU64 = needsU64 ? u64 : u32;\n let matrix;\n if (trackData.type === \"video\") {\n const rotation = trackData.track.metadata.rotation;\n matrix = rotationMatrix(rotation ?? 0);\n } else {\n matrix = IDENTITY_MATRIX;\n }\n let flags = 2;\n if (trackData.track.metadata.disposition?.default !== false) {\n flags |= 1;\n }\n return fullBox(\"tkhd\", +needsU64, flags, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.track.id),\n u32(0),\n u32OrU64(durationInGlobalTimescale),\n Array(8).fill(0),\n u16(0),\n u16(trackData.track.id),\n fixed_8_8(trackData.type === \"audio\" ? 1 : 0),\n u16(0),\n matrixToBytes(matrix),\n fixed_16_16(trackData.type === \"video\" ? trackData.info.width : 0),\n fixed_16_16(trackData.type === \"video\" ? 
trackData.info.height : 0)\n ]);\n};\nvar mdia = (trackData, creationTime) => box(\"mdia\", undefined, [\n mdhd(trackData, creationTime),\n hdlr(true, TRACK_TYPE_TO_COMPONENT_SUBTYPE[trackData.type], TRACK_TYPE_TO_HANDLER_NAME[trackData.type]),\n minf(trackData)\n]);\nvar mdhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const localDuration = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, trackData.timescale);\n const needsU64 = !isU32(creationTime) || !isU32(localDuration);\n const u32OrU64 = needsU64 ? u64 : u32;\n return fullBox(\"mdhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.timescale),\n u32OrU64(localDuration),\n u16(getLanguageCodeInt(trackData.track.metadata.languageCode ?? UNDETERMINED_LANGUAGE)),\n u16(0)\n ]);\n};\nvar TRACK_TYPE_TO_COMPONENT_SUBTYPE = {\n video: \"vide\",\n audio: \"soun\",\n subtitle: \"text\"\n};\nvar TRACK_TYPE_TO_HANDLER_NAME = {\n video: \"MediabunnyVideoHandler\",\n audio: \"MediabunnySoundHandler\",\n subtitle: \"MediabunnyTextHandler\"\n};\nvar hdlr = (hasComponentType, handlerType, name, manufacturer = \"\\x00\\x00\\x00\\x00\") => fullBox(\"hdlr\", 0, 0, [\n hasComponentType ? 
ascii(\"mhlr\") : u32(0),\n ascii(handlerType),\n ascii(manufacturer),\n u32(0),\n u32(0),\n ascii(name, true)\n]);\nvar minf = (trackData) => box(\"minf\", undefined, [\n TRACK_TYPE_TO_HEADER_BOX[trackData.type](),\n dinf(),\n stbl(trackData)\n]);\nvar vmhd = () => fullBox(\"vmhd\", 0, 1, [\n u16(0),\n u16(0),\n u16(0),\n u16(0)\n]);\nvar smhd = () => fullBox(\"smhd\", 0, 0, [\n u16(0),\n u16(0)\n]);\nvar nmhd = () => fullBox(\"nmhd\", 0, 0);\nvar TRACK_TYPE_TO_HEADER_BOX = {\n video: vmhd,\n audio: smhd,\n subtitle: nmhd\n};\nvar dinf = () => box(\"dinf\", undefined, [\n dref()\n]);\nvar dref = () => fullBox(\"dref\", 0, 0, [\n u32(1)\n], [\n url()\n]);\nvar url = () => fullBox(\"url \", 0, 1);\n// Sample table; 'ctts'/'cslg' are only written when composition offsets actually differ.\nvar stbl = (trackData) => {\n const needsCtts = trackData.compositionTimeOffsetTable.length > 1 || trackData.compositionTimeOffsetTable.some((x) => x.sampleCompositionTimeOffset !== 0);\n return box(\"stbl\", undefined, [\n stsd(trackData),\n stts(trackData),\n needsCtts ? ctts(trackData) : null,\n needsCtts ? 
cslg(trackData) : null,\n stsc(trackData),\n stsz(trackData),\n stco(trackData),\n stss(trackData)\n ]);\n};\nvar stsd = (trackData) => {\n let sampleDescription;\n if (trackData.type === \"video\") {\n sampleDescription = videoSampleDescription(videoCodecToBoxName(trackData.track.source._codec, trackData.info.decoderConfig.codec), trackData);\n } else if (trackData.type === \"audio\") {\n const boxName = audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime);\n assert(boxName);\n sampleDescription = soundSampleDescription(boxName, trackData);\n } else if (trackData.type === \"subtitle\") {\n sampleDescription = subtitleSampleDescription(SUBTITLE_CODEC_TO_BOX_NAME[trackData.track.source._codec], trackData);\n }\n assert(sampleDescription);\n return fullBox(\"stsd\", 0, 0, [\n u32(1)\n ], [\n sampleDescription\n ]);\n};\nvar videoSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1),\n u16(0),\n u16(0),\n Array(12).fill(0),\n u16(trackData.info.width),\n u16(trackData.info.height),\n u32(4718592),\n u32(4718592),\n u32(0),\n u16(1),\n Array(32).fill(0),\n u16(24),\n i16(65535)\n], [\n VIDEO_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData),\n colorSpaceIsComplete(trackData.info.decoderConfig.colorSpace) ? colr(trackData) : null\n]);\nvar colr = (trackData) => box(\"colr\", [\n ascii(\"nclx\"),\n u16(COLOR_PRIMARIES_MAP[trackData.info.decoderConfig.colorSpace.primaries]),\n u16(TRANSFER_CHARACTERISTICS_MAP[trackData.info.decoderConfig.colorSpace.transfer]),\n u16(MATRIX_COEFFICIENTS_MAP[trackData.info.decoderConfig.colorSpace.matrix]),\n u8((trackData.info.decoderConfig.colorSpace.fullRange ? 
1 : 0) << 7)\n]);\nvar avcC = (trackData) => trackData.info.decoderConfig && box(\"avcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar hvcC = (trackData) => trackData.info.decoderConfig && box(\"hvcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\n// vpcC: VP8/VP9 config derived from the codec string (vp09.PP.LL.DD...), falling back to the\n// decoder config's color space when the optional codec-string fields are absent.\nvar vpcC = (trackData) => {\n if (!trackData.info.decoderConfig) {\n return null;\n }\n const decoderConfig = trackData.info.decoderConfig;\n const parts = decoderConfig.codec.split(\".\");\n const profile = Number(parts[1]);\n const level = Number(parts[2]);\n const bitDepth = Number(parts[3]);\n const chromaSubsampling = parts[4] ? Number(parts[4]) : 1;\n const videoFullRangeFlag = parts[8] ? Number(parts[8]) : Number(decoderConfig.colorSpace?.fullRange ?? 0);\n const thirdByte = (bitDepth << 4) + (chromaSubsampling << 1) + videoFullRangeFlag;\n const colourPrimaries = parts[5] ? Number(parts[5]) : decoderConfig.colorSpace?.primaries ? COLOR_PRIMARIES_MAP[decoderConfig.colorSpace.primaries] : 2;\n const transferCharacteristics = parts[6] ? Number(parts[6]) : decoderConfig.colorSpace?.transfer ? TRANSFER_CHARACTERISTICS_MAP[decoderConfig.colorSpace.transfer] : 2;\n const matrixCoefficients = parts[7] ? Number(parts[7]) : decoderConfig.colorSpace?.matrix ? 
MATRIX_COEFFICIENTS_MAP[decoderConfig.colorSpace.matrix] : 2;\n return fullBox(\"vpcC\", 1, 0, [\n u8(profile),\n u8(level),\n u8(thirdByte),\n u8(colourPrimaries),\n u8(transferCharacteristics),\n u8(matrixCoefficients),\n u16(0)\n ]);\n};\nvar av1C = (trackData) => {\n return box(\"av1C\", generateAv1CodecConfigurationFromCodecString(trackData.info.decoderConfig.codec));\n};\n// Audio sample entry; version 1 (the extended layout) is only used for PCM wider than 16 bits.\nvar soundSampleDescription = (compressionType, trackData) => {\n let version = 0;\n let contents;\n let sampleSizeInBits = 16;\n if (PCM_AUDIO_CODECS.includes(trackData.track.source._codec)) {\n const codec = trackData.track.source._codec;\n const { sampleSize } = parsePcmCodec(codec);\n sampleSizeInBits = 8 * sampleSize;\n if (sampleSizeInBits > 16) {\n version = 1;\n }\n }\n if (version === 0) {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(sampleSizeInBits),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0)\n ];\n } else {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(Math.min(sampleSizeInBits, 16)),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0),\n u32(1),\n u32(sampleSizeInBits / 8),\n u32(trackData.info.numberOfChannels * sampleSizeInBits / 8),\n u32(2)\n ];\n }\n return box(compressionType, contents, [\n audioCodecToConfigurationBox(trackData.track.source._codec, trackData.muxer.isQuickTime)?.(trackData) ?? 
null\n ]);\n};\n// esds: MPEG-4 elementary stream descriptor; object type indications 64 = AAC, 107 = MP3,\n// 221 = Vorbis (per the switch below).\nvar esds = (trackData) => {\n let objectTypeIndication;\n switch (trackData.track.source._codec) {\n case \"aac\":\n {\n objectTypeIndication = 64;\n }\n ;\n break;\n case \"mp3\":\n {\n objectTypeIndication = 107;\n }\n ;\n break;\n case \"vorbis\":\n {\n objectTypeIndication = 221;\n }\n ;\n break;\n default:\n throw new Error(`Unhandled audio codec: ${trackData.track.source._codec}`);\n }\n let bytes2 = [\n ...u8(objectTypeIndication),\n ...u8(21),\n ...u24(0),\n ...u32(0),\n ...u32(0)\n ];\n if (trackData.info.decoderConfig.description) {\n const description = toUint8Array(trackData.info.decoderConfig.description);\n bytes2 = [\n ...bytes2,\n ...u8(5),\n ...variableUnsignedInt(description.byteLength),\n ...description\n ];\n }\n bytes2 = [\n ...u16(1),\n ...u8(0),\n ...u8(4),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2,\n ...u8(6),\n ...u8(1),\n ...u8(2)\n ];\n bytes2 = [\n ...u8(3),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2\n ];\n return fullBox(\"esds\", 0, 0, bytes2);\n};\nvar wave = (trackData) => {\n return box(\"wave\", undefined, [\n frma(trackData),\n enda(trackData),\n box(\"\\x00\\x00\\x00\\x00\")\n ]);\n};\nvar frma = (trackData) => {\n return box(\"frma\", [\n ascii(audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime))\n ]);\n};\nvar enda = (trackData) => {\n const { littleEndian } = parsePcmCodec(trackData.track.source._codec);\n return box(\"enda\", [\n u16(+littleEndian)\n ]);\n};\n// dOps: Opus-in-ISOBMFF config; defaults are overridden by the OpusHead identification header\n// when a decoderConfig description is present.\nvar dOps = (trackData) => {\n let outputChannelCount = trackData.info.numberOfChannels;\n let preSkip = 3840;\n let inputSampleRate = trackData.info.sampleRate;\n let outputGain = 0;\n let channelMappingFamily = 0;\n let channelMappingTable = new Uint8Array(0);\n const description = trackData.info.decoderConfig?.description;\n if (description) {\n assert(description.byteLength >= 18);\n const bytes2 = toUint8Array(description);\n const header = parseOpusIdentificationHeader(bytes2);\n outputChannelCount = 
header.outputChannelCount;\n preSkip = header.preSkip;\n inputSampleRate = header.inputSampleRate;\n outputGain = header.outputGain;\n channelMappingFamily = header.channelMappingFamily;\n if (header.channelMappingTable) {\n channelMappingTable = header.channelMappingTable;\n }\n }\n return box(\"dOps\", [\n u8(0),\n u8(outputChannelCount),\n u16(preSkip),\n u32(inputSampleRate),\n i16(outputGain),\n u8(channelMappingFamily),\n ...channelMappingTable\n ]);\n};\nvar dfLa = (trackData) => {\n const description = trackData.info.decoderConfig?.description;\n assert(description);\n const bytes2 = toUint8Array(description);\n return fullBox(\"dfLa\", 0, 0, [\n ...bytes2.subarray(4)\n ]);\n};\nvar pcmC = (trackData) => {\n const { littleEndian, sampleSize } = parsePcmCodec(trackData.track.source._codec);\n const formatFlags = +littleEndian;\n return fullBox(\"pcmC\", 0, 0, [\n u8(formatFlags),\n u8(8 * sampleSize)\n ]);\n};\nvar subtitleSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1)\n], [\n SUBTITLE_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData)\n]);\nvar vttC = (trackData) => box(\"vttC\", [\n ...textEncoder.encode(trackData.info.config.description)\n]);\nvar stts = (trackData) => {\n return fullBox(\"stts\", 0, 0, [\n u32(trackData.timeToSampleTable.length),\n trackData.timeToSampleTable.map((x) => [\n u32(x.sampleCount),\n u32(x.sampleDelta)\n ])\n ]);\n};\n// stss: sync-sample table; omitted entirely when every sample is a keyframe.\nvar stss = (trackData) => {\n if (trackData.samples.every((x) => x.type === \"key\"))\n return null;\n const keySamples = [...trackData.samples.entries()].filter(([, sample]) => sample.type === \"key\");\n return fullBox(\"stss\", 0, 0, [\n u32(keySamples.length),\n keySamples.map(([index]) => u32(index + 1))\n ]);\n};\nvar stsc = (trackData) => {\n return fullBox(\"stsc\", 0, 0, [\n u32(trackData.compactlyCodedChunkTable.length),\n trackData.compactlyCodedChunkTable.map((x) => [\n u32(x.firstChunk),\n u32(x.samplesPerChunk),\n u32(1)\n 
])\n ]);\n};\nvar stsz = (trackData) => {\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n const { sampleSize } = parsePcmCodec(trackData.track.source._codec);\n return fullBox(\"stsz\", 0, 0, [\n u32(sampleSize * trackData.info.numberOfChannels),\n u32(trackData.samples.reduce((acc, x) => acc + intoTimescale(x.duration, trackData.timescale), 0))\n ]);\n }\n return fullBox(\"stsz\", 0, 0, [\n u32(0),\n u32(trackData.samples.length),\n trackData.samples.map((x) => u32(x.size))\n ]);\n};\nvar stco = (trackData) => {\n if (trackData.finalizedChunks.length > 0 && last(trackData.finalizedChunks).offset >= 2 ** 32) {\n return fullBox(\"co64\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u64(x.offset))\n ]);\n }\n return fullBox(\"stco\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u32(x.offset))\n ]);\n};\nvar ctts = (trackData) => {\n return fullBox(\"ctts\", 1, 0, [\n u32(trackData.compositionTimeOffsetTable.length),\n trackData.compositionTimeOffsetTable.map((x) => [\n u32(x.sampleCount),\n i32(x.sampleCompositionTimeOffset)\n ])\n ]);\n};\nvar cslg = (trackData) => {\n let leastDecodeToDisplayDelta = Infinity;\n let greatestDecodeToDisplayDelta = -Infinity;\n let compositionStartTime = Infinity;\n let compositionEndTime = -Infinity;\n assert(trackData.compositionTimeOffsetTable.length > 0);\n assert(trackData.samples.length > 0);\n for (let i = 0;i < trackData.compositionTimeOffsetTable.length; i++) {\n const entry = trackData.compositionTimeOffsetTable[i];\n leastDecodeToDisplayDelta = Math.min(leastDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n greatestDecodeToDisplayDelta = Math.max(greatestDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n }\n for (let i = 0;i < trackData.samples.length; i++) {\n const sample = trackData.samples[i];\n compositionStartTime = Math.min(compositionStartTime, intoTimescale(sample.timestamp, 
trackData.timescale));\n compositionEndTime = Math.max(compositionEndTime, intoTimescale(sample.timestamp + sample.duration, trackData.timescale));\n }\n const compositionToDtsShift = Math.max(-leastDecodeToDisplayDelta, 0);\n if (compositionEndTime >= 2 ** 31) {\n return null;\n }\n return fullBox(\"cslg\", 0, 0, [\n i32(compositionToDtsShift),\n i32(leastDecodeToDisplayDelta),\n i32(greatestDecodeToDisplayDelta),\n i32(compositionStartTime),\n i32(compositionEndTime)\n ]);\n};\nvar mvex = (trackDatas) => {\n return box(\"mvex\", undefined, trackDatas.map(trex));\n};\nvar trex = (trackData) => {\n return fullBox(\"trex\", 0, 0, [\n u32(trackData.track.id),\n u32(1),\n u32(0),\n u32(0),\n u32(0)\n ]);\n};\nvar moof = (sequenceNumber, trackDatas) => {\n return box(\"moof\", undefined, [\n mfhd(sequenceNumber),\n ...trackDatas.map(traf)\n ]);\n};\nvar mfhd = (sequenceNumber) => {\n return fullBox(\"mfhd\", 0, 0, [\n u32(sequenceNumber)\n ]);\n};\nvar fragmentSampleFlags = (sample) => {\n let byte1 = 0;\n let byte2 = 0;\n const byte3 = 0;\n const byte4 = 0;\n const sampleIsDifferenceSample = sample.type === \"delta\";\n byte2 |= +sampleIsDifferenceSample;\n if (sampleIsDifferenceSample) {\n byte1 |= 1;\n } else {\n byte1 |= 2;\n }\n return byte1 << 24 | byte2 << 16 | byte3 << 8 | byte4;\n};\nvar traf = (trackData) => {\n return box(\"traf\", undefined, [\n tfhd(trackData),\n tfdt(trackData),\n trun(trackData)\n ]);\n};\nvar tfhd = (trackData) => {\n assert(trackData.currentChunk);\n let tfFlags = 0;\n tfFlags |= 8;\n tfFlags |= 16;\n tfFlags |= 32;\n tfFlags |= 131072;\n const referenceSample = trackData.currentChunk.samples[1] ?? 
trackData.currentChunk.samples[0];\n const referenceSampleInfo = {\n duration: referenceSample.timescaleUnitsToNextSample,\n size: referenceSample.size,\n flags: fragmentSampleFlags(referenceSample)\n };\n return fullBox(\"tfhd\", 0, tfFlags, [\n u32(trackData.track.id),\n u32(referenceSampleInfo.duration),\n u32(referenceSampleInfo.size),\n u32(referenceSampleInfo.flags)\n ]);\n};\nvar tfdt = (trackData) => {\n assert(trackData.currentChunk);\n return fullBox(\"tfdt\", 1, 0, [\n u64(intoTimescale(trackData.currentChunk.startTimestamp, trackData.timescale))\n ]);\n};\nvar trun = (trackData) => {\n assert(trackData.currentChunk);\n const allSampleDurations = trackData.currentChunk.samples.map((x) => x.timescaleUnitsToNextSample);\n const allSampleSizes = trackData.currentChunk.samples.map((x) => x.size);\n const allSampleFlags = trackData.currentChunk.samples.map(fragmentSampleFlags);\n const allSampleCompositionTimeOffsets = trackData.currentChunk.samples.map((x) => intoTimescale(x.timestamp - x.decodeTimestamp, trackData.timescale));\n const uniqueSampleDurations = new Set(allSampleDurations);\n const uniqueSampleSizes = new Set(allSampleSizes);\n const uniqueSampleFlags = new Set(allSampleFlags);\n const uniqueSampleCompositionTimeOffsets = new Set(allSampleCompositionTimeOffsets);\n const firstSampleFlagsPresent = uniqueSampleFlags.size === 2 && allSampleFlags[0] !== allSampleFlags[1];\n const sampleDurationPresent = uniqueSampleDurations.size > 1;\n const sampleSizePresent = uniqueSampleSizes.size > 1;\n const sampleFlagsPresent = !firstSampleFlagsPresent && uniqueSampleFlags.size > 1;\n const sampleCompositionTimeOffsetsPresent = uniqueSampleCompositionTimeOffsets.size > 1 || [...uniqueSampleCompositionTimeOffsets].some((x) => x !== 0);\n let flags = 0;\n flags |= 1;\n flags |= 4 * +firstSampleFlagsPresent;\n flags |= 256 * +sampleDurationPresent;\n flags |= 512 * +sampleSizePresent;\n flags |= 1024 * +sampleFlagsPresent;\n flags |= 2048 * 
+sampleCompositionTimeOffsetsPresent;\n return fullBox(\"trun\", 1, flags, [\n u32(trackData.currentChunk.samples.length),\n u32(trackData.currentChunk.offset - trackData.currentChunk.moofOffset || 0),\n firstSampleFlagsPresent ? u32(allSampleFlags[0]) : [],\n trackData.currentChunk.samples.map((_, i) => [\n sampleDurationPresent ? u32(allSampleDurations[i]) : [],\n sampleSizePresent ? u32(allSampleSizes[i]) : [],\n sampleFlagsPresent ? u32(allSampleFlags[i]) : [],\n sampleCompositionTimeOffsetsPresent ? i32(allSampleCompositionTimeOffsets[i]) : []\n ])\n ]);\n};\nvar mfra = (trackDatas) => {\n return box(\"mfra\", undefined, [\n ...trackDatas.map(tfra),\n mfro()\n ]);\n};\nvar tfra = (trackData, trackIndex) => {\n const version = 1;\n return fullBox(\"tfra\", version, 0, [\n u32(trackData.track.id),\n u32(63),\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((chunk) => [\n u64(intoTimescale(chunk.samples[0].timestamp, trackData.timescale)),\n u64(chunk.moofOffset),\n u32(trackIndex + 1),\n u32(1),\n u32(1)\n ])\n ]);\n};\nvar mfro = () => {\n return fullBox(\"mfro\", 0, 0, [\n u32(0)\n ]);\n};\nvar vtte = () => box(\"vtte\");\nvar vttc = (payload, timestamp, identifier, settings, sourceId) => box(\"vttc\", undefined, [\n sourceId !== null ? box(\"vsid\", [i32(sourceId)]) : null,\n identifier !== null ? box(\"iden\", [...textEncoder.encode(identifier)]) : null,\n timestamp !== null ? box(\"ctim\", [...textEncoder.encode(formatSubtitleTimestamp(timestamp))]) : null,\n settings !== null ? box(\"sttg\", [...textEncoder.encode(settings)]) : null,\n box(\"payl\", [...textEncoder.encode(payload)])\n]);\nvar vtta = (notes) => box(\"vtta\", [...textEncoder.encode(notes)]);\nvar udta = (muxer) => {\n const boxes = [];\n const metadataFormat = muxer.format._options.metadataFormat ?? 
\"auto\";\n const metadataTags = muxer.output._metadataTags;\n if (metadataFormat === \"mdir\" || metadataFormat === \"auto\" && !muxer.isQuickTime) {\n const metaBox = metaMdir(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"mdta\") {\n const metaBox = metaMdta(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"udta\" || metadataFormat === \"auto\" && muxer.isQuickTime) {\n addQuickTimeMetadataTagBoxes(boxes, muxer.output._metadataTags);\n }\n if (boxes.length === 0) {\n return null;\n }\n return box(\"udta\", undefined, boxes);\n};\nvar addQuickTimeMetadataTagBoxes = (boxes, tags) => {\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9nam\", value));\n }\n ;\n break;\n case \"description\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9des\", value));\n }\n ;\n break;\n case \"artist\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9ART\", value));\n }\n ;\n break;\n case \"album\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9alb\", value));\n }\n ;\n break;\n case \"albumArtist\":\n {\n boxes.push(metadataTagStringBoxShort(\"albr\", value));\n }\n ;\n break;\n case \"genre\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9gen\", value));\n }\n ;\n break;\n case \"date\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9day\", value.toISOString().slice(0, 10)));\n }\n ;\n break;\n case \"comment\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9cmt\", value));\n }\n ;\n break;\n case \"lyrics\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9lyr\", value));\n }\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n case \"discNumber\":\n case \"discsTotal\":\n case \"trackNumber\":\n case \"tracksTotal\":\n case \"images\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == 
null || key.length !== 4 || boxes.some((x) => x.type === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n boxes.push(metadataTagStringBoxShort(key, value));\n } else if (value instanceof Uint8Array) {\n boxes.push(box(key, Array.from(value)));\n }\n }\n }\n};\nvar metadataTagStringBoxShort = (name, value) => {\n const encoded = textEncoder.encode(value);\n return box(name, [\n u16(encoded.length),\n u16(getLanguageCodeInt(\"und\")),\n Array.from(encoded)\n ]);\n};\nvar DATA_BOX_MIME_TYPE_MAP = {\n \"image/jpeg\": 13,\n \"image/png\": 14,\n \"image/bmp\": 27\n};\nvar generateMetadataPairs = (tags, isMdta) => {\n const pairs = [];\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n pairs.push({ key: isMdta ? \"title\" : \"\u00A9nam\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"description\":\n {\n pairs.push({ key: isMdta ? \"description\" : \"\u00A9des\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"artist\":\n {\n pairs.push({ key: isMdta ? \"artist\" : \"\u00A9ART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"album\":\n {\n pairs.push({ key: isMdta ? \"album\" : \"\u00A9alb\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"albumArtist\":\n {\n pairs.push({ key: isMdta ? \"album_artist\" : \"aART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"comment\":\n {\n pairs.push({ key: isMdta ? \"comment\" : \"\u00A9cmt\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"genre\":\n {\n pairs.push({ key: isMdta ? \"genre\" : \"\u00A9gen\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"lyrics\":\n {\n pairs.push({ key: isMdta ? \"lyrics\" : \"\u00A9lyr\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"date\":\n {\n pairs.push({\n key: isMdta ? 
\"date\" : \"\u00A9day\",\n value: dataStringBoxLong(value.toISOString().slice(0, 10))\n });\n }\n ;\n break;\n case \"images\":\n {\n for (const image of value) {\n if (image.kind !== \"coverFront\") {\n continue;\n }\n pairs.push({ key: \"covr\", value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[image.mimeType] ?? 0),\n u32(0),\n Array.from(image.data)\n ]) });\n }\n }\n ;\n break;\n case \"trackNumber\":\n {\n if (isMdta) {\n const string = tags.tracksTotal !== undefined ? `${value}/${tags.tracksTotal}` : value.toString();\n pairs.push({ key: \"track\", value: dataStringBoxLong(string) });\n } else {\n pairs.push({ key: \"trkn\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.tracksTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"discNumber\":\n {\n if (!isMdta) {\n pairs.push({ key: \"disc\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.discsTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"tracksTotal\":\n case \"discsTotal\":\n {}\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == null || !isMdta && key.length !== 4 || pairs.some((x) => x.key === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n pairs.push({ key, value: dataStringBoxLong(value) });\n } else if (value instanceof Uint8Array) {\n pairs.push({ key, value: box(\"data\", [\n u32(0),\n u32(0),\n Array.from(value)\n ]) });\n } else if (value instanceof RichImageData) {\n pairs.push({ key, value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[value.mimeType] ?? 
0),\n u32(0),\n Array.from(value.data)\n ]) });\n }\n }\n }\n return pairs;\n};\nvar metaMdir = (tags) => {\n const pairs = generateMetadataPairs(tags, false);\n if (pairs.length === 0) {\n return null;\n }\n return fullBox(\"meta\", 0, 0, undefined, [\n hdlr(false, \"mdir\", \"\", \"appl\"),\n box(\"ilst\", undefined, pairs.map((pair) => box(pair.key, undefined, [pair.value])))\n ]);\n};\nvar metaMdta = (tags) => {\n const pairs = generateMetadataPairs(tags, true);\n if (pairs.length === 0) {\n return null;\n }\n return box(\"meta\", undefined, [\n hdlr(false, \"mdta\", \"\"),\n fullBox(\"keys\", 0, 0, [\n u32(pairs.length)\n ], pairs.map((pair) => box(\"mdta\", [\n ...textEncoder.encode(pair.key)\n ]))),\n box(\"ilst\", undefined, pairs.map((pair, i) => {\n const boxName = String.fromCharCode(...u32(i + 1));\n return box(boxName, undefined, [pair.value]);\n }))\n ]);\n};\nvar dataStringBoxLong = (value) => {\n return box(\"data\", [\n u32(1),\n u32(0),\n ...textEncoder.encode(value)\n ]);\n};\nvar videoCodecToBoxName = (codec, fullCodecString) => {\n switch (codec) {\n case \"avc\":\n return fullCodecString.startsWith(\"avc3\") ? 
\"avc3\" : \"avc1\";\n case \"hevc\":\n return \"hvc1\";\n case \"vp8\":\n return \"vp08\";\n case \"vp9\":\n return \"vp09\";\n case \"av1\":\n return \"av01\";\n }\n};\nvar VIDEO_CODEC_TO_CONFIGURATION_BOX = {\n avc: avcC,\n hevc: hvcC,\n vp8: vpcC,\n vp9: vpcC,\n av1: av1C\n};\nvar audioCodecToBoxName = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return \"mp4a\";\n case \"mp3\":\n return \"mp4a\";\n case \"opus\":\n return \"Opus\";\n case \"vorbis\":\n return \"mp4a\";\n case \"flac\":\n return \"fLaC\";\n case \"ulaw\":\n return \"ulaw\";\n case \"alaw\":\n return \"alaw\";\n case \"pcm-u8\":\n return \"raw \";\n case \"pcm-s8\":\n return \"sowt\";\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s16\":\n return \"sowt\";\n case \"pcm-s16be\":\n return \"twos\";\n case \"pcm-s24\":\n return \"in24\";\n case \"pcm-s24be\":\n return \"in24\";\n case \"pcm-s32\":\n return \"in32\";\n case \"pcm-s32be\":\n return \"in32\";\n case \"pcm-f32\":\n return \"fl32\";\n case \"pcm-f32be\":\n return \"fl32\";\n case \"pcm-f64\":\n return \"fl64\";\n case \"pcm-f64be\":\n return \"fl64\";\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return \"ipcm\";\n case \"pcm-s16be\":\n return \"ipcm\";\n case \"pcm-s24\":\n return \"ipcm\";\n case \"pcm-s24be\":\n return \"ipcm\";\n case \"pcm-s32\":\n return \"ipcm\";\n case \"pcm-s32be\":\n return \"ipcm\";\n case \"pcm-f32\":\n return \"fpcm\";\n case \"pcm-f32be\":\n return \"fpcm\";\n case \"pcm-f64\":\n return \"fpcm\";\n case \"pcm-f64be\":\n return \"fpcm\";\n }\n }\n};\nvar audioCodecToConfigurationBox = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return esds;\n case \"mp3\":\n return esds;\n case \"opus\":\n return dOps;\n case \"vorbis\":\n return esds;\n case \"flac\":\n return dfLa;\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s24\":\n return wave;\n case \"pcm-s24be\":\n return wave;\n case \"pcm-s32\":\n return wave;\n case \"pcm-s32be\":\n return 
wave;\n case \"pcm-f32\":\n return wave;\n case \"pcm-f32be\":\n return wave;\n case \"pcm-f64\":\n return wave;\n case \"pcm-f64be\":\n return wave;\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return pcmC;\n case \"pcm-s16be\":\n return pcmC;\n case \"pcm-s24\":\n return pcmC;\n case \"pcm-s24be\":\n return pcmC;\n case \"pcm-s32\":\n return pcmC;\n case \"pcm-s32be\":\n return pcmC;\n case \"pcm-f32\":\n return pcmC;\n case \"pcm-f32be\":\n return pcmC;\n case \"pcm-f64\":\n return pcmC;\n case \"pcm-f64be\":\n return pcmC;\n }\n }\n return null;\n};\nvar SUBTITLE_CODEC_TO_BOX_NAME = {\n webvtt: \"wvtt\"\n};\nvar SUBTITLE_CODEC_TO_CONFIGURATION_BOX = {\n webvtt: vttC\n};\nvar getLanguageCodeInt = (code) => {\n assert(code.length === 3);\n let language = 0;\n for (let i = 0;i < 3; i++) {\n language <<= 5;\n language += code.charCodeAt(i) - 96;\n }\n return language;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/writer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Writer {\n constructor() {\n this.ensureMonotonicity = false;\n this.trackedWrites = null;\n this.trackedStart = -1;\n this.trackedEnd = -1;\n }\n start() {}\n maybeTrackWrites(data) {\n if (!this.trackedWrites) {\n return;\n }\n let pos = this.getPos();\n if (pos < this.trackedStart) {\n if (pos + data.byteLength <= this.trackedStart) {\n return;\n }\n data = data.subarray(this.trackedStart - pos);\n pos = 0;\n }\n const neededSize = pos + data.byteLength - this.trackedStart;\n let newLength = this.trackedWrites.byteLength;\n while (newLength < neededSize) {\n newLength *= 2;\n }\n if (newLength !== this.trackedWrites.byteLength) {\n const copy = new Uint8Array(newLength);\n copy.set(this.trackedWrites, 0);\n this.trackedWrites = copy;\n }\n this.trackedWrites.set(data, pos - this.trackedStart);\n this.trackedEnd = Math.max(this.trackedEnd, pos + data.byteLength);\n }\n startTrackingWrites() {\n this.trackedWrites = new Uint8Array(2 ** 10);\n this.trackedStart = this.getPos();\n this.trackedEnd = this.trackedStart;\n }\n stopTrackingWrites() {\n if (!this.trackedWrites) {\n throw new Error(\"Internal error: Can't get tracked writes since nothing was tracked.\");\n }\n const slice = this.trackedWrites.subarray(0, this.trackedEnd - this.trackedStart);\n const result = {\n data: slice,\n start: this.trackedStart,\n end: this.trackedEnd\n };\n this.trackedWrites = null;\n return result;\n }\n}\nvar ARRAY_BUFFER_INITIAL_SIZE = 2 ** 16;\nvar ARRAY_BUFFER_MAX_SIZE = 2 ** 32;\n\nclass BufferTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.maxPos = 0;\n this.target = target;\n this.supportsResize = \"resize\" in new ArrayBuffer(0);\n if (this.supportsResize) {\n try {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE, { maxByteLength: ARRAY_BUFFER_MAX_SIZE });\n } catch {\n this.buffer = new 
ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n this.supportsResize = false;\n }\n } else {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n }\n this.bytes = new Uint8Array(this.buffer);\n }\n ensureSize(size) {\n let newLength = this.buffer.byteLength;\n while (newLength < size)\n newLength *= 2;\n if (newLength === this.buffer.byteLength)\n return;\n if (newLength > ARRAY_BUFFER_MAX_SIZE) {\n throw new Error(`ArrayBuffer exceeded maximum size of ${ARRAY_BUFFER_MAX_SIZE} bytes. Please consider using another` + ` target.`);\n }\n if (this.supportsResize) {\n this.buffer.resize(newLength);\n } else {\n const newBuffer = new ArrayBuffer(newLength);\n const newBytes = new Uint8Array(newBuffer);\n newBytes.set(this.bytes, 0);\n this.buffer = newBuffer;\n this.bytes = newBytes;\n }\n }\n write(data) {\n this.maybeTrackWrites(data);\n this.ensureSize(this.pos + data.byteLength);\n this.bytes.set(data, this.pos);\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.maxPos = Math.max(this.maxPos, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {}\n async finalize() {\n this.ensureSize(this.pos);\n this.target.buffer = this.buffer.slice(0, Math.max(this.maxPos, this.pos));\n }\n async close() {}\n getSlice(start, end) {\n return this.bytes.slice(start, end);\n }\n}\nvar DEFAULT_CHUNK_SIZE = 2 ** 24;\nvar MAX_CHUNKS_AT_ONCE = 2;\n\nclass StreamTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.sections = [];\n this.lastWriteEnd = 0;\n this.lastFlushEnd = 0;\n this.writer = null;\n this.chunks = [];\n this.target = target;\n this.chunked = target._options.chunked ?? false;\n this.chunkSize = target._options.chunkSize ?? 
DEFAULT_CHUNK_SIZE;\n }\n start() {\n this.writer = this.target._writable.getWriter();\n }\n write(data) {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n this.maybeTrackWrites(data);\n this.sections.push({\n data: data.slice(),\n start: this.pos\n });\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.lastWriteEnd = Math.max(this.lastWriteEnd, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n assert(this.writer);\n if (this.sections.length === 0)\n return;\n const chunks = [];\n const sorted = [...this.sections].sort((a, b) => a.start - b.start);\n chunks.push({\n start: sorted[0].start,\n size: sorted[0].data.byteLength\n });\n for (let i = 1;i < sorted.length; i++) {\n const lastChunk = chunks[chunks.length - 1];\n const section = sorted[i];\n if (section.start <= lastChunk.start + lastChunk.size) {\n lastChunk.size = Math.max(lastChunk.size, section.start + section.data.byteLength - lastChunk.start);\n } else {\n chunks.push({\n start: section.start,\n size: section.data.byteLength\n });\n }\n }\n for (const chunk of chunks) {\n chunk.data = new Uint8Array(chunk.size);\n for (const section of this.sections) {\n if (chunk.start <= section.start && section.start < chunk.start + chunk.size) {\n chunk.data.set(section.data, section.start - chunk.start);\n }\n }\n if (this.writer.desiredSize !== null && this.writer.desiredSize <= 0) {\n await this.writer.ready;\n }\n if (this.chunked) {\n this.writeDataIntoChunks(chunk.data, chunk.start);\n this.tryToFlushChunks();\n } else {\n if (this.ensureMonotonicity && chunk.start !== 
this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data,\n position: chunk.start\n });\n this.lastFlushEnd = chunk.start + chunk.data.byteLength;\n }\n }\n this.sections.length = 0;\n }\n writeDataIntoChunks(data, position) {\n let chunkIndex = this.chunks.findIndex((x) => x.start <= position && position < x.start + this.chunkSize);\n if (chunkIndex === -1)\n chunkIndex = this.createChunk(position);\n const chunk = this.chunks[chunkIndex];\n const relativePosition = position - chunk.start;\n const toWrite = data.subarray(0, Math.min(this.chunkSize - relativePosition, data.byteLength));\n chunk.data.set(toWrite, relativePosition);\n const section = {\n start: relativePosition,\n end: relativePosition + toWrite.byteLength\n };\n this.insertSectionIntoChunk(chunk, section);\n if (chunk.written[0].start === 0 && chunk.written[0].end === this.chunkSize) {\n chunk.shouldFlush = true;\n }\n if (this.chunks.length > MAX_CHUNKS_AT_ONCE) {\n for (let i = 0;i < this.chunks.length - 1; i++) {\n this.chunks[i].shouldFlush = true;\n }\n this.tryToFlushChunks();\n }\n if (toWrite.byteLength < data.byteLength) {\n this.writeDataIntoChunks(data.subarray(toWrite.byteLength), position + toWrite.byteLength);\n }\n }\n insertSectionIntoChunk(chunk, section) {\n let low = 0;\n let high = chunk.written.length - 1;\n let index = -1;\n while (low <= high) {\n const mid = Math.floor(low + (high - low + 1) / 2);\n if (chunk.written[mid].start <= section.start) {\n low = mid + 1;\n index = mid;\n } else {\n high = mid - 1;\n }\n }\n chunk.written.splice(index + 1, 0, section);\n if (index === -1 || chunk.written[index].end < section.start)\n index++;\n while (index < chunk.written.length - 1 && chunk.written[index].end >= chunk.written[index + 1].start) {\n chunk.written[index].end = Math.max(chunk.written[index].end, chunk.written[index + 1].end);\n chunk.written.splice(index + 1, 1);\n }\n }\n 
createChunk(includesPosition) {\n const start = Math.floor(includesPosition / this.chunkSize) * this.chunkSize;\n const chunk = {\n start,\n data: new Uint8Array(this.chunkSize),\n written: [],\n shouldFlush: false\n };\n this.chunks.push(chunk);\n this.chunks.sort((a, b) => a.start - b.start);\n return this.chunks.indexOf(chunk);\n }\n tryToFlushChunks(force = false) {\n assert(this.writer);\n for (let i = 0;i < this.chunks.length; i++) {\n const chunk = this.chunks[i];\n if (!chunk.shouldFlush && !force)\n continue;\n for (const section of chunk.written) {\n const position = chunk.start + section.start;\n if (this.ensureMonotonicity && position !== this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data.subarray(section.start, section.end),\n position\n });\n this.lastFlushEnd = chunk.start + section.end;\n }\n this.chunks.splice(i--, 1);\n }\n }\n finalize() {\n if (this.chunked) {\n this.tryToFlushChunks(true);\n }\n assert(this.writer);\n return this.writer.close();\n }\n async close() {\n return this.writer?.close();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/target.js\nvar nodeAlias = (() => ({}));\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass Target {\n constructor() {\n this._output = null;\n this.onwrite = null;\n }\n}\n\nclass BufferTarget extends Target {\n constructor() {\n super(...arguments);\n this.buffer = null;\n }\n _createWriter() {\n return new BufferTargetWriter(this);\n }\n}\n\nclass StreamTarget extends Target {\n constructor(writable, options = {}) {\n super();\n if (!(writable instanceof WritableStream)) {\n throw new TypeError(\"StreamTarget requires a WritableStream instance.\");\n }\n if (options != null && typeof options !== \"object\") {\n throw new TypeError(\"StreamTarget options, when provided, must be an object.\");\n }\n if (options.chunked !== undefined && typeof options.chunked !== \"boolean\") {\n throw new TypeError(\"options.chunked, when provided, must be a boolean.\");\n }\n if (options.chunkSize !== undefined && (!Number.isInteger(options.chunkSize) || options.chunkSize < 1024)) {\n throw new TypeError(\"options.chunkSize, when provided, must be an integer and not smaller than 1024.\");\n }\n this._writable = writable;\n this._options = options;\n }\n _createWriter() {\n return new StreamTargetWriter(this);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-muxer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/.
 */
// Movie-level timescale (units per second); consumed by box-building code
// elsewhere in this bundle.
var GLOBAL_TIMESCALE = 1000;
// Seconds between the 1904-01-01 ISOBMFF epoch and the Unix epoch; added to
// Date.now()-derived seconds when stamping `creationTime` below.
var TIMESTAMP_OFFSET = 2082844800;
// Copies the subset of track metadata (currently only `name`) that is carried
// into the muxed output.
var getTrackMetadata = (trackData) => {
  const metadata = {};
  const track = trackData.track;
  if (track.metadata.name !== undefined) {
    metadata.name = track.metadata.name;
  }
  return metadata;
};
// Converts a duration in seconds into `timescale` units, rounding by default.
var intoTimescale = (timeInSeconds, timescale, round = true) => {
  const value = timeInSeconds * timescale;
  return round ? Math.round(value) : value;
};

// Muxer for ISO Base Media File Format output (MP4/MOV). Supports four
// layouts: default (single growing mdat, moov patched in at the end),
// fastStart 'in-memory' (buffer everything, write moov before mdat),
// fastStart 'reserve' (pre-reserve moov space up front), and 'fragmented'
// (moof/mdat pairs). NOTE(review): bundled third-party code (mediabunny,
// MPL-2.0) — prefer fixing upstream over editing this dist output.
class IsobmffMuxer extends Muxer {
  // Wires the muxer to the output's writer and resolves the fastStart mode;
  // when writing to an in-memory buffer the default is 'in-memory'.
  constructor(output, format) {
    super(output);
    // Auxiliary in-memory writer used to serialize small boxes (WebVTT cue
    // payloads) before they become sample data.
    this.auxTarget = new BufferTarget;
    this.auxWriter = this.auxTarget._createWriter();
    this.auxBoxWriter = new IsobmffBoxWriter(this.auxWriter);
    this.mdat = null;
    this.ftypSize = null;
    this.trackDatas = [];
    this.allTracksKnown = promiseWithResolvers();
    this.creationTime = Math.floor(Date.now() / 1000) + TIMESTAMP_OFFSET;
    this.finalizedChunks = [];
    this.nextFragmentNumber = 1;
    this.maxWrittenTimestamp = -Infinity;
    this.format = format;
    this.writer = output._writer;
    this.boxWriter = new IsobmffBoxWriter(this.writer);
    this.isQuickTime = format instanceof MovOutputFormat;
    const fastStartDefault = this.writer instanceof BufferTargetWriter ? "in-memory" : false;
    this.fastStart = format._options.fastStart ?? fastStartDefault;
    this.isFragmented = this.fastStart === "fragmented";
    if (this.fastStart === "in-memory" || this.isFragmented) {
      this.writer.ensureMonotonicity = true;
    }
    this.minimumFragmentDuration = format._options.minimumFragmentDuration ?? 1;
  }
  // Writes the ftyp box (tracking bytes for the onFtyp callback when set) and
  // prepares the layout: nothing extra for 'in-memory'/fragmented, a
  // maximumPacketCount sanity check for 'reserve', otherwise opens the mdat.
  async start() {
    const release = await this.mutex.acquire();
    const holdsAvc = this.output._tracks.some((x) => x.type === "video" && x.source._codec === "avc");
    {
      if (this.format._options.onFtyp) {
        this.writer.startTrackingWrites();
      }
      this.boxWriter.writeBox(ftyp({
        isQuickTime: this.isQuickTime,
        holdsAvc,
        fragmented: this.isFragmented
      }));
      if (this.format._options.onFtyp) {
        const { data, start } = this.writer.stopTrackingWrites();
        this.format._options.onFtyp(data, start);
      }
    }
    this.ftypSize = this.writer.getPos();
    if (this.fastStart === "in-memory") {} else if (this.fastStart === "reserve") {
      for (const track of this.output._tracks) {
        if (track.metadata.maximumPacketCount === undefined) {
          throw new Error("All tracks must specify maximumPacketCount in their metadata when using" + " fastStart: 'reserve'.");
        }
      }
    } else if (this.isFragmented) {} else {
      if (this.format._options.onMdat) {
        this.writer.startTrackingWrites();
      }
      this.mdat = mdat(true);
      this.boxWriter.writeBox(this.mdat);
    }
    await this.writer.flush();
    release();
  }
  // True once every non-closed track of the output has per-track state here.
  allTracksAreKnown() {
    for (const track of this.output._tracks) {
      if (!track.source._closed && !this.trackDatas.some((x) => x.track === track)) {
        return false;
      }
    }
    return true;
  }
  // Resolves to the container MIME type including codec strings; waits until
  // all tracks have registered.
  async getMimeType() {
    await this.allTracksKnown.promise;
    const codecStrings = this.trackDatas.map((trackData) => {
      if (trackData.type === "video") {
        return trackData.info.decoderConfig.codec;
      } else if (trackData.type === "audio") {
        return trackData.info.decoderConfig.codec;
      } else {
        const map = {
          webvtt: "wvtt"
        };
        return map[trackData.track.source._codec];
      }
    });
    return buildIsobmffMimeType({
      isQuickTime: this.isQuickTime,
      hasVideo: this.trackDatas.some((x) => x.type === "video"),
      hasAudio: this.trackDatas.some((x) => x.type === "audio"),
      codecStrings
    });
  }
  // Lazily creates per-track muxing state for a video track. For AVC/HEVC
  // without an explicit description, extracts the decoder configuration
  // record from the (Annex B) packet and flags the track for length-prefix
  // transformation.
  getVideoTrackData(track, packet, meta) {
    const existingTrackData = this.trackDatas.find((x) => x.track === track);
    if (existingTrackData) {
      return existingTrackData;
    }
    validateVideoChunkMetadata(meta);
    assert(meta);
    assert(meta.decoderConfig);
    const decoderConfig = { ...meta.decoderConfig };
    assert(decoderConfig.codedWidth !== undefined);
    assert(decoderConfig.codedHeight !== undefined);
    let requiresAnnexBTransformation = false;
    if (track.source._codec === "avc" && !decoderConfig.description) {
      const decoderConfigurationRecord = extractAvcDecoderConfigurationRecord(packet.data);
      if (!decoderConfigurationRecord) {
        throw new Error("Couldn't extract an AVCDecoderConfigurationRecord from the AVC packet. Make sure the packets are" + " in Annex B format (as specified in ITU-T-REC-H.264) when not providing a description, or" + " provide a description (must be an AVCDecoderConfigurationRecord as specified in ISO 14496-15)" + " and ensure the packets are in AVCC format.");
      }
      decoderConfig.description = serializeAvcDecoderConfigurationRecord(decoderConfigurationRecord);
      requiresAnnexBTransformation = true;
    } else if (track.source._codec === "hevc" && !decoderConfig.description) {
      const decoderConfigurationRecord = extractHevcDecoderConfigurationRecord(packet.data);
      if (!decoderConfigurationRecord) {
        throw new Error("Couldn't extract an HEVCDecoderConfigurationRecord from the HEVC packet. Make sure the packets" + " are in Annex B format (as specified in ITU-T-REC-H.265) when not providing a description, or" + " provide a description (must be an HEVCDecoderConfigurationRecord as specified in ISO 14496-15)" + " and ensure the packets are in HEVC format.");
      }
      decoderConfig.description = serializeHevcDecoderConfigurationRecord(decoderConfigurationRecord);
      requiresAnnexBTransformation = true;
    }
    // Track timescale derived from the frame rate via rational approximation
    // (57600 is the fallback when no frameRate is given).
    const timescale = computeRationalApproximation(1 / (track.metadata.frameRate ?? 57600), 1e6).denominator;
    const newTrackData = {
      muxer: this,
      track,
      type: "video",
      info: {
        width: decoderConfig.codedWidth,
        height: decoderConfig.codedHeight,
        decoderConfig,
        requiresAnnexBTransformation
      },
      timescale,
      samples: [],
      sampleQueue: [],
      timestampProcessingQueue: [],
      timeToSampleTable: [],
      compositionTimeOffsetTable: [],
      lastTimescaleUnits: null,
      lastSample: null,
      finalizedChunks: [],
      currentChunk: null,
      compactlyCodedChunkTable: []
    };
    this.trackDatas.push(newTrackData);
    this.trackDatas.sort((a, b) => a.track.id - b.track.id);
    if (this.allTracksAreKnown()) {
      this.allTracksKnown.resolve();
    }
    return newTrackData;
  }
  // Lazily creates per-track muxing state for an audio track. PCM audio in
  // non-fragmented files is flagged for per-sample transformation.
  getAudioTrackData(track, meta) {
    const existingTrackData = this.trackDatas.find((x) => x.track === track);
    if (existingTrackData) {
      return existingTrackData;
    }
    validateAudioChunkMetadata(meta);
    assert(meta);
    assert(meta.decoderConfig);
    const newTrackData = {
      muxer: this,
      track,
      type: "audio",
      info: {
        numberOfChannels: meta.decoderConfig.numberOfChannels,
        sampleRate: meta.decoderConfig.sampleRate,
        decoderConfig: meta.decoderConfig,
        requiresPcmTransformation: !this.isFragmented && PCM_AUDIO_CODECS.includes(track.source._codec)
      },
      // Audio uses the sample rate as its timescale (1 unit = 1 PCM frame).
      timescale: meta.decoderConfig.sampleRate,
      samples: [],
      sampleQueue: [],
      timestampProcessingQueue: [],
      timeToSampleTable: [],
      compositionTimeOffsetTable: [],
      lastTimescaleUnits: null,
      lastSample: null,
      finalizedChunks: [],
      currentChunk: null,
      compactlyCodedChunkTable: []
    };
    this.trackDatas.push(newTrackData);
    this.trackDatas.sort((a, b) => a.track.id - b.track.id);
    if (this.allTracksAreKnown()) {
      this.allTracksKnown.resolve();
    }
    return newTrackData;
  }
  // Lazily creates per-track muxing state for a subtitle track, including the
  // WebVTT cue bookkeeping (cue queue, per-cue source ids).
  getSubtitleTrackData(track, meta) {
    const existingTrackData = this.trackDatas.find((x) => x.track === track);
    if (existingTrackData) {
      return existingTrackData;
    }
    validateSubtitleMetadata(meta);
    assert(meta);
    assert(meta.config);
    const newTrackData = {
      muxer: this,
      track,
      type: "subtitle",
      info: {
        config: meta.config
      },
      timescale: 1000,
      samples: [],
      sampleQueue: [],
      timestampProcessingQueue: [],
      timeToSampleTable: [],
      compositionTimeOffsetTable: [],
      lastTimescaleUnits: null,
      lastSample: null,
      finalizedChunks: [],
      currentChunk: null,
      compactlyCodedChunkTable: [],
      lastCueEndTimestamp: 0,
      cueQueue: [],
      nextSourceId: 0,
      cueToSourceId: new WeakMap
    };
    this.trackDatas.push(newTrackData);
    this.trackDatas.sort((a, b) => a.track.id - b.track.id);
    if (this.allTracksAreKnown()) {
      this.allTracksKnown.resolve();
    }
    return newTrackData;
  }
  // Accepts an encoded video packet (mutex-guarded), converting Annex B
  // payloads to 4-byte length-prefixed NAL units when required.
  async addEncodedVideoPacket(track, packet, meta) {
    const release = await this.mutex.acquire();
    try {
      const trackData = this.getVideoTrackData(track, packet, meta);
      let packetData = packet.data;
      if (trackData.info.requiresAnnexBTransformation) {
        const nalUnits = findNalUnitsInAnnexB(packetData);
        if (nalUnits.length === 0) {
          throw new Error("Failed to transform packet data. Make sure all packets are provided in Annex B format, as" + " specified in ITU-T-REC-H.264 and ITU-T-REC-H.265.");
        }
        packetData = concatNalUnitsInLengthPrefixed(nalUnits, 4);
      }
      const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === "key");
      const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);
      await this.registerSample(trackData, internalSample);
    } finally {
      release();
    }
  }
  // Accepts an encoded audio packet (mutex-guarded); PCM tracks are padded
  // with silence up to the packet's timestamp first.
  async addEncodedAudioPacket(track, packet, meta) {
    const release = await this.mutex.acquire();
    try {
      const trackData = this.getAudioTrackData(track, meta);
      const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === "key");
      const internalSample = this.createSampleForTrack(trackData, packet.data, timestamp, packet.duration, packet.type);
      if (trackData.info.requiresPcmTransformation) {
        await this.maybePadWithSilence(trackData, timestamp);
      }
      await this.registerSample(trackData, internalSample);
    } finally {
      release();
    }
  }
  // Inserts a silent PCM sample covering any gap between the last written
  // sample's end and `untilTimestamp`.
  async maybePadWithSilence(trackData, untilTimestamp) {
    const lastSample = last(trackData.samples);
    const lastEndTimestamp = lastSample ? lastSample.timestamp + lastSample.duration : 0;
    const delta = untilTimestamp - lastEndTimestamp;
    const deltaInTimescale = intoTimescale(delta, trackData.timescale);
    if (deltaInTimescale > 0) {
      const { sampleSize, silentValue } = parsePcmCodec(trackData.info.decoderConfig.codec);
      const samplesNeeded = deltaInTimescale * trackData.info.numberOfChannels;
      const data = new Uint8Array(sampleSize * samplesNeeded).fill(silentValue);
      const paddingSample = this.createSampleForTrack(trackData, new Uint8Array(data.buffer), lastEndTimestamp, delta, "key");
      await this.registerSample(trackData, paddingSample);
    }
  }
  // Queues a subtitle cue (mutex-guarded); WebVTT cues are batched and turned
  // into samples by processWebVTTCues.
  async addSubtitleCue(track, cue, meta) {
    const release = await this.mutex.acquire();
    try {
      const trackData = this.getSubtitleTrackData(track, meta);
      this.validateAndNormalizeTimestamp(trackData.track, cue.timestamp, true);
      if (track.source._codec === "webvtt") {
        trackData.cueQueue.push(cue);
        await this.processWebVTTCues(trackData, cue.timestamp);
      } else {}
    } finally {
      release();
    }
  }
  // Converts queued WebVTT cues into wvtt samples up to `until`: emits empty
  // (vtte) samples for gaps and vttc/vtta boxes for overlapping cue spans,
  // serialized through the auxiliary writer.
  async processWebVTTCues(trackData, until) {
    while (trackData.cueQueue.length > 0) {
      const timestamps = new Set([]);
      for (const cue of trackData.cueQueue) {
        assert(cue.timestamp <= until);
        assert(trackData.lastCueEndTimestamp <= cue.timestamp + cue.duration);
        timestamps.add(Math.max(cue.timestamp, trackData.lastCueEndTimestamp));
        timestamps.add(cue.timestamp + cue.duration);
      }
      const sortedTimestamps = [...timestamps].sort((a, b) => a - b);
      const sampleStart = sortedTimestamps[0];
      const sampleEnd = sortedTimestamps[1] ?? sampleStart;
      if (until < sampleEnd) {
        break;
      }
      if (trackData.lastCueEndTimestamp < sampleStart) {
        // Gap before the next cue: emit an empty-cue sample to cover it.
        this.auxWriter.seek(0);
        const box2 = vtte();
        this.auxBoxWriter.writeBox(box2);
        const body2 = this.auxWriter.getSlice(0, this.auxWriter.getPos());
        const sample2 = this.createSampleForTrack(trackData, body2, trackData.lastCueEndTimestamp, sampleStart - trackData.lastCueEndTimestamp, "key");
        await this.registerSample(trackData, sample2);
        trackData.lastCueEndTimestamp = sampleStart;
      }
      this.auxWriter.seek(0);
      for (let i = 0; i < trackData.cueQueue.length; i++) {
        const cue = trackData.cueQueue[i];
        if (cue.timestamp >= sampleEnd) {
          break;
        }
        inlineTimestampRegex.lastIndex = 0;
        const containsTimestamp = inlineTimestampRegex.test(cue.text);
        const endTimestamp = cue.timestamp + cue.duration;
        let sourceId = trackData.cueToSourceId.get(cue);
        if (sourceId === undefined && sampleEnd < endTimestamp) {
          // Cue spills past this sample: assign a source id so continuation
          // fragments can reference it.
          sourceId = trackData.nextSourceId++;
          trackData.cueToSourceId.set(cue, sourceId);
        }
        if (cue.notes) {
          const box3 = vtta(cue.notes);
          this.auxBoxWriter.writeBox(box3);
        }
        const box2 = vttc(cue.text, containsTimestamp ? sampleStart : null, cue.identifier ?? null, cue.settings ?? null, sourceId ?? null);
        this.auxBoxWriter.writeBox(box2);
        if (endTimestamp === sampleEnd) {
          trackData.cueQueue.splice(i--, 1);
        }
      }
      const body = this.auxWriter.getSlice(0, this.auxWriter.getPos());
      const sample = this.createSampleForTrack(trackData, body, sampleStart, sampleEnd - sampleStart, "key");
      await this.registerSample(trackData, sample);
      trackData.lastCueEndTimestamp = sampleEnd;
    }
  }
  // Wraps raw packet data in the internal sample record used by the tables.
  createSampleForTrack(trackData, data, timestamp, duration, type) {
    const sample = {
      timestamp,
      decodeTimestamp: timestamp,
      duration,
      data,
      size: data.byteLength,
      type,
      timescaleUnitsToNextSample: intoTimescale(duration, trackData.timescale)
    };
    return sample;
  }
  // Assigns decode timestamps (by sorting queued presentation timestamps) and
  // maintains the run-length-encoded time-to-sample (stts-style) and
  // composition-offset (ctts-style) tables in non-fragmented mode. PCM audio
  // gets the shortcut path: one table entry per PCM frame.
  processTimestamps(trackData, nextSample) {
    if (trackData.timestampProcessingQueue.length === 0) {
      return;
    }
    if (trackData.type === "audio" && trackData.info.requiresPcmTransformation) {
      let totalDuration = 0;
      for (let i = 0; i < trackData.timestampProcessingQueue.length; i++) {
        const sample = trackData.timestampProcessingQueue[i];
        const duration = intoTimescale(sample.duration, trackData.timescale);
        totalDuration += duration;
      }
      if (trackData.timeToSampleTable.length === 0) {
        trackData.timeToSampleTable.push({
          sampleCount: totalDuration,
          sampleDelta: 1
        });
      } else {
        const lastEntry = last(trackData.timeToSampleTable);
        lastEntry.sampleCount += totalDuration;
      }
      trackData.timestampProcessingQueue.length = 0;
      return;
    }
    const sortedTimestamps = trackData.timestampProcessingQueue.map((x) => x.timestamp).sort((a, b) => a - b);
    for (let i = 0; i < trackData.timestampProcessingQueue.length; i++) {
      const sample = trackData.timestampProcessingQueue[i];
      sample.decodeTimestamp = sortedTimestamps[i];
      if (!this.isFragmented && trackData.lastTimescaleUnits === null) {
        // First sample of a non-fragmented track always decodes at 0.
        sample.decodeTimestamp = 0;
      }
      const sampleCompositionTimeOffset = intoTimescale(sample.timestamp - sample.decodeTimestamp, trackData.timescale);
      const durationInTimescale = intoTimescale(sample.duration, trackData.timescale);
      if (trackData.lastTimescaleUnits !== null) {
        assert(trackData.lastSample);
        const timescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);
        const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);
        assert(delta >= 0);
        trackData.lastTimescaleUnits += delta;
        trackData.lastSample.timescaleUnitsToNextSample = delta;
        if (!this.isFragmented) {
          let lastTableEntry = last(trackData.timeToSampleTable);
          assert(lastTableEntry);
          if (lastTableEntry.sampleCount === 1) {
            // Retroactively fix the previous sample's delta, merging with the
            // entry before it when the deltas now match.
            lastTableEntry.sampleDelta = delta;
            const entryBefore = trackData.timeToSampleTable[trackData.timeToSampleTable.length - 2];
            if (entryBefore && entryBefore.sampleDelta === delta) {
              entryBefore.sampleCount++;
              trackData.timeToSampleTable.pop();
              lastTableEntry = entryBefore;
            }
          } else if (lastTableEntry.sampleDelta !== delta) {
            lastTableEntry.sampleCount--;
            trackData.timeToSampleTable.push(lastTableEntry = {
              sampleCount: 1,
              sampleDelta: delta
            });
          }
          if (lastTableEntry.sampleDelta === durationInTimescale) {
            lastTableEntry.sampleCount++;
          } else {
            trackData.timeToSampleTable.push({
              sampleCount: 1,
              sampleDelta: durationInTimescale
            });
          }
          const lastCompositionTimeOffsetTableEntry = last(trackData.compositionTimeOffsetTable);
          assert(lastCompositionTimeOffsetTableEntry);
          if (lastCompositionTimeOffsetTableEntry.sampleCompositionTimeOffset === sampleCompositionTimeOffset) {
            lastCompositionTimeOffsetTableEntry.sampleCount++;
          } else {
            trackData.compositionTimeOffsetTable.push({
              sampleCount: 1,
              sampleCompositionTimeOffset
            });
          }
        }
      } else {
        trackData.lastTimescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);
        if (!this.isFragmented) {
          trackData.timeToSampleTable.push({
            sampleCount: 1,
            sampleDelta: durationInTimescale
          });
          trackData.compositionTimeOffsetTable.push({
            sampleCount: 1,
            sampleCompositionTimeOffset
          });
        }
      }
      trackData.lastSample = sample;
    }
    trackData.timestampProcessingQueue.length = 0;
    assert(trackData.lastSample);
    assert(trackData.lastTimescaleUnits !== null);
    if (nextSample !== undefined && trackData.lastSample.timescaleUnitsToNextSample === 0) {
      assert(nextSample.type === "key");
      const timescaleUnits = intoTimescale(nextSample.timestamp, trackData.timescale, false);
      const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);
      trackData.lastSample.timescaleUnitsToNextSample = delta;
    }
  }
  // Routes a sample into the correct pipeline for the active layout mode.
  // Timestamps are flushed whenever a key frame arrives.
  async registerSample(trackData, sample) {
    if (sample.type === "key") {
      this.processTimestamps(trackData, sample);
    }
    trackData.timestampProcessingQueue.push(sample);
    if (this.isFragmented) {
      trackData.sampleQueue.push(sample);
      await this.interleaveSamples();
    } else if (this.fastStart === "reserve") {
      await this.registerSampleFastStartReserve(trackData, sample);
    } else {
      await this.addSampleToTrack(trackData, sample);
    }
  }
  // Appends a sample to the track's current chunk, starting a new chunk (or
  // finalizing a fragment) when the chunk/fragment duration threshold is hit.
  async addSampleToTrack(trackData, sample) {
    if (!this.isFragmented) {
      trackData.samples.push(sample);
      if (this.fastStart === "reserve") {
        const maximumPacketCount = trackData.track.metadata.maximumPacketCount;
        assert(maximumPacketCount !== undefined);
        if (trackData.samples.length > maximumPacketCount) {
          throw new Error(`Track #${trackData.track.id} has already reached the maximum packet count` + ` (${maximumPacketCount}). Either add less packets or increase the maximum packet count.`);
        }
      }
    }
    let beginNewChunk = false;
    if (!trackData.currentChunk) {
      beginNewChunk = true;
    } else {
      trackData.currentChunk.startTimestamp = Math.min(trackData.currentChunk.startTimestamp, sample.timestamp);
      const currentChunkDuration = sample.timestamp - trackData.currentChunk.startTimestamp;
      if (this.isFragmented) {
        // Only cut a fragment when every track can start its next fragment
        // on a key frame.
        const keyFrameQueuedEverywhere = this.trackDatas.every((otherTrackData) => {
          if (trackData === otherTrackData) {
            return sample.type === "key";
          }
          const firstQueuedSample = otherTrackData.sampleQueue[0];
          if (firstQueuedSample) {
            return firstQueuedSample.type === "key";
          }
          return otherTrackData.track.source._closed;
        });
        if (currentChunkDuration >= this.minimumFragmentDuration && keyFrameQueuedEverywhere && sample.timestamp > this.maxWrittenTimestamp) {
          beginNewChunk = true;
          await this.finalizeFragment();
        }
      } else {
        // Non-fragmented files cut chunks every 0.5 seconds.
        beginNewChunk = currentChunkDuration >= 0.5;
      }
    }
    if (beginNewChunk) {
      if (trackData.currentChunk) {
        await this.finalizeCurrentChunk(trackData);
      }
      trackData.currentChunk = {
        startTimestamp: sample.timestamp,
        samples: [],
        offset: null,
        moofOffset: null
      };
    }
    assert(trackData.currentChunk);
    trackData.currentChunk.samples.push(sample);
    if (this.isFragmented) {
      this.maxWrittenTimestamp = Math.max(this.maxWrittenTimestamp, sample.timestamp);
    }
  }
  // Non-fragmented path: records the chunk in the compactly-coded (stsc-style)
  // table and writes its sample data out (unless buffered for 'in-memory').
  async finalizeCurrentChunk(trackData) {
    assert(!this.isFragmented);
    if (!trackData.currentChunk)
      return;
    trackData.finalizedChunks.push(trackData.currentChunk);
    this.finalizedChunks.push(trackData.currentChunk);
    let sampleCount = trackData.currentChunk.samples.length;
    if (trackData.type === "audio" && trackData.info.requiresPcmTransformation) {
      sampleCount = trackData.currentChunk.samples.reduce((acc, sample) => acc + intoTimescale(sample.duration, trackData.timescale), 0);
    }
    if (trackData.compactlyCodedChunkTable.length === 0 || last(trackData.compactlyCodedChunkTable).samplesPerChunk !== sampleCount) {
      trackData.compactlyCodedChunkTable.push({
        firstChunk: trackData.finalizedChunks.length,
        samplesPerChunk: sampleCount
      });
    }
    if (this.fastStart === "in-memory") {
      // Offsets are computed later, once the moov size is known.
      trackData.currentChunk.offset = 0;
      return;
    }
    trackData.currentChunk.offset = this.writer.getPos();
    for (const sample of trackData.currentChunk.samples) {
      assert(sample.data);
      this.writer.write(sample.data);
      sample.data = null;
    }
    await this.writer.flush();
  }
  // Fragmented path: drains per-track queues in global timestamp order so
  // fragments interleave samples across tracks.
  async interleaveSamples(isFinalCall = false) {
    assert(this.isFragmented);
    if (!isFinalCall && !this.allTracksAreKnown()) {
      return;
    }
    outer:
    while (true) {
      let trackWithMinTimestamp = null;
      let minTimestamp = Infinity;
      for (const trackData of this.trackDatas) {
        if (!isFinalCall && trackData.sampleQueue.length === 0 && !trackData.track.source._closed) {
          break outer;
        }
        if (trackData.sampleQueue.length > 0 && trackData.sampleQueue[0].timestamp < minTimestamp) {
          trackWithMinTimestamp = trackData;
          minTimestamp = trackData.sampleQueue[0].timestamp;
        }
      }
      if (!trackWithMinTimestamp) {
        break;
      }
      const sample = trackWithMinTimestamp.sampleQueue.shift();
      await this.addSampleToTrack(trackWithMinTimestamp, sample);
    }
  }
  // Emits one moof+mdat pair. moof is measured first so chunk offsets into
  // the following mdat are known; the first fragment is preceded by moov.
  async finalizeFragment(flushWriter = true) {
    assert(this.isFragmented);
    const fragmentNumber = this.nextFragmentNumber++;
    if (fragmentNumber === 1) {
      if (this.format._options.onMoov) {
        this.writer.startTrackingWrites();
      }
      const movieBox = moov(this);
      this.boxWriter.writeBox(movieBox);
      if (this.format._options.onMoov) {
        const { data, start } = this.writer.stopTrackingWrites();
        this.format._options.onMoov(data, start);
      }
    }
    const tracksInFragment = this.trackDatas.filter((x) => x.currentChunk);
    const moofBox = moof(fragmentNumber, tracksInFragment);
    const moofOffset = this.writer.getPos();
    const mdatStartPos = moofOffset + this.boxWriter.measureBox(moofBox);
    let currentPos = mdatStartPos + MIN_BOX_HEADER_SIZE;
    let fragmentStartTimestamp = Infinity;
    for (const trackData of tracksInFragment) {
      trackData.currentChunk.offset = currentPos;
      trackData.currentChunk.moofOffset = moofOffset;
      for (const sample of trackData.currentChunk.samples) {
        currentPos += sample.size;
      }
      fragmentStartTimestamp = Math.min(fragmentStartTimestamp, trackData.currentChunk.startTimestamp);
    }
    const mdatSize = currentPos - mdatStartPos;
    const needsLargeMdatSize = mdatSize >= 2 ** 32;
    if (needsLargeMdatSize) {
      // The 64-bit size field widens the header; shift chunk offsets.
      for (const trackData of tracksInFragment) {
        trackData.currentChunk.offset += MAX_BOX_HEADER_SIZE - MIN_BOX_HEADER_SIZE;
      }
    }
    if (this.format._options.onMoof) {
      this.writer.startTrackingWrites();
    }
    // Rebuild moof now that final chunk offsets are in the track data.
    const newMoofBox = moof(fragmentNumber, tracksInFragment);
    this.boxWriter.writeBox(newMoofBox);
    if (this.format._options.onMoof) {
      const { data, start } = this.writer.stopTrackingWrites();
      this.format._options.onMoof(data, start, fragmentStartTimestamp);
    }
    assert(this.writer.getPos() === mdatStartPos);
    if (this.format._options.onMdat) {
      this.writer.startTrackingWrites();
    }
    const mdatBox = mdat(needsLargeMdatSize);
    mdatBox.size = mdatSize;
    this.boxWriter.writeBox(mdatBox);
    this.writer.seek(mdatStartPos + (needsLargeMdatSize ? MAX_BOX_HEADER_SIZE : MIN_BOX_HEADER_SIZE));
    for (const trackData of tracksInFragment) {
      for (const sample of trackData.currentChunk.samples) {
        this.writer.write(sample.data);
        sample.data = null;
      }
    }
    if (this.format._options.onMdat) {
      const { data, start } = this.writer.stopTrackingWrites();
      this.format._options.onMdat(data, start);
    }
    for (const trackData of tracksInFragment) {
      trackData.finalizedChunks.push(trackData.currentChunk);
      this.finalizedChunks.push(trackData.currentChunk);
      trackData.currentChunk = null;
    }
    if (flushWriter) {
      await this.writer.flush();
    }
  }
  // fastStart 'reserve' path: buffers samples until all tracks are known,
  // then reserves space for moov (plus a sample-table upper bound and slack)
  // before opening the mdat and flushing the buffered samples.
  async registerSampleFastStartReserve(trackData, sample) {
    if (this.allTracksAreKnown()) {
      if (!this.mdat) {
        const moovBox = moov(this);
        const moovSize = this.boxWriter.measureBox(moovBox);
        const reservedSize = moovSize + this.computeSampleTableSizeUpperBound() + 4096;
        assert(this.ftypSize !== null);
        this.writer.seek(this.ftypSize + reservedSize);
        if (this.format._options.onMdat) {
          this.writer.startTrackingWrites();
        }
        this.mdat = mdat(true);
        this.boxWriter.writeBox(this.mdat);
        for (const trackData2 of this.trackDatas) {
          for (const sample2 of trackData2.sampleQueue) {
            await this.addSampleToTrack(trackData2, sample2);
          }
          trackData2.sampleQueue.length = 0;
        }
      }
      await this.addSampleToTrack(trackData, sample);
    } else {
      trackData.sampleQueue.push(sample);
    }
  }
  // Worst-case byte estimate for the per-track sample tables, based on each
  // track's declared maximumPacketCount.
  computeSampleTableSizeUpperBound() {
    assert(this.fastStart === "reserve");
    let upperBound = 0;
    for (const trackData of this.trackDatas) {
      const n = trackData.track.metadata.maximumPacketCount;
      assert(n !== undefined);
      upperBound += (4 + 4) * Math.ceil(2 / 3 * n);
      upperBound += 4 * n;
      upperBound += (4 + 4) * Math.ceil(2 / 3 * n);
      upperBound += (4 + 4 + 4) * Math.ceil(2 / 3 * n);
      upperBound += 4 * n;
      upperBound += 8 * n;
    }
    return upperBound;
  }
  // Called when a source track closes: flushes pending WebVTT cues, resolves
  // the all-tracks-known promise if applicable, and drains fragment queues.
  async onTrackClose(track) {
    const release = await this.mutex.acquire();
    if (track.type === "subtitle" && track.source._codec === "webvtt") {
      const trackData = this.trackDatas.find((x) => x.track === track);
      if (trackData) {
        await this.processWebVTTCues(trackData, Infinity);
      }
    }
    if (this.allTracksAreKnown()) {
      this.allTracksKnown.resolve();
    }
    if (this.isFragmented) {
      await this.interleaveSamples();
    }
    release();
  }
  // Flushes all remaining state and writes the closing structures for the
  // active layout: moov+mdat for 'in-memory', mfra for fragmented, patched
  // mdat size + trailing (or reserved-slot) moov otherwise.
  async finalize() {
    const release = await this.mutex.acquire();
    this.allTracksKnown.resolve();
    for (const trackData of this.trackDatas) {
      if (trackData.type === "subtitle" && trackData.track.source._codec === "webvtt") {
        await this.processWebVTTCues(trackData, Infinity);
      }
    }
    if (this.isFragmented) {
      await this.interleaveSamples(true);
      for (const trackData of this.trackDatas) {
        this.processTimestamps(trackData);
      }
      await this.finalizeFragment(false);
    } else {
      for (const trackData of this.trackDatas) {
        this.processTimestamps(trackData);
        await this.finalizeCurrentChunk(trackData);
      }
    }
    if (this.fastStart === "in-memory") {
      this.mdat = mdat(false);
      let mdatSize;
      // Chunk offsets depend on the mdat header size, which itself depends on
      // the total size — hence up to two sizing passes.
      for (let i = 0; i < 2; i++) {
        const movieBox2 = moov(this);
        const movieBoxSize = this.boxWriter.measureBox(movieBox2);
        mdatSize = this.boxWriter.measureBox(this.mdat);
        let currentChunkPos = this.writer.getPos() + movieBoxSize + mdatSize;
        for (const chunk of this.finalizedChunks) {
          chunk.offset = currentChunkPos;
          for (const { data } of chunk.samples) {
            assert(data);
            currentChunkPos += data.byteLength;
            mdatSize += data.byteLength;
          }
        }
        if (currentChunkPos < 2 ** 32)
          break;
        if (mdatSize >= 2 ** 32)
          this.mdat.largeSize = true;
      }
      if (this.format._options.onMoov) {
        this.writer.startTrackingWrites();
      }
      const movieBox = moov(this);
      this.boxWriter.writeBox(movieBox);
      if (this.format._options.onMoov) {
        const { data, start } = this.writer.stopTrackingWrites();
        this.format._options.onMoov(data, start);
      }
      if (this.format._options.onMdat) {
        this.writer.startTrackingWrites();
      }
      this.mdat.size = mdatSize;
      this.boxWriter.writeBox(this.mdat);
      for (const chunk of this.finalizedChunks) {
        for (const sample of chunk.samples) {
          assert(sample.data);
          this.writer.write(sample.data);
          sample.data = null;
        }
      }
      if (this.format._options.onMdat) {
        const { data, start } = this.writer.stopTrackingWrites();
        this.format._options.onMdat(data, start);
      }
    } else if (this.isFragmented) {
      // Append mfra and back-patch its final 4 bytes with its own size.
      const startPos = this.writer.getPos();
      const mfraBox = mfra(this.trackDatas);
      this.boxWriter.writeBox(mfraBox);
      const mfraBoxSize = this.writer.getPos() - startPos;
      this.writer.seek(this.writer.getPos() - 4);
      this.boxWriter.writeU32(mfraBoxSize);
    } else {
      assert(this.mdat);
      const mdatPos = this.boxWriter.offsets.get(this.mdat);
      assert(mdatPos !== undefined);
      const mdatSize = this.writer.getPos() - mdatPos;
      this.mdat.size = mdatSize;
      this.mdat.largeSize = mdatSize >= 2 ** 32;
      this.boxWriter.patchBox(this.mdat);
      if (this.format._options.onMdat) {
        const { data, start } = this.writer.stopTrackingWrites();
        this.format._options.onMdat(data, start);
      }
      const movieBox = moov(this);
      if (this.fastStart === "reserve") {
        assert(this.ftypSize !== null);
        this.writer.seek(this.ftypSize);
        if (this.format._options.onMoov) {
          this.writer.startTrackingWrites();
        }
        this.boxWriter.writeBox(movieBox);
        // Pad the unused remainder of the reserved region with a free box.
        const remainingSpace = this.boxWriter.offsets.get(this.mdat) - this.writer.getPos();
        this.boxWriter.writeBox(free(remainingSpace));
      } else {
        if (this.format._options.onMoov) {
          this.writer.startTrackingWrites();
        }
        this.boxWriter.writeBox(movieBox);
      }
      if (this.format._options.onMoov) {
        const { data, start } = this.writer.stopTrackingWrites();
        this.format._options.onMoov(data, start);
      }
    }
    release();
  }
}

// ../../node_modules/mediabunny/dist/modules/src/output-format.js
/*!
 * Copyright (c) 2025-present, Vanilagy and contributors
 *
 * This Source Code Form 
is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass OutputFormat {\n getSupportedVideoCodecs() {\n return this.getSupportedCodecs().filter((codec) => VIDEO_CODECS.includes(codec));\n }\n getSupportedAudioCodecs() {\n return this.getSupportedCodecs().filter((codec) => AUDIO_CODECS.includes(codec));\n }\n getSupportedSubtitleCodecs() {\n return this.getSupportedCodecs().filter((codec) => SUBTITLE_CODECS.includes(codec));\n }\n _codecUnsupportedHint(codec) {\n return \"\";\n }\n}\n\nclass IsobmffOutputFormat extends OutputFormat {\n constructor(options = {}) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.fastStart !== undefined && ![false, \"in-memory\", \"reserve\", \"fragmented\"].includes(options.fastStart)) {\n throw new TypeError(\"options.fastStart, when provided, must be false, 'in-memory', 'reserve', or 'fragmented'.\");\n }\n if (options.minimumFragmentDuration !== undefined && (!Number.isFinite(options.minimumFragmentDuration) || options.minimumFragmentDuration < 0)) {\n throw new TypeError(\"options.minimumFragmentDuration, when provided, must be a non-negative number.\");\n }\n if (options.onFtyp !== undefined && typeof options.onFtyp !== \"function\") {\n throw new TypeError(\"options.onFtyp, when provided, must be a function.\");\n }\n if (options.onMoov !== undefined && typeof options.onMoov !== \"function\") {\n throw new TypeError(\"options.onMoov, when provided, must be a function.\");\n }\n if (options.onMdat !== undefined && typeof options.onMdat !== \"function\") {\n throw new TypeError(\"options.onMdat, when provided, must be a function.\");\n }\n if (options.onMoof !== undefined && typeof options.onMoof !== \"function\") {\n throw new TypeError(\"options.onMoof, when provided, must be a function.\");\n }\n if 
(options.metadataFormat !== undefined && ![\"mdir\", \"mdta\", \"udta\", \"auto\"].includes(options.metadataFormat)) {\n throw new TypeError(\"options.metadataFormat, when provided, must be either 'auto', 'mdir', 'mdta', or 'udta'.\");\n }\n super();\n this._options = options;\n }\n getSupportedTrackCounts() {\n return {\n video: { min: 0, max: Infinity },\n audio: { min: 0, max: Infinity },\n subtitle: { min: 0, max: Infinity },\n total: { min: 1, max: 2 ** 32 - 1 }\n };\n }\n get supportsVideoRotationMetadata() {\n return true;\n }\n _createMuxer(output) {\n return new IsobmffMuxer(output, this);\n }\n}\n\nclass Mp4OutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MP4\";\n }\n get fileExtension() {\n return \".mp4\";\n }\n get mimeType() {\n return \"video/mp4\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...NON_PCM_AUDIO_CODECS,\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n ...SUBTITLE_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new MovOutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MOV will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\nclass MovOutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MOV\";\n }\n get fileExtension() {\n return \".mov\";\n }\n get mimeType() {\n return \"video/quicktime\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...AUDIO_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new Mp4OutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MP4 will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/encode.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to 
the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/.
 */
// Validates a video encoding config object, throwing TypeError on the first
// invalid field; delegates codec-specific options to
// validateVideoEncodingAdditionalOptions.
var validateVideoEncodingConfig = (config) => {
  if (!config || typeof config !== "object") {
    throw new TypeError("Encoding config must be an object.");
  }
  if (!VIDEO_CODECS.includes(config.codec)) {
    throw new TypeError(`Invalid video codec '${config.codec}'. Must be one of: ${VIDEO_CODECS.join(", ")}.`);
  }
  if (!(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {
    throw new TypeError("config.bitrate must be a positive integer or a quality.");
  }
  if (config.keyFrameInterval !== undefined && (!Number.isFinite(config.keyFrameInterval) || config.keyFrameInterval < 0)) {
    throw new TypeError("config.keyFrameInterval, when provided, must be a non-negative number.");
  }
  if (config.sizeChangeBehavior !== undefined && !["deny", "passThrough", "fill", "contain", "cover"].includes(config.sizeChangeBehavior)) {
    throw new TypeError("config.sizeChangeBehavior, when provided, must be 'deny', 'passThrough', 'fill', 'contain'" + " or 'cover'.");
  }
  // NOTE(review): the property checked is `onEncodedPacket` but the message
  // says "config.onEncodedChunk" — an upstream message/property mismatch;
  // left untouched since this is generated vendor output.
  if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== "function") {
    throw new TypeError("config.onEncodedChunk, when provided, must be a function.");
  }
  if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== "function") {
    throw new TypeError("config.onEncoderConfig, when provided, must be a function.");
  }
  validateVideoEncodingAdditionalOptions(config.codec, config);
};
// Validates the codec-dependent extras of a video encoding options object
// (alpha, bitrateMode, latencyMode, fullCodecString, hardwareAcceleration,
// scalabilityMode, contentHint).
var validateVideoEncodingAdditionalOptions = (codec, options) => {
  if (!options || typeof options !== "object") {
    throw new TypeError("Encoding options must be an object.");
  }
  if (options.alpha !== undefined && !["discard", "keep"].includes(options.alpha)) {
    throw new TypeError("options.alpha, when provided, must be 'discard' or 'keep'.");
  }
  if (options.bitrateMode !== undefined && !["constant", "variable"].includes(options.bitrateMode)) {
    throw new TypeError("bitrateMode, when provided, must be 'constant' or 'variable'.");
  }
  if (options.latencyMode !== undefined && !["quality", "realtime"].includes(options.latencyMode)) {
    throw new TypeError("latencyMode, when provided, must be 'quality' or 'realtime'.");
  }
  if (options.fullCodecString !== undefined && typeof options.fullCodecString !== "string") {
    throw new TypeError("fullCodecString, when provided, must be a string.");
  }
  if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {
    throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);
  }
  if (options.hardwareAcceleration !== undefined && !["no-preference", "prefer-hardware", "prefer-software"].includes(options.hardwareAcceleration)) {
    throw new TypeError("hardwareAcceleration, when provided, must be 'no-preference', 'prefer-hardware' or" + " 'prefer-software'.");
  }
  if (options.scalabilityMode !== undefined && typeof options.scalabilityMode !== "string") {
    throw new TypeError("scalabilityMode, when provided, must be a string.");
  }
  if (options.contentHint !== undefined && typeof options.contentHint !== "string") {
    throw new TypeError("contentHint, when provided, must be a string.");
  }
};
// Builds a WebCodecs-style VideoEncoder config from validated options,
// resolving a Quality bitrate to a concrete number and synthesizing the codec
// string when no fullCodecString override was given.
var buildVideoEncoderConfig = (options) => {
  const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toVideoBitrate(options.codec, options.width, options.height) : options.bitrate;
  return {
    codec: options.fullCodecString ?? buildVideoCodecString(options.codec, options.width, options.height, resolvedBitrate),
    width: options.width,
    height: options.height,
    bitrate: resolvedBitrate,
    bitrateMode: options.bitrateMode,
    alpha: options.alpha ?? "discard",
    framerate: options.framerate,
    latencyMode: options.latencyMode,
    hardwareAcceleration: options.hardwareAcceleration,
    scalabilityMode: options.scalabilityMode,
    contentHint: options.contentHint,
    ...getVideoEncoderConfigExtension(options.codec)
  };
};
// Validates an audio encoding config; bitrate is mandatory for compressed
// codecs, optional for PCM (with flac treated as requiring one).
var validateAudioEncodingConfig = (config) => {
  if (!config || typeof config !== "object") {
    throw new TypeError("Encoding config must be an object.");
  }
  if (!AUDIO_CODECS.includes(config.codec)) {
    throw new TypeError(`Invalid audio codec '${config.codec}'. Must be one of: ${AUDIO_CODECS.join(", ")}.`);
  }
  if (config.bitrate === undefined && (!PCM_AUDIO_CODECS.includes(config.codec) || config.codec === "flac")) {
    throw new TypeError("config.bitrate must be provided for compressed audio codecs.");
  }
  if (config.bitrate !== undefined && !(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {
    throw new TypeError("config.bitrate, when provided, must be a positive integer or a quality.");
  }
  // NOTE(review): same message/property mismatch as in the video validator
  // (checks onEncodedPacket, message says onEncodedChunk).
  if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== "function") {
    throw new TypeError("config.onEncodedChunk, when provided, must be a function.");
  }
  if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== "function") {
    throw new TypeError("config.onEncoderConfig, when provided, must be a function.");
  }
  validateAudioEncodingAdditionalOptions(config.codec, config);
};
// Validates codec-dependent extras of an audio encoding options object.
// (Definition continues past the end of this chunk.)
var validateAudioEncodingAdditionalOptions = (codec, options) => {
  if (!options || typeof options !== "object") {
    throw new TypeError("Encoding options must be an object.");
  }
  if (options.bitrateMode !== undefined && !["constant", "variable"].includes(options.bitrateMode)) {
    throw new TypeError("bitrateMode, when provided, must be 'constant' or 'variable'.");
  }
  if (options.fullCodecString !== undefined && typeof options.fullCodecString !== "string") {
    throw new 
TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n};\nvar buildAudioEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toAudioBitrate(options.codec) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildAudioCodecString(options.codec, options.numberOfChannels, options.sampleRate),\n numberOfChannels: options.numberOfChannels,\n sampleRate: options.sampleRate,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n ...getAudioEncoderConfigExtension(options.codec)\n };\n};\n\nclass Quality {\n constructor(factor) {\n this._factor = factor;\n }\n _toVideoBitrate(codec, width, height) {\n const pixels = width * height;\n const codecEfficiencyFactors = {\n avc: 1,\n hevc: 0.6,\n vp9: 0.6,\n av1: 0.4,\n vp8: 1.2\n };\n const referencePixels = 1920 * 1080;\n const referenceBitrate = 3000000;\n const scaleFactor = Math.pow(pixels / referencePixels, 0.95);\n const baseBitrate = referenceBitrate * scaleFactor;\n const codecAdjustedBitrate = baseBitrate * codecEfficiencyFactors[codec];\n const finalBitrate = codecAdjustedBitrate * this._factor;\n return Math.ceil(finalBitrate / 1000) * 1000;\n }\n _toAudioBitrate(codec) {\n if (PCM_AUDIO_CODECS.includes(codec) || codec === \"flac\") {\n return;\n }\n const baseRates = {\n aac: 128000,\n opus: 64000,\n mp3: 160000,\n vorbis: 64000\n };\n const baseBitrate = baseRates[codec];\n if (!baseBitrate) {\n throw new Error(`Unhandled codec: ${codec}`);\n }\n let finalBitrate = baseBitrate * this._factor;\n if (codec === \"aac\") {\n const validRates = [96000, 128000, 160000, 192000];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? 
curr : prev);\n } else if (codec === \"opus\" || codec === \"vorbis\") {\n finalBitrate = Math.max(6000, finalBitrate);\n } else if (codec === \"mp3\") {\n const validRates = [\n 8000,\n 16000,\n 24000,\n 32000,\n 40000,\n 48000,\n 64000,\n 80000,\n 96000,\n 112000,\n 128000,\n 160000,\n 192000,\n 224000,\n 256000,\n 320000\n ];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? curr : prev);\n }\n return Math.round(finalBitrate / 1000) * 1000;\n }\n}\nvar QUALITY_LOW = /* @__PURE__ */ new Quality(0.6);\nvar QUALITY_MEDIUM = /* @__PURE__ */ new Quality(1);\nvar QUALITY_HIGH = /* @__PURE__ */ new Quality(2);\nvar QUALITY_VERY_HIGH = /* @__PURE__ */ new Quality(4);\n\n// ../../node_modules/mediabunny/dist/modules/src/media-source.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass MediaSource {\n constructor() {\n this._connectedTrack = null;\n this._closingPromise = null;\n this._closed = false;\n this._timestampOffset = 0;\n }\n _ensureValidAdd() {\n if (!this._connectedTrack) {\n throw new Error(\"Source is not connected to an output track.\");\n }\n if (this._connectedTrack.output.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._connectedTrack.output.state === \"finalizing\" || this._connectedTrack.output.state === \"finalized\") {\n throw new Error(\"Output has been finalized.\");\n }\n if (this._connectedTrack.output.state === \"pending\") {\n throw new Error(\"Output has not started.\");\n }\n if (this._closed) {\n throw new Error(\"Source is closed.\");\n }\n }\n async _start() {}\n async _flushAndClose(forceClose) {}\n close() {\n if (this._closingPromise) {\n return;\n }\n const connectedTrack = this._connectedTrack;\n 
if (!connectedTrack) {\n throw new Error(\"Cannot call close without connecting the source to an output track.\");\n }\n if (connectedTrack.output.state === \"pending\") {\n throw new Error(\"Cannot call close before output has been started.\");\n }\n this._closingPromise = (async () => {\n await this._flushAndClose(false);\n this._closed = true;\n if (connectedTrack.output.state === \"finalizing\" || connectedTrack.output.state === \"finalized\") {\n return;\n }\n connectedTrack.output._muxer.onTrackClose(connectedTrack);\n })();\n }\n async _flushOrWaitForOngoingClose(forceClose) {\n return this._closingPromise ??= (async () => {\n await this._flushAndClose(forceClose);\n this._closed = true;\n })();\n }\n}\n\nclass VideoSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!VIDEO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid video codec '${codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass VideoEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastMultipleOfKeyFrameInterval = -1;\n this.codedWidth = null;\n this.codedHeight = null;\n this.resizeCanvas = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.alphaEncoder = null;\n this.splitter = null;\n this.splitterCreationFailed = false;\n this.alphaFrameQueue = [];\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(videoSample, shouldClose, encodeOptions) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.codedWidth !== null && this.codedHeight !== null) {\n if (videoSample.codedWidth !== this.codedWidth || videoSample.codedHeight !== this.codedHeight) {\n const sizeChangeBehavior = 
this.encodingConfig.sizeChangeBehavior ?? \"deny\";\n if (sizeChangeBehavior === \"passThrough\") {} else if (sizeChangeBehavior === \"deny\") {\n throw new Error(`Video sample size must remain constant. Expected ${this.codedWidth}x${this.codedHeight},` + ` got ${videoSample.codedWidth}x${videoSample.codedHeight}. To allow the sample size to` + ` change over time, set \\`sizeChangeBehavior\\` to a value other than 'strict' in the` + ` encoding options.`);\n } else {\n let canvasIsNew = false;\n if (!this.resizeCanvas) {\n if (typeof document !== \"undefined\") {\n this.resizeCanvas = document.createElement(\"canvas\");\n this.resizeCanvas.width = this.codedWidth;\n this.resizeCanvas.height = this.codedHeight;\n } else {\n this.resizeCanvas = new OffscreenCanvas(this.codedWidth, this.codedHeight);\n }\n canvasIsNew = true;\n }\n const context = this.resizeCanvas.getContext(\"2d\", {\n alpha: isFirefox()\n });\n assert(context);\n if (!canvasIsNew) {\n if (isFirefox()) {\n context.fillStyle = \"black\";\n context.fillRect(0, 0, this.codedWidth, this.codedHeight);\n } else {\n context.clearRect(0, 0, this.codedWidth, this.codedHeight);\n }\n }\n videoSample.drawWithFit(context, { fit: sizeChangeBehavior });\n if (shouldClose) {\n videoSample.close();\n }\n videoSample = new VideoSample(this.resizeCanvas, {\n timestamp: videoSample.timestamp,\n duration: videoSample.duration,\n rotation: videoSample.rotation\n });\n shouldClose = true;\n }\n }\n } else {\n this.codedWidth = videoSample.codedWidth;\n this.codedHeight = videoSample.codedHeight;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(videoSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n const keyFrameInterval = this.encodingConfig.keyFrameInterval ?? 
5;\n const multipleOfKeyFrameInterval = Math.floor(videoSample.timestamp / keyFrameInterval);\n const finalEncodeOptions = {\n ...encodeOptions,\n keyFrame: encodeOptions?.keyFrame || keyFrameInterval === 0 || multipleOfKeyFrameInterval !== this.lastMultipleOfKeyFrameInterval\n };\n this.lastMultipleOfKeyFrameInterval = multipleOfKeyFrameInterval;\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = videoSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample, finalEncodeOptions)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n } else {\n assert(this.encoder);\n const videoFrame = videoSample.toVideoFrame();\n if (!this.alphaEncoder) {\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const frameDefinitelyHasNoAlpha = !!videoFrame.format && !videoFrame.format.includes(\"A\");\n if (frameDefinitelyHasNoAlpha || this.splitterCreationFailed) {\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (!this.splitter) {\n try {\n this.splitter = new ColorAlphaSplitter(width, height);\n } catch (error) {\n console.error(\"Due to an error, only color data will be encoded.\", error);\n this.splitterCreationFailed = true;\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n }\n }\n if (this.splitter) {\n const colorFrame = this.splitter.extractColor(videoFrame);\n const alphaFrame = this.splitter.extractAlpha(videoFrame);\n this.alphaFrameQueue.push(alphaFrame);\n this.encoder.encode(colorFrame, finalEncodeOptions);\n colorFrame.close();\n videoFrame.close();\n }\n }\n }\n if (shouldClose) 
{\n videoSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n }\n await this.muxer.mutex.currentPromise;\n } finally {\n if (shouldClose) {\n videoSample.close();\n }\n }\n }\n ensureEncoder(videoSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const encoderConfig = buildVideoEncoderConfig({\n width: videoSample.codedWidth,\n height: videoSample.codedHeight,\n ...this.encodingConfig,\n framerate: this.source._connectedTrack?.metadata.frameRate\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customVideoEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else {\n if (typeof VideoEncoder === \"undefined\") {\n throw new Error(\"VideoEncoder is not supported by this browser.\");\n }\n encoderConfig.alpha = \"discard\";\n if (this.encodingConfig.alpha === \"keep\") {\n encoderConfig.latencyMode = \"quality\";\n }\n const hasOddDimension = encoderConfig.width % 2 === 1 || encoderConfig.height % 2 === 1;\n if (hasOddDimension && 
(this.encodingConfig.codec === \"avc\" || this.encodingConfig.codec === \"hevc\")) {\n throw new Error(`The dimensions ${encoderConfig.width}x${encoderConfig.height} are not supported for codec` + ` '${this.encodingConfig.codec}'; both width and height must be even numbers. Make sure to` + ` round your dimensions to the nearest even number.`);\n }\n const support = await VideoEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.width}x${encoderConfig.height}, hardware acceleration:` + ` ${encoderConfig.hardwareAcceleration ?? \"no-preference\"}) is not supported by this browser.` + ` Consider using another codec or changing your video parameters.`);\n }\n const colorChunkQueue = [];\n const nullAlphaChunkQueue = [];\n let encodedAlphaChunkCount = 0;\n let alphaEncoderQueue = 0;\n const addPacket = (colorChunk, alphaChunk, meta) => {\n const sideData = {};\n if (alphaChunk) {\n const alphaData = new Uint8Array(alphaChunk.byteLength);\n alphaChunk.copyTo(alphaData);\n sideData.alpha = alphaData;\n }\n const packet = EncodedPacket.fromEncodedChunk(colorChunk, sideData);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n this.encoder = new VideoEncoder({\n output: (chunk, meta) => {\n if (!this.alphaEncoder) {\n addPacket(chunk, null, meta);\n return;\n }\n const alphaFrame = this.alphaFrameQueue.shift();\n assert(alphaFrame !== undefined);\n if (alphaFrame) {\n this.alphaEncoder.encode(alphaFrame, {\n keyFrame: chunk.type === \"key\"\n });\n alphaEncoderQueue++;\n alphaFrame.close();\n colorChunkQueue.push({ chunk, meta });\n } else {\n if (alphaEncoderQueue === 0) {\n addPacket(chunk, null, meta);\n } else {\n 
nullAlphaChunkQueue.push(encodedAlphaChunkCount + alphaEncoderQueue);\n colorChunkQueue.push({ chunk, meta });\n }\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n if (this.encodingConfig.alpha === \"keep\") {\n this.alphaEncoder = new VideoEncoder({\n output: (chunk, meta) => {\n alphaEncoderQueue--;\n const colorChunk = colorChunkQueue.shift();\n assert(colorChunk !== undefined);\n addPacket(colorChunk.chunk, chunk, colorChunk.meta);\n encodedAlphaChunkCount++;\n while (nullAlphaChunkQueue.length > 0 && nullAlphaChunkQueue[0] === encodedAlphaChunkCount) {\n nullAlphaChunkQueue.shift();\n const colorChunk2 = colorChunkQueue.shift();\n assert(colorChunk2 !== undefined);\n addPacket(colorChunk2.chunk, null, colorChunk2.meta);\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.alphaEncoder.configure(encoderConfig);\n }\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n async flushAndClose(forceClose) {\n if (!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n await this.alphaEncoder?.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n if (this.alphaEncoder && this.alphaEncoder.state !== \"closed\") {\n this.alphaEncoder.close();\n }\n this.alphaFrameQueue.forEach((x) => x?.close());\n this.splitter?.close();\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else {\n return this.encoder?.encodeQueueSize ?? 
0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass ColorAlphaSplitter {\n constructor(initialWidth, initialHeight) {\n this.lastFrame = null;\n if (typeof OffscreenCanvas !== \"undefined\") {\n this.canvas = new OffscreenCanvas(initialWidth, initialHeight);\n } else {\n this.canvas = document.createElement(\"canvas\");\n this.canvas.width = initialWidth;\n this.canvas.height = initialHeight;\n }\n const gl = this.canvas.getContext(\"webgl2\", {\n alpha: true\n });\n if (!gl) {\n throw new Error(\"Couldn't acquire WebGL 2 context.\");\n }\n this.gl = gl;\n this.colorProgram = this.createColorProgram();\n this.alphaProgram = this.createAlphaProgram();\n this.vao = this.createVAO();\n this.sourceTexture = this.createTexture();\n this.alphaResolutionLocation = this.gl.getUniformLocation(this.alphaProgram, \"u_resolution\");\n this.gl.useProgram(this.colorProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.colorProgram, \"u_sourceTexture\"), 0);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.alphaProgram, \"u_sourceTexture\"), 0);\n }\n createVertexShader() {\n return this.createShader(this.gl.VERTEX_SHADER, `#version 300 es\n\t\t\tin vec2 a_position;\n\t\t\tin vec2 a_texCoord;\n\t\t\tout vec2 v_texCoord;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tgl_Position = vec4(a_position, 0.0, 1.0);\n\t\t\t\tv_texCoord = a_texCoord;\n\t\t\t}\n\t\t`);\n }\n createColorProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tvec4 source = texture(u_sourceTexture, v_texCoord);\n\t\t\t\tfragColor = vec4(source.rgb, 1.0);\n\t\t\t}\n\t\t`);\n const program = 
this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createAlphaProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tuniform vec2 u_resolution; // The width and height of the canvas\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\n\t\t\t// This function determines the value for a single byte in the YUV stream\n\t\t\tfloat getByteValue(float byteOffset) {\n\t\t\t\tfloat width = u_resolution.x;\n\t\t\t\tfloat height = u_resolution.y;\n\n\t\t\t\tfloat yPlaneSize = width * height;\n\n\t\t\t\tif (byteOffset < yPlaneSize) {\n\t\t\t\t\t// This byte is in the luma plane. Find the corresponding pixel coordinates to sample from\n\t\t\t\t\tfloat y = floor(byteOffset / width);\n\t\t\t\t\tfloat x = mod(byteOffset, width);\n\t\t\t\t\t\n\t\t\t\t\t// Add 0.5 to sample the center of the texel\n\t\t\t\t\tvec2 sampleCoord = (vec2(x, y) + 0.5) / u_resolution;\n\t\t\t\t\t\n\t\t\t\t\t// The luma value is the alpha from the source texture\n\t\t\t\t\treturn texture(u_sourceTexture, sampleCoord).a;\n\t\t\t\t} else {\n\t\t\t\t\t// Write a fixed value for chroma and beyond\n\t\t\t\t\treturn 128.0 / 255.0;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\t// Each fragment writes 4 bytes (R, G, B, A)\n\t\t\t\tfloat pixelIndex = floor(gl_FragCoord.y) * u_resolution.x + floor(gl_FragCoord.x);\n\t\t\t\tfloat baseByteOffset = pixelIndex * 4.0;\n\n\t\t\t\tvec4 result;\n\t\t\t\tfor (int i = 0; i < 4; i++) {\n\t\t\t\t\tfloat currentByteOffset = baseByteOffset + float(i);\n\t\t\t\t\tresult[i] = getByteValue(currentByteOffset);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfragColor = result;\n\t\t\t}\n\t\t`);\n const program = this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n 
this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createShader(type, source) {\n const shader = this.gl.createShader(type);\n this.gl.shaderSource(shader, source);\n this.gl.compileShader(shader);\n if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {\n console.error(\"Shader compile error:\", this.gl.getShaderInfoLog(shader));\n }\n return shader;\n }\n createVAO() {\n const vao = this.gl.createVertexArray();\n this.gl.bindVertexArray(vao);\n const vertices = new Float32Array([\n -1,\n -1,\n 0,\n 1,\n 1,\n -1,\n 1,\n 1,\n -1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0\n ]);\n const buffer = this.gl.createBuffer();\n this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);\n this.gl.bufferData(this.gl.ARRAY_BUFFER, vertices, this.gl.STATIC_DRAW);\n const positionLocation = this.gl.getAttribLocation(this.colorProgram, \"a_position\");\n const texCoordLocation = this.gl.getAttribLocation(this.colorProgram, \"a_texCoord\");\n this.gl.enableVertexAttribArray(positionLocation);\n this.gl.vertexAttribPointer(positionLocation, 2, this.gl.FLOAT, false, 16, 0);\n this.gl.enableVertexAttribArray(texCoordLocation);\n this.gl.vertexAttribPointer(texCoordLocation, 2, this.gl.FLOAT, false, 16, 8);\n return vao;\n }\n createTexture() {\n const texture = this.gl.createTexture();\n this.gl.bindTexture(this.gl.TEXTURE_2D, texture);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.LINEAR);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.LINEAR);\n return texture;\n }\n updateTexture(sourceFrame) {\n if (this.lastFrame === sourceFrame) {\n return;\n }\n if (sourceFrame.displayWidth !== this.canvas.width || sourceFrame.displayHeight !== this.canvas.height) {\n this.canvas.width = 
sourceFrame.displayWidth;\n this.canvas.height = sourceFrame.displayHeight;\n }\n this.gl.activeTexture(this.gl.TEXTURE0);\n this.gl.bindTexture(this.gl.TEXTURE_2D, this.sourceTexture);\n this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGBA, this.gl.RGBA, this.gl.UNSIGNED_BYTE, sourceFrame);\n this.lastFrame = sourceFrame;\n }\n extractColor(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.colorProgram);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n return new VideoFrame(this.canvas, {\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? undefined,\n alpha: \"discard\"\n });\n }\n extractAlpha(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform2f(this.alphaResolutionLocation, this.canvas.width, this.canvas.height);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n const { width, height } = this.canvas;\n const chromaSamples = Math.ceil(width / 2) * Math.ceil(height / 2);\n const yuvSize = width * height + chromaSamples * 2;\n const requiredHeight = Math.ceil(yuvSize / (width * 4));\n let yuv = new Uint8Array(4 * width * requiredHeight);\n this.gl.readPixels(0, 0, width, requiredHeight, this.gl.RGBA, this.gl.UNSIGNED_BYTE, yuv);\n yuv = yuv.subarray(0, yuvSize);\n assert(yuv[width * height] === 128);\n assert(yuv[yuv.length - 1] === 128);\n const init = {\n format: \"I420\",\n codedWidth: width,\n codedHeight: height,\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? 
undefined,\n transfer: [yuv.buffer]\n };\n return new VideoFrame(yuv, init);\n }\n close() {\n this.gl.getExtension(\"WEBGL_lose_context\")?.loseContext();\n this.gl = null;\n }\n}\n\nclass VideoSampleSource extends VideoSource {\n constructor(encodingConfig) {\n validateVideoEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new VideoEncoderWrapper(this, encodingConfig);\n }\n add(videoSample, encodeOptions) {\n if (!(videoSample instanceof VideoSample)) {\n throw new TypeError(\"videoSample must be a VideoSample.\");\n }\n return this._encoder.add(videoSample, false, encodeOptions);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass AudioSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!AUDIO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid audio codec '${codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass AudioEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastNumberOfChannels = null;\n this.lastSampleRate = null;\n this.isPcmEncoder = false;\n this.outputSampleSize = null;\n this.writeOutputValue = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.lastEndSampleIndex = null;\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(audioSample, shouldClose) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.lastNumberOfChannels !== null && this.lastSampleRate !== null) {\n if (audioSample.numberOfChannels !== this.lastNumberOfChannels || audioSample.sampleRate !== this.lastSampleRate) {\n throw new Error(`Audio parameters must remain constant. 
Expected ${this.lastNumberOfChannels} channels at` + ` ${this.lastSampleRate} Hz, got ${audioSample.numberOfChannels} channels at` + ` ${audioSample.sampleRate} Hz.`);\n }\n } else {\n this.lastNumberOfChannels = audioSample.numberOfChannels;\n this.lastSampleRate = audioSample.sampleRate;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(audioSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n {\n const startSampleIndex = Math.round(audioSample.timestamp * audioSample.sampleRate);\n const endSampleIndex = Math.round((audioSample.timestamp + audioSample.duration) * audioSample.sampleRate);\n if (this.lastEndSampleIndex === null) {\n this.lastEndSampleIndex = endSampleIndex;\n } else {\n const sampleDiff = startSampleIndex - this.lastEndSampleIndex;\n if (sampleDiff >= 64) {\n const fillSample = new AudioSample({\n data: new Float32Array(sampleDiff * audioSample.numberOfChannels),\n format: \"f32-planar\",\n sampleRate: audioSample.sampleRate,\n numberOfChannels: audioSample.numberOfChannels,\n numberOfFrames: sampleDiff,\n timestamp: this.lastEndSampleIndex / audioSample.sampleRate\n });\n await this.add(fillSample, true);\n }\n this.lastEndSampleIndex += audioSample.numberOfFrames;\n }\n }\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = audioSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n await this.muxer.mutex.currentPromise;\n } else if (this.isPcmEncoder) {\n await this.doPcmEncoding(audioSample, shouldClose);\n } else {\n assert(this.encoder);\n const audioData = audioSample.toAudioData();\n this.encoder.encode(audioData);\n audioData.close();\n if 
(shouldClose) {\n audioSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n await this.muxer.mutex.currentPromise;\n }\n } finally {\n if (shouldClose) {\n audioSample.close();\n }\n }\n }\n async doPcmEncoding(audioSample, shouldClose) {\n assert(this.outputSampleSize);\n assert(this.writeOutputValue);\n const { numberOfChannels, numberOfFrames, sampleRate, timestamp } = audioSample;\n const CHUNK_SIZE = 2048;\n const outputs = [];\n for (let frame = 0;frame < numberOfFrames; frame += CHUNK_SIZE) {\n const frameCount = Math.min(CHUNK_SIZE, audioSample.numberOfFrames - frame);\n const outputSize = frameCount * numberOfChannels * this.outputSampleSize;\n const outputBuffer = new ArrayBuffer(outputSize);\n const outputView = new DataView(outputBuffer);\n outputs.push({ frameCount, view: outputView });\n }\n const allocationSize = audioSample.allocationSize({ planeIndex: 0, format: \"f32-planar\" });\n const floats = new Float32Array(allocationSize / Float32Array.BYTES_PER_ELEMENT);\n for (let i = 0;i < numberOfChannels; i++) {\n audioSample.copyTo(floats, { planeIndex: i, format: \"f32-planar\" });\n for (let j = 0;j < outputs.length; j++) {\n const { frameCount, view: view2 } = outputs[j];\n for (let k = 0;k < frameCount; k++) {\n this.writeOutputValue(view2, (k * numberOfChannels + i) * this.outputSampleSize, floats[j * CHUNK_SIZE + k]);\n }\n }\n }\n if (shouldClose) {\n audioSample.close();\n }\n const meta = {\n decoderConfig: {\n codec: this.encodingConfig.codec,\n numberOfChannels,\n sampleRate\n }\n };\n for (let i = 0;i < outputs.length; i++) {\n const { frameCount, view: view2 } = outputs[i];\n const outputBuffer = view2.buffer;\n const startFrame = i * CHUNK_SIZE;\n const packet = new EncodedPacket(new Uint8Array(outputBuffer), \"key\", timestamp + startFrame / sampleRate, frameCount / sampleRate);\n 
this.encodingConfig.onEncodedPacket?.(packet, meta);\n await this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta);\n }\n }\n ensureEncoder(audioSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const { numberOfChannels, sampleRate } = audioSample;\n const encoderConfig = buildAudioEncoderConfig({\n numberOfChannels,\n sampleRate,\n ...this.encodingConfig\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customAudioEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else if (PCM_AUDIO_CODECS.includes(this.encodingConfig.codec)) {\n this.initPcmEncoder();\n } else {\n if (typeof AudioEncoder === \"undefined\") {\n throw new Error(\"AudioEncoder is not supported by this browser.\");\n }\n const support = await AudioEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.numberOfChannels} channels, ${encoderConfig.sampleRate} Hz) is not` + ` supported by this browser. 
Consider using another codec or changing your audio parameters.`);\n }\n this.encoder = new AudioEncoder({\n output: (chunk, meta) => {\n if (this.encodingConfig.codec === \"aac\" && meta?.decoderConfig) {\n let needsDescriptionOverwrite = false;\n if (!meta.decoderConfig.description || meta.decoderConfig.description.byteLength < 2) {\n needsDescriptionOverwrite = true;\n } else {\n const audioSpecificConfig = parseAacAudioSpecificConfig(toUint8Array(meta.decoderConfig.description));\n needsDescriptionOverwrite = audioSpecificConfig.objectType === 0;\n }\n if (needsDescriptionOverwrite) {\n const objectType = Number(last(encoderConfig.codec.split(\".\")));\n meta.decoderConfig.description = buildAacAudioSpecificConfig({\n objectType,\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate\n });\n }\n }\n const packet = EncodedPacket.fromEncodedChunk(chunk);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n initPcmEncoder() {\n this.isPcmEncoder = true;\n const codec = this.encodingConfig.codec;\n const { dataType, sampleSize, littleEndian } = parsePcmCodec(codec);\n this.outputSampleSize = sampleSize;\n switch (sampleSize) {\n case 1:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint8(byteOffset, clamp((value + 1) * 127.5, 0, 255));\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n view2.setInt8(byteOffset, clamp(Math.round(value * 128), -128, 127));\n };\n } else if 
(dataType === \"ulaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toUlaw(int16));\n };\n } else if (dataType === \"alaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toAlaw(int16));\n };\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 2:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint16(byteOffset, clamp((value + 1) * 32767.5, 0, 65535), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt16(byteOffset, clamp(Math.round(value * 32767), -32768, 32767), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 3:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => setUint24(view2, byteOffset, clamp((value + 1) * 8388607.5, 0, 16777215), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => setInt24(view2, byteOffset, clamp(Math.round(value * 8388607), -8388608, 8388607), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 4:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint32(byteOffset, clamp((value + 1) * 2147483647.5, 0, 4294967295), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt32(byteOffset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), littleEndian);\n } else if (dataType === \"float\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat32(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 8:\n {\n if (dataType === \"float\") {\n this.writeOutputValue = (view2, 
byteOffset, value) => view2.setFloat64(byteOffset, value, littleEndian);
        } else {
          assert(false);
        }
      }
      ;
      break;
    default:
      {
        assertNever(sampleSize);
        assert(false);
      }
      ;
    }
  }
  // Flushes whichever encoder backend is active and releases it. When
  // `forceClose` is true the flush steps and the pending-error re-throws are
  // skipped, so teardown itself cannot throw.
  // NOTE(review): the custom encoder's flush() is queued but not awaited —
  // presumably the call serializer orders it before close(); confirm.
  async flushAndClose(forceClose) {
    if (!forceClose)
      this.checkForEncoderError();
    if (this.customEncoder) {
      if (!forceClose) {
        this.customEncoderCallSerializer.call(() => this.customEncoder.flush());
      }
      await this.customEncoderCallSerializer.call(() => this.customEncoder.close());
    } else if (this.encoder) {
      if (!forceClose) {
        await this.encoder.flush();
      }
      if (this.encoder.state !== "closed") {
        this.encoder.close();
      }
    }
    // Errors may also surface during the flush above, so check again.
    if (!forceClose)
      this.checkForEncoderError();
  }
  // Number of samples still queued in the active encoder. PCM encoding is
  // synchronous, so it always reports 0.
  getQueueSize() {
    if (this.customEncoder) {
      return this.customEncoderQueueSize;
    } else if (this.isPcmEncoder) {
      return 0;
    } else {
      return this.encoder?.encodeQueueSize ?? 0;
    }
  }
  // Re-throws an error captured asynchronously in an encoder callback.
  // When the stored error was recorded without a useful stack
  // (errorNeedsNewStack), a fresh stack is attached first.
  checkForEncoderError() {
    if (this.error) {
      if (this.errorNeedsNewStack) {
        this.error.stack = new Error().stack;
      }
      throw this.error;
    }
  }
}

// Audio source fed with caller-supplied AudioSample objects; all encoding is
// delegated to a per-instance AudioEncoderWrapper.
class AudioSampleSource extends AudioSource {
  constructor(encodingConfig) {
    validateAudioEncodingConfig(encodingConfig);
    super(encodingConfig.codec);
    this._encoder = new AudioEncoderWrapper(this, encodingConfig);
  }
  // Queues one AudioSample for encoding; rejects non-AudioSample input.
  // The sample is not closed by this call (shouldClose = false).
  add(audioSample) {
    if (!(audioSample instanceof AudioSample)) {
      throw new TypeError("audioSample must be an AudioSample.");
    }
    return this._encoder.add(audioSample, false);
  }
  _flushAndClose(forceClose) {
    return this._encoder.flushAndClose(forceClose);
  }
}
// Subtitle source; validates the codec against the supported list.
class SubtitleSource extends MediaSource {
  constructor(codec) {
    super();
    this._connectedTrack = null;
    if (!SUBTITLE_CODECS.includes(codec)) {
      throw new TypeError(`Invalid subtitle codec '${codec}'. 
Must be one of: ${SUBTITLE_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar ALL_TRACK_TYPES = [\"video\", \"audio\", \"subtitle\"];\nvar validateBaseTrackMetadata = (metadata) => {\n if (!metadata || typeof metadata !== \"object\") {\n throw new TypeError(\"metadata must be an object.\");\n }\n if (metadata.languageCode !== undefined && !isIso639Dash2LanguageCode(metadata.languageCode)) {\n throw new TypeError(\"metadata.languageCode, when provided, must be a three-letter, ISO 639-2/T language code.\");\n }\n if (metadata.name !== undefined && typeof metadata.name !== \"string\") {\n throw new TypeError(\"metadata.name, when provided, must be a string.\");\n }\n if (metadata.disposition !== undefined) {\n validateTrackDisposition(metadata.disposition);\n }\n if (metadata.maximumPacketCount !== undefined && (!Number.isInteger(metadata.maximumPacketCount) || metadata.maximumPacketCount < 0)) {\n throw new TypeError(\"metadata.maximumPacketCount, when provided, must be a non-negative integer.\");\n }\n};\n\nclass Output {\n constructor(options) {\n this.state = \"pending\";\n this._tracks = [];\n this._startPromise = null;\n this._cancelPromise = null;\n this._finalizePromise = null;\n this._mutex = new AsyncMutex;\n this._metadataTags = {};\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!(options.format instanceof OutputFormat)) {\n throw new TypeError(\"options.format must be an OutputFormat.\");\n }\n if (!(options.target instanceof Target)) {\n throw new TypeError(\"options.target must be a Target.\");\n }\n if (options.target._output) {\n throw 
new Error(\"Target is already used for another output.\");\n }\n options.target._output = this;\n this.format = options.format;\n this.target = options.target;\n this._writer = options.target._createWriter();\n this._muxer = options.format._createMuxer(this);\n }\n addVideoTrack(source, metadata = {}) {\n if (!(source instanceof VideoSource)) {\n throw new TypeError(\"source must be a VideoSource.\");\n }\n validateBaseTrackMetadata(metadata);\n if (metadata.rotation !== undefined && ![0, 90, 180, 270].includes(metadata.rotation)) {\n throw new TypeError(`Invalid video rotation: ${metadata.rotation}. Has to be 0, 90, 180 or 270.`);\n }\n if (!this.format.supportsVideoRotationMetadata && metadata.rotation) {\n throw new Error(`${this.format._name} does not support video rotation metadata.`);\n }\n if (metadata.frameRate !== undefined && (!Number.isFinite(metadata.frameRate) || metadata.frameRate <= 0)) {\n throw new TypeError(`Invalid video frame rate: ${metadata.frameRate}. Must be a positive number.`);\n }\n this._addTrack(\"video\", source, metadata);\n }\n addAudioTrack(source, metadata = {}) {\n if (!(source instanceof AudioSource)) {\n throw new TypeError(\"source must be an AudioSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"audio\", source, metadata);\n }\n addSubtitleTrack(source, metadata = {}) {\n if (!(source instanceof SubtitleSource)) {\n throw new TypeError(\"source must be a SubtitleSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"subtitle\", source, metadata);\n }\n setMetadataTags(tags) {\n validateMetadataTags(tags);\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot set metadata tags after output has been started or canceled.\");\n }\n this._metadataTags = tags;\n }\n _addTrack(type, source, metadata) {\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot add track after output has been started or canceled.\");\n }\n if (source._connectedTrack) {\n throw new Error(\"Source 
is already used for a track.\");\n }\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n const presentTracksOfThisType = this._tracks.reduce((count, track2) => count + (track2.type === type ? 1 : 0), 0);\n const maxCount = supportedTrackCounts[type].max;\n if (presentTracksOfThisType === maxCount) {\n throw new Error(maxCount === 0 ? `${this.format._name} does not support ${type} tracks.` : `${this.format._name} does not support more than ${maxCount} ${type} track` + `${maxCount === 1 ? \"\" : \"s\"}.`);\n }\n const maxTotalCount = supportedTrackCounts.total.max;\n if (this._tracks.length === maxTotalCount) {\n throw new Error(`${this.format._name} does not support more than ${maxTotalCount} tracks` + `${maxTotalCount === 1 ? \"\" : \"s\"} in total.`);\n }\n const track = {\n id: this._tracks.length + 1,\n output: this,\n type,\n source,\n metadata\n };\n if (track.type === \"video\") {\n const supportedVideoCodecs = this.format.getSupportedVideoCodecs();\n if (supportedVideoCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support video tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedVideoCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` video codecs are: ${supportedVideoCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"audio\") {\n const supportedAudioCodecs = this.format.getSupportedAudioCodecs();\n if (supportedAudioCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support audio tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedAudioCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. 
Supported` + ` audio codecs are: ${supportedAudioCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"subtitle\") {\n const supportedSubtitleCodecs = this.format.getSupportedSubtitleCodecs();\n if (supportedSubtitleCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support subtitle tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedSubtitleCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` subtitle codecs are: ${supportedSubtitleCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n }\n this._tracks.push(track);\n source._connectedTrack = track;\n }\n async start() {\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n for (const trackType of ALL_TRACK_TYPES) {\n const presentTracksOfThisType = this._tracks.reduce((count, track) => count + (track.type === trackType ? 1 : 0), 0);\n const minCount = supportedTrackCounts[trackType].min;\n if (presentTracksOfThisType < minCount) {\n throw new Error(minCount === supportedTrackCounts[trackType].max ? `${this.format._name} requires exactly ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.`);\n }\n }\n const totalMinCount = supportedTrackCounts.total.min;\n if (this._tracks.length < totalMinCount) {\n throw new Error(totalMinCount === supportedTrackCounts.total.max ? `${this.format._name} requires exactly ${totalMinCount} track` + `${totalMinCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${totalMinCount} track` + `${totalMinCount === 1 ? 
\"\" : \"s\"}.`);\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._startPromise) {\n console.warn(\"Output has already been started.\");\n return this._startPromise;\n }\n return this._startPromise = (async () => {\n this.state = \"started\";\n this._writer.start();\n const release = await this._mutex.acquire();\n await this._muxer.start();\n const promises = this._tracks.map((track) => track.source._start());\n await Promise.all(promises);\n release();\n })();\n }\n getMimeType() {\n return this._muxer.getMimeType();\n }\n async cancel() {\n if (this._cancelPromise) {\n console.warn(\"Output has already been canceled.\");\n return this._cancelPromise;\n } else if (this.state === \"finalizing\" || this.state === \"finalized\") {\n console.warn(\"Output has already been finalized.\");\n return;\n }\n return this._cancelPromise = (async () => {\n this.state = \"canceled\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(true));\n await Promise.all(promises);\n await this._writer.close();\n release();\n })();\n }\n async finalize() {\n if (this.state === \"pending\") {\n throw new Error(\"Cannot finalize before starting.\");\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Cannot finalize after canceling.\");\n }\n if (this._finalizePromise) {\n console.warn(\"Output has already been finalized.\");\n return this._finalizePromise;\n }\n return this._finalizePromise = (async () => {\n this.state = \"finalizing\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(false));\n await Promise.all(promises);\n await this._muxer.finalize();\n await this._writer.flush();\n await this._writer.finalize();\n this.state = \"finalized\";\n release();\n })();\n }\n}\n// ../../node_modules/mediabunny/dist/modules/src/index.js\n/*!\n * Copyright (c) 2025-present, 
/* Vanilagy and contributors
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/.
 */

// src/core/utils/error-handler.ts
/**
 * Best-effort conversion of an unknown thrown value to a readable message:
 * Error instances yield their .message, anything else is String()-coerced.
 */
function extractErrorMessage(error) {
  if (error instanceof Error) {
    return error.message;
  }
  return String(error);
}

// src/core/utils/logger.ts
/**
 * Decides whether debug logging is enabled. True when any of:
 *  - globalThis.__VIDTREO_DEBUG__ or __VIDTREO_DEV__ is true,
 *  - NODE_ENV is "development" or "test" (Node-like environments),
 *  - localStorage flag VIDTREO_DEBUG === "true".
 *
 * Fix: the previous code computed `process?.env ? "development" : undefined`,
 * i.e. it returned "development" whenever process.env existed at all, turning
 * debug logging on unconditionally in every Node-like environment. It now
 * actually reads process.env.NODE_ENV.
 */
function isDebugEnabled() {
  const globalAny = globalThis;
  if (globalAny.__VIDTREO_DEBUG__ === true || globalAny.__VIDTREO_DEV__ === true) {
    return true;
  }
  const envNode = typeof process !== "undefined" && process?.env ? process.env.NODE_ENV : undefined;
  if (envNode === "development" || envNode === "test") {
    return true;
  }
  if (typeof localStorage !== "undefined") {
    const flag = localStorage.getItem("VIDTREO_DEBUG");
    if (flag === "true") {
      return true;
    }
  }
  return false;
}
// Evaluated once at module load; all logger output is silenced when false.
var isDevelopment = isDebugEnabled();
var ANSI_COLORS = {
  reset: "\x1B[0m",
  bright: "\x1B[1m",
  dim: "\x1B[2m",
  red: "\x1B[31m",
  green: "\x1B[32m",
  yellow: "\x1B[33m",
  blue: "\x1B[34m",
  magenta: "\x1B[35m",
  cyan: "\x1B[36m",
  white: "\x1B[37m",
  gray: "\x1B[90m"
};
/**
 * Builds the colored "[LEVEL] message" string for console output. Returns ""
 * when debug logging is disabled. `options` may override prefix and color.
 */
function formatMessage(level, message, options) {
  if (!isDevelopment) {
    return "";
  }
  const prefix = options?.prefix || `[${level.toUpperCase()}]`;
  const color = options?.color || getDefaultColor(level);
  const colorCode = ANSI_COLORS[color];
  const resetCode = ANSI_COLORS.reset;
  return `${colorCode}${prefix}${resetCode} ${message}`;
}
// Maps a log level to its default ANSI color name.
function getDefaultColor(level) {
  switch (level) {
    case "error":
      return "red";
    case "warn":
      return "yellow";
    case "info":
      return "cyan";
    case "debug":
      return "gray";
    default:
      return "white";
  }
}
// Core log sink: no-op unless debug logging is enabled; otherwise delegates
// to the matching console method with the colored prefix.
function log(level, message, ...args) {
  if (!isDevelopment) {
    return;
  }
  const formatted = formatMessage(level, message);
  console[level](formatted, ...args);
}
// Public logger facade used throughout the worker; every method funnels into
// log() and is therefore gated on isDevelopment.
var logger = {
  log: (message, ...args) => {
    log("log", message, ...args);
  },
  info: (message, ...args) => {
    log("info", message, ...args);
  },
  warn: (message, ...args) => {
    log("warn", message, ...args);
  },
  error: (message, ...args) => {
    log("error", message, ...args);
  },
  debug: (message, ...args) => {
    log("debug", message, ...args);
  },
  group: (label, color = "cyan") => {
    if (!isDevelopment) {
      return;
    }
    const colorCode = ANSI_COLORS[color];
    const resetCode = ANSI_COLORS.reset;
    console.group(`${colorCode}${label}${resetCode}`);
  },
  groupEnd: () => {
    if (!isDevelopment) {
      return;
    }
    console.groupEnd();
  }
};

// src/core/utils/validation.ts
// Throws `message` when value is null or undefined; returns it otherwise.
function requireNonNull(value, message) {
  if (value === null || value === undefined) {
    throw new Error(message);
  }
  return value;
}
// Throws `message` only when value is undefined (null is allowed).
function requireDefined(value, message) {
  if (value === undefined) {
    throw new Error(message);
  }
  return value;
}
// Like requireNonNull but with a standardized "<name> is not initialized"
// message for lazily-created components.
function requireInitialized(value, componentName) {
  if (value === null || value === undefined) {
    throw new Error(`${componentName} is not initialized`);
  }
  return value;
}

// src/core/processor/worker/watermark-utils.ts
/**
 * Scales the watermark image so its width is 7% of the video width,
 * preserving aspect ratio. Returns rounded pixel dimensions.
 */
function calculateWatermarkTargetSize(videoWidth, imageWidth, imageHeight) {
  const targetWidth = Math.round(videoWidth * 0.07);
  const scaleFactor = targetWidth / imageWidth;
  const targetHeight = Math.round(imageHeight * scaleFactor);
  return { width: targetWidth, height: targetHeight };
}
/**
 * Computes the top-left pixel coordinates for the watermark given its size,
 * the video size and a named position. Unknown positions fall back to
 * bottom-right. A fixed 20px padding is applied on all sides.
 */
function getWatermarkPosition(options) {
  const { watermarkWidth, watermarkHeight, videoWidth, videoHeight, position } = options;
  const padding = 20;
  switch (position) {
    case "top-left":
      return { x: padding, y: padding };
    case "top-right":
      return { x: videoWidth - watermarkWidth - padding, y: padding };
    case "bottom-left":
      return { x: padding, y: videoHeight - watermarkHeight - padding };
    case "bottom-right":
      return {
        x: videoWidth - watermarkWidth - padding,
        y: videoHeight - watermarkHeight - padding
      };
    case "center":
      return {
        x: (videoWidth - watermarkWidth) / 2,
        y: (videoHeight - watermarkHeight) / 2
      };
    default:
      return {
        x: videoWidth - watermarkWidth - padding,
        y: videoHeight - watermarkHeight - padding
      };
  }
}
{\n x: videoWidth - watermarkWidth - padding,\n y: videoHeight - watermarkHeight - padding\n };\n case \"center\":\n return {\n x: (videoWidth - watermarkWidth) / 2,\n y: (videoHeight - watermarkHeight) / 2\n };\n default:\n return {\n x: videoWidth - watermarkWidth - padding,\n y: videoHeight - watermarkHeight - padding\n };\n }\n}\n\n// src/core/processor/worker/recorder-worker.ts\nvar CHUNK_SIZE = 16 * 1024 * 1024;\nvar DEFAULT_KEY_FRAME_INTERVAL_SECONDS = 5;\nvar OVERLAY_BACKGROUND_OPACITY = 0.6;\nvar OVERLAY_PADDING = 16;\nvar OVERLAY_TEXT_COLOR = \"#ffffff\";\nvar OVERLAY_FONT_SIZE = 16;\nvar OVERLAY_FONT_FAMILY = \"Arial, sans-serif\";\nvar OVERLAY_MIN_WIDTH = 200;\nvar OVERLAY_MIN_HEIGHT = 50;\n\nclass RecorderWorker {\n output = null;\n videoSource = null;\n audioSource = null;\n videoProcessor = null;\n audioProcessor = null;\n isPaused = false;\n isMuted = false;\n frameRate = 30;\n lastVideoTimestamp = 0;\n lastAudioTimestamp = 0;\n baseVideoTimestamp = null;\n frameCount = 0;\n config = null;\n lastKeyFrameTimestamp = 0;\n forceNextKeyFrame = false;\n videoProcessingActive = false;\n audioProcessingActive = false;\n isStopping = false;\n isFinalized = false;\n bufferUpdateInterval = null;\n pausedDuration = 0;\n pauseStartedAt = null;\n overlayConfig = null;\n overlayCanvas = null;\n compositionCanvas = null;\n compositionCtx = null;\n watermarkCanvas = null;\n hiddenIntervals = [];\n currentHiddenIntervalStart = null;\n recordingStartTime = 0;\n pendingVisibilityUpdates = [];\n isScreenCapture = false;\n driftOffset = 0;\n constructor() {\n self.addEventListener(\"message\", this.handleMessage);\n }\n formatFileSize(bytes2) {\n if (bytes2 === 0) {\n return \"0 Bytes\";\n }\n const units = [\"Bytes\", \"KB\", \"MB\", \"GB\"];\n const base = 1024;\n const index = Math.floor(Math.log(bytes2) / Math.log(base));\n const size = Math.round(bytes2 / base ** index * 100) / 100;\n return `${size} ${units[index]}`;\n }\n shouldIgnoreMessage() {\n return 
this.isStopping || this.isFinalized;
  }
  // Attaches a catch handler to a fire-and-forget async operation so a
  // rejection is logged and forwarded to the main thread instead of becoming
  // an unhandled rejection. `context` names the operation for the log line.
  handleAsyncOperation(operation, context) {
    operation.catch((error) => {
      logger.error(`[RecorderWorker] Error in ${context}:`, error);
      this.sendError(error);
    });
  }
  // Worker message dispatcher: routes each message type to its handler.
  // Async handlers are wrapped via handleAsyncOperation; start/stop messages
  // are dropped while the recorder is stopping or already finalized.
  // Unknown types are reported back as errors.
  handleMessage = (event) => {
    const message = event.data;
    logger.debug("[RecorderWorker] Received message:", { type: message.type });
    if (message.type === "start") {
      if (this.shouldIgnoreMessage()) {
        logger.debug("[RecorderWorker] start ignored (stopping/finalized)");
        return;
      }
      this.handleAsyncOperation(this.handleStart(message.videoStream, message.audioStream, message.config, message.overlayConfig), "handleStart");
      return;
    }
    if (message.type === "pause") {
      this.handlePause();
      return;
    }
    if (message.type === "resume") {
      this.handleResume();
      return;
    }
    if (message.type === "stop") {
      if (this.shouldIgnoreMessage()) {
        logger.debug("[RecorderWorker] stop ignored (stopping/finalized)");
        return;
      }
      this.handleAsyncOperation(this.handleStop(), "handleStop");
      return;
    }
    if (message.type === "toggleMute") {
      this.handleToggleMute();
      return;
    }
    if (message.type === "switchSource") {
      this.handleAsyncOperation(this.handleSwitchSource(message.videoStream), "handleSwitchSource");
      return;
    }
    if (message.type === "updateFps") {
      this.handleUpdateFps(message.fps);
      return;
    }
    if (message.type === "updateVisibility") {
      this.handleUpdateVisibility(message.isHidden, message.timestamp);
      return;
    }
    if (message.type === "updateSourceType") {
      this.handleUpdateSourceType(message.isScreenCapture);
      return;
    }
    this.sendError(new Error(`Unknown message type: ${message.type}`));
  };
  // Validates the transcode config: width/height/fps/bitrate are optional but
  // must be positive when present; keyFrameInterval must be positive.
  validateConfig(config) {
    requireDefined(config, "Transcode config is required");
    if (config.width !== undefined && config.width <= 0) {
      throw new Error("Video width must be greater than zero");
    }
    if (config.height !== undefined && config.height <= 0) {
      throw new 
Error(\"Video height must be greater than zero\");\n }\n if (config.fps !== undefined && config.fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (config.bitrate !== undefined && typeof config.bitrate === \"number\" && config.bitrate <= 0) {\n throw new Error(\"Bitrate must be greater than zero\");\n }\n if (config.keyFrameInterval <= 0) {\n throw new Error(\"Key frame interval must be greater than zero\");\n }\n }\n validateFormat(format) {\n if (format !== \"mp4\") {\n throw new Error(`Format ${format} is not yet supported in worker. Only MP4 is currently supported.`);\n }\n }\n initializeRecordingState(config) {\n this.config = config;\n this.frameRate = config.fps || 30;\n this.isPaused = false;\n this.isMuted = false;\n this.lastVideoTimestamp = 0;\n this.lastAudioTimestamp = 0;\n this.baseVideoTimestamp = null;\n this.frameCount = 0;\n this.lastKeyFrameTimestamp = 0;\n this.forceNextKeyFrame = false;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.overlayCanvas = null;\n this.hiddenIntervals = [];\n this.currentHiddenIntervalStart = null;\n this.pendingVisibilityUpdates = [];\n this.watermarkCanvas = null;\n }\n setupOverlayConfig(overlayConfig) {\n this.overlayConfig = overlayConfig ? { enabled: overlayConfig.enabled, text: overlayConfig.text } : null;\n this.recordingStartTime = overlayConfig?.recordingStartTime !== undefined ? 
overlayConfig.recordingStartTime / 1000 : performance.now() / 1000;
    const logData = {
      hasOverlayConfig: !!this.overlayConfig,
      overlayEnabled: this.overlayConfig?.enabled,
      overlayText: this.overlayConfig?.text,
      recordingStartTime: this.recordingStartTime
    };
    logger.debug("[RecorderWorker] Overlay config initialized", logData);
  }
  // Creates the fragmented-MP4 Output whose muxed bytes are streamed back to
  // the main thread chunk by chunk through sendChunk.
  createOutput() {
    const writable = new WritableStream({
      write: (chunk) => {
        this.sendChunk(chunk.data, chunk.position);
      }
    });
    this.output = new Output({
      format: new Mp4OutputFormat({
        fastStart: "fragmented"
      }),
      target: new StreamTarget(writable, {
        chunked: true,
        chunkSize: CHUNK_SIZE
      })
    });
  }
  // Builds the VideoSampleSource from the transcode config and registers it
  // as the output's video track, passing fps along as track metadata.
  createVideoSource(config) {
    const fps = config.fps || 30;
    const keyFrameIntervalSeconds = config.keyFrameInterval;
    const videoSourceOptions = {
      codec: config.codec,
      width: config.width,
      height: config.height,
      sizeChangeBehavior: "contain",
      alpha: "discard",
      bitrateMode: "variable",
      latencyMode: "quality",
      contentHint: "detail",
      hardwareAcceleration: "no-preference",
      keyFrameInterval: keyFrameIntervalSeconds,
      bitrate: this.deserializeBitrate(config.bitrate)
    };
    this.videoSource = new VideoSampleSource(videoSourceOptions);
    const output = requireNonNull(this.output, "Output must be initialized before adding video track");
    const trackOptions = {};
    // fps defaults to 30 above, so this guard can never be false; kept as-is
    // to preserve the published artifact's behavior byte-for-byte.
    if (fps !== undefined) {
      trackOptions.frameRate = fps;
    }
    output.addVideoTrack(this.videoSource, trackOptions);
  }
  // Adds an audio track and starts audio processing, but only when an audio
  // stream plus both audioBitrate and audioCodec are configured.
  setupAudioSource(audioStream, config) {
    if (audioStream && config.audioBitrate && config.audioCodec) {
      if (config.audioBitrate <= 0) {
        throw new Error("Audio bitrate must be greater than zero");
      }
      this.audioSource = new AudioSampleSource({
        codec: config.audioCodec,
        bitrate: config.audioBitrate,
        bitrateMode: "variable"
      });
      const output = requireNonNull(this.output, "Output must be initialized before adding audio track");
      output.addAudioTrack(this.audioSource);
      
this.setupAudioProcessing(audioStream);
    }
  }
  // Orchestrates a new recording: validates config, tears down any previous
  // output, resets state, builds overlay/output/tracks, optionally kicks off
  // the watermark load, then starts the muxer and the periodic buffer-size
  // reports. Order matters: tracks must be added before output.start().
  async handleStart(videoStream, audioStream, config, overlayConfig) {
    this.validateConfig(config);
    logger.debug("[RecorderWorker] handleStart called", {
      hasVideoStream: !!videoStream,
      hasAudioStream: !!audioStream,
      config: {
        width: config.width,
        height: config.height,
        fps: config.fps,
        bitrate: config.bitrate
      },
      hasOverlayConfig: !!overlayConfig,
      overlayConfig
    });
    this.isStopping = false;
    this.isFinalized = false;
    if (this.output) {
      logger.debug("[RecorderWorker] Cleaning up existing output");
      await this.cleanup();
    }
    this.initializeRecordingState(config);
    this.setupOverlayConfig(overlayConfig);
    const format = config.format || "mp4";
    this.validateFormat(format);
    this.createOutput();
    this.createVideoSource(config);
    if (videoStream) {
      this.setupVideoProcessing(videoStream);
    }
    this.setupAudioSource(audioStream, config);
    const output = requireNonNull(this.output, "Output must be initialized before starting");
    if (this.config?.watermark) {
      // Intentionally not awaited — the watermark loads in the background and
      // prepareWatermark catches its own errors.
      this.prepareWatermark();
    }
    await output.start();
    this.startBufferUpdates();
    this.sendReady();
    this.sendStateChange("recording");
  }
  // Begins a 1 Hz interval that reports the muxed byte count (raw and
  // human-formatted) to the main thread; no-op if already running.
  startBufferUpdates() {
    if (this.bufferUpdateInterval !== null) {
      return;
    }
    this.bufferUpdateInterval = self.setInterval(() => {
      if (this.output) {
        const size = this.getBufferSize();
        const formatted = this.formatFileSize(size);
        this.sendBufferUpdate(size, formatted);
      }
    }, 1000);
  }
  // Stops and clears the buffer-update interval if one is active.
  stopBufferUpdates() {
    if (this.bufferUpdateInterval !== null) {
      self.clearInterval(this.bufferUpdateInterval);
      this.bufferUpdateInterval = null;
    }
  }
  // Total bytes emitted so far; totalSize is accumulated elsewhere.
  getBufferSize() {
    return this.totalSize;
  }
  totalSize = 0;
  // Starts consuming the video ReadableStream; processVideoFrames runs the
  // read loop until the stream ends or processing is deactivated.
  setupVideoProcessing(videoStream) {
    if (!this.videoSource) {
      return;
    }
    this.videoProcessor = videoStream.getReader();
    this.videoProcessingActive = true;
    this.processVideoFrames();
  }
  async handlePausedVideoFrame() {
    if 
(!this.videoProcessor) {
      return false;
    }
    const pausedResult = await this.videoProcessor.read();
    if (pausedResult.done) {
      return false;
    }
    // While paused, frames are read and immediately discarded (closed) so the
    // stream keeps draining without being encoded.
    if (pausedResult.value) {
      pausedResult.value.close();
    }
    return true;
  }
  // Maps a raw VideoFrame timestamp (microseconds; falls back to wall clock
  // when absent) onto the recording's monotonic timeline in seconds.
  // Handles: first-frame baseline capture (and replay of queued visibility
  // updates), re-baselining after a source switch (frameCount === 0 while
  // lastVideoTimestamp > 0), paused-time subtraction, monotonicity (never
  // earlier than the previous frame) and clamping negatives to 0.
  calculateVideoFrameTimestamp(videoFrame) {
    requireDefined(this.frameRate, "Frame rate must be set");
    if (this.frameRate <= 0) {
      throw new Error("Frame rate must be greater than zero");
    }
    const rawTs = typeof videoFrame.timestamp === "number" && videoFrame.timestamp !== null ? videoFrame.timestamp / 1e6 : performance.now() / 1000;
    if (this.baseVideoTimestamp === null) {
      this.baseVideoTimestamp = rawTs;
      const logData = {
        baseVideoTimestamp: this.baseVideoTimestamp,
        recordingStartTime: this.recordingStartTime,
        difference: this.baseVideoTimestamp - this.recordingStartTime,
        pendingUpdates: this.pendingVisibilityUpdates.length
      };
      logger.debug("[RecorderWorker] baseVideoTimestamp set", logData);
      // Visibility updates that arrived before the first frame could not be
      // normalized yet; process them now that the baseline is known.
      for (const update of this.pendingVisibilityUpdates) {
        this.processVisibilityUpdate(update.isHidden, update.timestamp);
      }
      this.pendingVisibilityUpdates = [];
    }
    requireNonNull(this.baseVideoTimestamp, "Base video timestamp must be set");
    if (this.frameCount === 0 && this.lastVideoTimestamp > 0) {
      // First frame from a new source: shift the baseline so this frame
      // continues seamlessly at the previous source's last timestamp.
      const originalBase = this.baseVideoTimestamp;
      const offset = rawTs - originalBase;
      this.baseVideoTimestamp = rawTs - this.lastVideoTimestamp;
      const frameTimestamp2 = this.lastVideoTimestamp;
      logger.debug("[RecorderWorker] First frame after source switch", {
        rawTs,
        originalBase,
        offset,
        adjustedBaseVideoTimestamp: this.baseVideoTimestamp,
        continuationTimestamp: this.lastVideoTimestamp,
        frameTimestamp: frameTimestamp2,
        isScreenCapture: this.isScreenCapture
      });
      return frameTimestamp2;
    }
    const normalizedTs = rawTs - this.baseVideoTimestamp - this.pausedDuration;
    const prevTs = this.lastVideoTimestamp > 0 ? this.lastVideoTimestamp : 0;
    // If time appears to go backwards, advance by one nominal frame instead.
    const frameTimestamp = normalizedTs >= prevTs ? normalizedTs : prevTs + 1 / this.frameRate;
    if (frameTimestamp < 0) {
      logger.warn("[RecorderWorker] Negative frame timestamp detected, clamping to zero", { frameTimestamp, normalizedTs, prevTs });
      return 0;
    }
    if (this.lastVideoTimestamp === 0) {
      this.lastVideoTimestamp = frameTimestamp;
    }
    logger.debug("[RecorderWorker] Frame timestamp calculation", {
      rawTs,
      baseVideoTimestamp: this.baseVideoTimestamp,
      normalizedTs,
      prevTs,
      frameTimestamp,
      lastVideoTimestamp: this.lastVideoTimestamp,
      isScreenCapture: this.isScreenCapture,
      frameCount: this.frameCount
    });
    return frameTimestamp;
  }
  // Renders the overlay text into a rounded-rect OffscreenCanvas sized to the
  // measured text (plus padding, with minimum dimensions) and returns it.
  createOverlayCanvas(text) {
    requireDefined(text, "Overlay text is required");
    const canvas = new OffscreenCanvas(1, 1);
    const ctx = requireNonNull(canvas.getContext("2d"), "Failed to get OffscreenCanvas context");
    // Measure with the target font first; resizing the canvas below resets
    // context state, so the font is assigned again before drawing.
    ctx.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;
    const textMetrics = ctx.measureText(text);
    const textWidth = textMetrics.width;
    const textHeight = OVERLAY_FONT_SIZE;
    const overlayWidth = Math.max(OVERLAY_MIN_WIDTH, textWidth + OVERLAY_PADDING * 2);
    const overlayHeight = Math.max(OVERLAY_MIN_HEIGHT, textHeight + OVERLAY_PADDING * 2);
    canvas.width = overlayWidth;
    canvas.height = overlayHeight;
    const r = 20;
    const g = 20;
    const b = 20;
    const borderRadius = 50;
    ctx.fillStyle = `rgba(${r}, ${g}, ${b}, ${OVERLAY_BACKGROUND_OPACITY})`;
    ctx.beginPath();
    ctx.roundRect(0, 0, overlayWidth, overlayHeight, borderRadius);
    ctx.fill();
    ctx.fillStyle = OVERLAY_TEXT_COLOR;
    ctx.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;
    ctx.textBaseline = "middle";
    ctx.textAlign = "center";
    const textX = overlayWidth / 2;
    const textY = overlayHeight / 2;
    ctx.fillText(text, textX, textY);
    return canvas;
  }
  // The status overlay is pinned to the top-right corner of the video.
  getOverlayPosition(overlayWidth, videoWidth) {
    const padding = OVERLAY_PADDING;
    return {
      x: 
videoWidth - overlayWidth - padding,\n y: padding\n };\n }\n async prepareWatermark() {\n if (!this.config?.watermark || this.watermarkCanvas) {\n return;\n }\n const { url: url2, opacity = 1 } = this.config.watermark;\n try {\n const response = await fetch(url2, { mode: \"cors\" });\n if (!response.ok) {\n throw new Error(`HTTP error! status: ${response.status}`);\n }\n const blob = await response.blob();\n const isSvg = url2.toLowerCase().endsWith(\".svg\") || blob.type === \"image/svg+xml\";\n if (isSvg) {\n logger.warn(\"[RecorderWorker] Loading SVG watermark. Note: Some environments may not support SVG in createImageBitmap inside workers. If the watermark doesn't appear, consider using a PNG or a Data URL.\");\n }\n const bitmap = await createImageBitmap(blob).catch((err) => {\n throw new Error(`Failed to create ImageBitmap from blob (${blob.type}). Errors can happen with SVGs in workers or invalid formats: ${err.message}`);\n });\n const videoWidth = this.config?.width || 1280;\n const { width: targetWidth, height: targetHeight } = calculateWatermarkTargetSize(videoWidth, bitmap.width, bitmap.height);\n const scaleFactor = targetWidth / bitmap.width;\n const canvas = new OffscreenCanvas(targetWidth, targetHeight);\n const ctx = canvas.getContext(\"2d\", { willReadFrequently: false });\n if (!ctx) {\n bitmap.close();\n throw new Error(\"Failed to get watermark canvas context\");\n }\n ctx.globalAlpha = opacity;\n ctx.drawImage(bitmap, 0, 0, targetWidth, targetHeight);\n ctx.globalAlpha = 1;\n bitmap.close();\n this.watermarkCanvas = canvas;\n logger.debug(\"[RecorderWorker] Watermark prepared with pre-applied opacity\", {\n width: canvas.width,\n height: canvas.height,\n opacity,\n scaleFactor\n });\n } catch (error) {\n const errorMessage = error instanceof Error ? error.message : String(error);\n logger.error(\"[RecorderWorker] Failed to load watermark. This is often caused by CORS if the image is on another domain. 
Try using a Data URL (base64) or ensure the server has Access-Control-Allow-Origin: *.\", {\n url: url2,\n error: errorMessage\n });\n }\n }\n ensureCompositionCanvas(width, height) {\n if (!this.compositionCanvas) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionCtx = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), \"Failed to get composition canvas context\");\n logger.debug(\"[RecorderWorker] Composition canvas created\", {\n width,\n height\n });\n return this.compositionCtx;\n }\n if (!this.compositionCtx) {\n this.compositionCtx = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), \"Failed to get composition canvas context\");\n return this.compositionCtx;\n }\n const widthChanged = this.compositionCanvas.width !== width;\n const heightChanged = this.compositionCanvas.height !== height;\n if (widthChanged || heightChanged) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionCtx = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), \"Failed to get composition canvas context\");\n logger.debug(\"[RecorderWorker] Composition canvas resized\", {\n width,\n height\n });\n return this.compositionCtx;\n }\n return this.compositionCtx;\n }\n shouldApplyOverlay(timestamp) {\n if (!this.overlayConfig?.enabled) {\n return false;\n }\n if (this.isScreenCapture) {\n return false;\n }\n const completedIntervalMatch = this.hiddenIntervals.some((interval) => timestamp >= interval.start && timestamp <= interval.end);\n const currentIntervalMatch = this.currentHiddenIntervalStart !== null && timestamp >= this.currentHiddenIntervalStart;\n const shouldApply = completedIntervalMatch || currentIntervalMatch;\n if (this.frameCount % 90 === 0) {\n logger.debug(\"[RecorderWorker] Overlay check\", {\n timestamp,\n shouldApply,\n frameCount: this.frameCount,\n intervalsCount: this.hiddenIntervals.length\n 
});\n }\n return shouldApply;\n }\n handleUpdateVisibility(isHidden, timestamp) {\n if (this.baseVideoTimestamp === null) {\n this.pendingVisibilityUpdates.push({ isHidden, timestamp });\n return;\n }\n this.processVisibilityUpdate(isHidden, timestamp);\n }\n processVisibilityUpdate(isHidden, timestamp) {\n const timestampSeconds = timestamp / 1000;\n const normalizedTimestamp = timestampSeconds - this.recordingStartTime - this.pausedDuration;\n if (isHidden) {\n if (this.currentHiddenIntervalStart === null) {\n this.currentHiddenIntervalStart = Math.max(0, normalizedTimestamp);\n logger.debug(\"[RecorderWorker] Started hidden interval\", {\n start: this.currentHiddenIntervalStart\n });\n }\n } else if (this.currentHiddenIntervalStart !== null) {\n const endTimestamp = Math.max(0, normalizedTimestamp);\n if (endTimestamp > this.currentHiddenIntervalStart) {\n const interval = {\n start: this.currentHiddenIntervalStart,\n end: endTimestamp\n };\n this.hiddenIntervals.push(interval);\n logger.debug(\"[RecorderWorker] Completed hidden interval\", {\n interval,\n duration: endTimestamp - this.currentHiddenIntervalStart,\n totalIntervals: this.hiddenIntervals.length\n });\n } else {\n logger.warn(\"[RecorderWorker] Invalid interval (end <= start), discarding\");\n }\n this.currentHiddenIntervalStart = null;\n }\n }\n async processVideoFrame(videoFrame) {\n const videoSource = requireInitialized(this.videoSource, \"Video source\");\n const config = requireInitialized(this.config, \"Transcode config\");\n const frameTimestamp = this.calculateVideoFrameTimestamp(videoFrame);\n requireDefined(this.frameRate, \"Frame rate must be set\");\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n const frameDuration = 1 / this.frameRate;\n let frameToProcess = videoFrame;\n let imageBitmap = null;\n const needsOverlay = this.shouldApplyOverlay(frameTimestamp);\n const needsWatermark = !!(this.config?.watermark && this.watermarkCanvas);\n 
const needsComposition = needsOverlay || needsWatermark;\n if (needsComposition) {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (width <= 0 || height <= 0) {\n logger.warn(\"[RecorderWorker] Invalid video frame dimensions, skipping composition\", { width, height });\n } else {\n const ctx = this.ensureCompositionCanvas(width, height);\n ctx.clearRect(0, 0, width, height);\n ctx.drawImage(videoFrame, 0, 0, width, height);\n if (needsOverlay && this.overlayConfig) {\n if (!this.overlayCanvas) {\n this.overlayCanvas = this.createOverlayCanvas(this.overlayConfig.text);\n logger.debug(\"[RecorderWorker] Overlay canvas created\", {\n overlayWidth: this.overlayCanvas.width,\n overlayHeight: this.overlayCanvas.height\n });\n }\n const overlayPosition = this.getOverlayPosition(this.overlayCanvas.width, width);\n ctx.drawImage(this.overlayCanvas, overlayPosition.x, overlayPosition.y);\n }\n if (needsWatermark && this.config?.watermark && this.watermarkCanvas) {\n const watermarkPosition = getWatermarkPosition({\n watermarkWidth: this.watermarkCanvas.width,\n watermarkHeight: this.watermarkCanvas.height,\n videoWidth: width,\n videoHeight: height,\n position: this.config.watermark.position\n });\n ctx.drawImage(this.watermarkCanvas, watermarkPosition.x, watermarkPosition.y);\n }\n const compositionCanvas = requireNonNull(this.compositionCanvas, \"Composition canvas must exist after ensureCompositionCanvas\");\n imageBitmap = compositionCanvas.transferToImageBitmap();\n const frameInit = {};\n if (typeof videoFrame.timestamp === \"number\") {\n frameInit.timestamp = videoFrame.timestamp;\n }\n if (typeof videoFrame.duration === \"number\") {\n frameInit.duration = videoFrame.duration;\n }\n frameToProcess = new VideoFrame(imageBitmap, frameInit);\n }\n }\n const maxLead = 0.05;\n const maxLag = 0.1;\n const targetAudio = this.lastAudioTimestamp;\n let adjustedTimestamp = frameTimestamp + this.driftOffset;\n if (adjustedTimestamp - 
targetAudio > maxLead) {\n adjustedTimestamp = targetAudio + maxLead;\n } else if (targetAudio - adjustedTimestamp > maxLag) {\n adjustedTimestamp = targetAudio - maxLag;\n }\n const monotonicTimestamp = this.lastVideoTimestamp + frameDuration;\n const finalTimestamp = adjustedTimestamp >= monotonicTimestamp ? adjustedTimestamp : monotonicTimestamp;\n let keyFrameIntervalSeconds = config.keyFrameInterval;\n if (!(keyFrameIntervalSeconds > 0)) {\n keyFrameIntervalSeconds = DEFAULT_KEY_FRAME_INTERVAL_SECONDS;\n }\n let keyFrameIntervalFrames = Math.round(keyFrameIntervalSeconds * this.frameRate);\n if (keyFrameIntervalFrames < 1) {\n keyFrameIntervalFrames = 1;\n }\n const timeSinceLastKeyFrame = finalTimestamp - this.lastKeyFrameTimestamp;\n const isKeyFrame = this.forceNextKeyFrame || timeSinceLastKeyFrame >= keyFrameIntervalSeconds || this.frameCount % keyFrameIntervalFrames === 0;\n this.driftOffset *= 0.5;\n const sample = new VideoSample(frameToProcess, {\n timestamp: finalTimestamp,\n duration: frameDuration\n });\n const addError = await videoSource.add(sample, isKeyFrame ? 
{ keyFrame: true } : undefined).then(() => null).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add video frame: ${errorMessage}`));\n return error;\n });\n sample.close();\n if (!addError) {\n this.frameCount += 1;\n this.lastVideoTimestamp = finalTimestamp;\n if (isKeyFrame) {\n this.lastKeyFrameTimestamp = finalTimestamp;\n this.forceNextKeyFrame = false;\n }\n if (this.frameCount % 90 === 0 && this.audioProcessingActive) {\n const avDrift = this.lastAudioTimestamp - this.lastVideoTimestamp;\n logger.debug(\"[RecorderWorker] AV drift metrics\", {\n frameCount: this.frameCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n avDrift,\n isScreenCapture: this.isScreenCapture\n });\n }\n }\n if (imageBitmap) {\n imageBitmap.close();\n imageBitmap = null;\n }\n if (frameToProcess !== videoFrame) {\n frameToProcess.close();\n }\n videoFrame.close();\n }\n async processVideoFrames() {\n if (!(this.videoProcessor && this.videoSource)) {\n return;\n }\n while (this.videoProcessingActive && !this.isStopping) {\n if (this.isPaused) {\n const shouldContinue = await this.handlePausedVideoFrame();\n if (!shouldContinue) {\n break;\n }\n continue;\n }\n const result = await this.videoProcessor.read();\n if (result.done) {\n break;\n }\n const videoFrame = result.value;\n if (!videoFrame) {\n continue;\n }\n await this.processVideoFrame(videoFrame).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n logger.error(\"[RecorderWorker] Error processing video frame\", errorMessage);\n videoFrame.close();\n });\n }\n }\n setupAudioProcessing(audioStream) {\n if (!this.audioSource) {\n logger.warn(\"[RecorderWorker] setupAudioProcessing called but audioSource is null\");\n return;\n }\n logger.debug(\"[RecorderWorker] setupAudioProcessing\", {\n hasAudioSource: !!this.audioSource,\n hasAudioStream: !!audioStream,\n audioProcessingActive: 
this.audioProcessingActive,\n lastAudioTimestamp: this.lastAudioTimestamp\n });\n this.audioProcessor = audioStream.getReader();\n this.audioProcessingActive = true;\n logger.debug(\"[RecorderWorker] Audio processing started\", {\n hasAudioProcessor: !!this.audioProcessor,\n audioProcessingActive: this.audioProcessingActive\n });\n this.processAudioData();\n }\n handlePausedAudioData(audioData) {\n audioData.close();\n }\n createAudioBuffer(audioData) {\n const numberOfFrames = audioData.numberOfFrames;\n if (numberOfFrames <= 0) {\n throw new Error(\"Number of frames must be greater than zero\");\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(\"Number of channels must be greater than zero\");\n }\n const audioBuffer = new Float32Array(numberOfFrames * numberOfChannels);\n audioData.copyTo(audioBuffer, { planeIndex: 0 });\n return audioBuffer;\n }\n createAudioSample(audioData, audioBuffer) {\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n throw new Error(\"Sample rate must be greater than zero\");\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(\"Number of channels must be greater than zero\");\n }\n const shouldWriteSilence = this.isMuted;\n return new AudioSample({\n data: shouldWriteSilence ? 
new Float32Array(audioBuffer.length) : audioBuffer,\n format: \"f32-planar\",\n numberOfChannels,\n sampleRate,\n timestamp: this.lastAudioTimestamp\n });\n }\n async processAudioSample(audioData, audioSample) {\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n throw new Error(\"Sample rate must be greater than zero\");\n }\n const numberOfFrames = audioData.numberOfFrames;\n const duration = numberOfFrames / sampleRate;\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n logger.debug(\"[RecorderWorker] Audio sample processed\", {\n lastAudioTimestamp: this.lastAudioTimestamp,\n duration,\n newLastAudioTimestamp: this.lastAudioTimestamp + duration,\n sampleRate: audioData.sampleRate,\n numberOfFrames: audioData.numberOfFrames\n });\n this.lastAudioTimestamp += duration;\n audioSample.close();\n audioData.close();\n }\n async processAudioData() {\n if (!(this.audioProcessor && this.audioSource)) {\n logger.warn(\"[RecorderWorker] processAudioData called but processor or source is null\", {\n hasAudioProcessor: !!this.audioProcessor,\n hasAudioSource: !!this.audioSource\n });\n return;\n }\n logger.debug(\"[RecorderWorker] processAudioData loop started\", {\n hasAudioProcessor: !!this.audioProcessor,\n hasAudioSource: !!this.audioSource,\n audioProcessingActive: this.audioProcessingActive,\n isPaused: this.isPaused,\n isMuted: this.isMuted,\n lastAudioTimestamp: this.lastAudioTimestamp\n });\n let audioSampleCount = 0;\n while (this.audioProcessingActive) {\n const result = await this.audioProcessor.read();\n if (result.done) {\n logger.debug(\"[RecorderWorker] Audio processor stream ended\", {\n audioSampleCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n audioProcessingActive: this.audioProcessingActive\n });\n 
this.audioProcessingActive = false;\n break;\n }\n const audioData = result.value;\n if (!audioData) {\n logger.warn(\"[RecorderWorker] Received null audioData from processor\");\n continue;\n }\n audioSampleCount += 1;\n if (audioSampleCount % 100 === 0) {\n logger.debug(\"[RecorderWorker] Processing audio sample\", {\n sampleCount: audioSampleCount,\n numberOfFrames: audioData.numberOfFrames,\n sampleRate: audioData.sampleRate,\n numberOfChannels: audioData.numberOfChannels,\n lastAudioTimestamp: this.lastAudioTimestamp,\n isPaused: this.isPaused,\n isMuted: this.isMuted\n });\n }\n if (this.isPaused) {\n this.handlePausedAudioData(audioData);\n continue;\n }\n const audioBuffer = this.createAudioBuffer(audioData);\n const audioSample = this.createAudioSample(audioData, audioBuffer);\n await this.processAudioSample(audioData, audioSample);\n }\n logger.debug(\"[RecorderWorker] processAudioData loop ended\", {\n audioSampleCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n audioProcessingActive: this.audioProcessingActive\n });\n }\n handlePause() {\n if (this.isPaused) {\n return;\n }\n this.pauseStartedAt = performance.now() / 1000;\n this.isPaused = true;\n this.sendStateChange(\"paused\");\n }\n handleResume() {\n if (!this.isPaused) {\n return;\n }\n const now = performance.now() / 1000;\n if (this.pauseStartedAt !== null) {\n this.pausedDuration += now - this.pauseStartedAt;\n }\n this.pauseStartedAt = null;\n this.isPaused = false;\n this.sendStateChange(\"recording\");\n }\n async handleStop() {\n if (this.isStopping || this.isFinalized) {\n logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return;\n }\n this.isStopping = true;\n this.isFinalized = true;\n this.videoProcessingActive = false;\n this.audioProcessingActive = false;\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = 
null;\n }\n if (this.output) {\n await this.output.finalize().catch((error) => {\n logger.warn(\"[RecorderWorker] finalize failed (ignored, already finalized?)\", error);\n });\n }\n await this.cleanup();\n this.sendStateChange(\"stopped\");\n this.isStopping = false;\n }\n handleToggleMute() {\n this.isMuted = !this.isMuted;\n }\n handleUpdateFps(fps) {\n if (fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n logger.debug(\"[RecorderWorker] Updating FPS\", {\n fps,\n previousFps: this.frameRate\n });\n this.frameRate = fps;\n if (this.config) {\n this.config.fps = fps;\n }\n }\n handleUpdateSourceType(isScreenCapture) {\n logger.debug(\"[RecorderWorker] Updating source type\", {\n isScreenCapture,\n previousIsScreenCapture: this.isScreenCapture\n });\n this.isScreenCapture = isScreenCapture;\n }\n async handleSwitchSource(videoStream) {\n requireDefined(videoStream, \"Video stream is required\");\n requireDefined(this.frameRate, \"Frame rate must be set\");\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (this.videoProcessor) {\n this.videoProcessingActive = false;\n await this.videoProcessor.cancel();\n let drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n while (!drainResult.done) {\n drainResult.value?.close();\n drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n }\n this.videoProcessor = null;\n }\n requireNonNull(this.baseVideoTimestamp, \"Base video timestamp must be set for source switch\");\n const minFrameDuration = 1 / this.frameRate;\n const rawDrift = this.lastAudioTimestamp - this.lastVideoTimestamp;\n const maxDriftCorrection = 0.1;\n this.driftOffset = Math.max(-maxDriftCorrection, Math.min(maxDriftCorrection, rawDrift));\n const continuationTimestamp = Math.max(this.lastAudioTimestamp, this.lastVideoTimestamp) + minFrameDuration;\n const previousVideoTimestamp = this.lastVideoTimestamp;\n this.lastVideoTimestamp 
= continuationTimestamp;\n this.frameCount = 0;\n this.forceNextKeyFrame = true;\n logger.debug(\"[RecorderWorker] handleSwitchSource - preserving baseVideoTimestamp\", {\n continuationTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n frameRate: this.frameRate,\n isScreenCapture: this.isScreenCapture,\n baseVideoTimestamp: this.baseVideoTimestamp,\n recordingStartTime: this.recordingStartTime,\n lastAudioTimestamp: this.lastAudioTimestamp,\n previousVideoTimestamp,\n minFrameDuration,\n rawDrift,\n driftOffset: this.driftOffset\n });\n this.setupVideoProcessing(videoStream);\n }\n async cleanup() {\n this.stopBufferUpdates();\n this.videoProcessingActive = false;\n this.audioProcessingActive = false;\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n if (this.videoSource) {\n if (!this.isFinalized) {\n this.videoSource.close();\n }\n this.videoSource = null;\n }\n if (this.audioSource) {\n if (!this.isFinalized) {\n this.audioSource.close();\n }\n this.audioSource = null;\n }\n if (this.output) {\n if (!this.isFinalized) {\n await this.output.cancel().catch((error) => {\n logger.warn(\"[RecorderWorker] cancel failed (ignored, possibly finalized)\", error);\n });\n this.isFinalized = true;\n }\n this.output = null;\n }\n this.lastVideoTimestamp = 0;\n this.lastAudioTimestamp = 0;\n this.baseVideoTimestamp = null;\n this.frameCount = 0;\n this.lastKeyFrameTimestamp = 0;\n this.forceNextKeyFrame = false;\n this.totalSize = 0;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.overlayCanvas = null;\n this.watermarkCanvas = null;\n this.overlayConfig = null;\n this.hiddenIntervals = [];\n this.currentHiddenIntervalStart = null;\n this.recordingStartTime = 0;\n this.pendingVisibilityUpdates = [];\n this.isScreenCapture = false;\n }\n sendReady() {\n const response = { type: \"ready\" };\n 
self.postMessage(response);\n }\n sendError(error) {\n const errorMessage = extractErrorMessage(error);\n const response = {\n type: \"error\",\n error: errorMessage\n };\n self.postMessage(response);\n }\n sendChunk(data, position) {\n this.totalSize = Math.max(this.totalSize, position + data.length);\n const response = {\n type: \"chunk\",\n data,\n position\n };\n const buffer = data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength);\n self.postMessage(response, [buffer]);\n }\n sendBufferUpdate(size, formatted) {\n const response = {\n type: \"bufferUpdate\",\n size,\n formatted\n };\n self.postMessage(response);\n }\n sendStateChange(state) {\n const response = {\n type: \"stateChange\",\n state\n };\n self.postMessage(response);\n }\n deserializeBitrate(bitrate) {\n if (typeof bitrate === \"number\") {\n return bitrate;\n }\n if (bitrate === \"low\") {\n return QUALITY_LOW;\n }\n if (bitrate === \"medium\") {\n return QUALITY_MEDIUM;\n }\n if (bitrate === \"high\") {\n return QUALITY_HIGH;\n }\n if (bitrate === \"very-high\") {\n return QUALITY_VERY_HIGH;\n }\n return QUALITY_HIGH;\n }\n}\nnew RecorderWorker;\n";
712
-
713
- export type WorkerMessage = WorkerStartMessage | WorkerPauseMessage | WorkerResumeMessage | WorkerStopMessage | WorkerToggleMuteMessage | WorkerSwitchSourceMessage | WorkerUpdateFpsMessage | WorkerUpdateVisibilityMessage | WorkerUpdateSourceTypeMessage;
714
- export type WorkerResponse = WorkerReadyResponse | WorkerErrorResponse | WorkerChunkResponse | WorkerBufferUpdateResponse | WorkerStateChangeResponse;
715
- export type WorkerStartMessage = {
716
- type: "start";
717
- videoStream: ReadableStream<VideoFrame> | null;
718
- audioStream: ReadableStream<AudioData> | null;
719
- config: WorkerTranscodeConfig;
720
- overlayConfig?: {
721
- enabled: boolean;
722
- text: string;
723
- recordingStartTime?: number;
724
- };
725
- };
726
- export type WorkerPauseMessage = {
727
- type: "pause";
728
- };
729
- export type WorkerResumeMessage = {
730
- type: "resume";
731
- };
732
- export type WorkerStopMessage = {
733
- type: "stop";
734
- };
735
- export type WorkerToggleMuteMessage = {
736
- type: "toggleMute";
737
- };
738
- export type WorkerSwitchSourceMessage = {
739
- type: "switchSource";
740
- videoStream: ReadableStream<VideoFrame>;
741
- };
742
- export type WorkerUpdateFpsMessage = {
743
- type: "updateFps";
744
- fps: number;
745
- };
746
- export type WorkerUpdateVisibilityMessage = {
747
- type: "updateVisibility";
748
- isHidden: boolean;
749
- timestamp: number;
750
- };
751
- export type WorkerUpdateSourceTypeMessage = {
752
- type: "updateSourceType";
753
- isScreenCapture: boolean;
754
- };
755
- export type WorkerReadyResponse = {
756
- type: "ready";
757
- };
758
- export type WorkerErrorResponse = {
759
- type: "error";
760
- error: string;
761
- };
762
- export type WorkerChunkResponse = {
763
- type: "chunk";
764
- data: Uint8Array;
765
- position: number;
766
- };
767
- export type WorkerBufferUpdateResponse = {
768
- type: "bufferUpdate";
769
- size: number;
770
- formatted: string;
771
- };
772
- export type WorkerStateChangeResponse = {
773
- type: "stateChange";
774
- state: "recording" | "paused" | "stopped";
775
- };
776
- import type { VideoCodec } from "mediabunny";
777
- import type { WatermarkConfig } from "../../transcode/transcode-types";
778
- export type WorkerTranscodeConfig = {
779
- width?: number;
780
- height?: number;
781
- fps?: number;
782
- bitrate?: number | string;
783
- audioCodec: "aac" | "opus";
784
- audioBitrate?: number;
785
- codec: VideoCodec;
786
- keyFrameInterval: number;
787
- format: "mp4" | "mkv" | "mov" | "webm";
788
- watermark?: WatermarkConfig;
789
- };
790
-
791
- import type { WatermarkPosition } from "../types";
792
- /**
793
- * Calculates the target size for a watermark based on video width.
794
- * Target is approximately 7% of video width.
795
- */
796
- export declare function calculateWatermarkTargetSize(videoWidth: number, imageWidth: number, imageHeight: number): {
797
- width: number;
798
- height: number;
799
- };
800
- /**
801
- * Options for calculating watermark position.
802
- */
803
- export type WatermarkPositionOptions = {
804
- watermarkWidth: number;
805
- watermarkHeight: number;
806
- videoWidth: number;
807
- videoHeight: number;
808
- position: WatermarkPosition;
809
- };
810
- /**
811
- * Calculates the (x, y) coordinates for drawing a watermark on a video frame.
812
- */
813
- export declare function getWatermarkPosition(options: WatermarkPositionOptions): {
814
- x: number;
815
- y: number;
816
- };
817
-
1143
+ setOnSourceChange(callback: (stream: MediaStream) => void): void;
1144
+ setOnBufferUpdate(callback: (size: number, formatted: string) => void): void;
1145
+ setOnError(callback: (error: Error) => void): void;
1146
+ cancel(): Promise<void>;
1147
+ }
818
1148
  export {};
819
1149
 
820
- import { type Quality } from "mediabunny";
821
- export declare function serializeBitrate(bitrate: number | Quality | undefined): number | string | undefined;
822
- export declare function deserializeBitrate(bitrate: number | string | undefined): number | Quality;
823
-
824
- export declare function calculateBarColor(position: number): string;
825
-
826
- export declare function isScreenCaptureStream(stream: MediaStream): boolean;
827
-
828
- export declare function isMobileDevice(): boolean;
829
-
830
- export declare const FILE_SIZE_UNITS: readonly ["Bytes", "KB", "MB", "GB"];
831
- export declare const FILE_SIZE_BASE = 1024;
832
- export declare function formatFileSize(bytes: number): string;
833
- export declare function formatTime(totalSeconds: number): string;
834
-
835
- export type VisibilityInterval = {
836
- start: number;
837
- end: number;
1150
+ import type { TranscodeConfig } from "../transcode/transcode-types";
1151
+ import { type VisibilityInterval } from "../utils/tab-visibility-tracker";
1152
+ export type StopRecordingResult = {
1153
+ blob: Blob;
1154
+ tabVisibilityIntervals: VisibilityInterval[];
838
1155
  };
839
- export declare class TabVisibilityTracker {
1156
+ export declare class StreamRecordingState {
840
1157
  private recordingStartTime;
841
- private totalPausedTime;
1158
+ private recordingTimer;
842
1159
  private pauseStartTime;
843
- private intervals;
844
- private currentIntervalStart;
845
- private isTracking;
846
- private readonly visibilityChangeHandler;
847
- private readonly blurHandler;
848
- private readonly focusHandler;
849
- constructor();
850
- start(recordingStartTime: number): void;
851
- pause(): void;
852
- resume(): void;
853
- getIntervals(): VisibilityInterval[];
854
- reset(): void;
855
- cleanup(): void;
856
- private checkInitialState;
857
- private handleVisibilityChange;
858
- private handleBlur;
859
- private handleFocus;
860
- private startInterval;
861
- private endCurrentIntervalIfActive;
862
- private normalizeTimestamp;
1160
+ private totalPausedTime;
1161
+ private streamProcessor;
1162
+ private bufferSizeUpdateInterval;
1163
+ private tabVisibilityTracker;
1164
+ private visibilityChangeHandler;
1165
+ private blurHandler;
1166
+ private focusHandler;
1167
+ private readonly streamManager;
1168
+ constructor(streamManager: StreamManager);
1169
+ isRecording(): boolean;
1170
+ getStreamProcessor(): StreamProcessor | null;
1171
+ getAudioStreamForAnalysis(): MediaStream | null;
1172
+ startRecording(processor: StreamProcessor, config: TranscodeConfig, enableTabVisibilityOverlay?: boolean, tabVisibilityOverlayText?: string): Promise<void>;
1173
+ stopRecording(): Promise<StopRecordingResult>;
1174
+ pauseRecording(): void;
1175
+ resumeRecording(): void;
1176
+ toggleMute(): void;
1177
+ muteAudio(): void;
1178
+ unmuteAudio(): void;
1179
+ isMuted(): boolean;
1180
+ switchVideoSource(newStream: MediaStream): Promise<void>;
1181
+ getCurrentVideoSource(): MediaStream;
1182
+ private formatTimeElapsed;
1183
+ private startRecordingTimer;
1184
+ private clearRecordingTimer;
1185
+ private clearBufferSizeInterval;
1186
+ private resetRecordingState;
1187
+ private resetPauseState;
1188
+ private setupVisibilityUpdates;
1189
+ private cleanupVisibilityUpdates;
1190
+ destroy(): void;
863
1191
  }
864
1192
 
865
- declare const ANSI_COLORS: {
866
- readonly reset: "\u001B[0m";
867
- readonly bright: "\u001B[1m";
868
- readonly dim: "\u001B[2m";
869
- readonly red: "\u001B[31m";
870
- readonly green: "\u001B[32m";
871
- readonly yellow: "\u001B[33m";
872
- readonly blue: "\u001B[34m";
873
- readonly magenta: "\u001B[35m";
874
- readonly cyan: "\u001B[36m";
875
- readonly white: "\u001B[37m";
876
- readonly gray: "\u001B[90m";
877
- };
878
- export declare const logger: {
879
- readonly log: (message: string, ...args: unknown[]) => void;
880
- readonly info: (message: string, ...args: unknown[]) => void;
881
- readonly warn: (message: string, ...args: unknown[]) => void;
882
- readonly error: (message: string, ...args: unknown[]) => void;
883
- readonly debug: (message: string, ...args: unknown[]) => void;
884
- readonly group: (label: string, color?: keyof typeof ANSI_COLORS) => void;
885
- readonly groupEnd: () => void;
886
- };
887
- export {};
888
-
889
- export declare function requireNonNull<T>(value: T | null | undefined, message: string): T;
890
- export declare function requireDefined<T>(value: T | undefined, message: string): T;
891
- export declare function requireActive(isActive: boolean, componentName: string): void;
892
- export declare function requireInitialized<T>(value: T | null | undefined, componentName: string): T;
893
- export declare function requireStream(stream: MediaStream | null, message?: string): MediaStream;
894
- export declare function requireProcessor<T>(processor: T | null, componentName?: string): T;
895
- /**
896
- * Validates that mediaDevices API is available.
897
- * Throws a descriptive error if not available (e.g., on HTTP instead of HTTPS).
898
- */
899
- export declare function requireMediaDevices(): MediaDevices;
900
-
901
- export declare function extractErrorMessage(error: unknown): string;
902
-
903
- export declare function extractVideoDuration(file: File | Blob): Promise<number>;
904
-
905
- export declare function isMobileDevice(): boolean;
1193
+ export declare const DEFAULT_CAMERA_CONSTRAINTS: Readonly<CameraConstraints>;
1194
+ export declare const DEFAULT_STREAM_CONFIG: Readonly<StreamConfig>;
1195
+ export declare const DEFAULT_RECORDING_OPTIONS: Readonly<RecordingOptions>;
906
1196
 
907
- export type PendingUpload = {
908
- id: string;
909
- blob: Blob;
910
- apiKey: string;
911
- backendUrl: string;
912
- filename: string;
913
- duration?: number;
914
- metadata?: Record<string, unknown>;
915
- userMetadata?: Record<string, unknown>;
916
- status: "pending" | "uploading" | "failed" | "completed";
917
- retryCount: number;
918
- lastError?: string;
919
- createdAt: number;
920
- updatedAt: number;
1197
+ export type TransitionCallbacks = {
1198
+ onTransitionStart?: (message: string) => void;
1199
+ onTransitionEnd?: () => void;
1200
+ onScreenSelectionStart?: () => void;
1201
+ onScreenSelectionEnd?: () => void;
921
1202
  };
922
- export declare class VideoStorageService {
923
- private db;
924
- init(): Promise<void>;
925
- isInitialized(): boolean;
926
- savePendingUpload(upload: Omit<PendingUpload, "id" | "createdAt" | "updatedAt" | "status" | "retryCount">): Promise<string>;
927
- getPendingUploads(status?: PendingUpload["status"]): Promise<PendingUpload[]>;
928
- updateUploadStatus(id: string, updates: Partial<PendingUpload>): Promise<void>;
929
- deleteUpload(id: string): Promise<void>;
930
- cleanupPermanentlyFailedUploads(retentionHours?: number): Promise<number>;
931
- getTotalStorageSize(): Promise<number>;
932
- private generateUploadId;
933
- private executeTransaction;
934
- }
1203
+ export declare function notifyTransitionStart(callbacks: TransitionCallbacks, message: string): void;
1204
+ export declare function notifyTransitionEnd(callbacks: TransitionCallbacks): void;
1205
+ export declare function notifyScreenSelectionStart(callbacks: TransitionCallbacks): void;
1206
+ export declare function notifyScreenSelectionEnd(callbacks: TransitionCallbacks): void;
1207
+ export declare function handleScreenSelectionError(callbacks: TransitionCallbacks): void;
935
1208
 
936
- import type { UploadResult } from "../upload/types";
937
- export type UploadCallbacks = {
938
- onUploadProgress: (id: string, progress: number) => void;
939
- onUploadComplete: (id: string, result: UploadResult) => void;
940
- onUploadError: (id: string, error: Error) => void;
1209
+ import type { SourceType } from "../recording/types";
1210
+ export type ScreenShareCallbacks = {
1211
+ onSourceChange?: (sourceType: SourceType) => Promise<void>;
941
1212
  };
1213
+ export type ScreenShareDependencies = {
1214
+ callbacks: ScreenShareCallbacks;
1215
+ streamManager: CameraStreamManager;
1216
+ combineScreenShareWithOriginalAudio: (screenVideoTrack: MediaStreamTrack) => MediaStream;
1217
+ stopStreamTracks: (stream: MediaStream) => void;
1218
+ stopStreamVideoTracks: (stream: MediaStream) => void;
1219
+ getCurrentSourceType: () => SourceType;
1220
+ setCurrentSourceType: (sourceType: SourceType) => void;
1221
+ getOriginalCameraStream: () => MediaStream | null;
1222
+ getScreenShareStream: () => MediaStream | null;
1223
+ setScreenShareStream: (stream: MediaStream | null) => void;
1224
+ getScreenShareTrackEndHandler: () => (() => void) | null;
1225
+ setScreenShareTrackEndHandler: (handler: (() => void) | null) => void;
1226
+ switchToCamera: () => Promise<void>;
1227
+ handleSwitchError: (error: unknown) => void;
1228
+ };
1229
+ export declare function processScreenShareStream(screenShareStream: MediaStream, currentStream: MediaStream | null, dependencies: ScreenShareDependencies): Promise<MediaStream>;
1230
+ export declare function setupScreenShareTrackHandler(newStream: MediaStream, dependencies: ScreenShareDependencies): void;
1231
+ export declare function removeScreenShareTrackHandler(stream: MediaStream | null, dependencies: ScreenShareDependencies): void;
942
1232
 
943
- export declare class StorageManager {
944
- private storageService;
945
- private cleanupIntervalId;
946
- initialize(onCleanupError: (error: string) => void): Promise<void>;
947
- private setupCleanupInterval;
948
- performCleanup(): Promise<void>;
949
- getStorageService(): VideoStorageService | null;
950
- destroy(): void;
1233
+ import type { SourceType } from "../recording/types";
1234
+ export type SourceSwitchCallbacks = {
1235
+ onSourceChange?: (sourceType: SourceType) => Promise<void>;
1236
+ onPreviewUpdate?: (stream: MediaStream) => Promise<void>;
1237
+ onError?: (error: Error) => void;
1238
+ onTransitionStart?: (message: string) => void;
1239
+ onTransitionEnd?: () => void;
1240
+ onScreenSelectionStart?: () => void;
1241
+ onScreenSelectionEnd?: () => void;
1242
+ getSelectedCameraDeviceId?: () => string | null;
1243
+ getSelectedMicDeviceId?: () => string | null;
1244
+ };
1245
+ export declare class SourceSwitchManager {
1246
+ private currentSourceType;
1247
+ private originalCameraStream;
1248
+ private screenShareStream;
1249
+ private screenShareTrackEndHandler;
1250
+ private readonly streamManager;
1251
+ private callbacks;
1252
+ private readonly cameraStreamBuilder;
1253
+ private readonly getOriginalCameraConstraints;
1254
+ private readonly setOriginalCameraConstraints;
1255
+ constructor(streamManager: CameraStreamManager, callbacks?: SourceSwitchCallbacks);
1256
+ getCurrentSourceType(): SourceType;
1257
+ getOriginalCameraStream(): MediaStream | null;
1258
+ private storeOriginalCameraConstraints;
1259
+ private storeOriginalCameraStream;
1260
+ private createError;
1261
+ private waitForTracksToEnd;
1262
+ private combineScreenShareWithOriginalAudio;
1263
+ private isPermissionDeniedError;
1264
+ switchToScreenCapture(): Promise<MediaStream | null>;
1265
+ private canReuseStream;
1266
+ private canReuseOriginalStream;
1267
+ private canReuseManagerStream;
1268
+ private getScreenShareDependencies;
1269
+ getCameraStream(): Promise<MediaStream>;
1270
+ switchToCamera(): Promise<void>;
1271
+ private stopScreenShareStreamTracks;
1272
+ private stopDisplayTracks;
1273
+ private handleScreenShareStop;
1274
+ private applyCameraStream;
1275
+ toggleSource(): Promise<void>;
1276
+ private switchToScreen;
1277
+ private handleToggleError;
1278
+ handleRecordingStop(): Promise<void>;
1279
+ cleanup(): void;
1280
+ setCallbacks(callbacks: SourceSwitchCallbacks): void;
951
1281
  }
952
1282
 
953
- export type StorageQuota = {
954
- usage: number;
955
- quota: number;
956
- available: number;
957
- percentage: number;
958
- };
959
- export declare class QuotaManager {
960
- getQuota(): Promise<StorageQuota>;
961
- hasSpaceFor(sizeInBytes: number): Promise<boolean>;
962
- requestPersistentStorage(): Promise<boolean>;
963
- isPersistent(): Promise<boolean>;
964
- formatBytes(bytes: number): string;
965
- shouldWarn(threshold?: number): Promise<boolean>;
966
- isCritical(threshold?: number): Promise<boolean>;
967
- private checkThreshold;
1283
+ export declare class StreamManager {
1284
+ private mediaStream;
1285
+ private state;
1286
+ private readonly eventListeners;
1287
+ private readonly streamConfig;
1288
+ private selectedAudioDeviceId;
1289
+ private selectedVideoDeviceId;
1290
+ constructor(streamConfig?: Partial<StreamConfig>);
1291
+ getState(): StreamState;
1292
+ getStream(): MediaStream | null;
1293
+ getAudioStreamForAnalysis(): MediaStream | null;
1294
+ isActive(): boolean;
1295
+ on<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): () => void;
1296
+ off<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): void;
1297
+ once<T extends keyof StreamEventMap>(event: T, listener: StreamEventListener<T>): () => void;
1298
+ emit<T extends keyof StreamEventMap>(event: T, data: StreamEventMap[T]): void;
1299
+ setState(newState: StreamState): void;
1300
+ setAudioDevice(deviceId: string | null): void;
1301
+ setVideoDevice(deviceId: string | null): void;
1302
+ getAudioDevice(): string | null;
1303
+ getVideoDevice(): string | null;
1304
+ getAvailableDevices(): Promise<{
1305
+ audioinput: MediaDeviceInfo[];
1306
+ videoinput: MediaDeviceInfo[];
1307
+ }>;
1308
+ private buildDeviceConstraints;
1309
+ private buildVideoConstraints;
1310
+ private buildAudioConstraints;
1311
+ startStream(): Promise<MediaStream>;
1312
+ stopStream(): void;
1313
+ private stopStreamTracks;
1314
+ private isTrackLive;
1315
+ private tryReplaceTrack;
1316
+ private recreateStreamWithNewTrack;
1317
+ private switchDeviceTrack;
1318
+ switchVideoDevice(deviceId: string | null): Promise<MediaStream>;
1319
+ switchAudioDevice(deviceId: string | null): Promise<MediaStream>;
1320
+ setMediaStream(stream: MediaStream): void;
1321
+ setAudioTracksEnabled(enabled: boolean): void;
1322
+ destroy(): void;
968
1323
  }
969
1324
 
970
- export type AudioLevelCallbacks = {
971
- onLevelUpdate: (level: number, isMuted: boolean) => void;
1325
+ export type CameraStreamBuilderDependencies = {
1326
+ streamManager: CameraStreamManager;
1327
+ logger: {
1328
+ debug: (message: string, data?: Record<string, unknown>) => void;
1329
+ warn: (message: string, data?: Record<string, unknown>) => void;
1330
+ };
1331
+ getSelectedCameraDeviceId: () => string | null;
1332
+ getSelectedMicDeviceId: () => string | null;
1333
+ getOriginalCameraStream: () => MediaStream | null;
1334
+ setOriginalCameraStream: (stream: MediaStream | null) => void;
1335
+ getOriginalCameraConstraints: () => MediaTrackConstraints | null;
972
1336
  };
973
-
974
- export declare class AudioLevelAnalyzer {
975
- private audioContext;
976
- private analyser;
977
- private audioLevelIntervalId;
978
- private audioLevel;
979
- private getMutedState;
980
- private currentStream;
981
- startTracking(stream: MediaStream, callbacks: AudioLevelCallbacks, getMutedState?: () => boolean): void;
982
- stopTracking(): void;
983
- getAudioLevel(): number;
984
- private getAudioContextClass;
985
- private calculateAudioLevel;
986
- private checkMutedState;
1337
+ export declare class CameraStreamBuilder {
1338
+ private readonly dependencies;
1339
+ constructor(dependencies: CameraStreamBuilderDependencies);
1340
+ createCameraStreamWithOriginalAudio(cameraDeviceId: string | null): Promise<MediaStream | null>;
1341
+ createCameraStreamWithNewAudio(cameraDeviceId: string | null): Promise<MediaStream>;
1342
+ createNewCameraStreamForRecording(): Promise<MediaStream>;
1343
+ getCameraStream(parameters: {
1344
+ canReuseOriginalStream: () => boolean;
1345
+ canReuseManagerStream: () => boolean;
1346
+ }): Promise<MediaStream>;
987
1347
  }
988
1348
 
989
- export declare function transcodeVideo(input: TranscodeInput, config?: Partial<TranscodeConfig>, onProgress?: (progress: number) => void): Promise<TranscodeResult>;
990
- export declare function transcodeVideoForNativeCamera(file: File, config?: Partial<TranscodeConfig>, onProgress?: (progress: number) => void): Promise<TranscodeResult>;
991
-
992
1349
  import type { Quality, VideoCodec } from "mediabunny";
993
- export type { AudioCodec, OutputFormat, WatermarkConfig, WatermarkPosition, } from "../processor/types";
994
- import type { AudioCodec, OutputFormat, WatermarkConfig } from "../processor/types";
1350
+ export type OutputFormat = "mp4" | "mkv" | "mov" | "webm";
1351
+ export type AudioCodec = "aac" | "opus";
1352
+ export type WatermarkPosition = "top-left" | "top-right" | "bottom-left" | "bottom-right" | "center";
1353
+ /**
1354
+ * Watermark configuration for video recordings
1355
+ *
1356
+ * Performance & File Size Impact:
1357
+ * - Watermarks are burned into video frames, increasing file size
1358
+ * - Watermark size: 5% of video width (optimized for visibility vs file size)
1359
+ * - File size increase depends on opacity:
1360
+ * - opacity: 1.0 (opaque) → ~2-5% larger files (recommended)
1361
+ * - opacity: 0.5 (semi-transparent) → ~5-15% larger files
1362
+ * - opacity: 0.3 or lower → ~10-20% larger files
1363
+ *
1364
+ * Best Practices:
1365
+ * - Use fully opaque watermarks (opacity: 1.0) for production
1366
+ * - Use simple logos (PNG with transparency) rather than complex images
1367
+ * - Prefer solid colors over gradients for better compression
1368
+ * - Consider server-side watermarking for maximum efficiency
1369
+ */
1370
+ export type WatermarkConfig = {
1371
+ /** URL or data URI of the watermark image. Supports PNG, JPG, WebP. */
1372
+ url: string;
1373
+ /**
1374
+ * Opacity level (0.0 to 1.0). Default: 1.0
1375
+ * WARNING: Lower opacity significantly increases file size due to reduced compression efficiency.
1376
+ */
1377
+ opacity?: number;
1378
+ /** Position of the watermark on the video frame */
1379
+ position: WatermarkPosition;
1380
+ };
995
1381
  export type TranscodeConfig = {
996
1382
  format: OutputFormat;
997
1383
  fps?: number;
@@ -1001,11 +1387,6 @@ export type TranscodeConfig = {
1001
1387
  codec?: VideoCodec;
1002
1388
  audioCodec?: AudioCodec;
1003
1389
  audioBitrate?: number;
1004
- tabVisibilityIntervals?: Array<{
1005
- start: number;
1006
- end: number;
1007
- }>;
1008
- tabVisibilityOverlayText?: string;
1009
1390
  watermark?: WatermarkConfig;
1010
1391
  };
1011
1392
  export type TranscodeInput = Blob | File | string;
@@ -1014,276 +1395,673 @@ export type TranscodeResult = {
1014
1395
  blob: Blob;
1015
1396
  };
1016
1397
 
1017
- export declare function validateFile(file: File, config?: {
1018
- maxFileSize?: number;
1019
- maxRecordingTime?: number | null;
1020
- allowedFormats?: string[];
1021
- }): Promise<FileValidationResult>;
1398
+ export declare const ERROR_RECORDING_INVALID_CONTAINER_LAYOUT = "recording.invalid-container-layout";
1399
+ export type Mp4TopLevelBox = {
1400
+ type: string;
1401
+ size: number;
1402
+ startOffset: number;
1403
+ endOffset: number;
1404
+ };
1405
+ export type InvalidMp4ContainerLayoutError = Error & {
1406
+ code: typeof ERROR_RECORDING_INVALID_CONTAINER_LAYOUT;
1407
+ detectedBoxTypes: string[];
1408
+ };
1409
+ export declare function parseMp4TopLevelBoxes(input: ArrayBuffer | Uint8Array): Mp4TopLevelBox[];
1410
+ export declare function assertMp4ContainerIsNonFragmented(input: ArrayBuffer | Uint8Array): void;
1022
1411
 
1023
- export type NativeCameraFile = {
1024
- file: File;
1025
- previewUrl: string;
1026
- duration: number;
1027
- validated: boolean;
1412
+ import { type Quality } from "mediabunny";
1413
+ export declare function serializeBitrate(bitrate: number | Quality | undefined): number | string | undefined;
1414
+ export declare function deserializeBitrate(bitrate: number | string | undefined): number | Quality;
1415
+
1416
+ import type { Quality, VideoCodec } from "mediabunny";
1417
+ type VideoCodecCheckOptions = {
1418
+ width?: number;
1419
+ height?: number;
1420
+ bitrate?: number | Quality;
1028
1421
  };
1029
- export type FileValidationResult = {
1030
- valid: boolean;
1031
- error?: string;
1422
+ type AudioCodecCheckOptions = {
1423
+ bitrate?: number | Quality;
1032
1424
  };
1033
- export type NativeCameraConfig = {
1034
- maxFileSize?: number;
1035
- maxDuration?: number;
1036
- allowedFormats?: string[];
1425
+ type MediabunnyModule = {
1426
+ canEncodeVideo?: (codec: VideoCodec, options: VideoCodecCheckOptions) => Promise<boolean>;
1427
+ getFirstEncodableAudioCodec?: (codecs: AudioCodec[], options: AudioCodecCheckOptions) => Promise<string | null>;
1037
1428
  };
1429
+ type MediabunnyLoaderDependencies = {
1430
+ loadMediabunny?: () => Promise<MediabunnyModule>;
1431
+ };
1432
+ export declare function detectBestCodec(width: number | undefined, height: number | undefined, bitrate: number | Quality | undefined, dependencies?: MediabunnyLoaderDependencies): Promise<VideoCodec>;
1433
+ export declare function detectBestWebmCodec(width: number | undefined, height: number | undefined, bitrate: number | Quality | undefined, dependencies?: MediabunnyLoaderDependencies): Promise<VideoCodec>;
1434
+ export declare function detectBestAudioCodec(bitrate?: number | Quality, dependencies?: MediabunnyLoaderDependencies): Promise<AudioCodec>;
1435
+ export {};
1038
1436
 
1039
- import type { RecordingStopResult } from "../../vidtreo-recorder";
1040
- import type { ConfigService } from "../config/config-service";
1041
- import type { VideoUploadService } from "../upload/upload-service";
1042
- export type NativeCameraHandlerConfig = {
1043
- apiKey?: string | null;
1044
- backendUrl?: string | null;
1045
- maxRecordingTime?: number | null;
1046
- maxFileSize?: number;
1047
- userMetadata?: Record<string, unknown>;
1437
+ import type { StreamProcessorResult } from "../stream/types";
1438
+ type WorkerProcessorDependencies = {
1439
+ createWorker?: (workerUrl: string) => Worker;
1440
+ canUseMainThreadVideoProcessor?: () => boolean;
1441
+ createVideoStreamFromTrack?: (videoTrack: MediaStreamVideoTrack) => ReadableStream<VideoFrame> | null;
1048
1442
  };
1049
- export declare class NativeCameraHandler {
1050
- private pendingFile;
1051
- private readonly configService;
1052
- private readonly uploadService;
1053
- private readonly config;
1054
- constructor(config: NativeCameraHandlerConfig, configService: ConfigService | null, uploadService: VideoUploadService);
1055
- handleFileSelection(file: File): Promise<NativeCameraFile>;
1056
- processAndUpload(onTranscodeProgress: (progress: number) => void, onUploadProgress: (progress: number) => void): Promise<RecordingStopResult>;
1057
- cancel(): void;
1058
- preloadConfig(): Promise<void>;
1443
+ type OverlayConfig = {
1444
+ enabled: boolean;
1445
+ text: string;
1446
+ recordingStartTime?: number;
1447
+ };
1448
+ export declare class WorkerProcessor {
1449
+ private worker;
1450
+ private hasWorkerUrlLease;
1451
+ private chunks;
1452
+ private totalSize;
1453
+ private isActive;
1454
+ private onBufferUpdate?;
1455
+ private onError?;
1456
+ private onMuteStateChange?;
1457
+ private audioTrackClone;
1458
+ private readonly audioWorkletManager;
1459
+ private isMuted;
1460
+ private currentVideoTrack;
1461
+ private isPaused;
1462
+ private overlayConfig;
1463
+ private readyPromiseResolve;
1464
+ private readonly workerProbeManager;
1465
+ private readonly canUseMainThreadVideoProcessorFn;
1466
+ private readonly createVideoStreamFromTrackFn;
1467
+ constructor(dependencies?: WorkerProcessorDependencies);
1468
+ private getWorkerProbeResult;
1469
+ startProcessing(stream: MediaStream, config: TranscodeConfig, overlayConfig?: OverlayConfig): Promise<void>;
1470
+ private getWorkerOrThrow;
1471
+ private ensureProcessingInactive;
1472
+ private resetProcessingState;
1473
+ private resolveRecordingFormat;
1474
+ private resolveAudioCodec;
1475
+ private resolveVideoCodec;
1476
+ private buildWorkerTranscodeConfig;
1477
+ private prepareAudioPipeline;
1478
+ private buildOverlayConfigToSend;
1479
+ private postStartMessage;
1480
+ pause(): void;
1481
+ resume(): void;
1482
+ private isWorkerActive;
1483
+ toggleMute(): void;
1484
+ switchVideoSource(newStream: MediaStream): Promise<void>;
1485
+ finalize(): Promise<StreamProcessorResult>;
1486
+ private resetFinalizeRuntimeState;
1487
+ private createBlobFromChunks;
1488
+ private rejectFinalizeBlobCreationError;
1489
+ cancel(): Promise<void>;
1490
+ getBufferSize(): number;
1491
+ getMutedState(): boolean;
1492
+ updateTabVisibility(isHidden: boolean, timestamp: number): void;
1493
+ updateSourceType(isScreenCapture: boolean): void;
1494
+ private startAudioWorkletProcessing;
1495
+ private stopAudioWorklet;
1496
+ private setAudioWorkletMuted;
1497
+ private setAudioWorkletPaused;
1498
+ private prepareAudioConfig;
1499
+ private createAudioStreamFromTrack;
1500
+ private createBrowserUnsupportedError;
1501
+ private getVideoInputSelectorDependencies;
1502
+ isPausedState(): boolean;
1503
+ getClonedAudioTrack(): MediaStreamTrack | null;
1504
+ getAudioStreamForAnalysis(): MediaStream | null;
1505
+ setOnBufferUpdate(callback: (size: number, formatted: string) => void): void;
1506
+ setOnError(callback: (error: Error) => void): void;
1507
+ setOnMuteStateChange(callback: (muted: boolean) => void): void;
1508
+ private cloneVideoTrack;
1509
+ private cloneAudioTrack;
1510
+ private stopCurrentVideoTrack;
1511
+ cleanup(): void;
1512
+ private releaseWorkerUrlLease;
1059
1513
  }
1514
+ export {};
1060
1515
 
1061
- export declare function extractLastFrame(file: File, timeoutMs?: number): Promise<Blob>;
1062
-
1063
- import type { CameraStreamManager } from "../stream/stream";
1064
- export declare class DeviceManager {
1065
- private readonly streamManager;
1066
- private readonly callbacks?;
1067
- private availableDevices;
1068
- private selectedCameraDeviceId;
1069
- private selectedMicDeviceId;
1070
- constructor(streamManager: CameraStreamManager, callbacks?: DeviceCallbacks);
1071
- getAvailableDevices(): Promise<AvailableDevices>;
1072
- setCameraDevice(deviceId: string | null): void;
1073
- setMicDevice(deviceId: string | null): void;
1074
- getSelectedCameraDeviceId(): string | null;
1075
- getSelectedMicDeviceId(): string | null;
1076
- getAvailableDevicesList(): AvailableDevices;
1077
- }
1516
+ export {};
1078
1517
 
1079
- export type AvailableDevices = {
1080
- audioinput: MediaDeviceInfo[];
1081
- videoinput: MediaDeviceInfo[];
1518
+ export type WorkerMessage = WorkerStartMessage | WorkerProbeMessage | WorkerPauseMessage | WorkerResumeMessage | WorkerStopMessage | WorkerToggleMuteMessage | WorkerAudioChunkMessage | WorkerSwitchSourceMessage | WorkerUpdateFpsMessage | WorkerUpdateVisibilityMessage | WorkerUpdateSourceTypeMessage;
1519
+ export type WorkerResponse = WorkerReadyResponse | WorkerErrorResponse | WorkerChunkResponse | WorkerBufferUpdateResponse | WorkerStateChangeResponse | WorkerProbeResultResponse | WorkerDebugLogResponse;
1520
+ export type WorkerStartMessage = {
1521
+ type: "start";
1522
+ videoTrack: MediaStreamVideoTrack | null;
1523
+ videoStream?: ReadableStream<VideoFrame> | null;
1524
+ audioStream?: ReadableStream<AudioData> | null;
1525
+ isMobileDevice?: boolean;
1526
+ videoSettings?: WorkerVideoSettings;
1527
+ viewportMetadata?: WorkerViewportMetadata;
1528
+ audioConfig: WorkerAudioConfig | null;
1529
+ config: WorkerTranscodeConfig;
1530
+ overlayConfig?: {
1531
+ enabled: boolean;
1532
+ text: string;
1533
+ recordingStartTime?: number;
1534
+ };
1082
1535
  };
1083
- export type DeviceCallbacks = {
1084
- onDevicesChanged: (devices: AvailableDevices) => void;
1085
- onDeviceSelected: (type: "camera" | "mic", deviceId: string | null) => void;
1536
+ export declare const WORKER_MESSAGE_TYPE_PROBE: "probe";
1537
+ export declare const WORKER_MESSAGE_TYPE_AUDIO_CHUNK: "audioChunk";
1538
+ export type WorkerProbeMessage = {
1539
+ type: typeof WORKER_MESSAGE_TYPE_PROBE;
1540
+ };
1541
+ export type WorkerPauseMessage = {
1542
+ type: "pause";
1543
+ };
1544
+ export type WorkerResumeMessage = {
1545
+ type: "resume";
1546
+ };
1547
+ export type WorkerStopMessage = {
1548
+ type: "stop";
1549
+ };
1550
+ export type WorkerToggleMuteMessage = {
1551
+ type: "toggleMute";
1552
+ };
1553
+ export type WorkerAudioChunkMessage = {
1554
+ type: typeof WORKER_MESSAGE_TYPE_AUDIO_CHUNK;
1555
+ data: Float32Array;
1556
+ frames: number;
1557
+ numberOfChannels: number;
1558
+ sampleRate: number;
1559
+ timestamp: number;
1560
+ };
1561
+ export type WorkerSwitchSourceMessage = {
1562
+ type: "switchSource";
1563
+ videoTrack: MediaStreamVideoTrack | null;
1564
+ videoStream?: ReadableStream<VideoFrame> | null;
1565
+ };
1566
+ export type WorkerUpdateFpsMessage = {
1567
+ type: "updateFps";
1568
+ fps: number;
1569
+ };
1570
+ export type WorkerUpdateVisibilityMessage = {
1571
+ type: "updateVisibility";
1572
+ isHidden: boolean;
1573
+ timestamp: number;
1574
+ };
1575
+ export type WorkerUpdateSourceTypeMessage = {
1576
+ type: "updateSourceType";
1577
+ isScreenCapture: boolean;
1578
+ };
1579
+ export type WorkerReadyResponse = {
1580
+ type: "ready";
1581
+ };
1582
+ export type WorkerErrorResponse = {
1583
+ type: "error";
1584
+ error: string;
1585
+ };
1586
+ export type WorkerChunkResponse = {
1587
+ type: "chunk";
1588
+ data: Uint8Array;
1589
+ position: number;
1590
+ };
1591
+ export type WorkerBufferUpdateResponse = {
1592
+ type: "bufferUpdate";
1593
+ size: number;
1594
+ formatted: string;
1595
+ };
1596
+ export type WorkerStateChangeResponse = {
1597
+ type: "stateChange";
1598
+ state: "recording" | "paused" | "stopped";
1599
+ };
1600
+ export declare const WORKER_RESPONSE_TYPE_PROBE_RESULT: "probeResult";
1601
+ export declare const WORKER_RESPONSE_TYPE_DEBUG_LOG: "debugLog";
1602
+ export type WorkerProbeResultResponse = {
1603
+ type: typeof WORKER_RESPONSE_TYPE_PROBE_RESULT;
1604
+ hasMediaStreamTrackProcessor: boolean;
1605
+ hasVideoFrame: boolean;
1606
+ hasAudioData: boolean;
1607
+ hasOffscreenCanvas: boolean;
1608
+ hasCreateImageBitmap: boolean;
1609
+ hasReadableStream: boolean;
1610
+ };
1611
+ export type WorkerDebugLogResponse = {
1612
+ type: typeof WORKER_RESPONSE_TYPE_DEBUG_LOG;
1613
+ message: string;
1614
+ payload?: string;
1615
+ };
1616
+ import type { VideoCodec } from "mediabunny";
1617
+ import type { WatermarkConfig } from "../../transcode/transcode-types";
1618
+ export declare const WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR: "f32-planar";
1619
+ export type WorkerAudioSampleFormat = typeof WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR;
1620
+ export type WorkerAudioConfig = {
1621
+ sampleRate: number;
1622
+ numberOfChannels: number;
1623
+ format: WorkerAudioSampleFormat;
1624
+ };
1625
+ export type WorkerVideoFacingMode = "user" | "environment" | "left" | "right";
1626
+ export type WorkerVideoSettings = {
1627
+ width?: number;
1628
+ height?: number;
1629
+ facingMode?: WorkerVideoFacingMode;
1630
+ rotation?: number;
1631
+ };
1632
+ export type WorkerViewportMetadata = {
1633
+ orientationAngle?: number;
1634
+ windowOrientation?: number;
1635
+ };
1636
+ export type WorkerTranscodeConfig = {
1637
+ width?: number;
1638
+ height?: number;
1639
+ fps?: number;
1640
+ bitrate?: number | string;
1641
+ audioCodec: "aac" | "opus";
1642
+ audioBitrate?: number;
1643
+ codec: VideoCodec;
1644
+ keyFrameInterval: number;
1645
+ format: "mp4" | "mkv" | "mov" | "webm";
1646
+ watermark?: WatermarkConfig;
1086
1647
  };
1087
1648
 
1088
- export declare function createBrowserDependencies(): TelemetryClientDependencies;
1089
- export declare function createTelemetryClient(apiKey: string, backendUrl: string, options?: {
1090
- endpoint?: string;
1091
- sessionId?: string;
1092
- userId?: string;
1093
- environmentId?: string;
1094
- appVersion?: string;
1095
- release?: string;
1096
- pageUrl?: string;
1097
- referrerUrl?: string;
1098
- sdkLocation?: string;
1099
- clientLocation?: string;
1100
- }): TelemetryClient;
1649
+ export declare function acquireRecorderWorkerUrl(): string;
1650
+ export declare function releaseRecorderWorkerUrl(): void;
1101
1651
 
1102
- export declare const SDK_VERSION: string;
1652
+ export type FrameCompositorDependencies = {
1653
+ logger: {
1654
+ debug: (message: string, data?: Record<string, unknown>) => void;
1655
+ warn: (message: string, data?: Record<string, unknown>) => void;
1656
+ error: (message: string, data?: Record<string, unknown>) => void;
1657
+ };
1658
+ fetchResource: (input: RequestInfo, init?: RequestInit) => Promise<Response>;
1659
+ createImageBitmap: (image: ImageBitmapSource) => Promise<ImageBitmap>;
1660
+ sendDebugLog: (message: string, payload?: string) => void;
1661
+ };
1662
+ export type CompositionResult = {
1663
+ frameToProcess: VideoFrame;
1664
+ imageBitmap: ImageBitmap | null;
1665
+ };
1666
+ export declare class FrameCompositor {
1667
+ private overlayCanvas;
1668
+ private compositionCanvas;
1669
+ private compositionContext;
1670
+ private watermarkCanvas;
1671
+ private frameRotationDegrees;
1672
+ private videoSettings;
1673
+ private viewportMetadata;
1674
+ private isMobileDevice;
1675
+ private readonly logger;
1676
+ private readonly fetchResource;
1677
+ private readonly createImageBitmap;
1678
+ private readonly sendDebugLog;
1679
+ constructor(dependencies: FrameCompositorDependencies);
1680
+ reset(): void;
1681
+ setVideoSettings(settings: WorkerVideoSettings | null): void;
1682
+ setViewportMetadata(metadata: WorkerViewportMetadata | null): void;
1683
+ setIsMobileDevice(isMobileDevice: boolean): void;
1684
+ prepareWatermark(config: WorkerTranscodeConfig): Promise<void>;
1685
+ composeFrame(parameters: {
1686
+ videoFrame: VideoFrame;
1687
+ overlayConfig: {
1688
+ enabled: boolean;
1689
+ text: string;
1690
+ } | null;
1691
+ shouldApplyOverlay: boolean;
1692
+ config: WorkerTranscodeConfig;
1693
+ }): CompositionResult;
1694
+ private getCompositionPlan;
1695
+ private getValidFrameDimensions;
1696
+ private applyOverlayIfNeeded;
1697
+ private applyWatermarkIfNeeded;
1698
+ private buildCompositionResult;
1699
+ private createOverlayCanvas;
1700
+ private getOverlayPosition;
1701
+ private ensureCompositionCanvas;
1702
+ private getFrameRotationDegrees;
1703
+ private determineFrameRotationDegrees;
1704
+ private getFrameDimensions;
1705
+ private drawVideoFrame;
1706
+ private logWatermarkError;
1707
+ }
1103
1708
 
1104
- export declare class TelemetryClient {
1105
- private readonly config;
1709
+ export {};
1710
+
1711
+ export type WorkerProbeManagerDependencies = {
1712
+ setTimeout: (callback: () => void, timeout: number) => number;
1713
+ clearTimeout: (id: number) => void;
1714
+ timeoutMilliseconds: number;
1715
+ };
1716
+ export declare class WorkerProbeManager {
1717
+ private workerProbePromise;
1718
+ private workerProbeResolve;
1719
+ private workerProbeTimeoutId;
1720
+ private workerProbeResult;
1106
1721
  private readonly dependencies;
1107
- private readonly installationId;
1108
- private pendingEvents;
1109
- private flushTimeoutId;
1110
- private throttledEventTimestamps;
1111
- constructor(config: TelemetryClientConfig, dependencies: TelemetryClientDependencies);
1112
- triggerTelemetryEvent(event: TelemetryEventInput): void;
1113
- private enqueueEvent;
1114
- private scheduleFlush;
1115
- private flushQueue;
1116
- private clearFlushTimer;
1117
- private buildRequestPayload;
1118
- private shouldSkipEvent;
1119
- private markEventTracking;
1120
- private updateNumberMap;
1121
- private getOneTimeCacheKey;
1122
- private isOneTimeEvent;
1123
- private isThrottledEvent;
1124
- private buildBaseProperties;
1125
- private mergeProperties;
1126
- private buildFingerprint;
1127
- private buildContext;
1128
- private buildError;
1129
- private getBrowserName;
1130
- private getDeviceMemory;
1131
- private sendPayload;
1132
- private buildTelemetryEndpoint;
1722
+ constructor(dependencies: WorkerProbeManagerDependencies);
1723
+ handleProbeResult(response: WorkerProbeResultResponse): void;
1724
+ getProbeResult(worker: Worker): Promise<WorkerProbeResultResponse>;
1725
+ private finalizeProbeResult;
1726
+ private getEmptyProbeResult;
1133
1727
  }
1134
1728
 
1135
- export type TelemetryEventCategory = "lifecycle" | "interaction" | "performance" | "error";
1136
- export type TelemetryEventName = "sdk.init.started" | "sdk.init.succeeded" | "sdk.init.failed" | "preview.start.succeeded" | "preview.start.failed" | "recording.start.requested" | "recording.start.succeeded" | "recording.start.failed" | "recording.stop.requested" | "recording.stop.succeeded" | "recording.stop.failed" | "upload.started" | "upload.succeeded" | "upload.failed" | "source.switch.requested" | "source.switch.succeeded" | "source.switch.failed" | "stream.error";
1137
- export type TelemetryEventInput = {
1138
- name: TelemetryEventName;
1139
- properties?: Record<string, unknown>;
1140
- error?: unknown;
1141
- };
1142
- export type TelemetryErrorDto = {
1143
- message: string;
1144
- code?: string;
1145
- stack?: string;
1729
+ export type BufferTrackerDependencies = {
1730
+ getBufferSize: () => number;
1731
+ onBufferUpdate: (size: number, formatted: string) => void;
1732
+ setInterval: (handler: () => void, timeout: number) => number;
1733
+ clearInterval: (intervalId: number) => void;
1146
1734
  };
1147
- export type TelemetryFingerprintDto = {
1148
- userAgent?: string;
1149
- language?: string;
1150
- platform?: string;
1151
- hardwareConcurrency?: number;
1152
- deviceMemory?: number;
1735
+ export declare class BufferTracker {
1736
+ private intervalId;
1737
+ private readonly dependencies;
1738
+ constructor(dependencies: BufferTrackerDependencies);
1739
+ start(): void;
1740
+ stop(): void;
1741
+ }
1742
+
1743
+ export type StartMessageParameters = {
1744
+ videoTrack: MediaStreamVideoTrack | null;
1745
+ videoStream: ReadableStream<VideoFrame> | null;
1746
+ audioStream: ReadableStream<AudioData> | null;
1747
+ isMobileDevice: boolean;
1748
+ videoSettings: WorkerVideoSettings | undefined;
1749
+ viewportMetadata: WorkerViewportMetadata | undefined;
1750
+ audioConfig: WorkerAudioConfig | null;
1751
+ workerConfig: WorkerTranscodeConfig;
1752
+ overlayConfig: {
1753
+ enabled: boolean;
1754
+ text: string;
1755
+ recordingStartTime?: number;
1756
+ } | undefined;
1153
1757
  };
1154
- export type TelemetryContextDto = {
1155
- sessionId?: string;
1156
- userId?: string;
1157
- environmentId?: string;
1158
- appVersion?: string;
1159
- release?: string;
1160
- pageUrl?: string;
1161
- referrerUrl?: string;
1162
- sdkLocation?: string;
1163
- clientLocation?: string;
1758
+ export declare function createStartMessage(parameters: StartMessageParameters): WorkerMessage;
1759
+ export declare function createSwitchSourceMessage(videoTrack: MediaStreamVideoTrack | null, videoStream: ReadableStream<VideoFrame> | null): WorkerMessage;
1760
+ export declare function collectTransferables(videoStream: ReadableStream<VideoFrame> | null, audioStream: ReadableStream<AudioData> | null, videoTrack?: MediaStreamVideoTrack | null): Transferable[];
1761
+
1762
+ export declare const workerCode = "// ../../node_modules/mediabunny/dist/modules/src/misc.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nfunction assert(x) {\n if (!x) {\n throw new Error(\"Assertion failed.\");\n }\n}\nvar last = (arr) => {\n return arr && arr[arr.length - 1];\n};\nvar isU32 = (value) => {\n return value >= 0 && value < 2 ** 32;\n};\n\nclass Bitstream {\n constructor(bytes) {\n this.bytes = bytes;\n this.pos = 0;\n }\n seekToByte(byteOffset) {\n this.pos = 8 * byteOffset;\n }\n readBit() {\n const byteIndex = Math.floor(this.pos / 8);\n const byte = this.bytes[byteIndex] ?? 0;\n const bitIndex = 7 - (this.pos & 7);\n const bit = (byte & 1 << bitIndex) >> bitIndex;\n this.pos++;\n return bit;\n }\n readBits(n) {\n if (n === 1) {\n return this.readBit();\n }\n let result = 0;\n for (let i = 0;i < n; i++) {\n result <<= 1;\n result |= this.readBit();\n }\n return result;\n }\n writeBits(n, value) {\n const end = this.pos + n;\n for (let i = this.pos;i < end; i++) {\n const byteIndex = Math.floor(i / 8);\n let byte = this.bytes[byteIndex];\n const bitIndex = 7 - (i & 7);\n byte &= ~(1 << bitIndex);\n byte |= (value & 1 << end - i - 1) >> end - i - 1 << bitIndex;\n this.bytes[byteIndex] = byte;\n }\n this.pos = end;\n }\n readAlignedByte() {\n if (this.pos % 8 !== 0) {\n throw new Error(\"Bitstream is not byte-aligned.\");\n }\n const byteIndex = this.pos / 8;\n const byte = this.bytes[byteIndex] ?? 
0;\n this.pos += 8;\n return byte;\n }\n skipBits(n) {\n this.pos += n;\n }\n getBitsLeft() {\n return this.bytes.length * 8 - this.pos;\n }\n clone() {\n const clone = new Bitstream(this.bytes);\n clone.pos = this.pos;\n return clone;\n }\n}\nvar readExpGolomb = (bitstream) => {\n let leadingZeroBits = 0;\n while (bitstream.readBits(1) === 0 && leadingZeroBits < 32) {\n leadingZeroBits++;\n }\n if (leadingZeroBits >= 32) {\n throw new Error(\"Invalid exponential-Golomb code.\");\n }\n const result = (1 << leadingZeroBits) - 1 + bitstream.readBits(leadingZeroBits);\n return result;\n};\nvar readSignedExpGolomb = (bitstream) => {\n const codeNum = readExpGolomb(bitstream);\n return (codeNum & 1) === 0 ? -(codeNum >> 1) : codeNum + 1 >> 1;\n};\nvar toUint8Array = (source) => {\n if (source.constructor === Uint8Array) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new Uint8Array(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new Uint8Array(source);\n }\n};\nvar toDataView = (source) => {\n if (source.constructor === DataView) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new DataView(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new DataView(source);\n }\n};\nvar textEncoder = /* @__PURE__ */ new TextEncoder;\nvar COLOR_PRIMARIES_MAP = {\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n bt2020: 9,\n smpte432: 12\n};\nvar TRANSFER_CHARACTERISTICS_MAP = {\n bt709: 1,\n smpte170m: 6,\n linear: 8,\n \"iec61966-2-1\": 13,\n pq: 16,\n hlg: 18\n};\nvar MATRIX_COEFFICIENTS_MAP = {\n rgb: 0,\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n \"bt2020-ncl\": 9\n};\nvar colorSpaceIsComplete = (colorSpace) => {\n return !!colorSpace && !!colorSpace.primaries && !!colorSpace.transfer && !!colorSpace.matrix && colorSpace.fullRange !== undefined;\n};\nvar isAllowSharedBufferSource = (x) => {\n return x instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && x instanceof 
SharedArrayBuffer || ArrayBuffer.isView(x);\n};\n\nclass AsyncMutex {\n constructor() {\n this.currentPromise = Promise.resolve();\n this.pending = 0;\n }\n async acquire() {\n let resolver;\n const nextPromise = new Promise((resolve) => {\n let resolved = false;\n resolver = () => {\n if (resolved) {\n return;\n }\n resolve();\n this.pending--;\n resolved = true;\n };\n });\n const currentPromiseAlias = this.currentPromise;\n this.currentPromise = nextPromise;\n this.pending++;\n await currentPromiseAlias;\n return resolver;\n }\n}\nvar promiseWithResolvers = () => {\n let resolve;\n let reject;\n const promise = new Promise((res, rej) => {\n resolve = res;\n reject = rej;\n });\n return { promise, resolve, reject };\n};\nvar assertNever = (x) => {\n throw new Error(`Unexpected value: ${x}`);\n};\nvar setUint24 = (view, byteOffset, value, littleEndian) => {\n value = value >>> 0;\n value = value & 16777215;\n if (littleEndian) {\n view.setUint8(byteOffset, value & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value >>> 16 & 255);\n } else {\n view.setUint8(byteOffset, value >>> 16 & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value & 255);\n }\n};\nvar setInt24 = (view, byteOffset, value, littleEndian) => {\n value = clamp(value, -8388608, 8388607);\n if (value < 0) {\n value = value + 16777216 & 16777215;\n }\n setUint24(view, byteOffset, value, littleEndian);\n};\nvar clamp = (value, min, max) => {\n return Math.max(min, Math.min(max, value));\n};\nvar UNDETERMINED_LANGUAGE = \"und\";\nvar ISO_639_2_REGEX = /^[a-z]{3}$/;\nvar isIso639Dash2LanguageCode = (x) => {\n return ISO_639_2_REGEX.test(x);\n};\nvar SECOND_TO_MICROSECOND_FACTOR = 1e6 * (1 + Number.EPSILON);\nvar computeRationalApproximation = (x, maxDenominator) => {\n const sign = x < 0 ? 
-1 : 1;\n x = Math.abs(x);\n let prevNumerator = 0, prevDenominator = 1;\n let currNumerator = 1, currDenominator = 0;\n let remainder = x;\n while (true) {\n const integer = Math.floor(remainder);\n const nextNumerator = integer * currNumerator + prevNumerator;\n const nextDenominator = integer * currDenominator + prevDenominator;\n if (nextDenominator > maxDenominator) {\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n }\n prevNumerator = currNumerator;\n prevDenominator = currDenominator;\n currNumerator = nextNumerator;\n currDenominator = nextDenominator;\n remainder = 1 / (remainder - integer);\n if (!isFinite(remainder)) {\n break;\n }\n }\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n};\n\nclass CallSerializer {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n call(fn) {\n return this.currentPromise = this.currentPromise.then(fn);\n }\n}\nvar isWebKitCache = null;\nvar isWebKit = () => {\n if (isWebKitCache !== null) {\n return isWebKitCache;\n }\n return isWebKitCache = !!(typeof navigator !== \"undefined\" && (navigator.vendor?.match(/apple/i) || /AppleWebKit/.test(navigator.userAgent) && !/Chrome/.test(navigator.userAgent) || /\\b(iPad|iPhone|iPod)\\b/.test(navigator.userAgent)));\n};\nvar isFirefoxCache = null;\nvar isFirefox = () => {\n if (isFirefoxCache !== null) {\n return isFirefoxCache;\n }\n return isFirefoxCache = typeof navigator !== \"undefined\" && navigator.userAgent?.includes(\"Firefox\");\n};\nvar keyValueIterator = function* (object) {\n for (const key in object) {\n const value = object[key];\n if (value === undefined) {\n continue;\n }\n yield { key, value };\n }\n};\nvar polyfillSymbolDispose = () => {\n Symbol.dispose ??= Symbol(\"Symbol.dispose\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/metadata.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla 
Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass RichImageData {\n constructor(data, mimeType) {\n this.data = data;\n this.mimeType = mimeType;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType must be a string.\");\n }\n }\n}\n\nclass AttachedFile {\n constructor(data, mimeType, name, description) {\n this.data = data;\n this.mimeType = mimeType;\n this.name = name;\n this.description = description;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (mimeType !== undefined && typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType, when provided, must be a string.\");\n }\n if (name !== undefined && typeof name !== \"string\") {\n throw new TypeError(\"name, when provided, must be a string.\");\n }\n if (description !== undefined && typeof description !== \"string\") {\n throw new TypeError(\"description, when provided, must be a string.\");\n }\n }\n}\nvar validateMetadataTags = (tags) => {\n if (!tags || typeof tags !== \"object\") {\n throw new TypeError(\"tags must be an object.\");\n }\n if (tags.title !== undefined && typeof tags.title !== \"string\") {\n throw new TypeError(\"tags.title, when provided, must be a string.\");\n }\n if (tags.description !== undefined && typeof tags.description !== \"string\") {\n throw new TypeError(\"tags.description, when provided, must be a string.\");\n }\n if (tags.artist !== undefined && typeof tags.artist !== \"string\") {\n throw new TypeError(\"tags.artist, when provided, must be a string.\");\n }\n if (tags.album !== undefined && typeof tags.album !== \"string\") {\n throw new TypeError(\"tags.album, when provided, must be a string.\");\n }\n if (tags.albumArtist !== undefined && typeof tags.albumArtist !== \"string\") {\n throw 
new TypeError(\"tags.albumArtist, when provided, must be a string.\");\n }\n if (tags.trackNumber !== undefined && (!Number.isInteger(tags.trackNumber) || tags.trackNumber <= 0)) {\n throw new TypeError(\"tags.trackNumber, when provided, must be a positive integer.\");\n }\n if (tags.tracksTotal !== undefined && (!Number.isInteger(tags.tracksTotal) || tags.tracksTotal <= 0)) {\n throw new TypeError(\"tags.tracksTotal, when provided, must be a positive integer.\");\n }\n if (tags.discNumber !== undefined && (!Number.isInteger(tags.discNumber) || tags.discNumber <= 0)) {\n throw new TypeError(\"tags.discNumber, when provided, must be a positive integer.\");\n }\n if (tags.discsTotal !== undefined && (!Number.isInteger(tags.discsTotal) || tags.discsTotal <= 0)) {\n throw new TypeError(\"tags.discsTotal, when provided, must be a positive integer.\");\n }\n if (tags.genre !== undefined && typeof tags.genre !== \"string\") {\n throw new TypeError(\"tags.genre, when provided, must be a string.\");\n }\n if (tags.date !== undefined && (!(tags.date instanceof Date) || Number.isNaN(tags.date.getTime()))) {\n throw new TypeError(\"tags.date, when provided, must be a valid Date.\");\n }\n if (tags.lyrics !== undefined && typeof tags.lyrics !== \"string\") {\n throw new TypeError(\"tags.lyrics, when provided, must be a string.\");\n }\n if (tags.images !== undefined) {\n if (!Array.isArray(tags.images)) {\n throw new TypeError(\"tags.images, when provided, must be an array.\");\n }\n for (const image of tags.images) {\n if (!image || typeof image !== \"object\") {\n throw new TypeError(\"Each image in tags.images must be an object.\");\n }\n if (!(image.data instanceof Uint8Array)) {\n throw new TypeError(\"Each image.data must be a Uint8Array.\");\n }\n if (typeof image.mimeType !== \"string\") {\n throw new TypeError(\"Each image.mimeType must be a string.\");\n }\n if (![\"coverFront\", \"coverBack\", \"unknown\"].includes(image.kind)) {\n throw new TypeError(\"Each 
image.kind must be 'coverFront', 'coverBack', or 'unknown'.\");\n }\n }\n }\n if (tags.comment !== undefined && typeof tags.comment !== \"string\") {\n throw new TypeError(\"tags.comment, when provided, must be a string.\");\n }\n if (tags.raw !== undefined) {\n if (!tags.raw || typeof tags.raw !== \"object\") {\n throw new TypeError(\"tags.raw, when provided, must be an object.\");\n }\n for (const value of Object.values(tags.raw)) {\n if (value !== null && typeof value !== \"string\" && !(value instanceof Uint8Array) && !(value instanceof RichImageData) && !(value instanceof AttachedFile)) {\n throw new TypeError(\"Each value in tags.raw must be a string, Uint8Array, RichImageData, AttachedFile, or null.\");\n }\n }\n }\n};\nvar validateTrackDisposition = (disposition) => {\n if (!disposition || typeof disposition !== \"object\") {\n throw new TypeError(\"disposition must be an object.\");\n }\n if (disposition.default !== undefined && typeof disposition.default !== \"boolean\") {\n throw new TypeError(\"disposition.default must be a boolean.\");\n }\n if (disposition.forced !== undefined && typeof disposition.forced !== \"boolean\") {\n throw new TypeError(\"disposition.forced must be a boolean.\");\n }\n if (disposition.original !== undefined && typeof disposition.original !== \"boolean\") {\n throw new TypeError(\"disposition.original must be a boolean.\");\n }\n if (disposition.commentary !== undefined && typeof disposition.commentary !== \"boolean\") {\n throw new TypeError(\"disposition.commentary must be a boolean.\");\n }\n if (disposition.hearingImpaired !== undefined && typeof disposition.hearingImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.hearingImpaired must be a boolean.\");\n }\n if (disposition.visuallyImpaired !== undefined && typeof disposition.visuallyImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.visuallyImpaired must be a boolean.\");\n }\n};\n\n// 
../../node_modules/mediabunny/dist/modules/src/codec.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar VIDEO_CODECS = [\n \"avc\",\n \"hevc\",\n \"vp9\",\n \"av1\",\n \"vp8\"\n];\nvar PCM_AUDIO_CODECS = [\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n \"pcm-u8\",\n \"pcm-s8\",\n \"ulaw\",\n \"alaw\"\n];\nvar NON_PCM_AUDIO_CODECS = [\n \"aac\",\n \"opus\",\n \"mp3\",\n \"vorbis\",\n \"flac\"\n];\nvar AUDIO_CODECS = [\n ...NON_PCM_AUDIO_CODECS,\n ...PCM_AUDIO_CODECS\n];\nvar SUBTITLE_CODECS = [\n \"webvtt\"\n];\nvar AVC_LEVEL_TABLE = [\n { maxMacroblocks: 99, maxBitrate: 64000, maxDpbMbs: 396, level: 10 },\n { maxMacroblocks: 396, maxBitrate: 192000, maxDpbMbs: 900, level: 11 },\n { maxMacroblocks: 396, maxBitrate: 384000, maxDpbMbs: 2376, level: 12 },\n { maxMacroblocks: 396, maxBitrate: 768000, maxDpbMbs: 2376, level: 13 },\n { maxMacroblocks: 396, maxBitrate: 2000000, maxDpbMbs: 2376, level: 20 },\n { maxMacroblocks: 792, maxBitrate: 4000000, maxDpbMbs: 4752, level: 21 },\n { maxMacroblocks: 1620, maxBitrate: 4000000, maxDpbMbs: 8100, level: 22 },\n { maxMacroblocks: 1620, maxBitrate: 1e7, maxDpbMbs: 8100, level: 30 },\n { maxMacroblocks: 3600, maxBitrate: 14000000, maxDpbMbs: 18000, level: 31 },\n { maxMacroblocks: 5120, maxBitrate: 20000000, maxDpbMbs: 20480, level: 32 },\n { maxMacroblocks: 8192, maxBitrate: 20000000, maxDpbMbs: 32768, level: 40 },\n { maxMacroblocks: 8192, maxBitrate: 50000000, maxDpbMbs: 32768, level: 41 },\n { maxMacroblocks: 8704, maxBitrate: 50000000, maxDpbMbs: 34816, level: 42 },\n { maxMacroblocks: 22080, maxBitrate: 135000000, maxDpbMbs: 110400, level: 50 },\n { maxMacroblocks: 36864, 
maxBitrate: 240000000, maxDpbMbs: 184320, level: 51 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, maxDpbMbs: 184320, level: 52 },\n { maxMacroblocks: 139264, maxBitrate: 240000000, maxDpbMbs: 696320, level: 60 },\n { maxMacroblocks: 139264, maxBitrate: 480000000, maxDpbMbs: 696320, level: 61 },\n { maxMacroblocks: 139264, maxBitrate: 800000000, maxDpbMbs: 696320, level: 62 }\n];\nvar HEVC_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 128000, tier: \"L\", level: 30 },\n { maxPictureSize: 122880, maxBitrate: 1500000, tier: \"L\", level: 60 },\n { maxPictureSize: 245760, maxBitrate: 3000000, tier: \"L\", level: 63 },\n { maxPictureSize: 552960, maxBitrate: 6000000, tier: \"L\", level: 90 },\n { maxPictureSize: 983040, maxBitrate: 1e7, tier: \"L\", level: 93 },\n { maxPictureSize: 2228224, maxBitrate: 12000000, tier: \"L\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, tier: \"H\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 20000000, tier: \"L\", level: 123 },\n { maxPictureSize: 2228224, maxBitrate: 50000000, tier: \"H\", level: 123 },\n { maxPictureSize: 8912896, maxBitrate: 25000000, tier: \"L\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"L\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"L\", level: 156 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 156 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"L\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 120000000, tier: \"L\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"L\", level: 186 },\n { maxPictureSize: 35651584, 
maxBitrate: 800000000, tier: \"H\", level: 186 }\n];\nvar VP9_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 200000, level: 10 },\n { maxPictureSize: 73728, maxBitrate: 800000, level: 11 },\n { maxPictureSize: 122880, maxBitrate: 1800000, level: 20 },\n { maxPictureSize: 245760, maxBitrate: 3600000, level: 21 },\n { maxPictureSize: 552960, maxBitrate: 7200000, level: 30 },\n { maxPictureSize: 983040, maxBitrate: 12000000, level: 31 },\n { maxPictureSize: 2228224, maxBitrate: 18000000, level: 40 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, level: 41 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, level: 50 },\n { maxPictureSize: 8912896, maxBitrate: 120000000, level: 51 },\n { maxPictureSize: 8912896, maxBitrate: 180000000, level: 52 },\n { maxPictureSize: 35651584, maxBitrate: 180000000, level: 60 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, level: 61 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, level: 62 }\n];\nvar AV1_LEVEL_TABLE = [\n { maxPictureSize: 147456, maxBitrate: 1500000, tier: \"M\", level: 0 },\n { maxPictureSize: 278784, maxBitrate: 3000000, tier: \"M\", level: 1 },\n { maxPictureSize: 665856, maxBitrate: 6000000, tier: \"M\", level: 4 },\n { maxPictureSize: 1065024, maxBitrate: 1e7, tier: \"M\", level: 5 },\n { maxPictureSize: 2359296, maxBitrate: 12000000, tier: \"M\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 30000000, tier: \"H\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 20000000, tier: \"M\", level: 9 },\n { maxPictureSize: 2359296, maxBitrate: 50000000, tier: \"H\", level: 9 },\n { maxPictureSize: 8912896, maxBitrate: 30000000, tier: \"M\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"M\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"M\", level: 14 },\n { maxPictureSize: 
8912896, maxBitrate: 240000000, tier: \"H\", level: 14 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 1e8, tier: \"M\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 19 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 19 }\n];\nvar buildVideoCodecString = (codec, width, height, bitrate) => {\n if (codec === \"avc\") {\n const profileIndication = 100;\n const totalMacroblocks = Math.ceil(width / 16) * Math.ceil(height / 16);\n const levelInfo = AVC_LEVEL_TABLE.find((level) => totalMacroblocks <= level.maxMacroblocks && bitrate <= level.maxBitrate) ?? last(AVC_LEVEL_TABLE);\n const levelIndication = levelInfo ? levelInfo.level : 0;\n const hexProfileIndication = profileIndication.toString(16).padStart(2, \"0\");\n const hexProfileCompatibility = \"00\";\n const hexLevelIndication = levelIndication.toString(16).padStart(2, \"0\");\n return `avc1.${hexProfileIndication}${hexProfileCompatibility}${hexLevelIndication}`;\n } else if (codec === \"hevc\") {\n const profilePrefix = \"\";\n const profileIdc = 1;\n const compatibilityFlags = \"6\";\n const pictureSize = width * height;\n const levelInfo = HEVC_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? 
last(HEVC_LEVEL_TABLE);\n const constraintFlags = \"B0\";\n return \"hev1.\" + `${profilePrefix}${profileIdc}.` + `${compatibilityFlags}.` + `${levelInfo.tier}${levelInfo.level}.` + `${constraintFlags}`;\n } else if (codec === \"vp8\") {\n return \"vp8\";\n } else if (codec === \"vp9\") {\n const profile = \"00\";\n const pictureSize = width * height;\n const levelInfo = VP9_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? last(VP9_LEVEL_TABLE);\n const bitDepth = \"08\";\n return `vp09.${profile}.${levelInfo.level.toString().padStart(2, \"0\")}.${bitDepth}`;\n } else if (codec === \"av1\") {\n const profile = 0;\n const pictureSize = width * height;\n const levelInfo = AV1_LEVEL_TABLE.find((level2) => pictureSize <= level2.maxPictureSize && bitrate <= level2.maxBitrate) ?? last(AV1_LEVEL_TABLE);\n const level = levelInfo.level.toString().padStart(2, \"0\");\n const bitDepth = \"08\";\n return `av01.${profile}.${level}${levelInfo.tier}.${bitDepth}`;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar generateAv1CodecConfigurationFromCodecString = (codecString) => {\n const parts = codecString.split(\".\");\n const marker = 1;\n const version = 1;\n const firstByte = (marker << 7) + version;\n const profile = Number(parts[1]);\n const levelAndTier = parts[2];\n const level = Number(levelAndTier.slice(0, -1));\n const secondByte = (profile << 5) + level;\n const tier = levelAndTier.slice(-1) === \"H\" ? 1 : 0;\n const bitDepth = Number(parts[3]);\n const highBitDepth = bitDepth === 8 ? 0 : 1;\n const twelveBit = 0;\n const monochrome = parts[4] ? Number(parts[4]) : 0;\n const chromaSubsamplingX = parts[5] ? Number(parts[5][0]) : 1;\n const chromaSubsamplingY = parts[5] ? Number(parts[5][1]) : 1;\n const chromaSamplePosition = parts[5] ? 
Number(parts[5][2]) : 0;\n const thirdByte = (tier << 7) + (highBitDepth << 6) + (twelveBit << 5) + (monochrome << 4) + (chromaSubsamplingX << 3) + (chromaSubsamplingY << 2) + chromaSamplePosition;\n const initialPresentationDelayPresent = 0;\n const fourthByte = initialPresentationDelayPresent;\n return [firstByte, secondByte, thirdByte, fourthByte];\n};\nvar buildAudioCodecString = (codec, numberOfChannels, sampleRate) => {\n if (codec === \"aac\") {\n if (numberOfChannels >= 2 && sampleRate <= 24000) {\n return \"mp4a.40.29\";\n }\n if (sampleRate <= 24000) {\n return \"mp4a.40.5\";\n }\n return \"mp4a.40.2\";\n } else if (codec === \"mp3\") {\n return \"mp3\";\n } else if (codec === \"opus\") {\n return \"opus\";\n } else if (codec === \"vorbis\") {\n return \"vorbis\";\n } else if (codec === \"flac\") {\n return \"flac\";\n } else if (PCM_AUDIO_CODECS.includes(codec)) {\n return codec;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar aacFrequencyTable = [\n 96000,\n 88200,\n 64000,\n 48000,\n 44100,\n 32000,\n 24000,\n 22050,\n 16000,\n 12000,\n 11025,\n 8000,\n 7350\n];\nvar aacChannelMap = [-1, 1, 2, 3, 4, 5, 6, 8];\nvar parseAacAudioSpecificConfig = (bytes) => {\n if (!bytes || bytes.byteLength < 2) {\n throw new TypeError(\"AAC description must be at least 2 bytes long.\");\n }\n const bitstream = new Bitstream(bytes);\n let objectType = bitstream.readBits(5);\n if (objectType === 31) {\n objectType = 32 + bitstream.readBits(6);\n }\n const frequencyIndex = bitstream.readBits(4);\n let sampleRate = null;\n if (frequencyIndex === 15) {\n sampleRate = bitstream.readBits(24);\n } else {\n if (frequencyIndex < aacFrequencyTable.length) {\n sampleRate = aacFrequencyTable[frequencyIndex];\n }\n }\n const channelConfiguration = bitstream.readBits(4);\n let numberOfChannels = null;\n if (channelConfiguration >= 1 && channelConfiguration <= 7) {\n numberOfChannels = aacChannelMap[channelConfiguration];\n }\n return {\n objectType,\n 
frequencyIndex,\n sampleRate,\n channelConfiguration,\n numberOfChannels\n };\n};\nvar buildAacAudioSpecificConfig = (config) => {\n let frequencyIndex = aacFrequencyTable.indexOf(config.sampleRate);\n let customSampleRate = null;\n if (frequencyIndex === -1) {\n frequencyIndex = 15;\n customSampleRate = config.sampleRate;\n }\n const channelConfiguration = aacChannelMap.indexOf(config.numberOfChannels);\n if (channelConfiguration === -1) {\n throw new TypeError(`Unsupported number of channels: ${config.numberOfChannels}`);\n }\n let bitCount = 5 + 4 + 4;\n if (config.objectType >= 32) {\n bitCount += 6;\n }\n if (frequencyIndex === 15) {\n bitCount += 24;\n }\n const byteCount = Math.ceil(bitCount / 8);\n const bytes = new Uint8Array(byteCount);\n const bitstream = new Bitstream(bytes);\n if (config.objectType < 32) {\n bitstream.writeBits(5, config.objectType);\n } else {\n bitstream.writeBits(5, 31);\n bitstream.writeBits(6, config.objectType - 32);\n }\n bitstream.writeBits(4, frequencyIndex);\n if (frequencyIndex === 15) {\n bitstream.writeBits(24, customSampleRate);\n }\n bitstream.writeBits(4, channelConfiguration);\n return bytes;\n};\nvar PCM_CODEC_REGEX = /^pcm-([usf])(\\d+)+(be)?$/;\nvar parsePcmCodec = (codec) => {\n assert(PCM_AUDIO_CODECS.includes(codec));\n if (codec === \"ulaw\") {\n return { dataType: \"ulaw\", sampleSize: 1, littleEndian: true, silentValue: 255 };\n } else if (codec === \"alaw\") {\n return { dataType: \"alaw\", sampleSize: 1, littleEndian: true, silentValue: 213 };\n }\n const match = PCM_CODEC_REGEX.exec(codec);\n assert(match);\n let dataType;\n if (match[1] === \"u\") {\n dataType = \"unsigned\";\n } else if (match[1] === \"s\") {\n dataType = \"signed\";\n } else {\n dataType = \"float\";\n }\n const sampleSize = Number(match[2]) / 8;\n const littleEndian = match[3] !== \"be\";\n const silentValue = codec === \"pcm-u8\" ? 
2 ** 7 : 0;\n return { dataType, sampleSize, littleEndian, silentValue };\n};\nvar inferCodecFromCodecString = (codecString) => {\n if (codecString.startsWith(\"avc1\") || codecString.startsWith(\"avc3\")) {\n return \"avc\";\n } else if (codecString.startsWith(\"hev1\") || codecString.startsWith(\"hvc1\")) {\n return \"hevc\";\n } else if (codecString === \"vp8\") {\n return \"vp8\";\n } else if (codecString.startsWith(\"vp09\")) {\n return \"vp9\";\n } else if (codecString.startsWith(\"av01\")) {\n return \"av1\";\n }\n if (codecString.startsWith(\"mp4a.40\") || codecString === \"mp4a.67\") {\n return \"aac\";\n } else if (codecString === \"mp3\" || codecString === \"mp4a.69\" || codecString === \"mp4a.6B\" || codecString === \"mp4a.6b\") {\n return \"mp3\";\n } else if (codecString === \"opus\") {\n return \"opus\";\n } else if (codecString === \"vorbis\") {\n return \"vorbis\";\n } else if (codecString === \"flac\") {\n return \"flac\";\n } else if (codecString === \"ulaw\") {\n return \"ulaw\";\n } else if (codecString === \"alaw\") {\n return \"alaw\";\n } else if (PCM_CODEC_REGEX.test(codecString)) {\n return codecString;\n }\n if (codecString === \"webvtt\") {\n return \"webvtt\";\n }\n return null;\n};\nvar getVideoEncoderConfigExtension = (codec) => {\n if (codec === \"avc\") {\n return {\n avc: {\n format: \"avc\"\n }\n };\n } else if (codec === \"hevc\") {\n return {\n hevc: {\n format: \"hevc\"\n }\n };\n }\n return {};\n};\nvar getAudioEncoderConfigExtension = (codec) => {\n if (codec === \"aac\") {\n return {\n aac: {\n format: \"aac\"\n }\n };\n } else if (codec === \"opus\") {\n return {\n opus: {\n format: \"opus\"\n }\n };\n }\n return {};\n};\nvar VALID_VIDEO_CODEC_STRING_PREFIXES = [\"avc1\", \"avc3\", \"hev1\", \"hvc1\", \"vp8\", \"vp09\", \"av01\"];\nvar AVC_CODEC_STRING_REGEX = /^(avc1|avc3)\\.[0-9a-fA-F]{6}$/;\nvar HEVC_CODEC_STRING_REGEX = /^(hev1|hvc1)\\.(?:[ABC]?\\d+)\\.[0-9a-fA-F]{1,8}\\.[LH]\\d+(?:\\.[0-9a-fA-F]{1,2}){0,6}$/;\nvar 
VP9_CODEC_STRING_REGEX = /^vp09(?:\\.\\d{2}){3}(?:(?:\\.\\d{2}){5})?$/;\nvar AV1_CODEC_STRING_REGEX = /^av01\\.\\d\\.\\d{2}[MH]\\.\\d{2}(?:\\.\\d\\.\\d{3}\\.\\d{2}\\.\\d{2}\\.\\d{2}\\.\\d)?$/;\nvar validateVideoChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Video chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Video chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Video chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_VIDEO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string must be a valid video codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if (!Number.isInteger(metadata.decoderConfig.codedWidth) || metadata.decoderConfig.codedWidth <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedWidth (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.codedHeight) || metadata.decoderConfig.codedHeight <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedHeight (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Video chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.colorSpace !== undefined) {\n const { colorSpace 
} = metadata.decoderConfig;\n if (typeof colorSpace !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace, when provided, must be an object.\");\n }\n const primariesValues = Object.keys(COLOR_PRIMARIES_MAP);\n if (colorSpace.primaries != null && !primariesValues.includes(colorSpace.primaries)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace primaries, when defined, must be one of` + ` ${primariesValues.join(\", \")}.`);\n }\n const transferValues = Object.keys(TRANSFER_CHARACTERISTICS_MAP);\n if (colorSpace.transfer != null && !transferValues.includes(colorSpace.transfer)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace transfer, when defined, must be one of` + ` ${transferValues.join(\", \")}.`);\n }\n const matrixValues = Object.keys(MATRIX_COEFFICIENTS_MAP);\n if (colorSpace.matrix != null && !matrixValues.includes(colorSpace.matrix)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace matrix, when defined, must be one of` + ` ${matrixValues.join(\", \")}.`);\n }\n if (colorSpace.fullRange != null && typeof colorSpace.fullRange !== \"boolean\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace fullRange, when defined, must be a boolean.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"avc1\") || metadata.decoderConfig.codec.startsWith(\"avc3\")) {\n if (!AVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AVC must be a valid AVC codec string as\" + \" specified in Section 3.4 of RFC 6381.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"hev1\") || metadata.decoderConfig.codec.startsWith(\"hvc1\")) {\n if (!HEVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for HEVC must be a valid HEVC codec string 
as\" + \" specified in Section E.3 of ISO 14496-15.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp8\")) {\n if (metadata.decoderConfig.codec !== \"vp8\") {\n throw new TypeError('Video chunk metadata decoder configuration codec string for VP8 must be \"vp8\".');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp09\")) {\n if (!VP9_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for VP9 must be a valid VP9 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://www.webmproject.org/vp9/mp4/.');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"av01\")) {\n if (!AV1_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AV1 must be a valid AV1 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://aomediacodec.github.io/av1-isobmff/.');\n }\n }\n};\nvar VALID_AUDIO_CODEC_STRING_PREFIXES = [\"mp4a\", \"mp3\", \"opus\", \"vorbis\", \"flac\", \"ulaw\", \"alaw\", \"pcm\"];\nvar validateAudioChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Audio chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Audio chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Audio chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_AUDIO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Audio chunk metadata decoder 
configuration codec string must be a valid audio codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if (!Number.isInteger(metadata.decoderConfig.sampleRate) || metadata.decoderConfig.sampleRate <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid sampleRate (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.numberOfChannels) || metadata.decoderConfig.numberOfChannels <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid numberOfChannels (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"mp4a\") && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n const validStrings = [\"mp4a.40.2\", \"mp4a.40.02\", \"mp4a.40.5\", \"mp4a.40.05\", \"mp4a.40.29\", \"mp4a.67\"];\n if (!validStrings.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for AAC must be a valid AAC codec string as\" + \" specified in https://www.w3.org/TR/webcodecs-aac-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"mp3\") || metadata.decoderConfig.codec.startsWith(\"mp4a\")) {\n if (metadata.decoderConfig.codec !== \"mp3\" && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for MP3 must be \"mp3\", \"mp4a.69\" or' + ' \"mp4a.6B\".');\n }\n } else if 
(metadata.decoderConfig.codec.startsWith(\"opus\")) {\n if (metadata.decoderConfig.codec !== \"opus\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Opus must be \"opus\".');\n }\n if (metadata.decoderConfig.description && metadata.decoderConfig.description.byteLength < 18) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when specified, is expected to be an\" + \" Identification Header as specified in Section 5.1 of RFC 7845.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vorbis\")) {\n if (metadata.decoderConfig.codec !== \"vorbis\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Vorbis must be \"vorbis\".');\n }\n if (!metadata.decoderConfig.description) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for Vorbis must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-vorbis-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"flac\")) {\n if (metadata.decoderConfig.codec !== \"flac\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for FLAC must be \"flac\".');\n }\n const minDescriptionSize = 4 + 4 + 34;\n if (!metadata.decoderConfig.description || metadata.decoderConfig.description.byteLength < minDescriptionSize) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for FLAC must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-flac-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"pcm\") || metadata.decoderConfig.codec.startsWith(\"ulaw\") || metadata.decoderConfig.codec.startsWith(\"alaw\")) {\n if (!PCM_AUDIO_CODECS.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for PCM must be one of the 
supported PCM\" + ` codecs (${PCM_AUDIO_CODECS.join(\", \")}).`);\n }\n }\n};\nvar validateSubtitleMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Subtitle metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Subtitle metadata must be an object.\");\n }\n if (!metadata.config) {\n throw new TypeError(\"Subtitle metadata must include a config object.\");\n }\n if (typeof metadata.config !== \"object\") {\n throw new TypeError(\"Subtitle metadata config must be an object.\");\n }\n if (typeof metadata.config.description !== \"string\") {\n throw new TypeError(\"Subtitle metadata config description must be a string.\");\n }\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/codec-data.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar AvcNalUnitType;\n(function(AvcNalUnitType2) {\n AvcNalUnitType2[AvcNalUnitType2[\"NON_IDR_SLICE\"] = 1] = \"NON_IDR_SLICE\";\n AvcNalUnitType2[AvcNalUnitType2[\"SLICE_DPA\"] = 2] = \"SLICE_DPA\";\n AvcNalUnitType2[AvcNalUnitType2[\"SLICE_DPB\"] = 3] = \"SLICE_DPB\";\n AvcNalUnitType2[AvcNalUnitType2[\"SLICE_DPC\"] = 4] = \"SLICE_DPC\";\n AvcNalUnitType2[AvcNalUnitType2[\"IDR\"] = 5] = \"IDR\";\n AvcNalUnitType2[AvcNalUnitType2[\"SEI\"] = 6] = \"SEI\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS\"] = 7] = \"SPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"PPS\"] = 8] = \"PPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"AUD\"] = 9] = \"AUD\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS_EXT\"] = 13] = \"SPS_EXT\";\n})(AvcNalUnitType || (AvcNalUnitType = {}));\nvar HevcNalUnitType;\n(function(HevcNalUnitType2) {\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_N\"] = 8] = \"RASL_N\";\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_R\"] = 9] = \"RASL_R\";\n 
HevcNalUnitType2[HevcNalUnitType2[\"BLA_W_LP\"] = 16] = \"BLA_W_LP\";\n HevcNalUnitType2[HevcNalUnitType2[\"RSV_IRAP_VCL23\"] = 23] = \"RSV_IRAP_VCL23\";\n HevcNalUnitType2[HevcNalUnitType2[\"VPS_NUT\"] = 32] = \"VPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SPS_NUT\"] = 33] = \"SPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PPS_NUT\"] = 34] = \"PPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"AUD_NUT\"] = 35] = \"AUD_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PREFIX_SEI_NUT\"] = 39] = \"PREFIX_SEI_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SUFFIX_SEI_NUT\"] = 40] = \"SUFFIX_SEI_NUT\";\n})(HevcNalUnitType || (HevcNalUnitType = {}));\nvar iterateNalUnitsInAnnexB = function* (packetData) {\n let i = 0;\n let nalStart = -1;\n while (i < packetData.length - 2) {\n const zeroIndex = packetData.indexOf(0, i);\n if (zeroIndex === -1 || zeroIndex >= packetData.length - 2) {\n break;\n }\n i = zeroIndex;\n let startCodeLength = 0;\n if (i + 3 < packetData.length && packetData[i + 1] === 0 && packetData[i + 2] === 0 && packetData[i + 3] === 1) {\n startCodeLength = 4;\n } else if (packetData[i + 1] === 0 && packetData[i + 2] === 1) {\n startCodeLength = 3;\n }\n if (startCodeLength === 0) {\n i++;\n continue;\n }\n if (nalStart !== -1 && i > nalStart) {\n yield {\n offset: nalStart,\n length: i - nalStart\n };\n }\n nalStart = i + startCodeLength;\n i = nalStart;\n }\n if (nalStart !== -1 && nalStart < packetData.length) {\n yield {\n offset: nalStart,\n length: packetData.length - nalStart\n };\n }\n};\nvar extractNalUnitTypeForAvc = (byte) => {\n return byte & 31;\n};\nvar removeEmulationPreventionBytes = (data) => {\n const result = [];\n const len = data.length;\n for (let i = 0;i < len; i++) {\n if (i + 2 < len && data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 3) {\n result.push(0, 0);\n i += 2;\n } else {\n result.push(data[i]);\n }\n }\n return new Uint8Array(result);\n};\nvar ANNEX_B_START_CODE = new Uint8Array([0, 0, 0, 1]);\nvar 
concatNalUnitsInLengthPrefixed = (nalUnits, lengthSize) => {\n const totalLength = nalUnits.reduce((a, b) => a + lengthSize + b.byteLength, 0);\n const result = new Uint8Array(totalLength);\n let offset = 0;\n for (const nalUnit of nalUnits) {\n const dataView = new DataView(result.buffer, result.byteOffset, result.byteLength);\n switch (lengthSize) {\n case 1:\n dataView.setUint8(offset, nalUnit.byteLength);\n break;\n case 2:\n dataView.setUint16(offset, nalUnit.byteLength, false);\n break;\n case 3:\n setUint24(dataView, offset, nalUnit.byteLength, false);\n break;\n case 4:\n dataView.setUint32(offset, nalUnit.byteLength, false);\n break;\n }\n offset += lengthSize;\n result.set(nalUnit, offset);\n offset += nalUnit.byteLength;\n }\n return result;\n};\nvar extractAvcDecoderConfigurationRecord = (packetData) => {\n try {\n const spsUnits = [];\n const ppsUnits = [];\n const spsExtUnits = [];\n for (const loc of iterateNalUnitsInAnnexB(packetData)) {\n const nalUnit = packetData.subarray(loc.offset, loc.offset + loc.length);\n const type = extractNalUnitTypeForAvc(nalUnit[0]);\n if (type === AvcNalUnitType.SPS) {\n spsUnits.push(nalUnit);\n } else if (type === AvcNalUnitType.PPS) {\n ppsUnits.push(nalUnit);\n } else if (type === AvcNalUnitType.SPS_EXT) {\n spsExtUnits.push(nalUnit);\n }\n }\n if (spsUnits.length === 0) {\n return null;\n }\n if (ppsUnits.length === 0) {\n return null;\n }\n const spsData = spsUnits[0];\n const spsInfo = parseAvcSps(spsData);\n assert(spsInfo !== null);\n const hasExtendedData = spsInfo.profileIdc === 100 || spsInfo.profileIdc === 110 || spsInfo.profileIdc === 122 || spsInfo.profileIdc === 144;\n return {\n configurationVersion: 1,\n avcProfileIndication: spsInfo.profileIdc,\n profileCompatibility: spsInfo.constraintFlags,\n avcLevelIndication: spsInfo.levelIdc,\n lengthSizeMinusOne: 3,\n sequenceParameterSets: spsUnits,\n pictureParameterSets: ppsUnits,\n chromaFormat: hasExtendedData ? 
spsInfo.chromaFormatIdc : null,\n bitDepthLumaMinus8: hasExtendedData ? spsInfo.bitDepthLumaMinus8 : null,\n bitDepthChromaMinus8: hasExtendedData ? spsInfo.bitDepthChromaMinus8 : null,\n sequenceParameterSetExt: hasExtendedData ? spsExtUnits : null\n };\n } catch (error) {\n console.error(\"Error building AVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar serializeAvcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push(record.avcProfileIndication);\n bytes.push(record.profileCompatibility);\n bytes.push(record.avcLevelIndication);\n bytes.push(252 | record.lengthSizeMinusOne & 3);\n bytes.push(224 | record.sequenceParameterSets.length & 31);\n for (const sps of record.sequenceParameterSets) {\n const length = sps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(sps[i]);\n }\n }\n bytes.push(record.pictureParameterSets.length);\n for (const pps of record.pictureParameterSets) {\n const length = pps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(pps[i]);\n }\n }\n if (record.avcProfileIndication === 100 || record.avcProfileIndication === 110 || record.avcProfileIndication === 122 || record.avcProfileIndication === 144) {\n assert(record.chromaFormat !== null);\n assert(record.bitDepthLumaMinus8 !== null);\n assert(record.bitDepthChromaMinus8 !== null);\n assert(record.sequenceParameterSetExt !== null);\n bytes.push(252 | record.chromaFormat & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.sequenceParameterSetExt.length);\n for (const spsExt of record.sequenceParameterSetExt) {\n const length = spsExt.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(spsExt[i]);\n }\n }\n }\n return new 
Uint8Array(bytes);\n};\nvar parseAvcSps = (sps) => {\n try {\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(1);\n bitstream.skipBits(2);\n const nalUnitType = bitstream.readBits(5);\n if (nalUnitType !== 7) {\n return null;\n }\n const profileIdc = bitstream.readAlignedByte();\n const constraintFlags = bitstream.readAlignedByte();\n const levelIdc = bitstream.readAlignedByte();\n readExpGolomb(bitstream);\n let chromaFormatIdc = 1;\n let bitDepthLumaMinus8 = 0;\n let bitDepthChromaMinus8 = 0;\n let separateColourPlaneFlag = 0;\n if (profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244 || profileIdc === 44 || profileIdc === 83 || profileIdc === 86 || profileIdc === 118 || profileIdc === 128) {\n chromaFormatIdc = readExpGolomb(bitstream);\n if (chromaFormatIdc === 3) {\n separateColourPlaneFlag = bitstream.readBits(1);\n }\n bitDepthLumaMinus8 = readExpGolomb(bitstream);\n bitDepthChromaMinus8 = readExpGolomb(bitstream);\n bitstream.skipBits(1);\n const seqScalingMatrixPresentFlag = bitstream.readBits(1);\n if (seqScalingMatrixPresentFlag) {\n for (let i = 0;i < (chromaFormatIdc !== 3 ? 8 : 12); i++) {\n const seqScalingListPresentFlag = bitstream.readBits(1);\n if (seqScalingListPresentFlag) {\n const sizeOfScalingList = i < 6 ? 16 : 64;\n let lastScale = 8;\n let nextScale = 8;\n for (let j = 0;j < sizeOfScalingList; j++) {\n if (nextScale !== 0) {\n const deltaScale = readSignedExpGolomb(bitstream);\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? 
lastScale : nextScale;\n }\n }\n }\n }\n }\n readExpGolomb(bitstream);\n const picOrderCntType = readExpGolomb(bitstream);\n if (picOrderCntType === 0) {\n readExpGolomb(bitstream);\n } else if (picOrderCntType === 1) {\n bitstream.skipBits(1);\n readSignedExpGolomb(bitstream);\n readSignedExpGolomb(bitstream);\n const numRefFramesInPicOrderCntCycle = readExpGolomb(bitstream);\n for (let i = 0;i < numRefFramesInPicOrderCntCycle; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n const picWidthInMbsMinus1 = readExpGolomb(bitstream);\n const picHeightInMapUnitsMinus1 = readExpGolomb(bitstream);\n const codedWidth = 16 * (picWidthInMbsMinus1 + 1);\n const codedHeight = 16 * (picHeightInMapUnitsMinus1 + 1);\n let displayWidth = codedWidth;\n let displayHeight = codedHeight;\n const frameMbsOnlyFlag = bitstream.readBits(1);\n if (!frameMbsOnlyFlag) {\n bitstream.skipBits(1);\n }\n bitstream.skipBits(1);\n const frameCroppingFlag = bitstream.readBits(1);\n if (frameCroppingFlag) {\n const frameCropLeftOffset = readExpGolomb(bitstream);\n const frameCropRightOffset = readExpGolomb(bitstream);\n const frameCropTopOffset = readExpGolomb(bitstream);\n const frameCropBottomOffset = readExpGolomb(bitstream);\n let cropUnitX;\n let cropUnitY;\n const chromaArrayType = separateColourPlaneFlag === 0 ? chromaFormatIdc : 0;\n if (chromaArrayType === 0) {\n cropUnitX = 1;\n cropUnitY = 2 - frameMbsOnlyFlag;\n } else {\n const subWidthC = chromaFormatIdc === 3 ? 1 : 2;\n const subHeightC = chromaFormatIdc === 1 ? 
2 : 1;\n cropUnitX = subWidthC;\n cropUnitY = subHeightC * (2 - frameMbsOnlyFlag);\n }\n displayWidth -= cropUnitX * (frameCropLeftOffset + frameCropRightOffset);\n displayHeight -= cropUnitY * (frameCropTopOffset + frameCropBottomOffset);\n }\n let colourPrimaries = 2;\n let transferCharacteristics = 2;\n let matrixCoefficients = 2;\n let fullRangeFlag = 0;\n let numReorderFrames = null;\n let maxDecFrameBuffering = null;\n const vuiParametersPresentFlag = bitstream.readBits(1);\n if (vuiParametersPresentFlag) {\n const aspectRatioInfoPresentFlag = bitstream.readBits(1);\n if (aspectRatioInfoPresentFlag) {\n const aspectRatioIdc = bitstream.readBits(8);\n if (aspectRatioIdc === 255) {\n bitstream.skipBits(16);\n bitstream.skipBits(16);\n }\n }\n const overscanInfoPresentFlag = bitstream.readBits(1);\n if (overscanInfoPresentFlag) {\n bitstream.skipBits(1);\n }\n const videoSignalTypePresentFlag = bitstream.readBits(1);\n if (videoSignalTypePresentFlag) {\n bitstream.skipBits(3);\n fullRangeFlag = bitstream.readBits(1);\n const colourDescriptionPresentFlag = bitstream.readBits(1);\n if (colourDescriptionPresentFlag) {\n colourPrimaries = bitstream.readBits(8);\n transferCharacteristics = bitstream.readBits(8);\n matrixCoefficients = bitstream.readBits(8);\n }\n }\n const chromaLocInfoPresentFlag = bitstream.readBits(1);\n if (chromaLocInfoPresentFlag) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n const timingInfoPresentFlag = bitstream.readBits(1);\n if (timingInfoPresentFlag) {\n bitstream.skipBits(32);\n bitstream.skipBits(32);\n bitstream.skipBits(1);\n }\n const nalHrdParametersPresentFlag = bitstream.readBits(1);\n if (nalHrdParametersPresentFlag) {\n skipAvcHrdParameters(bitstream);\n }\n const vclHrdParametersPresentFlag = bitstream.readBits(1);\n if (vclHrdParametersPresentFlag) {\n skipAvcHrdParameters(bitstream);\n }\n if (nalHrdParametersPresentFlag || vclHrdParametersPresentFlag) {\n bitstream.skipBits(1);\n }\n 
bitstream.skipBits(1);\n const bitstreamRestrictionFlag = bitstream.readBits(1);\n if (bitstreamRestrictionFlag) {\n bitstream.skipBits(1);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n numReorderFrames = readExpGolomb(bitstream);\n maxDecFrameBuffering = readExpGolomb(bitstream);\n }\n }\n if (numReorderFrames === null) {\n assert(maxDecFrameBuffering === null);\n const constraintSet3Flag = constraintFlags & 16;\n if ((profileIdc === 44 || profileIdc === 86 || profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244) && constraintSet3Flag) {\n numReorderFrames = 0;\n maxDecFrameBuffering = 0;\n } else {\n const picWidthInMbs = picWidthInMbsMinus1 + 1;\n const picHeightInMapUnits = picHeightInMapUnitsMinus1 + 1;\n const frameHeightInMbs = (2 - frameMbsOnlyFlag) * picHeightInMapUnits;\n const levelInfo = AVC_LEVEL_TABLE.find((x) => x.level >= levelIdc) ?? last(AVC_LEVEL_TABLE);\n const maxDpbFrames = Math.min(Math.floor(levelInfo.maxDpbMbs / (picWidthInMbs * frameHeightInMbs)), 16);\n numReorderFrames = maxDpbFrames;\n maxDecFrameBuffering = maxDpbFrames;\n }\n }\n assert(maxDecFrameBuffering !== null);\n return {\n profileIdc,\n constraintFlags,\n levelIdc,\n frameMbsOnlyFlag,\n chromaFormatIdc,\n bitDepthLumaMinus8,\n bitDepthChromaMinus8,\n codedWidth,\n codedHeight,\n displayWidth,\n displayHeight,\n colourPrimaries,\n matrixCoefficients,\n transferCharacteristics,\n fullRangeFlag,\n numReorderFrames,\n maxDecFrameBuffering\n };\n } catch (error) {\n console.error(\"Error parsing AVC SPS:\", error);\n return null;\n }\n};\nvar skipAvcHrdParameters = (bitstream) => {\n const cpb_cnt_minus1 = readExpGolomb(bitstream);\n bitstream.skipBits(4);\n bitstream.skipBits(4);\n for (let i = 0;i <= cpb_cnt_minus1; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n bitstream.skipBits(5);\n bitstream.skipBits(5);\n bitstream.skipBits(5);\n 
bitstream.skipBits(5);\n};\nvar extractNalUnitTypeForHevc = (byte) => {\n return byte >> 1 & 63;\n};\nvar parseHevcSps = (sps) => {\n try {\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(16);\n bitstream.readBits(4);\n const spsMaxSubLayersMinus1 = bitstream.readBits(3);\n const spsTemporalIdNestingFlag = bitstream.readBits(1);\n const { general_profile_space, general_tier_flag, general_profile_idc, general_profile_compatibility_flags, general_constraint_indicator_flags, general_level_idc } = parseProfileTierLevel(bitstream, spsMaxSubLayersMinus1);\n readExpGolomb(bitstream);\n const chromaFormatIdc = readExpGolomb(bitstream);\n let separateColourPlaneFlag = 0;\n if (chromaFormatIdc === 3) {\n separateColourPlaneFlag = bitstream.readBits(1);\n }\n const picWidthInLumaSamples = readExpGolomb(bitstream);\n const picHeightInLumaSamples = readExpGolomb(bitstream);\n let displayWidth = picWidthInLumaSamples;\n let displayHeight = picHeightInLumaSamples;\n if (bitstream.readBits(1)) {\n const confWinLeftOffset = readExpGolomb(bitstream);\n const confWinRightOffset = readExpGolomb(bitstream);\n const confWinTopOffset = readExpGolomb(bitstream);\n const confWinBottomOffset = readExpGolomb(bitstream);\n let subWidthC = 1;\n let subHeightC = 1;\n const chromaArrayType = separateColourPlaneFlag === 0 ? chromaFormatIdc : 0;\n if (chromaArrayType === 1) {\n subWidthC = 2;\n subHeightC = 2;\n } else if (chromaArrayType === 2) {\n subWidthC = 2;\n subHeightC = 1;\n }\n displayWidth -= (confWinLeftOffset + confWinRightOffset) * subWidthC;\n displayHeight -= (confWinTopOffset + confWinBottomOffset) * subHeightC;\n }\n const bitDepthLumaMinus8 = readExpGolomb(bitstream);\n const bitDepthChromaMinus8 = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n const spsSubLayerOrderingInfoPresentFlag = bitstream.readBits(1);\n const startI = spsSubLayerOrderingInfoPresentFlag ? 
0 : spsMaxSubLayersMinus1;\n let spsMaxNumReorderPics = 0;\n for (let i = startI;i <= spsMaxSubLayersMinus1; i++) {\n readExpGolomb(bitstream);\n spsMaxNumReorderPics = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n if (bitstream.readBits(1)) {\n skipScalingListData(bitstream);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n if (bitstream.readBits(1)) {\n bitstream.skipBits(4);\n bitstream.skipBits(4);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n const numShortTermRefPicSets = readExpGolomb(bitstream);\n skipAllStRefPicSets(bitstream, numShortTermRefPicSets);\n if (bitstream.readBits(1)) {\n const numLongTermRefPicsSps = readExpGolomb(bitstream);\n for (let i = 0;i < numLongTermRefPicsSps; i++) {\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n let colourPrimaries = 2;\n let transferCharacteristics = 2;\n let matrixCoefficients = 2;\n let fullRangeFlag = 0;\n let minSpatialSegmentationIdc = 0;\n if (bitstream.readBits(1)) {\n const vui = parseHevcVui(bitstream, spsMaxSubLayersMinus1);\n colourPrimaries = vui.colourPrimaries;\n transferCharacteristics = vui.transferCharacteristics;\n matrixCoefficients = vui.matrixCoefficients;\n fullRangeFlag = vui.fullRangeFlag;\n minSpatialSegmentationIdc = vui.minSpatialSegmentationIdc;\n }\n return {\n displayWidth,\n displayHeight,\n colourPrimaries,\n transferCharacteristics,\n matrixCoefficients,\n fullRangeFlag,\n maxDecFrameBuffering: spsMaxNumReorderPics + 1,\n spsMaxSubLayersMinus1,\n spsTemporalIdNestingFlag,\n generalProfileSpace: general_profile_space,\n generalTierFlag: general_tier_flag,\n generalProfileIdc: general_profile_idc,\n generalProfileCompatibilityFlags: 
general_profile_compatibility_flags,\n generalConstraintIndicatorFlags: general_constraint_indicator_flags,\n generalLevelIdc: general_level_idc,\n chromaFormatIdc,\n bitDepthLumaMinus8,\n bitDepthChromaMinus8,\n minSpatialSegmentationIdc\n };\n } catch (error) {\n console.error(\"Error parsing HEVC SPS:\", error);\n return null;\n }\n};\nvar extractHevcDecoderConfigurationRecord = (packetData) => {\n try {\n const vpsUnits = [];\n const spsUnits = [];\n const ppsUnits = [];\n const seiUnits = [];\n for (const loc of iterateNalUnitsInAnnexB(packetData)) {\n const nalUnit = packetData.subarray(loc.offset, loc.offset + loc.length);\n const type = extractNalUnitTypeForHevc(nalUnit[0]);\n if (type === HevcNalUnitType.VPS_NUT) {\n vpsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.SPS_NUT) {\n spsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.PPS_NUT) {\n ppsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.PREFIX_SEI_NUT || type === HevcNalUnitType.SUFFIX_SEI_NUT) {\n seiUnits.push(nalUnit);\n }\n }\n if (spsUnits.length === 0 || ppsUnits.length === 0)\n return null;\n const spsInfo = parseHevcSps(spsUnits[0]);\n if (!spsInfo)\n return null;\n let parallelismType = 0;\n if (ppsUnits.length > 0) {\n const pps = ppsUnits[0];\n const ppsBitstream = new Bitstream(removeEmulationPreventionBytes(pps));\n ppsBitstream.skipBits(16);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(3);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n if (ppsBitstream.readBits(1)) {\n readExpGolomb(ppsBitstream);\n }\n readSignedExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n const 
tiles_enabled_flag = ppsBitstream.readBits(1);\n const entropy_coding_sync_enabled_flag = ppsBitstream.readBits(1);\n if (!tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 0;\n else if (tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 2;\n else if (!tiles_enabled_flag && entropy_coding_sync_enabled_flag)\n parallelismType = 3;\n else\n parallelismType = 0;\n }\n const arrays = [\n ...vpsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.VPS_NUT,\n nalUnits: vpsUnits\n }\n ] : [],\n ...spsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.SPS_NUT,\n nalUnits: spsUnits\n }\n ] : [],\n ...ppsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.PPS_NUT,\n nalUnits: ppsUnits\n }\n ] : [],\n ...seiUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: extractNalUnitTypeForHevc(seiUnits[0][0]),\n nalUnits: seiUnits\n }\n ] : []\n ];\n const record = {\n configurationVersion: 1,\n generalProfileSpace: spsInfo.generalProfileSpace,\n generalTierFlag: spsInfo.generalTierFlag,\n generalProfileIdc: spsInfo.generalProfileIdc,\n generalProfileCompatibilityFlags: spsInfo.generalProfileCompatibilityFlags,\n generalConstraintIndicatorFlags: spsInfo.generalConstraintIndicatorFlags,\n generalLevelIdc: spsInfo.generalLevelIdc,\n minSpatialSegmentationIdc: spsInfo.minSpatialSegmentationIdc,\n parallelismType,\n chromaFormatIdc: spsInfo.chromaFormatIdc,\n bitDepthLumaMinus8: spsInfo.bitDepthLumaMinus8,\n bitDepthChromaMinus8: spsInfo.bitDepthChromaMinus8,\n avgFrameRate: 0,\n constantFrameRate: 0,\n numTemporalLayers: spsInfo.spsMaxSubLayersMinus1 + 1,\n temporalIdNested: spsInfo.spsTemporalIdNestingFlag,\n lengthSizeMinusOne: 3,\n arrays\n };\n return record;\n } catch (error) {\n console.error(\"Error building HEVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar parseProfileTierLevel = (bitstream, maxNumSubLayersMinus1) => {\n const 
general_profile_space = bitstream.readBits(2);\n const general_tier_flag = bitstream.readBits(1);\n const general_profile_idc = bitstream.readBits(5);\n let general_profile_compatibility_flags = 0;\n for (let i = 0;i < 32; i++) {\n general_profile_compatibility_flags = general_profile_compatibility_flags << 1 | bitstream.readBits(1);\n }\n const general_constraint_indicator_flags = new Uint8Array(6);\n for (let i = 0;i < 6; i++) {\n general_constraint_indicator_flags[i] = bitstream.readBits(8);\n }\n const general_level_idc = bitstream.readBits(8);\n const sub_layer_profile_present_flag = [];\n const sub_layer_level_present_flag = [];\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n sub_layer_profile_present_flag.push(bitstream.readBits(1));\n sub_layer_level_present_flag.push(bitstream.readBits(1));\n }\n if (maxNumSubLayersMinus1 > 0) {\n for (let i = maxNumSubLayersMinus1;i < 8; i++) {\n bitstream.skipBits(2);\n }\n }\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n if (sub_layer_profile_present_flag[i])\n bitstream.skipBits(88);\n if (sub_layer_level_present_flag[i])\n bitstream.skipBits(8);\n }\n return {\n general_profile_space,\n general_tier_flag,\n general_profile_idc,\n general_profile_compatibility_flags,\n general_constraint_indicator_flags,\n general_level_idc\n };\n};\nvar skipScalingListData = (bitstream) => {\n for (let sizeId = 0;sizeId < 4; sizeId++) {\n for (let matrixId = 0;matrixId < (sizeId === 3 ? 
2 : 6); matrixId++) {\n const scaling_list_pred_mode_flag = bitstream.readBits(1);\n if (!scaling_list_pred_mode_flag) {\n readExpGolomb(bitstream);\n } else {\n const coefNum = Math.min(64, 1 << 4 + (sizeId << 1));\n if (sizeId > 1) {\n readSignedExpGolomb(bitstream);\n }\n for (let i = 0;i < coefNum; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n }\n }\n};\nvar skipAllStRefPicSets = (bitstream, num_short_term_ref_pic_sets) => {\n const NumDeltaPocs = [];\n for (let stRpsIdx = 0;stRpsIdx < num_short_term_ref_pic_sets; stRpsIdx++) {\n NumDeltaPocs[stRpsIdx] = skipStRefPicSet(bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs);\n }\n};\nvar skipStRefPicSet = (bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs) => {\n let NumDeltaPocsThis = 0;\n let inter_ref_pic_set_prediction_flag = 0;\n let RefRpsIdx = 0;\n if (stRpsIdx !== 0) {\n inter_ref_pic_set_prediction_flag = bitstream.readBits(1);\n }\n if (inter_ref_pic_set_prediction_flag) {\n if (stRpsIdx === num_short_term_ref_pic_sets) {\n const delta_idx_minus1 = readExpGolomb(bitstream);\n RefRpsIdx = stRpsIdx - (delta_idx_minus1 + 1);\n } else {\n RefRpsIdx = stRpsIdx - 1;\n }\n bitstream.readBits(1);\n readExpGolomb(bitstream);\n const numDelta = NumDeltaPocs[RefRpsIdx] ?? 
0;\n for (let j = 0;j <= numDelta; j++) {\n const used_by_curr_pic_flag = bitstream.readBits(1);\n if (!used_by_curr_pic_flag) {\n bitstream.readBits(1);\n }\n }\n NumDeltaPocsThis = NumDeltaPocs[RefRpsIdx];\n } else {\n const num_negative_pics = readExpGolomb(bitstream);\n const num_positive_pics = readExpGolomb(bitstream);\n for (let i = 0;i < num_negative_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n for (let i = 0;i < num_positive_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n NumDeltaPocsThis = num_negative_pics + num_positive_pics;\n }\n return NumDeltaPocsThis;\n};\nvar parseHevcVui = (bitstream, sps_max_sub_layers_minus1) => {\n let colourPrimaries = 2;\n let transferCharacteristics = 2;\n let matrixCoefficients = 2;\n let fullRangeFlag = 0;\n let minSpatialSegmentationIdc = 0;\n if (bitstream.readBits(1)) {\n const aspect_ratio_idc = bitstream.readBits(8);\n if (aspect_ratio_idc === 255) {\n bitstream.readBits(16);\n bitstream.readBits(16);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(3);\n fullRangeFlag = bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n colourPrimaries = bitstream.readBits(8);\n transferCharacteristics = bitstream.readBits(8);\n matrixCoefficients = bitstream.readBits(8);\n }\n }\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(32);\n bitstream.readBits(32);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n skipHevcHrdParameters(bitstream, true, sps_max_sub_layers_minus1);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n 
bitstream.readBits(1);\n bitstream.readBits(1);\n minSpatialSegmentationIdc = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n return {\n colourPrimaries,\n transferCharacteristics,\n matrixCoefficients,\n fullRangeFlag,\n minSpatialSegmentationIdc\n };\n};\nvar skipHevcHrdParameters = (bitstream, commonInfPresentFlag, maxNumSubLayersMinus1) => {\n let nal_hrd_parameters_present_flag = false;\n let vcl_hrd_parameters_present_flag = false;\n let sub_pic_hrd_params_present_flag = false;\n if (commonInfPresentFlag) {\n nal_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n vcl_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) {\n sub_pic_hrd_params_present_flag = bitstream.readBits(1) === 1;\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(8);\n bitstream.readBits(5);\n bitstream.readBits(1);\n bitstream.readBits(5);\n }\n bitstream.readBits(4);\n bitstream.readBits(4);\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(4);\n }\n bitstream.readBits(5);\n bitstream.readBits(5);\n bitstream.readBits(5);\n }\n }\n for (let i = 0;i <= maxNumSubLayersMinus1; i++) {\n const fixed_pic_rate_general_flag = bitstream.readBits(1) === 1;\n let fixed_pic_rate_within_cvs_flag = true;\n if (!fixed_pic_rate_general_flag) {\n fixed_pic_rate_within_cvs_flag = bitstream.readBits(1) === 1;\n }\n let low_delay_hrd_flag = false;\n if (fixed_pic_rate_within_cvs_flag) {\n readExpGolomb(bitstream);\n } else {\n low_delay_hrd_flag = bitstream.readBits(1) === 1;\n }\n let CpbCnt = 1;\n if (!low_delay_hrd_flag) {\n const cpb_cnt_minus1 = readExpGolomb(bitstream);\n CpbCnt = cpb_cnt_minus1 + 1;\n }\n if (nal_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n if (vcl_hrd_parameters_present_flag) {\n 
skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n }\n};\nvar skipSubLayerHrdParameters = (bitstream, CpbCnt, sub_pic_hrd_params_present_flag) => {\n for (let i = 0;i < CpbCnt; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (sub_pic_hrd_params_present_flag) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n }\n};\nvar serializeHevcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push((record.generalProfileSpace & 3) << 6 | (record.generalTierFlag & 1) << 5 | record.generalProfileIdc & 31);\n bytes.push(record.generalProfileCompatibilityFlags >>> 24 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 16 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 8 & 255);\n bytes.push(record.generalProfileCompatibilityFlags & 255);\n bytes.push(...record.generalConstraintIndicatorFlags);\n bytes.push(record.generalLevelIdc & 255);\n bytes.push(240 | record.minSpatialSegmentationIdc >> 8 & 15);\n bytes.push(record.minSpatialSegmentationIdc & 255);\n bytes.push(252 | record.parallelismType & 3);\n bytes.push(252 | record.chromaFormatIdc & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.avgFrameRate >> 8 & 255);\n bytes.push(record.avgFrameRate & 255);\n bytes.push((record.constantFrameRate & 3) << 6 | (record.numTemporalLayers & 7) << 3 | (record.temporalIdNested & 1) << 2 | record.lengthSizeMinusOne & 3);\n bytes.push(record.arrays.length & 255);\n for (const arr of record.arrays) {\n bytes.push((arr.arrayCompleteness & 1) << 7 | 0 << 6 | arr.nalUnitType & 63);\n bytes.push(arr.nalUnits.length >> 8 & 255);\n bytes.push(arr.nalUnits.length & 255);\n for (const nal of arr.nalUnits) {\n bytes.push(nal.length >> 8 & 255);\n bytes.push(nal.length & 255);\n for (let i = 0;i < nal.length; i++) {\n bytes.push(nal[i]);\n }\n 
}\n }\n return new Uint8Array(bytes);\n};\nvar parseOpusIdentificationHeader = (bytes) => {\n const view = toDataView(bytes);\n const outputChannelCount = view.getUint8(9);\n const preSkip = view.getUint16(10, true);\n const inputSampleRate = view.getUint32(12, true);\n const outputGain = view.getInt16(16, true);\n const channelMappingFamily = view.getUint8(18);\n let channelMappingTable = null;\n if (channelMappingFamily) {\n channelMappingTable = bytes.subarray(19, 19 + 2 + outputChannelCount);\n }\n return {\n outputChannelCount,\n preSkip,\n inputSampleRate,\n outputGain,\n channelMappingFamily,\n channelMappingTable\n };\n};\nvar FlacBlockType;\n(function(FlacBlockType2) {\n FlacBlockType2[FlacBlockType2[\"STREAMINFO\"] = 0] = \"STREAMINFO\";\n FlacBlockType2[FlacBlockType2[\"VORBIS_COMMENT\"] = 4] = \"VORBIS_COMMENT\";\n FlacBlockType2[FlacBlockType2[\"PICTURE\"] = 6] = \"PICTURE\";\n})(FlacBlockType || (FlacBlockType = {}));\n\n// ../../node_modules/mediabunny/dist/modules/src/custom-coder.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar customVideoEncoders = [];\nvar customAudioEncoders = [];\n\n// ../../node_modules/mediabunny/dist/modules/src/packet.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar PLACEHOLDER_DATA = /* @__PURE__ */ new Uint8Array(0);\n\nclass EncodedPacket {\n constructor(data, type, timestamp, duration, sequenceNumber = -1, byteLength, sideData) {\n this.data = data;\n this.type = type;\n this.timestamp = timestamp;\n this.duration = duration;\n this.sequenceNumber = sequenceNumber;\n if (data === PLACEHOLDER_DATA && byteLength === undefined) {\n throw new Error(\"Internal error: byteLength must be explicitly provided when constructing metadata-only packets.\");\n }\n if (byteLength === undefined) {\n byteLength = data.byteLength;\n }\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (type !== \"key\" && type !== \"delta\") {\n throw new TypeError('type must be either \"key\" or \"delta\".');\n }\n if (!Number.isFinite(timestamp)) {\n throw new TypeError(\"timestamp must be a number.\");\n }\n if (!Number.isFinite(duration) || duration < 0) {\n throw new TypeError(\"duration must be a non-negative number.\");\n }\n if (!Number.isFinite(sequenceNumber)) {\n throw new TypeError(\"sequenceNumber must be a number.\");\n }\n if (!Number.isInteger(byteLength) || byteLength < 0) {\n throw new TypeError(\"byteLength must be a non-negative integer.\");\n }\n if (sideData !== undefined && (typeof sideData !== \"object\" || !sideData)) {\n throw new TypeError(\"sideData, when provided, must be an object.\");\n }\n if (sideData?.alpha !== undefined && !(sideData.alpha instanceof Uint8Array)) {\n throw new TypeError(\"sideData.alpha, when provided, must be a Uint8Array.\");\n }\n if (sideData?.alphaByteLength !== undefined && (!Number.isInteger(sideData.alphaByteLength) || sideData.alphaByteLength < 0)) {\n throw new TypeError(\"sideData.alphaByteLength, when provided, must be a non-negative integer.\");\n }\n this.byteLength = byteLength;\n this.sideData = sideData ?? 
{};\n if (this.sideData.alpha && this.sideData.alphaByteLength === undefined) {\n this.sideData.alphaByteLength = this.sideData.alpha.byteLength;\n }\n }\n get isMetadataOnly() {\n return this.data === PLACEHOLDER_DATA;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n toEncodedVideoChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n alphaToEncodedVideoChunk(type = this.type) {\n if (!this.sideData.alpha) {\n throw new TypeError(\"This packet does not contain alpha side data.\");\n }\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.sideData.alpha,\n type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n toEncodedAudioChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to an audio chunk.\");\n }\n if (typeof EncodedAudioChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedAudioChunk.\");\n }\n return new EncodedAudioChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n static fromEncodedChunk(chunk, sideData) {\n if (!(chunk instanceof EncodedVideoChunk || chunk instanceof EncodedAudioChunk)) {\n throw 
new TypeError(\"chunk must be an EncodedVideoChunk or EncodedAudioChunk.\");\n }\n const data = new Uint8Array(chunk.byteLength);\n chunk.copyTo(data);\n return new EncodedPacket(data, chunk.type, chunk.timestamp / 1e6, (chunk.duration ?? 0) / 1e6, undefined, undefined, sideData);\n }\n clone(options) {\n if (options !== undefined && (typeof options !== \"object\" || options === null)) {\n throw new TypeError(\"options, when provided, must be an object.\");\n }\n if (options?.data !== undefined && !(options.data instanceof Uint8Array)) {\n throw new TypeError(\"options.data, when provided, must be a Uint8Array.\");\n }\n if (options?.type !== undefined && options.type !== \"key\" && options.type !== \"delta\") {\n throw new TypeError('options.type, when provided, must be either \"key\" or \"delta\".');\n }\n if (options?.timestamp !== undefined && !Number.isFinite(options.timestamp)) {\n throw new TypeError(\"options.timestamp, when provided, must be a number.\");\n }\n if (options?.duration !== undefined && !Number.isFinite(options.duration)) {\n throw new TypeError(\"options.duration, when provided, must be a number.\");\n }\n if (options?.sequenceNumber !== undefined && !Number.isFinite(options.sequenceNumber)) {\n throw new TypeError(\"options.sequenceNumber, when provided, must be a number.\");\n }\n if (options?.sideData !== undefined && (typeof options.sideData !== \"object\" || options.sideData === null)) {\n throw new TypeError(\"options.sideData, when provided, must be an object.\");\n }\n return new EncodedPacket(options?.data ?? this.data, options?.type ?? this.type, options?.timestamp ?? this.timestamp, options?.duration ?? this.duration, options?.sequenceNumber ?? this.sequenceNumber, this.byteLength, options?.sideData ?? 
this.sideData);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/pcm.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar toUlaw = (s16) => {\n const MULAW_MAX = 8191;\n const MULAW_BIAS = 33;\n let number = s16;\n let mask = 4096;\n let sign = 0;\n let position = 12;\n let lsb = 0;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n number += MULAW_BIAS;\n if (number > MULAW_MAX) {\n number = MULAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> position - 4 & 15;\n return ~(sign | position - 5 << 4 | lsb) & 255;\n};\nvar toAlaw = (s16) => {\n const ALAW_MAX = 4095;\n let mask = 2048;\n let sign = 0;\n let position = 11;\n let lsb = 0;\n let number = s16;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n if (number > ALAW_MAX) {\n number = ALAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> (position === 4 ? 1 : position - 4) & 15;\n return (sign | position - 4 << 4 | lsb) ^ 85;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/sample.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\npolyfillSymbolDispose();\nvar lastVideoGcErrorLog = -Infinity;\nvar lastAudioGcErrorLog = -Infinity;\nvar finalizationRegistry = null;\nif (typeof FinalizationRegistry !== \"undefined\") {\n finalizationRegistry = new FinalizationRegistry((value) => {\n const now = Date.now();\n if (value.type === \"video\") {\n if (now - lastVideoGcErrorLog >= 1000) {\n console.error(`A VideoSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your VideoSamples as soon as you're done using them.`);\n lastVideoGcErrorLog = now;\n }\n if (typeof VideoFrame !== \"undefined\" && value.data instanceof VideoFrame) {\n value.data.close();\n }\n } else {\n if (now - lastAudioGcErrorLog >= 1000) {\n console.error(`An AudioSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your AudioSamples as soon as you're done using them.`);\n lastAudioGcErrorLog = now;\n }\n if (typeof AudioData !== \"undefined\" && value.data instanceof AudioData) {\n value.data.close();\n }\n }\n });\n}\nvar VIDEO_SAMPLE_PIXEL_FORMATS = [\n \"I420\",\n \"I420P10\",\n \"I420P12\",\n \"I420A\",\n \"I420AP10\",\n \"I420AP12\",\n \"I422\",\n \"I422P10\",\n \"I422P12\",\n \"I422A\",\n \"I422AP10\",\n \"I422AP12\",\n \"I444\",\n \"I444P10\",\n \"I444P12\",\n \"I444A\",\n \"I444AP10\",\n \"I444AP12\",\n \"NV12\",\n \"RGBA\",\n \"RGBX\",\n \"BGRA\",\n \"BGRX\"\n];\nvar VIDEO_SAMPLE_PIXEL_FORMATS_SET = new Set(VIDEO_SAMPLE_PIXEL_FORMATS);\n\nclass VideoSample {\n get displayWidth() {\n return this.rotation % 180 === 0 ? this.codedWidth : this.codedHeight;\n }\n get displayHeight() {\n return this.rotation % 180 === 0 ? 
this.codedHeight : this.codedWidth;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n get hasAlpha() {\n return this.format && this.format.includes(\"A\");\n }\n constructor(data, init) {\n this._closed = false;\n if (data instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && data instanceof SharedArrayBuffer || ArrayBuffer.isView(data)) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.format === undefined || !VIDEO_SAMPLE_PIXEL_FORMATS_SET.has(init.format)) {\n throw new TypeError(\"init.format must be one of: \" + VIDEO_SAMPLE_PIXEL_FORMATS.join(\", \"));\n }\n if (!Number.isInteger(init.codedWidth) || init.codedWidth <= 0) {\n throw new TypeError(\"init.codedWidth must be a positive integer.\");\n }\n if (!Number.isInteger(init.codedHeight) || init.codedHeight <= 0) {\n throw new TypeError(\"init.codedHeight must be a positive integer.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = toUint8Array(data).slice();\n this._layout = init.layout ?? createDefaultPlaneLayout(init.format, init.codedWidth, init.codedHeight);\n this.format = init.format;\n this.codedWidth = init.codedWidth;\n this.codedHeight = init.codedHeight;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoSampleColorSpace(init.colorSpace);\n } else if (typeof VideoFrame !== \"undefined\" && data instanceof VideoFrame) {\n if (init?.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (init?.timestamp !== undefined && !Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp, when provided, must be a number.\");\n }\n if (init?.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = data;\n this._layout = null;\n this.format = data.format;\n this.codedWidth = data.displayWidth;\n this.codedHeight = data.displayHeight;\n this.rotation = init?.rotation ?? 0;\n this.timestamp = init?.timestamp ?? data.timestamp / 1e6;\n this.duration = init?.duration ?? (data.duration ?? 0) / 1e6;\n this.colorSpace = new VideoSampleColorSpace(data.colorSpace);\n } else if (typeof HTMLImageElement !== \"undefined\" && data instanceof HTMLImageElement || typeof SVGImageElement !== \"undefined\" && data instanceof SVGImageElement || typeof ImageBitmap !== \"undefined\" && data instanceof ImageBitmap || typeof HTMLVideoElement !== \"undefined\" && data instanceof HTMLVideoElement || typeof HTMLCanvasElement !== \"undefined\" && data instanceof HTMLCanvasElement || typeof OffscreenCanvas !== \"undefined\" && data instanceof OffscreenCanvas) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || 
init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n if (typeof VideoFrame !== \"undefined\") {\n return new VideoSample(new VideoFrame(data, {\n timestamp: Math.trunc(init.timestamp * SECOND_TO_MICROSECOND_FACTOR),\n duration: Math.trunc((init.duration ?? 0) * SECOND_TO_MICROSECOND_FACTOR) || undefined\n }), init);\n }\n let width = 0;\n let height = 0;\n if (\"naturalWidth\" in data) {\n width = data.naturalWidth;\n height = data.naturalHeight;\n } else if (\"videoWidth\" in data) {\n width = data.videoWidth;\n height = data.videoHeight;\n } else if (\"width\" in data) {\n width = Number(data.width);\n height = Number(data.height);\n }\n if (!width || !height) {\n throw new TypeError(\"Could not determine dimensions.\");\n }\n const canvas = new OffscreenCanvas(width, height);\n const context = canvas.getContext(\"2d\", {\n alpha: isFirefox(),\n willReadFrequently: true\n });\n assert(context);\n context.drawImage(data, 0, 0);\n this._data = canvas;\n this._layout = null;\n this.format = \"RGBX\";\n this.codedWidth = width;\n this.codedHeight = height;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoSampleColorSpace({\n matrix: \"rgb\",\n primaries: \"bt709\",\n transfer: \"iec61966-2-1\",\n fullRange: true\n });\n } else {\n throw new TypeError(\"Invalid data type: Must be a BufferSource or CanvasImageSource.\");\n }\n finalizationRegistry?.register(this, { type: \"video\", data: this._data }, this);\n }\n clone() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return new VideoSample(this._data.clone(), {\n timestamp: this.timestamp,\n duration: this.duration,\n rotation: this.rotation\n });\n } else if (this._data instanceof Uint8Array) {\n assert(this._layout);\n return new VideoSample(this._data, {\n format: this.format,\n layout: this._layout,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n } else {\n return new VideoSample(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isVideoFrame(this._data)) {\n this._data.close();\n } else {\n this._data = null;\n }\n this._closed = true;\n }\n allocationSize(options = {}) {\n validateVideoFrameCopyToOptions(options);\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n if (this.format === null) {\n throw new Error(\"Cannot get allocation size when format is null. 
Sorry!\");\n }\n assert(this._data !== null);\n if (!isVideoFrame(this._data)) {\n if (options.colorSpace || options.format && options.format !== this.format || options.layout || options.rect) {\n const videoFrame = this.toVideoFrame();\n const size = videoFrame.allocationSize(options);\n videoFrame.close();\n return size;\n }\n }\n if (isVideoFrame(this._data)) {\n return this._data.allocationSize(options);\n } else if (this._data instanceof Uint8Array) {\n return this._data.byteLength;\n } else {\n return this.codedWidth * this.codedHeight * 4;\n }\n }\n async copyTo(destination, options = {}) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n validateVideoFrameCopyToOptions(options);\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n if (this.format === null) {\n throw new Error(\"Cannot copy video sample data when format is null. Sorry!\");\n }\n assert(this._data !== null);\n if (!isVideoFrame(this._data)) {\n if (options.colorSpace || options.format && options.format !== this.format || options.layout || options.rect) {\n const videoFrame = this.toVideoFrame();\n const layout = await videoFrame.copyTo(destination, options);\n videoFrame.close();\n return layout;\n }\n }\n if (isVideoFrame(this._data)) {\n return this._data.copyTo(destination, options);\n } else if (this._data instanceof Uint8Array) {\n assert(this._layout);\n const dest = toUint8Array(destination);\n dest.set(this._data);\n return this._layout;\n } else {\n const canvas = this._data;\n const context = canvas.getContext(\"2d\");\n assert(context);\n const imageData = context.getImageData(0, 0, this.codedWidth, this.codedHeight);\n const dest = toUint8Array(destination);\n dest.set(imageData.data);\n return [{\n offset: 0,\n stride: 4 * this.codedWidth\n }];\n }\n }\n toVideoFrame() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== 
null);\n if (isVideoFrame(this._data)) {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n } else if (this._data instanceof Uint8Array) {\n return new VideoFrame(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined,\n colorSpace: this.colorSpace\n });\n } else {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n }\n }\n draw(context, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) {\n let sx = 0;\n let sy = 0;\n let sWidth = this.displayWidth;\n let sHeight = this.displayHeight;\n let dx = 0;\n let dy = 0;\n let dWidth = this.displayWidth;\n let dHeight = this.displayHeight;\n if (arg5 !== undefined) {\n sx = arg1;\n sy = arg2;\n sWidth = arg3;\n sHeight = arg4;\n dx = arg5;\n dy = arg6;\n if (arg7 !== undefined) {\n dWidth = arg7;\n dHeight = arg8;\n } else {\n dWidth = sWidth;\n dHeight = sHeight;\n }\n } else {\n dx = arg1;\n dy = arg2;\n if (arg3 !== undefined) {\n dWidth = arg3;\n dHeight = arg4;\n }\n }\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!Number.isFinite(sx)) {\n throw new TypeError(\"sx must be a number.\");\n }\n if (!Number.isFinite(sy)) {\n throw new TypeError(\"sy must be a number.\");\n }\n if (!Number.isFinite(sWidth) || sWidth < 0) {\n throw new TypeError(\"sWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(sHeight) || sHeight < 0) {\n throw new TypeError(\"sHeight must be a non-negative number.\");\n }\n if 
(!Number.isFinite(dx)) {\n throw new TypeError(\"dx must be a number.\");\n }\n if (!Number.isFinite(dy)) {\n throw new TypeError(\"dy must be a number.\");\n }\n if (!Number.isFinite(dWidth) || dWidth < 0) {\n throw new TypeError(\"dWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(dHeight) || dHeight < 0) {\n throw new TypeError(\"dHeight must be a non-negative number.\");\n }\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n ({ sx, sy, sWidth, sHeight } = this._rotateSourceRegion(sx, sy, sWidth, sHeight, this.rotation));\n const source = this.toCanvasImageSource();\n context.save();\n const centerX = dx + dWidth / 2;\n const centerY = dy + dHeight / 2;\n context.translate(centerX, centerY);\n context.rotate(this.rotation * Math.PI / 180);\n const aspectRatioChange = this.rotation % 180 === 0 ? 1 : dWidth / dHeight;\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.drawImage(source, sx, sy, sWidth, sHeight, -dWidth / 2, -dHeight / 2, dWidth, dHeight);\n context.restore();\n }\n drawWithFit(context, options) {\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (![\"fill\", \"contain\", \"cover\"].includes(options.fit)) {\n throw new TypeError(\"options.fit must be 'fill', 'contain', or 'cover'.\");\n }\n if (options.rotation !== undefined && ![0, 90, 180, 270].includes(options.rotation)) {\n throw new TypeError(\"options.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (options.crop !== undefined) {\n validateCropRectangle(options.crop, \"options.\");\n }\n const canvasWidth = 
context.canvas.width;\n const canvasHeight = context.canvas.height;\n const rotation = options.rotation ?? this.rotation;\n const [rotatedWidth, rotatedHeight] = rotation % 180 === 0 ? [this.codedWidth, this.codedHeight] : [this.codedHeight, this.codedWidth];\n if (options.crop) {\n clampCropRectangle(options.crop, rotatedWidth, rotatedHeight);\n }\n let dx;\n let dy;\n let newWidth;\n let newHeight;\n const { sx, sy, sWidth, sHeight } = this._rotateSourceRegion(options.crop?.left ?? 0, options.crop?.top ?? 0, options.crop?.width ?? rotatedWidth, options.crop?.height ?? rotatedHeight, rotation);\n if (options.fit === \"fill\") {\n dx = 0;\n dy = 0;\n newWidth = canvasWidth;\n newHeight = canvasHeight;\n } else {\n const [sampleWidth, sampleHeight] = options.crop ? [options.crop.width, options.crop.height] : [rotatedWidth, rotatedHeight];\n const scale = options.fit === \"contain\" ? Math.min(canvasWidth / sampleWidth, canvasHeight / sampleHeight) : Math.max(canvasWidth / sampleWidth, canvasHeight / sampleHeight);\n newWidth = sampleWidth * scale;\n newHeight = sampleHeight * scale;\n dx = (canvasWidth - newWidth) / 2;\n dy = (canvasHeight - newHeight) / 2;\n }\n context.save();\n const aspectRatioChange = rotation % 180 === 0 ? 
1 : newWidth / newHeight;\n context.translate(canvasWidth / 2, canvasHeight / 2);\n context.rotate(rotation * Math.PI / 180);\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.translate(-canvasWidth / 2, -canvasHeight / 2);\n context.drawImage(this.toCanvasImageSource(), sx, sy, sWidth, sHeight, dx, dy, newWidth, newHeight);\n context.restore();\n }\n _rotateSourceRegion(sx, sy, sWidth, sHeight, rotation) {\n if (rotation === 90) {\n [sx, sy, sWidth, sHeight] = [\n sy,\n this.codedHeight - sx - sWidth,\n sHeight,\n sWidth\n ];\n } else if (rotation === 180) {\n [sx, sy] = [\n this.codedWidth - sx - sWidth,\n this.codedHeight - sy - sHeight\n ];\n } else if (rotation === 270) {\n [sx, sy, sWidth, sHeight] = [\n this.codedWidth - sy - sHeight,\n sx,\n sHeight,\n sWidth\n ];\n }\n return { sx, sy, sWidth, sHeight };\n }\n toCanvasImageSource() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (this._data instanceof Uint8Array) {\n const videoFrame = this.toVideoFrame();\n queueMicrotask(() => videoFrame.close());\n return videoFrame;\n } else {\n return this._data;\n }\n }\n setRotation(newRotation) {\n if (![0, 90, 180, 270].includes(newRotation)) {\n throw new TypeError(\"newRotation must be 0, 90, 180, or 270.\");\n }\n this.rotation = newRotation;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n setDuration(newDuration) {\n if (!Number.isFinite(newDuration) || newDuration < 0) {\n throw new TypeError(\"newDuration must be a non-negative number.\");\n }\n this.duration = newDuration;\n }\n [Symbol.dispose]() {\n this.close();\n }\n}\n\nclass VideoSampleColorSpace {\n constructor(init) {\n this.primaries = init?.primaries ?? null;\n this.transfer = init?.transfer ?? null;\n this.matrix = init?.matrix ?? null;\n this.fullRange = init?.fullRange ?? 
null;\n }\n toJSON() {\n return {\n primaries: this.primaries,\n transfer: this.transfer,\n matrix: this.matrix,\n fullRange: this.fullRange\n };\n }\n}\nvar isVideoFrame = (x) => {\n return typeof VideoFrame !== \"undefined\" && x instanceof VideoFrame;\n};\nvar clampCropRectangle = (crop, outerWidth, outerHeight) => {\n crop.left = Math.min(crop.left, outerWidth);\n crop.top = Math.min(crop.top, outerHeight);\n crop.width = Math.min(crop.width, outerWidth - crop.left);\n crop.height = Math.min(crop.height, outerHeight - crop.top);\n assert(crop.width >= 0);\n assert(crop.height >= 0);\n};\nvar validateCropRectangle = (crop, prefix) => {\n if (!crop || typeof crop !== \"object\") {\n throw new TypeError(prefix + \"crop, when provided, must be an object.\");\n }\n if (!Number.isInteger(crop.left) || crop.left < 0) {\n throw new TypeError(prefix + \"crop.left must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.top) || crop.top < 0) {\n throw new TypeError(prefix + \"crop.top must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.width) || crop.width < 0) {\n throw new TypeError(prefix + \"crop.width must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.height) || crop.height < 0) {\n throw new TypeError(prefix + \"crop.height must be a non-negative integer.\");\n }\n};\nvar validateVideoFrameCopyToOptions = (options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.colorSpace !== undefined && ![\"display-p3\", \"srgb\"].includes(options.colorSpace)) {\n throw new TypeError(\"options.colorSpace, when provided, must be 'display-p3' or 'srgb'.\");\n }\n if (options.format !== undefined && typeof options.format !== \"string\") {\n throw new TypeError(\"options.format, when provided, must be a string.\");\n }\n if (options.layout !== undefined) {\n if (!Array.isArray(options.layout)) {\n throw new TypeError(\"options.layout, when provided, must be 
an array.\");\n }\n for (const plane of options.layout) {\n if (!plane || typeof plane !== \"object\") {\n throw new TypeError(\"Each entry in options.layout must be an object.\");\n }\n if (!Number.isInteger(plane.offset) || plane.offset < 0) {\n throw new TypeError(\"plane.offset must be a non-negative integer.\");\n }\n if (!Number.isInteger(plane.stride) || plane.stride < 0) {\n throw new TypeError(\"plane.stride must be a non-negative integer.\");\n }\n }\n }\n if (options.rect !== undefined) {\n if (!options.rect || typeof options.rect !== \"object\") {\n throw new TypeError(\"options.rect, when provided, must be an object.\");\n }\n if (options.rect.x !== undefined && (!Number.isInteger(options.rect.x) || options.rect.x < 0)) {\n throw new TypeError(\"options.rect.x, when provided, must be a non-negative integer.\");\n }\n if (options.rect.y !== undefined && (!Number.isInteger(options.rect.y) || options.rect.y < 0)) {\n throw new TypeError(\"options.rect.y, when provided, must be a non-negative integer.\");\n }\n if (options.rect.width !== undefined && (!Number.isInteger(options.rect.width) || options.rect.width < 0)) {\n throw new TypeError(\"options.rect.width, when provided, must be a non-negative integer.\");\n }\n if (options.rect.height !== undefined && (!Number.isInteger(options.rect.height) || options.rect.height < 0)) {\n throw new TypeError(\"options.rect.height, when provided, must be a non-negative integer.\");\n }\n }\n};\nvar createDefaultPlaneLayout = (format, codedWidth, codedHeight) => {\n const planes = getPlaneConfigs(format);\n const layouts = [];\n let currentOffset = 0;\n for (const plane of planes) {\n const planeWidth = Math.ceil(codedWidth / plane.widthDivisor);\n const planeHeight = Math.ceil(codedHeight / plane.heightDivisor);\n const stride = planeWidth * plane.sampleBytes;\n const planeSize = stride * planeHeight;\n layouts.push({\n offset: currentOffset,\n stride\n });\n currentOffset += planeSize;\n }\n return layouts;\n};\nvar 
getPlaneConfigs = (format) => {\n const yuv = (yBytes, uvBytes, subX, subY, hasAlpha) => {\n const configs = [\n { sampleBytes: yBytes, widthDivisor: 1, heightDivisor: 1 },\n { sampleBytes: uvBytes, widthDivisor: subX, heightDivisor: subY },\n { sampleBytes: uvBytes, widthDivisor: subX, heightDivisor: subY }\n ];\n if (hasAlpha) {\n configs.push({ sampleBytes: yBytes, widthDivisor: 1, heightDivisor: 1 });\n }\n return configs;\n };\n switch (format) {\n case \"I420\":\n return yuv(1, 1, 2, 2, false);\n case \"I420P10\":\n case \"I420P12\":\n return yuv(2, 2, 2, 2, false);\n case \"I420A\":\n return yuv(1, 1, 2, 2, true);\n case \"I420AP10\":\n case \"I420AP12\":\n return yuv(2, 2, 2, 2, true);\n case \"I422\":\n return yuv(1, 1, 2, 1, false);\n case \"I422P10\":\n case \"I422P12\":\n return yuv(2, 2, 2, 1, false);\n case \"I422A\":\n return yuv(1, 1, 2, 1, true);\n case \"I422AP10\":\n case \"I422AP12\":\n return yuv(2, 2, 2, 1, true);\n case \"I444\":\n return yuv(1, 1, 1, 1, false);\n case \"I444P10\":\n case \"I444P12\":\n return yuv(2, 2, 1, 1, false);\n case \"I444A\":\n return yuv(1, 1, 1, 1, true);\n case \"I444AP10\":\n case \"I444AP12\":\n return yuv(2, 2, 1, 1, true);\n case \"NV12\":\n return [\n { sampleBytes: 1, widthDivisor: 1, heightDivisor: 1 },\n { sampleBytes: 2, widthDivisor: 2, heightDivisor: 2 }\n ];\n case \"RGBA\":\n case \"RGBX\":\n case \"BGRA\":\n case \"BGRX\":\n return [\n { sampleBytes: 4, widthDivisor: 1, heightDivisor: 1 }\n ];\n default:\n assertNever(format);\n assert(false);\n }\n};\nvar AUDIO_SAMPLE_FORMATS = new Set([\"f32\", \"f32-planar\", \"s16\", \"s16-planar\", \"s32\", \"s32-planar\", \"u8\", \"u8-planar\"]);\n\nclass AudioSample {\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n constructor(init) {\n this._closed = false;\n if (isAudioData(init)) {\n if (init.format 
=== null) {\n throw new TypeError(\"AudioData with null format is not supported.\");\n }\n this._data = init;\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = init.numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp / 1e6;\n this.duration = init.numberOfFrames / init.sampleRate;\n } else {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"Invalid AudioDataInit: must be an object.\");\n }\n if (!AUDIO_SAMPLE_FORMATS.has(init.format)) {\n throw new TypeError(\"Invalid AudioDataInit: invalid format.\");\n }\n if (!Number.isFinite(init.sampleRate) || init.sampleRate <= 0) {\n throw new TypeError(\"Invalid AudioDataInit: sampleRate must be > 0.\");\n }\n if (!Number.isInteger(init.numberOfChannels) || init.numberOfChannels === 0) {\n throw new TypeError(\"Invalid AudioDataInit: numberOfChannels must be an integer > 0.\");\n }\n if (!Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n const numberOfFrames = init.data.byteLength / (getBytesPerSample(init.format) * init.numberOfChannels);\n if (!Number.isInteger(numberOfFrames)) {\n throw new TypeError(\"Invalid AudioDataInit: data size is not a multiple of frame size.\");\n }\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp;\n this.duration = numberOfFrames / init.sampleRate;\n let dataBuffer;\n if (init.data instanceof ArrayBuffer) {\n dataBuffer = new Uint8Array(init.data);\n } else if (ArrayBuffer.isView(init.data)) {\n dataBuffer = new Uint8Array(init.data.buffer, init.data.byteOffset, init.data.byteLength);\n } else {\n throw new TypeError(\"Invalid AudioDataInit: data is not a BufferSource.\");\n }\n const expectedSize = this.numberOfFrames * this.numberOfChannels * getBytesPerSample(this.format);\n if 
(dataBuffer.byteLength < expectedSize) {\n throw new TypeError(\"Invalid AudioDataInit: insufficient data size.\");\n }\n this._data = dataBuffer;\n }\n finalizationRegistry?.register(this, { type: \"audio\", data: this._data }, this);\n }\n allocationSize(options) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const destFormat = options.format ?? this.format;\n const frameOffset = options.frameOffset ?? 0;\n if (frameOffset >= this.numberOfFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = options.frameCount !== undefined ? options.frameCount : this.numberOfFrames - frameOffset;\n if (copyFrameCount > this.numberOfFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const bytesPerSample = getBytesPerSample(destFormat);\n const isPlanar = formatIsPlanar(destFormat);\n if (isPlanar && options.planeIndex >= this.numberOfChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!isPlanar && options.planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const elementCount = isPlanar ? 
copyFrameCount : copyFrameCount * this.numberOfChannels;\n return elementCount * bytesPerSample;\n }\n copyTo(destination, options) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const { planeIndex, format, frameCount: optFrameCount, frameOffset: optFrameOffset } = options;\n const srcFormat = this.format;\n const destFormat = format ?? this.format;\n if (!destFormat)\n throw new Error(\"Destination format not determined\");\n const numFrames = this.numberOfFrames;\n const numChannels = this.numberOfChannels;\n const frameOffset = optFrameOffset ?? 0;\n if (frameOffset >= numFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = optFrameCount !== undefined ? 
optFrameCount : numFrames - frameOffset;\n if (copyFrameCount > numFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const destBytesPerSample = getBytesPerSample(destFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar && planeIndex >= numChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!destIsPlanar && planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const destElementCount = destIsPlanar ? copyFrameCount : copyFrameCount * numChannels;\n const requiredSize = destElementCount * destBytesPerSample;\n if (destination.byteLength < requiredSize) {\n throw new RangeError(\"Destination buffer is too small\");\n }\n const destView = toDataView(destination);\n const writeFn = getWriteFunction(destFormat);\n if (isAudioData(this._data)) {\n if (isWebKit() && numChannels > 2 && destFormat !== srcFormat) {\n doAudioDataCopyToWebKitWorkaround(this._data, destView, srcFormat, destFormat, numChannels, planeIndex, frameOffset, copyFrameCount);\n } else {\n this._data.copyTo(destination, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: destFormat\n });\n }\n } else {\n const uint8Data = this._data;\n const srcView = toDataView(uint8Data);\n const readFn = getReadFunction(srcFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n for (let i = 0;i < copyFrameCount; i++) {\n if (destIsPlanar) {\n const destOffset = i * destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (planeIndex * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + planeIndex) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n } else {\n for (let ch = 0;ch < numChannels; ch++) {\n const destIndex = i * numChannels + ch;\n const destOffset = destIndex * 
destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (ch * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + ch) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n }\n }\n }\n }\n }\n clone() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n const sample = new AudioSample(this._data.clone());\n sample.setTimestamp(this.timestamp);\n return sample;\n } else {\n return new AudioSample({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.timestamp,\n data: this._data\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isAudioData(this._data)) {\n this._data.close();\n } else {\n this._data = new Uint8Array(0);\n }\n this._closed = true;\n }\n toAudioData() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n if (this._data.timestamp === this.microsecondTimestamp) {\n return this._data.clone();\n } else {\n if (formatIsPlanar(this.format)) {\n const size = this.allocationSize({ planeIndex: 0, format: this.format });\n const data = new ArrayBuffer(size * this.numberOfChannels);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(new Uint8Array(data, i * size, size), { planeIndex: i, format: this.format });\n }\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n } else {\n const data = new ArrayBuffer(this.allocationSize({ planeIndex: 0, format: this.format }));\n this.copyTo(data, { planeIndex: 0, format: this.format });\n return new AudioData({\n format: this.format,\n 
sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n }\n }\n } else {\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data: this._data.buffer instanceof ArrayBuffer ? this._data.buffer : this._data.slice()\n });\n }\n }\n toAudioBuffer() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const audioBuffer = new AudioBuffer({\n numberOfChannels: this.numberOfChannels,\n length: this.numberOfFrames,\n sampleRate: this.sampleRate\n });\n const dataBytes = new Float32Array(this.allocationSize({ planeIndex: 0, format: \"f32-planar\" }) / 4);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(dataBytes, { planeIndex: i, format: \"f32-planar\" });\n audioBuffer.copyToChannel(dataBytes, i);\n }\n return audioBuffer;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n [Symbol.dispose]() {\n this.close();\n }\n static *_fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n 
audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n yield new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n }\n static fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n const result = [];\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n const audioSample = new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n result.push(audioSample);\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n return result;\n }\n}\nvar getBytesPerSample = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return 1;\n case \"s16\":\n case \"s16-planar\":\n return 2;\n case \"s32\":\n case \"s32-planar\":\n return 4;\n case \"f32\":\n case \"f32-planar\":\n return 4;\n default:\n throw new Error(\"Unknown AudioSampleFormat\");\n }\n};\nvar 
formatIsPlanar = (format) => {\n switch (format) {\n case \"u8-planar\":\n case \"s16-planar\":\n case \"s32-planar\":\n case \"f32-planar\":\n return true;\n default:\n return false;\n }\n};\nvar getReadFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset) => (view.getUint8(offset) - 128) / 128;\n case \"s16\":\n case \"s16-planar\":\n return (view, offset) => view.getInt16(offset, true) / 32768;\n case \"s32\":\n case \"s32-planar\":\n return (view, offset) => view.getInt32(offset, true) / 2147483648;\n case \"f32\":\n case \"f32-planar\":\n return (view, offset) => view.getFloat32(offset, true);\n }\n};\nvar getWriteFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset, value) => view.setUint8(offset, clamp((value + 1) * 127.5, 0, 255));\n case \"s16\":\n case \"s16-planar\":\n return (view, offset, value) => view.setInt16(offset, clamp(Math.round(value * 32767), -32768, 32767), true);\n case \"s32\":\n case \"s32-planar\":\n return (view, offset, value) => view.setInt32(offset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), true);\n case \"f32\":\n case \"f32-planar\":\n return (view, offset, value) => view.setFloat32(offset, value, true);\n }\n};\nvar isAudioData = (x) => {\n return typeof AudioData !== \"undefined\" && x instanceof AudioData;\n};\nvar doAudioDataCopyToWebKitWorkaround = (audioData, destView, srcFormat, destFormat, numChannels, planeIndex, frameOffset, copyFrameCount) => {\n const readFn = getReadFunction(srcFormat);\n const writeFn = getWriteFunction(destFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const destBytesPerSample = getBytesPerSample(destFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar) {\n if (srcIsPlanar) {\n const data = new ArrayBuffer(copyFrameCount * srcBytesPerSample);\n const dataView = toDataView(data);\n 
audioData.copyTo(data, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = i * srcBytesPerSample;\n const destOffset = i * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n } else {\n const data = new ArrayBuffer(copyFrameCount * numChannels * srcBytesPerSample);\n const dataView = toDataView(data);\n audioData.copyTo(data, {\n planeIndex: 0,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = (i * numChannels + planeIndex) * srcBytesPerSample;\n const destOffset = i * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n if (srcIsPlanar) {\n const planeSize = copyFrameCount * srcBytesPerSample;\n const data = new ArrayBuffer(planeSize);\n const dataView = toDataView(data);\n for (let ch = 0;ch < numChannels; ch++) {\n audioData.copyTo(data, {\n planeIndex: ch,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = i * srcBytesPerSample;\n const destOffset = (i * numChannels + ch) * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n const data = new ArrayBuffer(copyFrameCount * numChannels * srcBytesPerSample);\n const dataView = toDataView(data);\n audioData.copyTo(data, {\n planeIndex: 0,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n for (let ch = 0;ch < numChannels; ch++) {\n const idx = i * numChannels + ch;\n const srcOffset = idx * srcBytesPerSample;\n const destOffset = idx * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n }\n }\n};\n\n// 
../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-misc.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar buildIsobmffMimeType = (info) => {\n const base = info.hasVideo ? \"video/\" : info.hasAudio ? \"audio/\" : \"application/\";\n let string = base + (info.isQuickTime ? \"quicktime\" : \"mp4\");\n if (info.codecStrings.length > 0) {\n const uniqueCodecMimeTypes = [...new Set(info.codecStrings)];\n string += `; codecs=\"${uniqueCodecMimeTypes.join(\", \")}\"`;\n }\n return string;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_BOX_HEADER_SIZE = 8;\nvar MAX_BOX_HEADER_SIZE = 16;\n\n// ../../node_modules/mediabunny/dist/modules/src/adts/adts-reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_ADTS_FRAME_HEADER_SIZE = 7;\nvar MAX_ADTS_FRAME_HEADER_SIZE = 9;\nvar readAdtsFrameHeader = (slice) => {\n const startPos = slice.filePos;\n const bytes = readBytes(slice, 9);\n const bitstream = new Bitstream(bytes);\n const syncword = bitstream.readBits(12);\n if (syncword !== 4095) {\n return null;\n }\n bitstream.skipBits(1);\n const layer = bitstream.readBits(2);\n if (layer !== 0) {\n return null;\n }\n const protectionAbsence = bitstream.readBits(1);\n const objectType = bitstream.readBits(2) + 1;\n const samplingFrequencyIndex = bitstream.readBits(4);\n if (samplingFrequencyIndex === 15) {\n return null;\n }\n bitstream.skipBits(1);\n const channelConfiguration = bitstream.readBits(3);\n if (channelConfiguration === 0) {\n throw new Error(\"ADTS frames with channel configuration 0 are not supported.\");\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n const frameLength = bitstream.readBits(13);\n bitstream.skipBits(11);\n const numberOfAacFrames = bitstream.readBits(2) + 1;\n if (numberOfAacFrames !== 1) {\n throw new Error(\"ADTS frames with more than one AAC frame are not supported.\");\n }\n let crcCheck = null;\n if (protectionAbsence === 1) {\n slice.filePos -= 2;\n } else {\n crcCheck = bitstream.readBits(16);\n }\n return {\n objectType,\n samplingFrequencyIndex,\n channelConfiguration,\n frameLength,\n numberOfAacFrames,\n crcCheck,\n startPos\n };\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass FileSlice {\n constructor(bytes, view, offset, start, end) {\n this.bytes = bytes;\n this.view = view;\n this.offset = offset;\n this.start = start;\n this.end = end;\n this.bufferPos = start - offset;\n }\n static tempFromBytes(bytes) {\n return new FileSlice(bytes, toDataView(bytes), 0, 0, bytes.length);\n }\n get length() {\n return this.end - this.start;\n }\n get filePos() {\n return this.offset + this.bufferPos;\n }\n set filePos(value) {\n this.bufferPos = value - this.offset;\n }\n get remainingLength() {\n return Math.max(this.end - this.filePos, 0);\n }\n skip(byteCount) {\n this.bufferPos += byteCount;\n }\n slice(filePos, length = this.end - filePos) {\n if (filePos < this.start || filePos + length > this.end) {\n throw new RangeError(\"Slicing outside of original slice.\");\n }\n return new FileSlice(this.bytes, this.view, this.offset, filePos, filePos + length);\n }\n}\nvar checkIsInRange = (slice, bytesToRead) => {\n if (slice.filePos < slice.start || slice.filePos + bytesToRead > slice.end) {\n throw new RangeError(`Tried reading [${slice.filePos}, ${slice.filePos + bytesToRead}), but slice is` + ` [${slice.start}, ${slice.end}). This is likely an internal error, please report it alongside the file` + ` that caused it.`);\n }\n};\nvar readBytes = (slice, length) => {\n checkIsInRange(slice, length);\n const bytes = slice.bytes.subarray(slice.bufferPos, slice.bufferPos + length);\n slice.bufferPos += length;\n return bytes;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/muxer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Muxer {\n constructor(output) {\n this.mutex = new AsyncMutex;\n this.firstMediaStreamTimestamp = null;\n this.trackTimestampInfo = new WeakMap;\n this.output = output;\n }\n onTrackClose(track) {}\n validateAndNormalizeTimestamp(track, timestampInSeconds, isKeyPacket) {\n timestampInSeconds += track.source._timestampOffset;\n let timestampInfo = this.trackTimestampInfo.get(track);\n if (!timestampInfo) {\n if (!isKeyPacket) {\n throw new Error(\"First packet must be a key packet.\");\n }\n timestampInfo = {\n maxTimestamp: timestampInSeconds,\n maxTimestampBeforeLastKeyPacket: timestampInSeconds\n };\n this.trackTimestampInfo.set(track, timestampInfo);\n }\n if (timestampInSeconds < 0) {\n throw new Error(`Timestamps must be non-negative (got ${timestampInSeconds}s).`);\n }\n if (isKeyPacket) {\n timestampInfo.maxTimestampBeforeLastKeyPacket = timestampInfo.maxTimestamp;\n }\n if (timestampInSeconds < timestampInfo.maxTimestampBeforeLastKeyPacket) {\n throw new Error(`Timestamps cannot be smaller than the largest timestamp of the previous GOP (a GOP begins with a key` + ` packet and ends right before the next key packet). Got ${timestampInSeconds}s, but largest` + ` timestamp is ${timestampInfo.maxTimestampBeforeLastKeyPacket}s.`);\n }\n timestampInfo.maxTimestamp = Math.max(timestampInfo.maxTimestamp, timestampInSeconds);\n return timestampInSeconds;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/subtitles.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar inlineTimestampRegex = /<(?:(\\d{2}):)?(\\d{2}):(\\d{2}).(\\d{3})>/g;\nvar formatSubtitleTimestamp = (timestamp) => {\n const hours = Math.floor(timestamp / (60 * 60 * 1000));\n const minutes = Math.floor(timestamp % (60 * 60 * 1000) / (60 * 1000));\n const seconds = Math.floor(timestamp % (60 * 1000) / 1000);\n const milliseconds = timestamp % 1000;\n return hours.toString().padStart(2, \"0\") + \":\" + minutes.toString().padStart(2, \"0\") + \":\" + seconds.toString().padStart(2, \"0\") + \".\" + milliseconds.toString().padStart(3, \"0\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-boxes.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass IsobmffBoxWriter {\n constructor(writer) {\n this.writer = writer;\n this.helper = new Uint8Array(8);\n this.helperView = new DataView(this.helper.buffer);\n this.offsets = new WeakMap;\n }\n writeU32(value) {\n this.helperView.setUint32(0, value, false);\n this.writer.write(this.helper.subarray(0, 4));\n }\n writeU64(value) {\n this.helperView.setUint32(0, Math.floor(value / 2 ** 32), false);\n this.helperView.setUint32(4, value, false);\n this.writer.write(this.helper.subarray(0, 8));\n }\n writeAscii(text) {\n for (let i = 0;i < text.length; i++) {\n this.helperView.setUint8(i % 8, text.charCodeAt(i));\n if (i % 8 === 7)\n this.writer.write(this.helper);\n }\n if (text.length % 8 !== 0) {\n this.writer.write(this.helper.subarray(0, text.length % 8));\n }\n }\n writeBox(box) {\n this.offsets.set(box, this.writer.getPos());\n if (box.contents && !box.children) {\n this.writeBoxHeader(box, box.size ?? 
box.contents.byteLength + 8);\n this.writer.write(box.contents);\n } else {\n const startPos = this.writer.getPos();\n this.writeBoxHeader(box, 0);\n if (box.contents)\n this.writer.write(box.contents);\n if (box.children) {\n for (const child of box.children)\n if (child)\n this.writeBox(child);\n }\n const endPos = this.writer.getPos();\n const size = box.size ?? endPos - startPos;\n this.writer.seek(startPos);\n this.writeBoxHeader(box, size);\n this.writer.seek(endPos);\n }\n }\n writeBoxHeader(box, size) {\n this.writeU32(box.largeSize ? 1 : size);\n this.writeAscii(box.type);\n if (box.largeSize)\n this.writeU64(size);\n }\n measureBoxHeader(box) {\n return 8 + (box.largeSize ? 8 : 0);\n }\n patchBox(box) {\n const boxOffset = this.offsets.get(box);\n assert(boxOffset !== undefined);\n const endPos = this.writer.getPos();\n this.writer.seek(boxOffset);\n this.writeBox(box);\n this.writer.seek(endPos);\n }\n measureBox(box) {\n if (box.contents && !box.children) {\n const headerSize = this.measureBoxHeader(box);\n return headerSize + box.contents.byteLength;\n } else {\n let result = this.measureBoxHeader(box);\n if (box.contents)\n result += box.contents.byteLength;\n if (box.children) {\n for (const child of box.children)\n if (child)\n result += this.measureBox(child);\n }\n return result;\n }\n }\n}\nvar bytes = /* @__PURE__ */ new Uint8Array(8);\nvar view = /* @__PURE__ */ new DataView(bytes.buffer);\nvar u8 = (value) => {\n return [(value % 256 + 256) % 256];\n};\nvar u16 = (value) => {\n view.setUint16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar i16 = (value) => {\n view.setInt16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar u24 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[1], bytes[2], bytes[3]];\n};\nvar u32 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar i32 = (value) => {\n view.setInt32(0, value, false);\n return [bytes[0], bytes[1], 
bytes[2], bytes[3]];\n};\nvar u64 = (value) => {\n view.setUint32(0, Math.floor(value / 2 ** 32), false);\n view.setUint32(4, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7]];\n};\nvar fixed_8_8 = (value) => {\n view.setInt16(0, 2 ** 8 * value, false);\n return [bytes[0], bytes[1]];\n};\nvar fixed_16_16 = (value) => {\n view.setInt32(0, 2 ** 16 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar fixed_2_30 = (value) => {\n view.setInt32(0, 2 ** 30 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar variableUnsignedInt = (value, byteLength) => {\n const bytes2 = [];\n let remaining = value;\n do {\n let byte = remaining & 127;\n remaining >>= 7;\n if (bytes2.length > 0) {\n byte |= 128;\n }\n bytes2.push(byte);\n if (byteLength !== undefined) {\n byteLength--;\n }\n } while (remaining > 0 || byteLength);\n return bytes2.reverse();\n};\nvar ascii = (text, nullTerminated = false) => {\n const bytes2 = Array(text.length).fill(null).map((_, i) => text.charCodeAt(i));\n if (nullTerminated)\n bytes2.push(0);\n return bytes2;\n};\nvar lastPresentedSample = (samples) => {\n let result = null;\n for (const sample of samples) {\n if (!result || sample.timestamp > result.timestamp) {\n result = sample;\n }\n }\n return result;\n};\nvar rotationMatrix = (rotationInDegrees) => {\n const theta = rotationInDegrees * (Math.PI / 180);\n const cosTheta = Math.round(Math.cos(theta));\n const sinTheta = Math.round(Math.sin(theta));\n return [\n cosTheta,\n sinTheta,\n 0,\n -sinTheta,\n cosTheta,\n 0,\n 0,\n 0,\n 1\n ];\n};\nvar IDENTITY_MATRIX = /* @__PURE__ */ rotationMatrix(0);\nvar matrixToBytes = (matrix) => {\n return [\n fixed_16_16(matrix[0]),\n fixed_16_16(matrix[1]),\n fixed_2_30(matrix[2]),\n fixed_16_16(matrix[3]),\n fixed_16_16(matrix[4]),\n fixed_2_30(matrix[5]),\n fixed_16_16(matrix[6]),\n fixed_16_16(matrix[7]),\n fixed_2_30(matrix[8])\n ];\n};\nvar box = (type, 
contents, children) => ({\n type,\n contents: contents && new Uint8Array(contents.flat(10)),\n children\n});\nvar fullBox = (type, version, flags, contents, children) => box(type, [u8(version), u24(flags), contents ?? []], children);\nvar ftyp = (details) => {\n const minorVersion = 512;\n if (details.isQuickTime) {\n return box(\"ftyp\", [\n ascii(\"qt \"),\n u32(minorVersion),\n ascii(\"qt \")\n ]);\n }\n if (details.fragmented) {\n return box(\"ftyp\", [\n ascii(\"iso5\"),\n u32(minorVersion),\n ascii(\"iso5\"),\n ascii(\"iso6\"),\n ascii(\"mp41\")\n ]);\n }\n return box(\"ftyp\", [\n ascii(\"isom\"),\n u32(minorVersion),\n ascii(\"isom\"),\n details.holdsAvc ? ascii(\"avc1\") : [],\n ascii(\"mp41\")\n ]);\n};\nvar mdat = (reserveLargeSize) => ({ type: \"mdat\", largeSize: reserveLargeSize });\nvar free = (size) => ({ type: \"free\", size });\nvar moov = (muxer) => box(\"moov\", undefined, [\n mvhd(muxer.creationTime, muxer.trackDatas),\n ...muxer.trackDatas.map((x) => trak(x, muxer.creationTime)),\n muxer.isFragmented ? mvex(muxer.trackDatas) : null,\n udta(muxer)\n]);\nvar mvhd = (creationTime, trackDatas) => {\n const duration = intoTimescale(Math.max(0, ...trackDatas.filter((x) => x.samples.length > 0).map((x) => {\n const lastSample = lastPresentedSample(x.samples);\n return lastSample.timestamp + lastSample.duration;\n })), GLOBAL_TIMESCALE);\n const nextTrackId = Math.max(0, ...trackDatas.map((x) => x.track.id)) + 1;\n const needsU64 = !isU32(creationTime) || !isU32(duration);\n const u32OrU64 = needsU64 ? 
u64 : u32;\n return fullBox(\"mvhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(GLOBAL_TIMESCALE),\n u32OrU64(duration),\n fixed_16_16(1),\n fixed_8_8(1),\n Array(10).fill(0),\n matrixToBytes(IDENTITY_MATRIX),\n Array(24).fill(0),\n u32(nextTrackId)\n ]);\n};\nvar trak = (trackData, creationTime) => {\n const trackMetadata = getTrackMetadata(trackData);\n return box(\"trak\", undefined, [\n tkhd(trackData, creationTime),\n mdia(trackData, creationTime),\n trackMetadata.name !== undefined ? box(\"udta\", undefined, [\n box(\"name\", [\n ...textEncoder.encode(trackMetadata.name)\n ])\n ]) : null\n ]);\n};\nvar tkhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const durationInGlobalTimescale = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, GLOBAL_TIMESCALE);\n const needsU64 = !isU32(creationTime) || !isU32(durationInGlobalTimescale);\n const u32OrU64 = needsU64 ? u64 : u32;\n let matrix;\n if (trackData.type === \"video\") {\n const rotation = trackData.track.metadata.rotation;\n matrix = rotationMatrix(rotation ?? 0);\n } else {\n matrix = IDENTITY_MATRIX;\n }\n let flags = 2;\n if (trackData.track.metadata.disposition?.default !== false) {\n flags |= 1;\n }\n return fullBox(\"tkhd\", +needsU64, flags, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.track.id),\n u32(0),\n u32OrU64(durationInGlobalTimescale),\n Array(8).fill(0),\n u16(0),\n u16(trackData.track.id),\n fixed_8_8(trackData.type === \"audio\" ? 1 : 0),\n u16(0),\n matrixToBytes(matrix),\n fixed_16_16(trackData.type === \"video\" ? trackData.info.width : 0),\n fixed_16_16(trackData.type === \"video\" ? 
trackData.info.height : 0)\n ]);\n};\nvar mdia = (trackData, creationTime) => box(\"mdia\", undefined, [\n mdhd(trackData, creationTime),\n hdlr(true, TRACK_TYPE_TO_COMPONENT_SUBTYPE[trackData.type], TRACK_TYPE_TO_HANDLER_NAME[trackData.type]),\n minf(trackData)\n]);\nvar mdhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const localDuration = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, trackData.timescale);\n const needsU64 = !isU32(creationTime) || !isU32(localDuration);\n const u32OrU64 = needsU64 ? u64 : u32;\n return fullBox(\"mdhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.timescale),\n u32OrU64(localDuration),\n u16(getLanguageCodeInt(trackData.track.metadata.languageCode ?? UNDETERMINED_LANGUAGE)),\n u16(0)\n ]);\n};\nvar TRACK_TYPE_TO_COMPONENT_SUBTYPE = {\n video: \"vide\",\n audio: \"soun\",\n subtitle: \"text\"\n};\nvar TRACK_TYPE_TO_HANDLER_NAME = {\n video: \"MediabunnyVideoHandler\",\n audio: \"MediabunnySoundHandler\",\n subtitle: \"MediabunnyTextHandler\"\n};\nvar hdlr = (hasComponentType, handlerType, name, manufacturer = \"\\x00\\x00\\x00\\x00\") => fullBox(\"hdlr\", 0, 0, [\n hasComponentType ? 
ascii(\"mhlr\") : u32(0),\n ascii(handlerType),\n ascii(manufacturer),\n u32(0),\n u32(0),\n ascii(name, true)\n]);\nvar minf = (trackData) => box(\"minf\", undefined, [\n TRACK_TYPE_TO_HEADER_BOX[trackData.type](),\n dinf(),\n stbl(trackData)\n]);\nvar vmhd = () => fullBox(\"vmhd\", 0, 1, [\n u16(0),\n u16(0),\n u16(0),\n u16(0)\n]);\nvar smhd = () => fullBox(\"smhd\", 0, 0, [\n u16(0),\n u16(0)\n]);\nvar nmhd = () => fullBox(\"nmhd\", 0, 0);\nvar TRACK_TYPE_TO_HEADER_BOX = {\n video: vmhd,\n audio: smhd,\n subtitle: nmhd\n};\nvar dinf = () => box(\"dinf\", undefined, [\n dref()\n]);\nvar dref = () => fullBox(\"dref\", 0, 0, [\n u32(1)\n], [\n url()\n]);\nvar url = () => fullBox(\"url \", 0, 1);\nvar stbl = (trackData) => {\n const needsCtts = trackData.compositionTimeOffsetTable.length > 1 || trackData.compositionTimeOffsetTable.some((x) => x.sampleCompositionTimeOffset !== 0);\n return box(\"stbl\", undefined, [\n stsd(trackData),\n stts(trackData),\n needsCtts ? ctts(trackData) : null,\n needsCtts ? 
cslg(trackData) : null,\n stsc(trackData),\n stsz(trackData),\n stco(trackData),\n stss(trackData)\n ]);\n};\nvar stsd = (trackData) => {\n let sampleDescription;\n if (trackData.type === \"video\") {\n sampleDescription = videoSampleDescription(videoCodecToBoxName(trackData.track.source._codec, trackData.info.decoderConfig.codec), trackData);\n } else if (trackData.type === \"audio\") {\n const boxName = audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime);\n assert(boxName);\n sampleDescription = soundSampleDescription(boxName, trackData);\n } else if (trackData.type === \"subtitle\") {\n sampleDescription = subtitleSampleDescription(SUBTITLE_CODEC_TO_BOX_NAME[trackData.track.source._codec], trackData);\n }\n assert(sampleDescription);\n return fullBox(\"stsd\", 0, 0, [\n u32(1)\n ], [\n sampleDescription\n ]);\n};\nvar videoSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1),\n u16(0),\n u16(0),\n Array(12).fill(0),\n u16(trackData.info.width),\n u16(trackData.info.height),\n u32(4718592),\n u32(4718592),\n u32(0),\n u16(1),\n Array(32).fill(0),\n u16(24),\n i16(65535)\n], [\n VIDEO_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData),\n colorSpaceIsComplete(trackData.info.decoderConfig.colorSpace) ? colr(trackData) : null\n]);\nvar colr = (trackData) => box(\"colr\", [\n ascii(\"nclx\"),\n u16(COLOR_PRIMARIES_MAP[trackData.info.decoderConfig.colorSpace.primaries]),\n u16(TRANSFER_CHARACTERISTICS_MAP[trackData.info.decoderConfig.colorSpace.transfer]),\n u16(MATRIX_COEFFICIENTS_MAP[trackData.info.decoderConfig.colorSpace.matrix]),\n u8((trackData.info.decoderConfig.colorSpace.fullRange ? 
1 : 0) << 7)\n]);\nvar avcC = (trackData) => trackData.info.decoderConfig && box(\"avcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar hvcC = (trackData) => trackData.info.decoderConfig && box(\"hvcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar vpcC = (trackData) => {\n if (!trackData.info.decoderConfig) {\n return null;\n }\n const decoderConfig = trackData.info.decoderConfig;\n const parts = decoderConfig.codec.split(\".\");\n const profile = Number(parts[1]);\n const level = Number(parts[2]);\n const bitDepth = Number(parts[3]);\n const chromaSubsampling = parts[4] ? Number(parts[4]) : 1;\n const videoFullRangeFlag = parts[8] ? Number(parts[8]) : Number(decoderConfig.colorSpace?.fullRange ?? 0);\n const thirdByte = (bitDepth << 4) + (chromaSubsampling << 1) + videoFullRangeFlag;\n const colourPrimaries = parts[5] ? Number(parts[5]) : decoderConfig.colorSpace?.primaries ? COLOR_PRIMARIES_MAP[decoderConfig.colorSpace.primaries] : 2;\n const transferCharacteristics = parts[6] ? Number(parts[6]) : decoderConfig.colorSpace?.transfer ? TRANSFER_CHARACTERISTICS_MAP[decoderConfig.colorSpace.transfer] : 2;\n const matrixCoefficients = parts[7] ? Number(parts[7]) : decoderConfig.colorSpace?.matrix ? 
MATRIX_COEFFICIENTS_MAP[decoderConfig.colorSpace.matrix] : 2;\n return fullBox(\"vpcC\", 1, 0, [\n u8(profile),\n u8(level),\n u8(thirdByte),\n u8(colourPrimaries),\n u8(transferCharacteristics),\n u8(matrixCoefficients),\n u16(0)\n ]);\n};\nvar av1C = (trackData) => {\n return box(\"av1C\", generateAv1CodecConfigurationFromCodecString(trackData.info.decoderConfig.codec));\n};\nvar soundSampleDescription = (compressionType, trackData) => {\n let version = 0;\n let contents;\n let sampleSizeInBits = 16;\n if (PCM_AUDIO_CODECS.includes(trackData.track.source._codec)) {\n const codec = trackData.track.source._codec;\n const { sampleSize } = parsePcmCodec(codec);\n sampleSizeInBits = 8 * sampleSize;\n if (sampleSizeInBits > 16) {\n version = 1;\n }\n }\n if (version === 0) {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(sampleSizeInBits),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0)\n ];\n } else {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(Math.min(sampleSizeInBits, 16)),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0),\n u32(1),\n u32(sampleSizeInBits / 8),\n u32(trackData.info.numberOfChannels * sampleSizeInBits / 8),\n u32(2)\n ];\n }\n return box(compressionType, contents, [\n audioCodecToConfigurationBox(trackData.track.source._codec, trackData.muxer.isQuickTime)?.(trackData) ?? 
null\n ]);\n};\nvar esds = (trackData) => {\n let objectTypeIndication;\n switch (trackData.track.source._codec) {\n case \"aac\":\n {\n objectTypeIndication = 64;\n }\n ;\n break;\n case \"mp3\":\n {\n objectTypeIndication = 107;\n }\n ;\n break;\n case \"vorbis\":\n {\n objectTypeIndication = 221;\n }\n ;\n break;\n default:\n throw new Error(`Unhandled audio codec: ${trackData.track.source._codec}`);\n }\n let bytes2 = [\n ...u8(objectTypeIndication),\n ...u8(21),\n ...u24(0),\n ...u32(0),\n ...u32(0)\n ];\n if (trackData.info.decoderConfig.description) {\n const description = toUint8Array(trackData.info.decoderConfig.description);\n bytes2 = [\n ...bytes2,\n ...u8(5),\n ...variableUnsignedInt(description.byteLength),\n ...description\n ];\n }\n bytes2 = [\n ...u16(1),\n ...u8(0),\n ...u8(4),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2,\n ...u8(6),\n ...u8(1),\n ...u8(2)\n ];\n bytes2 = [\n ...u8(3),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2\n ];\n return fullBox(\"esds\", 0, 0, bytes2);\n};\nvar wave = (trackData) => {\n return box(\"wave\", undefined, [\n frma(trackData),\n enda(trackData),\n box(\"\\x00\\x00\\x00\\x00\")\n ]);\n};\nvar frma = (trackData) => {\n return box(\"frma\", [\n ascii(audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime))\n ]);\n};\nvar enda = (trackData) => {\n const { littleEndian } = parsePcmCodec(trackData.track.source._codec);\n return box(\"enda\", [\n u16(+littleEndian)\n ]);\n};\nvar dOps = (trackData) => {\n let outputChannelCount = trackData.info.numberOfChannels;\n let preSkip = 3840;\n let inputSampleRate = trackData.info.sampleRate;\n let outputGain = 0;\n let channelMappingFamily = 0;\n let channelMappingTable = new Uint8Array(0);\n const description = trackData.info.decoderConfig?.description;\n if (description) {\n assert(description.byteLength >= 18);\n const bytes2 = toUint8Array(description);\n const header = parseOpusIdentificationHeader(bytes2);\n outputChannelCount = 
header.outputChannelCount;\n preSkip = header.preSkip;\n inputSampleRate = header.inputSampleRate;\n outputGain = header.outputGain;\n channelMappingFamily = header.channelMappingFamily;\n if (header.channelMappingTable) {\n channelMappingTable = header.channelMappingTable;\n }\n }\n return box(\"dOps\", [\n u8(0),\n u8(outputChannelCount),\n u16(preSkip),\n u32(inputSampleRate),\n i16(outputGain),\n u8(channelMappingFamily),\n ...channelMappingTable\n ]);\n};\nvar dfLa = (trackData) => {\n const description = trackData.info.decoderConfig?.description;\n assert(description);\n const bytes2 = toUint8Array(description);\n return fullBox(\"dfLa\", 0, 0, [\n ...bytes2.subarray(4)\n ]);\n};\nvar pcmC = (trackData) => {\n const { littleEndian, sampleSize } = parsePcmCodec(trackData.track.source._codec);\n const formatFlags = +littleEndian;\n return fullBox(\"pcmC\", 0, 0, [\n u8(formatFlags),\n u8(8 * sampleSize)\n ]);\n};\nvar subtitleSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1)\n], [\n SUBTITLE_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData)\n]);\nvar vttC = (trackData) => box(\"vttC\", [\n ...textEncoder.encode(trackData.info.config.description)\n]);\nvar stts = (trackData) => {\n return fullBox(\"stts\", 0, 0, [\n u32(trackData.timeToSampleTable.length),\n trackData.timeToSampleTable.map((x) => [\n u32(x.sampleCount),\n u32(x.sampleDelta)\n ])\n ]);\n};\nvar stss = (trackData) => {\n if (trackData.samples.every((x) => x.type === \"key\"))\n return null;\n const keySamples = [...trackData.samples.entries()].filter(([, sample]) => sample.type === \"key\");\n return fullBox(\"stss\", 0, 0, [\n u32(keySamples.length),\n keySamples.map(([index]) => u32(index + 1))\n ]);\n};\nvar stsc = (trackData) => {\n return fullBox(\"stsc\", 0, 0, [\n u32(trackData.compactlyCodedChunkTable.length),\n trackData.compactlyCodedChunkTable.map((x) => [\n u32(x.firstChunk),\n u32(x.samplesPerChunk),\n u32(1)\n 
])\n ]);\n};\nvar stsz = (trackData) => {\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n const { sampleSize } = parsePcmCodec(trackData.track.source._codec);\n return fullBox(\"stsz\", 0, 0, [\n u32(sampleSize * trackData.info.numberOfChannels),\n u32(trackData.samples.reduce((acc, x) => acc + intoTimescale(x.duration, trackData.timescale), 0))\n ]);\n }\n return fullBox(\"stsz\", 0, 0, [\n u32(0),\n u32(trackData.samples.length),\n trackData.samples.map((x) => u32(x.size))\n ]);\n};\nvar stco = (trackData) => {\n if (trackData.finalizedChunks.length > 0 && last(trackData.finalizedChunks).offset >= 2 ** 32) {\n return fullBox(\"co64\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u64(x.offset))\n ]);\n }\n return fullBox(\"stco\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u32(x.offset))\n ]);\n};\nvar ctts = (trackData) => {\n return fullBox(\"ctts\", 1, 0, [\n u32(trackData.compositionTimeOffsetTable.length),\n trackData.compositionTimeOffsetTable.map((x) => [\n u32(x.sampleCount),\n i32(x.sampleCompositionTimeOffset)\n ])\n ]);\n};\nvar cslg = (trackData) => {\n let leastDecodeToDisplayDelta = Infinity;\n let greatestDecodeToDisplayDelta = -Infinity;\n let compositionStartTime = Infinity;\n let compositionEndTime = -Infinity;\n assert(trackData.compositionTimeOffsetTable.length > 0);\n assert(trackData.samples.length > 0);\n for (let i = 0;i < trackData.compositionTimeOffsetTable.length; i++) {\n const entry = trackData.compositionTimeOffsetTable[i];\n leastDecodeToDisplayDelta = Math.min(leastDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n greatestDecodeToDisplayDelta = Math.max(greatestDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n }\n for (let i = 0;i < trackData.samples.length; i++) {\n const sample = trackData.samples[i];\n compositionStartTime = Math.min(compositionStartTime, intoTimescale(sample.timestamp, 
trackData.timescale));\n compositionEndTime = Math.max(compositionEndTime, intoTimescale(sample.timestamp + sample.duration, trackData.timescale));\n }\n const compositionToDtsShift = Math.max(-leastDecodeToDisplayDelta, 0);\n if (compositionEndTime >= 2 ** 31) {\n return null;\n }\n return fullBox(\"cslg\", 0, 0, [\n i32(compositionToDtsShift),\n i32(leastDecodeToDisplayDelta),\n i32(greatestDecodeToDisplayDelta),\n i32(compositionStartTime),\n i32(compositionEndTime)\n ]);\n};\nvar mvex = (trackDatas) => {\n return box(\"mvex\", undefined, trackDatas.map(trex));\n};\nvar trex = (trackData) => {\n return fullBox(\"trex\", 0, 0, [\n u32(trackData.track.id),\n u32(1),\n u32(0),\n u32(0),\n u32(0)\n ]);\n};\nvar moof = (sequenceNumber, trackDatas) => {\n return box(\"moof\", undefined, [\n mfhd(sequenceNumber),\n ...trackDatas.map(traf)\n ]);\n};\nvar mfhd = (sequenceNumber) => {\n return fullBox(\"mfhd\", 0, 0, [\n u32(sequenceNumber)\n ]);\n};\nvar fragmentSampleFlags = (sample) => {\n let byte1 = 0;\n let byte2 = 0;\n const byte3 = 0;\n const byte4 = 0;\n const sampleIsDifferenceSample = sample.type === \"delta\";\n byte2 |= +sampleIsDifferenceSample;\n if (sampleIsDifferenceSample) {\n byte1 |= 1;\n } else {\n byte1 |= 2;\n }\n return byte1 << 24 | byte2 << 16 | byte3 << 8 | byte4;\n};\nvar traf = (trackData) => {\n return box(\"traf\", undefined, [\n tfhd(trackData),\n tfdt(trackData),\n trun(trackData)\n ]);\n};\nvar tfhd = (trackData) => {\n assert(trackData.currentChunk);\n let tfFlags = 0;\n tfFlags |= 8;\n tfFlags |= 16;\n tfFlags |= 32;\n tfFlags |= 131072;\n const referenceSample = trackData.currentChunk.samples[1] ?? 
trackData.currentChunk.samples[0];\n const referenceSampleInfo = {\n duration: referenceSample.timescaleUnitsToNextSample,\n size: referenceSample.size,\n flags: fragmentSampleFlags(referenceSample)\n };\n return fullBox(\"tfhd\", 0, tfFlags, [\n u32(trackData.track.id),\n u32(referenceSampleInfo.duration),\n u32(referenceSampleInfo.size),\n u32(referenceSampleInfo.flags)\n ]);\n};\nvar tfdt = (trackData) => {\n assert(trackData.currentChunk);\n return fullBox(\"tfdt\", 1, 0, [\n u64(intoTimescale(trackData.currentChunk.startTimestamp, trackData.timescale))\n ]);\n};\nvar trun = (trackData) => {\n assert(trackData.currentChunk);\n const allSampleDurations = trackData.currentChunk.samples.map((x) => x.timescaleUnitsToNextSample);\n const allSampleSizes = trackData.currentChunk.samples.map((x) => x.size);\n const allSampleFlags = trackData.currentChunk.samples.map(fragmentSampleFlags);\n const allSampleCompositionTimeOffsets = trackData.currentChunk.samples.map((x) => intoTimescale(x.timestamp - x.decodeTimestamp, trackData.timescale));\n const uniqueSampleDurations = new Set(allSampleDurations);\n const uniqueSampleSizes = new Set(allSampleSizes);\n const uniqueSampleFlags = new Set(allSampleFlags);\n const uniqueSampleCompositionTimeOffsets = new Set(allSampleCompositionTimeOffsets);\n const firstSampleFlagsPresent = uniqueSampleFlags.size === 2 && allSampleFlags[0] !== allSampleFlags[1];\n const sampleDurationPresent = uniqueSampleDurations.size > 1;\n const sampleSizePresent = uniqueSampleSizes.size > 1;\n const sampleFlagsPresent = !firstSampleFlagsPresent && uniqueSampleFlags.size > 1;\n const sampleCompositionTimeOffsetsPresent = uniqueSampleCompositionTimeOffsets.size > 1 || [...uniqueSampleCompositionTimeOffsets].some((x) => x !== 0);\n let flags = 0;\n flags |= 1;\n flags |= 4 * +firstSampleFlagsPresent;\n flags |= 256 * +sampleDurationPresent;\n flags |= 512 * +sampleSizePresent;\n flags |= 1024 * +sampleFlagsPresent;\n flags |= 2048 * 
+sampleCompositionTimeOffsetsPresent;\n return fullBox(\"trun\", 1, flags, [\n u32(trackData.currentChunk.samples.length),\n u32(trackData.currentChunk.offset - trackData.currentChunk.moofOffset || 0),\n firstSampleFlagsPresent ? u32(allSampleFlags[0]) : [],\n trackData.currentChunk.samples.map((_, i) => [\n sampleDurationPresent ? u32(allSampleDurations[i]) : [],\n sampleSizePresent ? u32(allSampleSizes[i]) : [],\n sampleFlagsPresent ? u32(allSampleFlags[i]) : [],\n sampleCompositionTimeOffsetsPresent ? i32(allSampleCompositionTimeOffsets[i]) : []\n ])\n ]);\n};\nvar mfra = (trackDatas) => {\n return box(\"mfra\", undefined, [\n ...trackDatas.map(tfra),\n mfro()\n ]);\n};\nvar tfra = (trackData, trackIndex) => {\n const version = 1;\n return fullBox(\"tfra\", version, 0, [\n u32(trackData.track.id),\n u32(63),\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((chunk) => [\n u64(intoTimescale(chunk.samples[0].timestamp, trackData.timescale)),\n u64(chunk.moofOffset),\n u32(trackIndex + 1),\n u32(1),\n u32(1)\n ])\n ]);\n};\nvar mfro = () => {\n return fullBox(\"mfro\", 0, 0, [\n u32(0)\n ]);\n};\nvar vtte = () => box(\"vtte\");\nvar vttc = (payload, timestamp, identifier, settings, sourceId) => box(\"vttc\", undefined, [\n sourceId !== null ? box(\"vsid\", [i32(sourceId)]) : null,\n identifier !== null ? box(\"iden\", [...textEncoder.encode(identifier)]) : null,\n timestamp !== null ? box(\"ctim\", [...textEncoder.encode(formatSubtitleTimestamp(timestamp))]) : null,\n settings !== null ? box(\"sttg\", [...textEncoder.encode(settings)]) : null,\n box(\"payl\", [...textEncoder.encode(payload)])\n]);\nvar vtta = (notes) => box(\"vtta\", [...textEncoder.encode(notes)]);\nvar udta = (muxer) => {\n const boxes = [];\n const metadataFormat = muxer.format._options.metadataFormat ?? 
\"auto\";\n const metadataTags = muxer.output._metadataTags;\n if (metadataFormat === \"mdir\" || metadataFormat === \"auto\" && !muxer.isQuickTime) {\n const metaBox = metaMdir(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"mdta\") {\n const metaBox = metaMdta(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"udta\" || metadataFormat === \"auto\" && muxer.isQuickTime) {\n addQuickTimeMetadataTagBoxes(boxes, muxer.output._metadataTags);\n }\n if (boxes.length === 0) {\n return null;\n }\n return box(\"udta\", undefined, boxes);\n};\nvar addQuickTimeMetadataTagBoxes = (boxes, tags) => {\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9nam\", value));\n }\n ;\n break;\n case \"description\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9des\", value));\n }\n ;\n break;\n case \"artist\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9ART\", value));\n }\n ;\n break;\n case \"album\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9alb\", value));\n }\n ;\n break;\n case \"albumArtist\":\n {\n boxes.push(metadataTagStringBoxShort(\"albr\", value));\n }\n ;\n break;\n case \"genre\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9gen\", value));\n }\n ;\n break;\n case \"date\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9day\", value.toISOString().slice(0, 10)));\n }\n ;\n break;\n case \"comment\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9cmt\", value));\n }\n ;\n break;\n case \"lyrics\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9lyr\", value));\n }\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n case \"discNumber\":\n case \"discsTotal\":\n case \"trackNumber\":\n case \"tracksTotal\":\n case \"images\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == 
null || key.length !== 4 || boxes.some((x) => x.type === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n boxes.push(metadataTagStringBoxShort(key, value));\n } else if (value instanceof Uint8Array) {\n boxes.push(box(key, Array.from(value)));\n }\n }\n }\n};\nvar metadataTagStringBoxShort = (name, value) => {\n const encoded = textEncoder.encode(value);\n return box(name, [\n u16(encoded.length),\n u16(getLanguageCodeInt(\"und\")),\n Array.from(encoded)\n ]);\n};\nvar DATA_BOX_MIME_TYPE_MAP = {\n \"image/jpeg\": 13,\n \"image/png\": 14,\n \"image/bmp\": 27\n};\nvar generateMetadataPairs = (tags, isMdta) => {\n const pairs = [];\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n pairs.push({ key: isMdta ? \"title\" : \"\u00A9nam\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"description\":\n {\n pairs.push({ key: isMdta ? \"description\" : \"\u00A9des\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"artist\":\n {\n pairs.push({ key: isMdta ? \"artist\" : \"\u00A9ART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"album\":\n {\n pairs.push({ key: isMdta ? \"album\" : \"\u00A9alb\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"albumArtist\":\n {\n pairs.push({ key: isMdta ? \"album_artist\" : \"aART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"comment\":\n {\n pairs.push({ key: isMdta ? \"comment\" : \"\u00A9cmt\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"genre\":\n {\n pairs.push({ key: isMdta ? \"genre\" : \"\u00A9gen\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"lyrics\":\n {\n pairs.push({ key: isMdta ? \"lyrics\" : \"\u00A9lyr\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"date\":\n {\n pairs.push({\n key: isMdta ? 
\"date\" : \"\u00A9day\",\n value: dataStringBoxLong(value.toISOString().slice(0, 10))\n });\n }\n ;\n break;\n case \"images\":\n {\n for (const image of value) {\n if (image.kind !== \"coverFront\") {\n continue;\n }\n pairs.push({ key: \"covr\", value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[image.mimeType] ?? 0),\n u32(0),\n Array.from(image.data)\n ]) });\n }\n }\n ;\n break;\n case \"trackNumber\":\n {\n if (isMdta) {\n const string = tags.tracksTotal !== undefined ? `${value}/${tags.tracksTotal}` : value.toString();\n pairs.push({ key: \"track\", value: dataStringBoxLong(string) });\n } else {\n pairs.push({ key: \"trkn\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.tracksTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"discNumber\":\n {\n if (!isMdta) {\n pairs.push({ key: \"disc\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.discsTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"tracksTotal\":\n case \"discsTotal\":\n {}\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == null || !isMdta && key.length !== 4 || pairs.some((x) => x.key === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n pairs.push({ key, value: dataStringBoxLong(value) });\n } else if (value instanceof Uint8Array) {\n pairs.push({ key, value: box(\"data\", [\n u32(0),\n u32(0),\n Array.from(value)\n ]) });\n } else if (value instanceof RichImageData) {\n pairs.push({ key, value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[value.mimeType] ?? 
0),\n u32(0),\n Array.from(value.data)\n ]) });\n }\n }\n }\n return pairs;\n};\nvar metaMdir = (tags) => {\n const pairs = generateMetadataPairs(tags, false);\n if (pairs.length === 0) {\n return null;\n }\n return fullBox(\"meta\", 0, 0, undefined, [\n hdlr(false, \"mdir\", \"\", \"appl\"),\n box(\"ilst\", undefined, pairs.map((pair) => box(pair.key, undefined, [pair.value])))\n ]);\n};\nvar metaMdta = (tags) => {\n const pairs = generateMetadataPairs(tags, true);\n if (pairs.length === 0) {\n return null;\n }\n return box(\"meta\", undefined, [\n hdlr(false, \"mdta\", \"\"),\n fullBox(\"keys\", 0, 0, [\n u32(pairs.length)\n ], pairs.map((pair) => box(\"mdta\", [\n ...textEncoder.encode(pair.key)\n ]))),\n box(\"ilst\", undefined, pairs.map((pair, i) => {\n const boxName = String.fromCharCode(...u32(i + 1));\n return box(boxName, undefined, [pair.value]);\n }))\n ]);\n};\nvar dataStringBoxLong = (value) => {\n return box(\"data\", [\n u32(1),\n u32(0),\n ...textEncoder.encode(value)\n ]);\n};\nvar videoCodecToBoxName = (codec, fullCodecString) => {\n switch (codec) {\n case \"avc\":\n return fullCodecString.startsWith(\"avc3\") ? 
\"avc3\" : \"avc1\";\n case \"hevc\":\n return \"hvc1\";\n case \"vp8\":\n return \"vp08\";\n case \"vp9\":\n return \"vp09\";\n case \"av1\":\n return \"av01\";\n }\n};\nvar VIDEO_CODEC_TO_CONFIGURATION_BOX = {\n avc: avcC,\n hevc: hvcC,\n vp8: vpcC,\n vp9: vpcC,\n av1: av1C\n};\nvar audioCodecToBoxName = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return \"mp4a\";\n case \"mp3\":\n return \"mp4a\";\n case \"opus\":\n return \"Opus\";\n case \"vorbis\":\n return \"mp4a\";\n case \"flac\":\n return \"fLaC\";\n case \"ulaw\":\n return \"ulaw\";\n case \"alaw\":\n return \"alaw\";\n case \"pcm-u8\":\n return \"raw \";\n case \"pcm-s8\":\n return \"sowt\";\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s16\":\n return \"sowt\";\n case \"pcm-s16be\":\n return \"twos\";\n case \"pcm-s24\":\n return \"in24\";\n case \"pcm-s24be\":\n return \"in24\";\n case \"pcm-s32\":\n return \"in32\";\n case \"pcm-s32be\":\n return \"in32\";\n case \"pcm-f32\":\n return \"fl32\";\n case \"pcm-f32be\":\n return \"fl32\";\n case \"pcm-f64\":\n return \"fl64\";\n case \"pcm-f64be\":\n return \"fl64\";\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return \"ipcm\";\n case \"pcm-s16be\":\n return \"ipcm\";\n case \"pcm-s24\":\n return \"ipcm\";\n case \"pcm-s24be\":\n return \"ipcm\";\n case \"pcm-s32\":\n return \"ipcm\";\n case \"pcm-s32be\":\n return \"ipcm\";\n case \"pcm-f32\":\n return \"fpcm\";\n case \"pcm-f32be\":\n return \"fpcm\";\n case \"pcm-f64\":\n return \"fpcm\";\n case \"pcm-f64be\":\n return \"fpcm\";\n }\n }\n};\nvar audioCodecToConfigurationBox = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return esds;\n case \"mp3\":\n return esds;\n case \"opus\":\n return dOps;\n case \"vorbis\":\n return esds;\n case \"flac\":\n return dfLa;\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s24\":\n return wave;\n case \"pcm-s24be\":\n return wave;\n case \"pcm-s32\":\n return wave;\n case \"pcm-s32be\":\n return 
wave;\n case \"pcm-f32\":\n return wave;\n case \"pcm-f32be\":\n return wave;\n case \"pcm-f64\":\n return wave;\n case \"pcm-f64be\":\n return wave;\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return pcmC;\n case \"pcm-s16be\":\n return pcmC;\n case \"pcm-s24\":\n return pcmC;\n case \"pcm-s24be\":\n return pcmC;\n case \"pcm-s32\":\n return pcmC;\n case \"pcm-s32be\":\n return pcmC;\n case \"pcm-f32\":\n return pcmC;\n case \"pcm-f32be\":\n return pcmC;\n case \"pcm-f64\":\n return pcmC;\n case \"pcm-f64be\":\n return pcmC;\n }\n }\n return null;\n};\nvar SUBTITLE_CODEC_TO_BOX_NAME = {\n webvtt: \"wvtt\"\n};\nvar SUBTITLE_CODEC_TO_CONFIGURATION_BOX = {\n webvtt: vttC\n};\nvar getLanguageCodeInt = (code) => {\n assert(code.length === 3);\n let language = 0;\n for (let i = 0;i < 3; i++) {\n language <<= 5;\n language += code.charCodeAt(i) - 96;\n }\n return language;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/writer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Writer {\n constructor() {\n this.ensureMonotonicity = false;\n this.trackedWrites = null;\n this.trackedStart = -1;\n this.trackedEnd = -1;\n }\n start() {}\n maybeTrackWrites(data) {\n if (!this.trackedWrites) {\n return;\n }\n let pos = this.getPos();\n if (pos < this.trackedStart) {\n if (pos + data.byteLength <= this.trackedStart) {\n return;\n }\n data = data.subarray(this.trackedStart - pos);\n pos = 0;\n }\n const neededSize = pos + data.byteLength - this.trackedStart;\n let newLength = this.trackedWrites.byteLength;\n while (newLength < neededSize) {\n newLength *= 2;\n }\n if (newLength !== this.trackedWrites.byteLength) {\n const copy = new Uint8Array(newLength);\n copy.set(this.trackedWrites, 0);\n this.trackedWrites = copy;\n }\n this.trackedWrites.set(data, pos - this.trackedStart);\n this.trackedEnd = Math.max(this.trackedEnd, pos + data.byteLength);\n }\n startTrackingWrites() {\n this.trackedWrites = new Uint8Array(2 ** 10);\n this.trackedStart = this.getPos();\n this.trackedEnd = this.trackedStart;\n }\n stopTrackingWrites() {\n if (!this.trackedWrites) {\n throw new Error(\"Internal error: Can't get tracked writes since nothing was tracked.\");\n }\n const slice = this.trackedWrites.subarray(0, this.trackedEnd - this.trackedStart);\n const result = {\n data: slice,\n start: this.trackedStart,\n end: this.trackedEnd\n };\n this.trackedWrites = null;\n return result;\n }\n}\nvar ARRAY_BUFFER_INITIAL_SIZE = 2 ** 16;\nvar ARRAY_BUFFER_MAX_SIZE = 2 ** 32;\n\nclass BufferTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.maxPos = 0;\n this.target = target;\n this.supportsResize = \"resize\" in new ArrayBuffer(0);\n if (this.supportsResize) {\n try {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE, { maxByteLength: ARRAY_BUFFER_MAX_SIZE });\n } catch {\n this.buffer = new 
ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n this.supportsResize = false;\n }\n } else {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n }\n this.bytes = new Uint8Array(this.buffer);\n }\n ensureSize(size) {\n let newLength = this.buffer.byteLength;\n while (newLength < size)\n newLength *= 2;\n if (newLength === this.buffer.byteLength)\n return;\n if (newLength > ARRAY_BUFFER_MAX_SIZE) {\n throw new Error(`ArrayBuffer exceeded maximum size of ${ARRAY_BUFFER_MAX_SIZE} bytes. Please consider using another` + ` target.`);\n }\n if (this.supportsResize) {\n this.buffer.resize(newLength);\n } else {\n const newBuffer = new ArrayBuffer(newLength);\n const newBytes = new Uint8Array(newBuffer);\n newBytes.set(this.bytes, 0);\n this.buffer = newBuffer;\n this.bytes = newBytes;\n }\n }\n write(data) {\n this.maybeTrackWrites(data);\n this.ensureSize(this.pos + data.byteLength);\n this.bytes.set(data, this.pos);\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.maxPos = Math.max(this.maxPos, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {}\n async finalize() {\n this.ensureSize(this.pos);\n this.target.buffer = this.buffer.slice(0, Math.max(this.maxPos, this.pos));\n }\n async close() {}\n getSlice(start, end) {\n return this.bytes.slice(start, end);\n }\n}\nvar DEFAULT_CHUNK_SIZE = 2 ** 24;\nvar MAX_CHUNKS_AT_ONCE = 2;\n\nclass StreamTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.sections = [];\n this.lastWriteEnd = 0;\n this.lastFlushEnd = 0;\n this.writer = null;\n this.chunks = [];\n this.target = target;\n this.chunked = target._options.chunked ?? false;\n this.chunkSize = target._options.chunkSize ?? 
DEFAULT_CHUNK_SIZE;\n }\n start() {\n this.writer = this.target._writable.getWriter();\n }\n write(data) {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n this.maybeTrackWrites(data);\n this.sections.push({\n data: data.slice(),\n start: this.pos\n });\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.lastWriteEnd = Math.max(this.lastWriteEnd, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n assert(this.writer);\n if (this.sections.length === 0)\n return;\n const chunks = [];\n const sorted = [...this.sections].sort((a, b) => a.start - b.start);\n chunks.push({\n start: sorted[0].start,\n size: sorted[0].data.byteLength\n });\n for (let i = 1;i < sorted.length; i++) {\n const lastChunk = chunks[chunks.length - 1];\n const section = sorted[i];\n if (section.start <= lastChunk.start + lastChunk.size) {\n lastChunk.size = Math.max(lastChunk.size, section.start + section.data.byteLength - lastChunk.start);\n } else {\n chunks.push({\n start: section.start,\n size: section.data.byteLength\n });\n }\n }\n for (const chunk of chunks) {\n chunk.data = new Uint8Array(chunk.size);\n for (const section of this.sections) {\n if (chunk.start <= section.start && section.start < chunk.start + chunk.size) {\n chunk.data.set(section.data, section.start - chunk.start);\n }\n }\n if (this.writer.desiredSize !== null && this.writer.desiredSize <= 0) {\n await this.writer.ready;\n }\n if (this.chunked) {\n this.writeDataIntoChunks(chunk.data, chunk.start);\n this.tryToFlushChunks();\n } else {\n if (this.ensureMonotonicity && chunk.start !== 
this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data,\n position: chunk.start\n });\n this.lastFlushEnd = chunk.start + chunk.data.byteLength;\n }\n }\n this.sections.length = 0;\n }\n writeDataIntoChunks(data, position) {\n let chunkIndex = this.chunks.findIndex((x) => x.start <= position && position < x.start + this.chunkSize);\n if (chunkIndex === -1)\n chunkIndex = this.createChunk(position);\n const chunk = this.chunks[chunkIndex];\n const relativePosition = position - chunk.start;\n const toWrite = data.subarray(0, Math.min(this.chunkSize - relativePosition, data.byteLength));\n chunk.data.set(toWrite, relativePosition);\n const section = {\n start: relativePosition,\n end: relativePosition + toWrite.byteLength\n };\n this.insertSectionIntoChunk(chunk, section);\n if (chunk.written[0].start === 0 && chunk.written[0].end === this.chunkSize) {\n chunk.shouldFlush = true;\n }\n if (this.chunks.length > MAX_CHUNKS_AT_ONCE) {\n for (let i = 0;i < this.chunks.length - 1; i++) {\n this.chunks[i].shouldFlush = true;\n }\n this.tryToFlushChunks();\n }\n if (toWrite.byteLength < data.byteLength) {\n this.writeDataIntoChunks(data.subarray(toWrite.byteLength), position + toWrite.byteLength);\n }\n }\n insertSectionIntoChunk(chunk, section) {\n let low = 0;\n let high = chunk.written.length - 1;\n let index = -1;\n while (low <= high) {\n const mid = Math.floor(low + (high - low + 1) / 2);\n if (chunk.written[mid].start <= section.start) {\n low = mid + 1;\n index = mid;\n } else {\n high = mid - 1;\n }\n }\n chunk.written.splice(index + 1, 0, section);\n if (index === -1 || chunk.written[index].end < section.start)\n index++;\n while (index < chunk.written.length - 1 && chunk.written[index].end >= chunk.written[index + 1].start) {\n chunk.written[index].end = Math.max(chunk.written[index].end, chunk.written[index + 1].end);\n chunk.written.splice(index + 1, 1);\n }\n }\n 
createChunk(includesPosition) {\n const start = Math.floor(includesPosition / this.chunkSize) * this.chunkSize;\n const chunk = {\n start,\n data: new Uint8Array(this.chunkSize),\n written: [],\n shouldFlush: false\n };\n this.chunks.push(chunk);\n this.chunks.sort((a, b) => a.start - b.start);\n return this.chunks.indexOf(chunk);\n }\n tryToFlushChunks(force = false) {\n assert(this.writer);\n for (let i = 0;i < this.chunks.length; i++) {\n const chunk = this.chunks[i];\n if (!chunk.shouldFlush && !force)\n continue;\n for (const section of chunk.written) {\n const position = chunk.start + section.start;\n if (this.ensureMonotonicity && position !== this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data.subarray(section.start, section.end),\n position\n });\n this.lastFlushEnd = chunk.start + section.end;\n }\n this.chunks.splice(i--, 1);\n }\n }\n finalize() {\n if (this.chunked) {\n this.tryToFlushChunks(true);\n }\n assert(this.writer);\n return this.writer.close();\n }\n async close() {\n return this.writer?.close();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/target.js\nvar nodeAlias = (() => ({}));\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass Target {\n constructor() {\n this._output = null;\n this.onwrite = null;\n }\n}\n\nclass BufferTarget extends Target {\n constructor() {\n super(...arguments);\n this.buffer = null;\n }\n _createWriter() {\n return new BufferTargetWriter(this);\n }\n}\n\nclass StreamTarget extends Target {\n constructor(writable, options = {}) {\n super();\n if (!(writable instanceof WritableStream)) {\n throw new TypeError(\"StreamTarget requires a WritableStream instance.\");\n }\n if (options != null && typeof options !== \"object\") {\n throw new TypeError(\"StreamTarget options, when provided, must be an object.\");\n }\n if (options.chunked !== undefined && typeof options.chunked !== \"boolean\") {\n throw new TypeError(\"options.chunked, when provided, must be a boolean.\");\n }\n if (options.chunkSize !== undefined && (!Number.isInteger(options.chunkSize) || options.chunkSize < 1024)) {\n throw new TypeError(\"options.chunkSize, when provided, must be an integer and not smaller than 1024.\");\n }\n this._writable = writable;\n this._options = options;\n }\n _createWriter() {\n return new StreamTargetWriter(this);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-muxer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar GLOBAL_TIMESCALE = 1000;\nvar TIMESTAMP_OFFSET = 2082844800;\nvar getTrackMetadata = (trackData) => {\n const metadata = {};\n const track = trackData.track;\n if (track.metadata.name !== undefined) {\n metadata.name = track.metadata.name;\n }\n return metadata;\n};\nvar intoTimescale = (timeInSeconds, timescale, round = true) => {\n const value = timeInSeconds * timescale;\n return round ? Math.round(value) : value;\n};\n\nclass IsobmffMuxer extends Muxer {\n constructor(output, format) {\n super(output);\n this.auxTarget = new BufferTarget;\n this.auxWriter = this.auxTarget._createWriter();\n this.auxBoxWriter = new IsobmffBoxWriter(this.auxWriter);\n this.mdat = null;\n this.ftypSize = null;\n this.trackDatas = [];\n this.allTracksKnown = promiseWithResolvers();\n this.creationTime = Math.floor(Date.now() / 1000) + TIMESTAMP_OFFSET;\n this.finalizedChunks = [];\n this.nextFragmentNumber = 1;\n this.maxWrittenTimestamp = -Infinity;\n this.format = format;\n this.writer = output._writer;\n this.boxWriter = new IsobmffBoxWriter(this.writer);\n this.isQuickTime = format instanceof MovOutputFormat;\n const fastStartDefault = this.writer instanceof BufferTargetWriter ? \"in-memory\" : false;\n this.fastStart = format._options.fastStart ?? fastStartDefault;\n this.isFragmented = this.fastStart === \"fragmented\";\n if (this.fastStart === \"in-memory\" || this.isFragmented) {\n this.writer.ensureMonotonicity = true;\n }\n this.minimumFragmentDuration = format._options.minimumFragmentDuration ?? 
1;\n }\n async start() {\n const release = await this.mutex.acquire();\n const holdsAvc = this.output._tracks.some((x) => x.type === \"video\" && x.source._codec === \"avc\");\n {\n if (this.format._options.onFtyp) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(ftyp({\n isQuickTime: this.isQuickTime,\n holdsAvc,\n fragmented: this.isFragmented\n }));\n if (this.format._options.onFtyp) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onFtyp(data, start);\n }\n }\n this.ftypSize = this.writer.getPos();\n if (this.fastStart === \"in-memory\") {} else if (this.fastStart === \"reserve\") {\n for (const track of this.output._tracks) {\n if (track.metadata.maximumPacketCount === undefined) {\n throw new Error(\"All tracks must specify maximumPacketCount in their metadata when using\" + \" fastStart: 'reserve'.\");\n }\n }\n } else if (this.isFragmented) {} else {\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n }\n await this.writer.flush();\n release();\n }\n allTracksAreKnown() {\n for (const track of this.output._tracks) {\n if (!track.source._closed && !this.trackDatas.some((x) => x.track === track)) {\n return false;\n }\n }\n return true;\n }\n async getMimeType() {\n await this.allTracksKnown.promise;\n const codecStrings = this.trackDatas.map((trackData) => {\n if (trackData.type === \"video\") {\n return trackData.info.decoderConfig.codec;\n } else if (trackData.type === \"audio\") {\n return trackData.info.decoderConfig.codec;\n } else {\n const map = {\n webvtt: \"wvtt\"\n };\n return map[trackData.track.source._codec];\n }\n });\n return buildIsobmffMimeType({\n isQuickTime: this.isQuickTime,\n hasVideo: this.trackDatas.some((x) => x.type === \"video\"),\n hasAudio: this.trackDatas.some((x) => x.type === \"audio\"),\n codecStrings\n });\n }\n getVideoTrackData(track, packet, meta) {\n const existingTrackData = 
this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateVideoChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const decoderConfig = { ...meta.decoderConfig };\n assert(decoderConfig.codedWidth !== undefined);\n assert(decoderConfig.codedHeight !== undefined);\n let requiresAnnexBTransformation = false;\n if (track.source._codec === \"avc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractAvcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an AVCDecoderConfigurationRecord from the AVC packet. Make sure the packets are\" + \" in Annex B format (as specified in ITU-T-REC-H.264) when not providing a description, or\" + \" provide a description (must be an AVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in AVCC format.\");\n }\n decoderConfig.description = serializeAvcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n } else if (track.source._codec === \"hevc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractHevcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an HEVCDecoderConfigurationRecord from the HEVC packet. Make sure the packets\" + \" are in Annex B format (as specified in ITU-T-REC-H.265) when not providing a description, or\" + \" provide a description (must be an HEVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in HEVC format.\");\n }\n decoderConfig.description = serializeHevcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n }\n const timescale = computeRationalApproximation(1 / (track.metadata.frameRate ?? 
57600), 1e6).denominator;\n const newTrackData = {\n muxer: this,\n track,\n type: \"video\",\n info: {\n width: decoderConfig.codedWidth,\n height: decoderConfig.codedHeight,\n decoderConfig,\n requiresAnnexBTransformation\n },\n timescale,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getAudioTrackData(track, packet, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateAudioChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const decoderConfig = { ...meta.decoderConfig };\n let requiresAdtsStripping = false;\n if (track.source._codec === \"aac\" && !decoderConfig.description) {\n const adtsFrame = readAdtsFrameHeader(FileSlice.tempFromBytes(packet.data));\n if (!adtsFrame) {\n throw new Error(\"Couldn't parse ADTS header from the AAC packet. 
Make sure the packets are in ADTS format\" + \" (as specified in ISO 13818-7) when not providing a description, or provide a description\" + \" (must be an AudioSpecificConfig as specified in ISO 14496-3) and ensure the packets\" + \" are raw AAC data.\");\n }\n const sampleRate = aacFrequencyTable[adtsFrame.samplingFrequencyIndex];\n const numberOfChannels = aacChannelMap[adtsFrame.channelConfiguration];\n if (sampleRate === undefined || numberOfChannels === undefined) {\n throw new Error(\"Invalid ADTS frame header.\");\n }\n decoderConfig.description = buildAacAudioSpecificConfig({\n objectType: adtsFrame.objectType,\n sampleRate,\n numberOfChannels\n });\n requiresAdtsStripping = true;\n }\n const newTrackData = {\n muxer: this,\n track,\n type: \"audio\",\n info: {\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate,\n decoderConfig,\n requiresPcmTransformation: !this.isFragmented && PCM_AUDIO_CODECS.includes(track.source._codec),\n requiresAdtsStripping\n },\n timescale: meta.decoderConfig.sampleRate,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getSubtitleTrackData(track, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateSubtitleMetadata(meta);\n assert(meta);\n assert(meta.config);\n const newTrackData = {\n muxer: this,\n track,\n type: \"subtitle\",\n info: {\n config: meta.config\n },\n timescale: 1000,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n 
compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: [],\n lastCueEndTimestamp: 0,\n cueQueue: [],\n nextSourceId: 0,\n cueToSourceId: new WeakMap\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n async addEncodedVideoPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getVideoTrackData(track, packet, meta);\n let packetData = packet.data;\n if (trackData.info.requiresAnnexBTransformation) {\n const nalUnits = [...iterateNalUnitsInAnnexB(packetData)].map((loc) => packetData.subarray(loc.offset, loc.offset + loc.length));\n if (nalUnits.length === 0) {\n throw new Error(\"Failed to transform packet data. Make sure all packets are provided in Annex B format, as\" + \" specified in ITU-T-REC-H.264 and ITU-T-REC-H.265.\");\n }\n packetData = concatNalUnitsInLengthPrefixed(nalUnits, 4);\n }\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async addEncodedAudioPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getAudioTrackData(track, packet, meta);\n let packetData = packet.data;\n if (trackData.info.requiresAdtsStripping) {\n const adtsFrame = readAdtsFrameHeader(FileSlice.tempFromBytes(packetData));\n if (!adtsFrame) {\n throw new Error(\"Expected ADTS frame, didn't get one.\");\n }\n const headerLength = adtsFrame.crcCheck === null ? 
MIN_ADTS_FRAME_HEADER_SIZE : MAX_ADTS_FRAME_HEADER_SIZE;\n packetData = packetData.subarray(headerLength);\n }\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);\n if (trackData.info.requiresPcmTransformation) {\n await this.maybePadWithSilence(trackData, timestamp);\n }\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async maybePadWithSilence(trackData, untilTimestamp) {\n const lastSample = last(trackData.samples);\n const lastEndTimestamp = lastSample ? lastSample.timestamp + lastSample.duration : 0;\n const delta = untilTimestamp - lastEndTimestamp;\n const deltaInTimescale = intoTimescale(delta, trackData.timescale);\n if (deltaInTimescale > 0) {\n const { sampleSize, silentValue } = parsePcmCodec(trackData.info.decoderConfig.codec);\n const samplesNeeded = deltaInTimescale * trackData.info.numberOfChannels;\n const data = new Uint8Array(sampleSize * samplesNeeded).fill(silentValue);\n const paddingSample = this.createSampleForTrack(trackData, new Uint8Array(data.buffer), lastEndTimestamp, delta, \"key\");\n await this.registerSample(trackData, paddingSample);\n }\n }\n async addSubtitleCue(track, cue, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getSubtitleTrackData(track, meta);\n this.validateAndNormalizeTimestamp(trackData.track, cue.timestamp, true);\n if (track.source._codec === \"webvtt\") {\n trackData.cueQueue.push(cue);\n await this.processWebVTTCues(trackData, cue.timestamp);\n } else {}\n } finally {\n release();\n }\n }\n async processWebVTTCues(trackData, until) {\n while (trackData.cueQueue.length > 0) {\n const timestamps = new Set([]);\n for (const cue of trackData.cueQueue) {\n assert(cue.timestamp <= until);\n assert(trackData.lastCueEndTimestamp <= cue.timestamp + 
cue.duration);\n timestamps.add(Math.max(cue.timestamp, trackData.lastCueEndTimestamp));\n timestamps.add(cue.timestamp + cue.duration);\n }\n const sortedTimestamps = [...timestamps].sort((a, b) => a - b);\n const sampleStart = sortedTimestamps[0];\n const sampleEnd = sortedTimestamps[1] ?? sampleStart;\n if (until < sampleEnd) {\n break;\n }\n if (trackData.lastCueEndTimestamp < sampleStart) {\n this.auxWriter.seek(0);\n const box2 = vtte();\n this.auxBoxWriter.writeBox(box2);\n const body2 = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample2 = this.createSampleForTrack(trackData, body2, trackData.lastCueEndTimestamp, sampleStart - trackData.lastCueEndTimestamp, \"key\");\n await this.registerSample(trackData, sample2);\n trackData.lastCueEndTimestamp = sampleStart;\n }\n this.auxWriter.seek(0);\n for (let i = 0;i < trackData.cueQueue.length; i++) {\n const cue = trackData.cueQueue[i];\n if (cue.timestamp >= sampleEnd) {\n break;\n }\n inlineTimestampRegex.lastIndex = 0;\n const containsTimestamp = inlineTimestampRegex.test(cue.text);\n const endTimestamp = cue.timestamp + cue.duration;\n let sourceId = trackData.cueToSourceId.get(cue);\n if (sourceId === undefined && sampleEnd < endTimestamp) {\n sourceId = trackData.nextSourceId++;\n trackData.cueToSourceId.set(cue, sourceId);\n }\n if (cue.notes) {\n const box3 = vtta(cue.notes);\n this.auxBoxWriter.writeBox(box3);\n }\n const box2 = vttc(cue.text, containsTimestamp ? sampleStart : null, cue.identifier ?? null, cue.settings ?? null, sourceId ?? 
null);\n this.auxBoxWriter.writeBox(box2);\n if (endTimestamp === sampleEnd) {\n trackData.cueQueue.splice(i--, 1);\n }\n }\n const body = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample = this.createSampleForTrack(trackData, body, sampleStart, sampleEnd - sampleStart, \"key\");\n await this.registerSample(trackData, sample);\n trackData.lastCueEndTimestamp = sampleEnd;\n }\n }\n createSampleForTrack(trackData, data, timestamp, duration, type) {\n const sample = {\n timestamp,\n decodeTimestamp: timestamp,\n duration,\n data,\n size: data.byteLength,\n type,\n timescaleUnitsToNextSample: intoTimescale(duration, trackData.timescale)\n };\n return sample;\n }\n processTimestamps(trackData, nextSample) {\n if (trackData.timestampProcessingQueue.length === 0) {\n return;\n }\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n let totalDuration = 0;\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n const duration = intoTimescale(sample.duration, trackData.timescale);\n totalDuration += duration;\n }\n if (trackData.timeToSampleTable.length === 0) {\n trackData.timeToSampleTable.push({\n sampleCount: totalDuration,\n sampleDelta: 1\n });\n } else {\n const lastEntry = last(trackData.timeToSampleTable);\n lastEntry.sampleCount += totalDuration;\n }\n trackData.timestampProcessingQueue.length = 0;\n return;\n }\n const sortedTimestamps = trackData.timestampProcessingQueue.map((x) => x.timestamp).sort((a, b) => a - b);\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n sample.decodeTimestamp = sortedTimestamps[i];\n if (!this.isFragmented && trackData.lastTimescaleUnits === null) {\n sample.decodeTimestamp = 0;\n }\n const sampleCompositionTimeOffset = intoTimescale(sample.timestamp - sample.decodeTimestamp, trackData.timescale);\n const durationInTimescale = 
intoTimescale(sample.duration, trackData.timescale);\n if (trackData.lastTimescaleUnits !== null) {\n assert(trackData.lastSample);\n const timescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n assert(delta >= 0);\n trackData.lastTimescaleUnits += delta;\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n if (!this.isFragmented) {\n let lastTableEntry = last(trackData.timeToSampleTable);\n assert(lastTableEntry);\n if (lastTableEntry.sampleCount === 1) {\n lastTableEntry.sampleDelta = delta;\n const entryBefore = trackData.timeToSampleTable[trackData.timeToSampleTable.length - 2];\n if (entryBefore && entryBefore.sampleDelta === delta) {\n entryBefore.sampleCount++;\n trackData.timeToSampleTable.pop();\n lastTableEntry = entryBefore;\n }\n } else if (lastTableEntry.sampleDelta !== delta) {\n lastTableEntry.sampleCount--;\n trackData.timeToSampleTable.push(lastTableEntry = {\n sampleCount: 1,\n sampleDelta: delta\n });\n }\n if (lastTableEntry.sampleDelta === durationInTimescale) {\n lastTableEntry.sampleCount++;\n } else {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n }\n const lastCompositionTimeOffsetTableEntry = last(trackData.compositionTimeOffsetTable);\n assert(lastCompositionTimeOffsetTableEntry);\n if (lastCompositionTimeOffsetTableEntry.sampleCompositionTimeOffset === sampleCompositionTimeOffset) {\n lastCompositionTimeOffsetTableEntry.sampleCount++;\n } else {\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n });\n }\n }\n } else {\n trackData.lastTimescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n if (!this.isFragmented) {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n 
});\n }\n }\n trackData.lastSample = sample;\n }\n trackData.timestampProcessingQueue.length = 0;\n assert(trackData.lastSample);\n assert(trackData.lastTimescaleUnits !== null);\n if (nextSample !== undefined && trackData.lastSample.timescaleUnitsToNextSample === 0) {\n assert(nextSample.type === \"key\");\n const timescaleUnits = intoTimescale(nextSample.timestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n }\n }\n async registerSample(trackData, sample) {\n if (sample.type === \"key\") {\n this.processTimestamps(trackData, sample);\n }\n trackData.timestampProcessingQueue.push(sample);\n if (this.isFragmented) {\n trackData.sampleQueue.push(sample);\n await this.interleaveSamples();\n } else if (this.fastStart === \"reserve\") {\n await this.registerSampleFastStartReserve(trackData, sample);\n } else {\n await this.addSampleToTrack(trackData, sample);\n }\n }\n async addSampleToTrack(trackData, sample) {\n if (!this.isFragmented) {\n trackData.samples.push(sample);\n if (this.fastStart === \"reserve\") {\n const maximumPacketCount = trackData.track.metadata.maximumPacketCount;\n assert(maximumPacketCount !== undefined);\n if (trackData.samples.length > maximumPacketCount) {\n throw new Error(`Track #${trackData.track.id} has already reached the maximum packet count` + ` (${maximumPacketCount}). 
Either add less packets or increase the maximum packet count.`);\n }\n }\n }\n let beginNewChunk = false;\n if (!trackData.currentChunk) {\n beginNewChunk = true;\n } else {\n trackData.currentChunk.startTimestamp = Math.min(trackData.currentChunk.startTimestamp, sample.timestamp);\n const currentChunkDuration = sample.timestamp - trackData.currentChunk.startTimestamp;\n if (this.isFragmented) {\n const keyFrameQueuedEverywhere = this.trackDatas.every((otherTrackData) => {\n if (trackData === otherTrackData) {\n return sample.type === \"key\";\n }\n const firstQueuedSample = otherTrackData.sampleQueue[0];\n if (firstQueuedSample) {\n return firstQueuedSample.type === \"key\";\n }\n return otherTrackData.track.source._closed;\n });\n if (currentChunkDuration >= this.minimumFragmentDuration && keyFrameQueuedEverywhere && sample.timestamp > this.maxWrittenTimestamp) {\n beginNewChunk = true;\n await this.finalizeFragment();\n }\n } else {\n beginNewChunk = currentChunkDuration >= 0.5;\n }\n }\n if (beginNewChunk) {\n if (trackData.currentChunk) {\n await this.finalizeCurrentChunk(trackData);\n }\n trackData.currentChunk = {\n startTimestamp: sample.timestamp,\n samples: [],\n offset: null,\n moofOffset: null\n };\n }\n assert(trackData.currentChunk);\n trackData.currentChunk.samples.push(sample);\n if (this.isFragmented) {\n this.maxWrittenTimestamp = Math.max(this.maxWrittenTimestamp, sample.timestamp);\n }\n }\n async finalizeCurrentChunk(trackData) {\n assert(!this.isFragmented);\n if (!trackData.currentChunk)\n return;\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n let sampleCount = trackData.currentChunk.samples.length;\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n sampleCount = trackData.currentChunk.samples.reduce((acc, sample) => acc + intoTimescale(sample.duration, trackData.timescale), 0);\n }\n if (trackData.compactlyCodedChunkTable.length === 0 || 
last(trackData.compactlyCodedChunkTable).samplesPerChunk !== sampleCount) {\n trackData.compactlyCodedChunkTable.push({\n firstChunk: trackData.finalizedChunks.length,\n samplesPerChunk: sampleCount\n });\n }\n if (this.fastStart === \"in-memory\") {\n trackData.currentChunk.offset = 0;\n return;\n }\n trackData.currentChunk.offset = this.writer.getPos();\n for (const sample of trackData.currentChunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n await this.writer.flush();\n }\n async interleaveSamples(isFinalCall = false) {\n assert(this.isFragmented);\n if (!isFinalCall && !this.allTracksAreKnown()) {\n return;\n }\n outer:\n while (true) {\n let trackWithMinTimestamp = null;\n let minTimestamp = Infinity;\n for (const trackData of this.trackDatas) {\n if (!isFinalCall && trackData.sampleQueue.length === 0 && !trackData.track.source._closed) {\n break outer;\n }\n if (trackData.sampleQueue.length > 0 && trackData.sampleQueue[0].timestamp < minTimestamp) {\n trackWithMinTimestamp = trackData;\n minTimestamp = trackData.sampleQueue[0].timestamp;\n }\n }\n if (!trackWithMinTimestamp) {\n break;\n }\n const sample = trackWithMinTimestamp.sampleQueue.shift();\n await this.addSampleToTrack(trackWithMinTimestamp, sample);\n }\n }\n async finalizeFragment(flushWriter = true) {\n assert(this.isFragmented);\n const fragmentNumber = this.nextFragmentNumber++;\n if (fragmentNumber === 1) {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n const tracksInFragment = this.trackDatas.filter((x) => x.currentChunk);\n const moofBox = moof(fragmentNumber, tracksInFragment);\n const moofOffset = this.writer.getPos();\n const mdatStartPos = moofOffset + this.boxWriter.measureBox(moofBox);\n let 
currentPos = mdatStartPos + MIN_BOX_HEADER_SIZE;\n let fragmentStartTimestamp = Infinity;\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset = currentPos;\n trackData.currentChunk.moofOffset = moofOffset;\n for (const sample of trackData.currentChunk.samples) {\n currentPos += sample.size;\n }\n fragmentStartTimestamp = Math.min(fragmentStartTimestamp, trackData.currentChunk.startTimestamp);\n }\n const mdatSize = currentPos - mdatStartPos;\n const needsLargeMdatSize = mdatSize >= 2 ** 32;\n if (needsLargeMdatSize) {\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset += MAX_BOX_HEADER_SIZE - MIN_BOX_HEADER_SIZE;\n }\n }\n if (this.format._options.onMoof) {\n this.writer.startTrackingWrites();\n }\n const newMoofBox = moof(fragmentNumber, tracksInFragment);\n this.boxWriter.writeBox(newMoofBox);\n if (this.format._options.onMoof) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoof(data, start, fragmentStartTimestamp);\n }\n assert(this.writer.getPos() === mdatStartPos);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n const mdatBox = mdat(needsLargeMdatSize);\n mdatBox.size = mdatSize;\n this.boxWriter.writeBox(mdatBox);\n this.writer.seek(mdatStartPos + (needsLargeMdatSize ? 
MAX_BOX_HEADER_SIZE : MIN_BOX_HEADER_SIZE));\n for (const trackData of tracksInFragment) {\n for (const sample of trackData.currentChunk.samples) {\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n for (const trackData of tracksInFragment) {\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n trackData.currentChunk = null;\n }\n if (flushWriter) {\n await this.writer.flush();\n }\n }\n async registerSampleFastStartReserve(trackData, sample) {\n if (this.allTracksAreKnown()) {\n if (!this.mdat) {\n const moovBox = moov(this);\n const moovSize = this.boxWriter.measureBox(moovBox);\n const reservedSize = moovSize + this.computeSampleTableSizeUpperBound() + 4096;\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize + reservedSize);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n for (const trackData2 of this.trackDatas) {\n for (const sample2 of trackData2.sampleQueue) {\n await this.addSampleToTrack(trackData2, sample2);\n }\n trackData2.sampleQueue.length = 0;\n }\n }\n await this.addSampleToTrack(trackData, sample);\n } else {\n trackData.sampleQueue.push(sample);\n }\n }\n computeSampleTableSizeUpperBound() {\n assert(this.fastStart === \"reserve\");\n let upperBound = 0;\n for (const trackData of this.trackDatas) {\n const n = trackData.track.metadata.maximumPacketCount;\n assert(n !== undefined);\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += (4 + 4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += 8 * n;\n }\n return upperBound;\n }\n async onTrackClose(track) {\n const release = await this.mutex.acquire();\n if (track.type === 
\"subtitle\" && track.source._codec === \"webvtt\") {\n const trackData = this.trackDatas.find((x) => x.track === track);\n if (trackData) {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n if (this.isFragmented) {\n await this.interleaveSamples();\n }\n release();\n }\n async finalize() {\n const release = await this.mutex.acquire();\n this.allTracksKnown.resolve();\n for (const trackData of this.trackDatas) {\n if (trackData.type === \"subtitle\" && trackData.track.source._codec === \"webvtt\") {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.isFragmented) {\n await this.interleaveSamples(true);\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n }\n await this.finalizeFragment(false);\n } else {\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n await this.finalizeCurrentChunk(trackData);\n }\n }\n if (this.fastStart === \"in-memory\") {\n this.mdat = mdat(false);\n let mdatSize;\n for (let i = 0;i < 2; i++) {\n const movieBox2 = moov(this);\n const movieBoxSize = this.boxWriter.measureBox(movieBox2);\n mdatSize = this.boxWriter.measureBox(this.mdat);\n let currentChunkPos = this.writer.getPos() + movieBoxSize + mdatSize;\n for (const chunk of this.finalizedChunks) {\n chunk.offset = currentChunkPos;\n for (const { data } of chunk.samples) {\n assert(data);\n currentChunkPos += data.byteLength;\n mdatSize += data.byteLength;\n }\n }\n if (currentChunkPos < 2 ** 32)\n break;\n if (mdatSize >= 2 ** 32)\n this.mdat.largeSize = true;\n }\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n if (this.format._options.onMdat) {\n 
this.writer.startTrackingWrites();\n }\n this.mdat.size = mdatSize;\n this.boxWriter.writeBox(this.mdat);\n for (const chunk of this.finalizedChunks) {\n for (const sample of chunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n } else if (this.isFragmented) {\n const startPos = this.writer.getPos();\n const mfraBox = mfra(this.trackDatas);\n this.boxWriter.writeBox(mfraBox);\n const mfraBoxSize = this.writer.getPos() - startPos;\n this.writer.seek(this.writer.getPos() - 4);\n this.boxWriter.writeU32(mfraBoxSize);\n } else {\n assert(this.mdat);\n const mdatPos = this.boxWriter.offsets.get(this.mdat);\n assert(mdatPos !== undefined);\n const mdatSize = this.writer.getPos() - mdatPos;\n this.mdat.size = mdatSize;\n this.mdat.largeSize = mdatSize >= 2 ** 32;\n this.boxWriter.patchBox(this.mdat);\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n const movieBox = moov(this);\n if (this.fastStart === \"reserve\") {\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize);\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(movieBox);\n const remainingSpace = this.boxWriter.offsets.get(this.mdat) - this.writer.getPos();\n this.boxWriter.writeBox(free(remainingSpace));\n } else {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(movieBox);\n }\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n release();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output-format.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form 
is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass OutputFormat {\n getSupportedVideoCodecs() {\n return this.getSupportedCodecs().filter((codec) => VIDEO_CODECS.includes(codec));\n }\n getSupportedAudioCodecs() {\n return this.getSupportedCodecs().filter((codec) => AUDIO_CODECS.includes(codec));\n }\n getSupportedSubtitleCodecs() {\n return this.getSupportedCodecs().filter((codec) => SUBTITLE_CODECS.includes(codec));\n }\n _codecUnsupportedHint(codec) {\n return \"\";\n }\n}\n\nclass IsobmffOutputFormat extends OutputFormat {\n constructor(options = {}) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.fastStart !== undefined && ![false, \"in-memory\", \"reserve\", \"fragmented\"].includes(options.fastStart)) {\n throw new TypeError(\"options.fastStart, when provided, must be false, 'in-memory', 'reserve', or 'fragmented'.\");\n }\n if (options.minimumFragmentDuration !== undefined && (!Number.isFinite(options.minimumFragmentDuration) || options.minimumFragmentDuration < 0)) {\n throw new TypeError(\"options.minimumFragmentDuration, when provided, must be a non-negative number.\");\n }\n if (options.onFtyp !== undefined && typeof options.onFtyp !== \"function\") {\n throw new TypeError(\"options.onFtyp, when provided, must be a function.\");\n }\n if (options.onMoov !== undefined && typeof options.onMoov !== \"function\") {\n throw new TypeError(\"options.onMoov, when provided, must be a function.\");\n }\n if (options.onMdat !== undefined && typeof options.onMdat !== \"function\") {\n throw new TypeError(\"options.onMdat, when provided, must be a function.\");\n }\n if (options.onMoof !== undefined && typeof options.onMoof !== \"function\") {\n throw new TypeError(\"options.onMoof, when provided, must be a function.\");\n }\n if 
(options.metadataFormat !== undefined && ![\"mdir\", \"mdta\", \"udta\", \"auto\"].includes(options.metadataFormat)) {\n throw new TypeError(\"options.metadataFormat, when provided, must be either 'auto', 'mdir', 'mdta', or 'udta'.\");\n }\n super();\n this._options = options;\n }\n getSupportedTrackCounts() {\n const max = 2 ** 32 - 1;\n return {\n video: { min: 0, max },\n audio: { min: 0, max },\n subtitle: { min: 0, max },\n total: { min: 1, max }\n };\n }\n get supportsVideoRotationMetadata() {\n return true;\n }\n _createMuxer(output) {\n return new IsobmffMuxer(output, this);\n }\n}\n\nclass Mp4OutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MP4\";\n }\n get fileExtension() {\n return \".mp4\";\n }\n get mimeType() {\n return \"video/mp4\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...NON_PCM_AUDIO_CODECS,\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n ...SUBTITLE_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new MovOutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MOV will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\nclass MovOutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MOV\";\n }\n get fileExtension() {\n return \".mov\";\n }\n get mimeType() {\n return \"video/quicktime\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...AUDIO_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new Mp4OutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MP4 will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/encode.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the 
Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar validateVideoEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!VIDEO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid video codec '${config.codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n if (!(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate must be a positive integer or a quality.\");\n }\n if (config.keyFrameInterval !== undefined && (!Number.isFinite(config.keyFrameInterval) || config.keyFrameInterval < 0)) {\n throw new TypeError(\"config.keyFrameInterval, when provided, must be a non-negative number.\");\n }\n if (config.sizeChangeBehavior !== undefined && ![\"deny\", \"passThrough\", \"fill\", \"contain\", \"cover\"].includes(config.sizeChangeBehavior)) {\n throw new TypeError(\"config.sizeChangeBehavior, when provided, must be 'deny', 'passThrough', 'fill', 'contain'\" + \" or 'cover'.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedChunk, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateVideoEncodingAdditionalOptions(config.codec, config);\n};\nvar validateVideoEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.alpha !== undefined && ![\"discard\", \"keep\"].includes(options.alpha)) {\n throw new TypeError(\"options.alpha, when provided, must be 'discard' or 
'keep'.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.latencyMode !== undefined && ![\"quality\", \"realtime\"].includes(options.latencyMode)) {\n throw new TypeError(\"latencyMode, when provided, must be 'quality' or 'realtime'.\");\n }\n if (options.fullCodecString !== undefined && typeof options.fullCodecString !== \"string\") {\n throw new TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n if (options.hardwareAcceleration !== undefined && ![\"no-preference\", \"prefer-hardware\", \"prefer-software\"].includes(options.hardwareAcceleration)) {\n throw new TypeError(\"hardwareAcceleration, when provided, must be 'no-preference', 'prefer-hardware' or\" + \" 'prefer-software'.\");\n }\n if (options.scalabilityMode !== undefined && typeof options.scalabilityMode !== \"string\") {\n throw new TypeError(\"scalabilityMode, when provided, must be a string.\");\n }\n if (options.contentHint !== undefined && typeof options.contentHint !== \"string\") {\n throw new TypeError(\"contentHint, when provided, must be a string.\");\n }\n};\nvar buildVideoEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toVideoBitrate(options.codec, options.width, options.height) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildVideoCodecString(options.codec, options.width, options.height, resolvedBitrate),\n width: options.width,\n height: options.height,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n alpha: options.alpha ?? 
\"discard\",\n framerate: options.framerate,\n latencyMode: options.latencyMode,\n hardwareAcceleration: options.hardwareAcceleration,\n scalabilityMode: options.scalabilityMode,\n contentHint: options.contentHint,\n ...getVideoEncoderConfigExtension(options.codec)\n };\n};\nvar validateAudioEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!AUDIO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid audio codec '${config.codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n if (config.bitrate === undefined && (!PCM_AUDIO_CODECS.includes(config.codec) || config.codec === \"flac\")) {\n throw new TypeError(\"config.bitrate must be provided for compressed audio codecs.\");\n }\n if (config.bitrate !== undefined && !(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate, when provided, must be a positive integer or a quality.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedChunk, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateAudioEncodingAdditionalOptions(config.codec, config);\n};\nvar validateAudioEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.fullCodecString !== undefined && typeof options.fullCodecString !== \"string\") {\n throw new 
TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n};\nvar buildAudioEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toAudioBitrate(options.codec) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildAudioCodecString(options.codec, options.numberOfChannels, options.sampleRate),\n numberOfChannels: options.numberOfChannels,\n sampleRate: options.sampleRate,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n ...getAudioEncoderConfigExtension(options.codec)\n };\n};\n\nclass Quality {\n constructor(factor) {\n this._factor = factor;\n }\n _toVideoBitrate(codec, width, height) {\n const pixels = width * height;\n const codecEfficiencyFactors = {\n avc: 1,\n hevc: 0.6,\n vp9: 0.6,\n av1: 0.4,\n vp8: 1.2\n };\n const referencePixels = 1920 * 1080;\n const referenceBitrate = 3000000;\n const scaleFactor = Math.pow(pixels / referencePixels, 0.95);\n const baseBitrate = referenceBitrate * scaleFactor;\n const codecAdjustedBitrate = baseBitrate * codecEfficiencyFactors[codec];\n const finalBitrate = codecAdjustedBitrate * this._factor;\n return Math.ceil(finalBitrate / 1000) * 1000;\n }\n _toAudioBitrate(codec) {\n if (PCM_AUDIO_CODECS.includes(codec) || codec === \"flac\") {\n return;\n }\n const baseRates = {\n aac: 128000,\n opus: 64000,\n mp3: 160000,\n vorbis: 64000\n };\n const baseBitrate = baseRates[codec];\n if (!baseBitrate) {\n throw new Error(`Unhandled codec: ${codec}`);\n }\n let finalBitrate = baseBitrate * this._factor;\n if (codec === \"aac\") {\n const validRates = [96000, 128000, 160000, 192000];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? 
curr : prev);\n } else if (codec === \"opus\" || codec === \"vorbis\") {\n finalBitrate = Math.max(6000, finalBitrate);\n } else if (codec === \"mp3\") {\n const validRates = [\n 8000,\n 16000,\n 24000,\n 32000,\n 40000,\n 48000,\n 64000,\n 80000,\n 96000,\n 112000,\n 128000,\n 160000,\n 192000,\n 224000,\n 256000,\n 320000\n ];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? curr : prev);\n }\n return Math.round(finalBitrate / 1000) * 1000;\n }\n}\nvar QUALITY_LOW = /* @__PURE__ */ new Quality(0.6);\nvar QUALITY_MEDIUM = /* @__PURE__ */ new Quality(1);\nvar QUALITY_HIGH = /* @__PURE__ */ new Quality(2);\nvar QUALITY_VERY_HIGH = /* @__PURE__ */ new Quality(4);\n\n// ../../node_modules/mediabunny/dist/modules/src/media-source.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass MediaSource {\n constructor() {\n this._connectedTrack = null;\n this._closingPromise = null;\n this._closed = false;\n this._timestampOffset = 0;\n }\n _ensureValidAdd() {\n if (!this._connectedTrack) {\n throw new Error(\"Source is not connected to an output track.\");\n }\n if (this._connectedTrack.output.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._connectedTrack.output.state === \"finalizing\" || this._connectedTrack.output.state === \"finalized\") {\n throw new Error(\"Output has been finalized.\");\n }\n if (this._connectedTrack.output.state === \"pending\") {\n throw new Error(\"Output has not started.\");\n }\n if (this._closed) {\n throw new Error(\"Source is closed.\");\n }\n }\n async _start() {}\n async _flushAndClose(forceClose) {}\n close() {\n if (this._closingPromise) {\n return;\n }\n const connectedTrack = this._connectedTrack;\n 
if (!connectedTrack) {\n throw new Error(\"Cannot call close without connecting the source to an output track.\");\n }\n if (connectedTrack.output.state === \"pending\") {\n throw new Error(\"Cannot call close before output has been started.\");\n }\n this._closingPromise = (async () => {\n await this._flushAndClose(false);\n this._closed = true;\n if (connectedTrack.output.state === \"finalizing\" || connectedTrack.output.state === \"finalized\") {\n return;\n }\n connectedTrack.output._muxer.onTrackClose(connectedTrack);\n })();\n }\n async _flushOrWaitForOngoingClose(forceClose) {\n return this._closingPromise ??= (async () => {\n await this._flushAndClose(forceClose);\n this._closed = true;\n })();\n }\n}\n\nclass VideoSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!VIDEO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid video codec '${codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass VideoEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastMultipleOfKeyFrameInterval = -1;\n this.codedWidth = null;\n this.codedHeight = null;\n this.resizeCanvas = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.alphaEncoder = null;\n this.splitter = null;\n this.splitterCreationFailed = false;\n this.alphaFrameQueue = [];\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(videoSample, shouldClose, encodeOptions) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.codedWidth !== null && this.codedHeight !== null) {\n if (videoSample.codedWidth !== this.codedWidth || videoSample.codedHeight !== this.codedHeight) {\n const sizeChangeBehavior = 
this.encodingConfig.sizeChangeBehavior ?? \"deny\";\n if (sizeChangeBehavior === \"passThrough\") {} else if (sizeChangeBehavior === \"deny\") {\n throw new Error(`Video sample size must remain constant. Expected ${this.codedWidth}x${this.codedHeight},` + ` got ${videoSample.codedWidth}x${videoSample.codedHeight}. To allow the sample size to` + ` change over time, set \\`sizeChangeBehavior\\` to a value other than 'strict' in the` + ` encoding options.`);\n } else {\n let canvasIsNew = false;\n if (!this.resizeCanvas) {\n if (typeof document !== \"undefined\") {\n this.resizeCanvas = document.createElement(\"canvas\");\n this.resizeCanvas.width = this.codedWidth;\n this.resizeCanvas.height = this.codedHeight;\n } else {\n this.resizeCanvas = new OffscreenCanvas(this.codedWidth, this.codedHeight);\n }\n canvasIsNew = true;\n }\n const context = this.resizeCanvas.getContext(\"2d\", {\n alpha: isFirefox()\n });\n assert(context);\n if (!canvasIsNew) {\n if (isFirefox()) {\n context.fillStyle = \"black\";\n context.fillRect(0, 0, this.codedWidth, this.codedHeight);\n } else {\n context.clearRect(0, 0, this.codedWidth, this.codedHeight);\n }\n }\n videoSample.drawWithFit(context, { fit: sizeChangeBehavior });\n if (shouldClose) {\n videoSample.close();\n }\n videoSample = new VideoSample(this.resizeCanvas, {\n timestamp: videoSample.timestamp,\n duration: videoSample.duration,\n rotation: videoSample.rotation\n });\n shouldClose = true;\n }\n }\n } else {\n this.codedWidth = videoSample.codedWidth;\n this.codedHeight = videoSample.codedHeight;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(videoSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n const keyFrameInterval = this.encodingConfig.keyFrameInterval ?? 
5;\n const multipleOfKeyFrameInterval = Math.floor(videoSample.timestamp / keyFrameInterval);\n const finalEncodeOptions = {\n ...encodeOptions,\n keyFrame: encodeOptions?.keyFrame || keyFrameInterval === 0 || multipleOfKeyFrameInterval !== this.lastMultipleOfKeyFrameInterval\n };\n this.lastMultipleOfKeyFrameInterval = multipleOfKeyFrameInterval;\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = videoSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample, finalEncodeOptions)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n } else {\n assert(this.encoder);\n const videoFrame = videoSample.toVideoFrame();\n if (!this.alphaEncoder) {\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const frameDefinitelyHasNoAlpha = !!videoFrame.format && !videoFrame.format.includes(\"A\");\n if (frameDefinitelyHasNoAlpha || this.splitterCreationFailed) {\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (!this.splitter) {\n try {\n this.splitter = new ColorAlphaSplitter(width, height);\n } catch (error) {\n console.error(\"Due to an error, only color data will be encoded.\", error);\n this.splitterCreationFailed = true;\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n }\n }\n if (this.splitter) {\n const colorFrame = this.splitter.extractColor(videoFrame);\n const alphaFrame = this.splitter.extractAlpha(videoFrame);\n this.alphaFrameQueue.push(alphaFrame);\n this.encoder.encode(colorFrame, finalEncodeOptions);\n colorFrame.close();\n videoFrame.close();\n }\n }\n }\n if (shouldClose) 
{\n videoSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n }\n await this.muxer.mutex.currentPromise;\n } finally {\n if (shouldClose) {\n videoSample.close();\n }\n }\n }\n ensureEncoder(videoSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const encoderConfig = buildVideoEncoderConfig({\n width: videoSample.codedWidth,\n height: videoSample.codedHeight,\n ...this.encodingConfig,\n framerate: this.source._connectedTrack?.metadata.frameRate\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customVideoEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else {\n if (typeof VideoEncoder === \"undefined\") {\n throw new Error(\"VideoEncoder is not supported by this browser.\");\n }\n encoderConfig.alpha = \"discard\";\n if (this.encodingConfig.alpha === \"keep\") {\n encoderConfig.latencyMode = \"quality\";\n }\n const hasOddDimension = encoderConfig.width % 2 === 1 || encoderConfig.height % 2 === 1;\n if (hasOddDimension && 
(this.encodingConfig.codec === \"avc\" || this.encodingConfig.codec === \"hevc\")) {\n throw new Error(`The dimensions ${encoderConfig.width}x${encoderConfig.height} are not supported for codec` + ` '${this.encodingConfig.codec}'; both width and height must be even numbers. Make sure to` + ` round your dimensions to the nearest even number.`);\n }\n const support = await VideoEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.width}x${encoderConfig.height}, hardware acceleration:` + ` ${encoderConfig.hardwareAcceleration ?? \"no-preference\"}) is not supported by this browser.` + ` Consider using another codec or changing your video parameters.`);\n }\n const colorChunkQueue = [];\n const nullAlphaChunkQueue = [];\n let encodedAlphaChunkCount = 0;\n let alphaEncoderQueue = 0;\n const addPacket = (colorChunk, alphaChunk, meta) => {\n const sideData = {};\n if (alphaChunk) {\n const alphaData = new Uint8Array(alphaChunk.byteLength);\n alphaChunk.copyTo(alphaData);\n sideData.alpha = alphaData;\n }\n const packet = EncodedPacket.fromEncodedChunk(colorChunk, sideData);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n this.encoder = new VideoEncoder({\n output: (chunk, meta) => {\n if (!this.alphaEncoder) {\n addPacket(chunk, null, meta);\n return;\n }\n const alphaFrame = this.alphaFrameQueue.shift();\n assert(alphaFrame !== undefined);\n if (alphaFrame) {\n this.alphaEncoder.encode(alphaFrame, {\n keyFrame: chunk.type === \"key\"\n });\n alphaEncoderQueue++;\n alphaFrame.close();\n colorChunkQueue.push({ chunk, meta });\n } else {\n if (alphaEncoderQueue === 0) {\n addPacket(chunk, null, meta);\n } else {\n 
nullAlphaChunkQueue.push(encodedAlphaChunkCount + alphaEncoderQueue);\n colorChunkQueue.push({ chunk, meta });\n }\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n if (this.encodingConfig.alpha === \"keep\") {\n this.alphaEncoder = new VideoEncoder({\n output: (chunk, meta) => {\n alphaEncoderQueue--;\n const colorChunk = colorChunkQueue.shift();\n assert(colorChunk !== undefined);\n addPacket(colorChunk.chunk, chunk, colorChunk.meta);\n encodedAlphaChunkCount++;\n while (nullAlphaChunkQueue.length > 0 && nullAlphaChunkQueue[0] === encodedAlphaChunkCount) {\n nullAlphaChunkQueue.shift();\n const colorChunk2 = colorChunkQueue.shift();\n assert(colorChunk2 !== undefined);\n addPacket(colorChunk2.chunk, null, colorChunk2.meta);\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.alphaEncoder.configure(encoderConfig);\n }\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n async flushAndClose(forceClose) {\n if (!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n await this.alphaEncoder?.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n if (this.alphaEncoder && this.alphaEncoder.state !== \"closed\") {\n this.alphaEncoder.close();\n }\n this.alphaFrameQueue.forEach((x) => x?.close());\n this.splitter?.close();\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else {\n return this.encoder?.encodeQueueSize ?? 
0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass ColorAlphaSplitter {\n constructor(initialWidth, initialHeight) {\n this.lastFrame = null;\n if (typeof OffscreenCanvas !== \"undefined\") {\n this.canvas = new OffscreenCanvas(initialWidth, initialHeight);\n } else {\n this.canvas = document.createElement(\"canvas\");\n this.canvas.width = initialWidth;\n this.canvas.height = initialHeight;\n }\n const gl = this.canvas.getContext(\"webgl2\", {\n alpha: true\n });\n if (!gl) {\n throw new Error(\"Couldn't acquire WebGL 2 context.\");\n }\n this.gl = gl;\n this.colorProgram = this.createColorProgram();\n this.alphaProgram = this.createAlphaProgram();\n this.vao = this.createVAO();\n this.sourceTexture = this.createTexture();\n this.alphaResolutionLocation = this.gl.getUniformLocation(this.alphaProgram, \"u_resolution\");\n this.gl.useProgram(this.colorProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.colorProgram, \"u_sourceTexture\"), 0);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.alphaProgram, \"u_sourceTexture\"), 0);\n }\n createVertexShader() {\n return this.createShader(this.gl.VERTEX_SHADER, `#version 300 es\n\t\t\tin vec2 a_position;\n\t\t\tin vec2 a_texCoord;\n\t\t\tout vec2 v_texCoord;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tgl_Position = vec4(a_position, 0.0, 1.0);\n\t\t\t\tv_texCoord = a_texCoord;\n\t\t\t}\n\t\t`);\n }\n createColorProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tvec4 source = texture(u_sourceTexture, v_texCoord);\n\t\t\t\tfragColor = vec4(source.rgb, 1.0);\n\t\t\t}\n\t\t`);\n const program = 
this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createAlphaProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tuniform vec2 u_resolution; // The width and height of the canvas\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\n\t\t\t// This function determines the value for a single byte in the YUV stream\n\t\t\tfloat getByteValue(float byteOffset) {\n\t\t\t\tfloat width = u_resolution.x;\n\t\t\t\tfloat height = u_resolution.y;\n\n\t\t\t\tfloat yPlaneSize = width * height;\n\n\t\t\t\tif (byteOffset < yPlaneSize) {\n\t\t\t\t\t// This byte is in the luma plane. Find the corresponding pixel coordinates to sample from\n\t\t\t\t\tfloat y = floor(byteOffset / width);\n\t\t\t\t\tfloat x = mod(byteOffset, width);\n\t\t\t\t\t\n\t\t\t\t\t// Add 0.5 to sample the center of the texel\n\t\t\t\t\tvec2 sampleCoord = (vec2(x, y) + 0.5) / u_resolution;\n\t\t\t\t\t\n\t\t\t\t\t// The luma value is the alpha from the source texture\n\t\t\t\t\treturn texture(u_sourceTexture, sampleCoord).a;\n\t\t\t\t} else {\n\t\t\t\t\t// Write a fixed value for chroma and beyond\n\t\t\t\t\treturn 128.0 / 255.0;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\t// Each fragment writes 4 bytes (R, G, B, A)\n\t\t\t\tfloat pixelIndex = floor(gl_FragCoord.y) * u_resolution.x + floor(gl_FragCoord.x);\n\t\t\t\tfloat baseByteOffset = pixelIndex * 4.0;\n\n\t\t\t\tvec4 result;\n\t\t\t\tfor (int i = 0; i < 4; i++) {\n\t\t\t\t\tfloat currentByteOffset = baseByteOffset + float(i);\n\t\t\t\t\tresult[i] = getByteValue(currentByteOffset);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfragColor = result;\n\t\t\t}\n\t\t`);\n const program = this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n 
this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createShader(type, source) {\n const shader = this.gl.createShader(type);\n this.gl.shaderSource(shader, source);\n this.gl.compileShader(shader);\n if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {\n console.error(\"Shader compile error:\", this.gl.getShaderInfoLog(shader));\n }\n return shader;\n }\n createVAO() {\n const vao = this.gl.createVertexArray();\n this.gl.bindVertexArray(vao);\n const vertices = new Float32Array([\n -1,\n -1,\n 0,\n 1,\n 1,\n -1,\n 1,\n 1,\n -1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0\n ]);\n const buffer = this.gl.createBuffer();\n this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);\n this.gl.bufferData(this.gl.ARRAY_BUFFER, vertices, this.gl.STATIC_DRAW);\n const positionLocation = this.gl.getAttribLocation(this.colorProgram, \"a_position\");\n const texCoordLocation = this.gl.getAttribLocation(this.colorProgram, \"a_texCoord\");\n this.gl.enableVertexAttribArray(positionLocation);\n this.gl.vertexAttribPointer(positionLocation, 2, this.gl.FLOAT, false, 16, 0);\n this.gl.enableVertexAttribArray(texCoordLocation);\n this.gl.vertexAttribPointer(texCoordLocation, 2, this.gl.FLOAT, false, 16, 8);\n return vao;\n }\n createTexture() {\n const texture = this.gl.createTexture();\n this.gl.bindTexture(this.gl.TEXTURE_2D, texture);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.LINEAR);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.LINEAR);\n return texture;\n }\n updateTexture(sourceFrame) {\n if (this.lastFrame === sourceFrame) {\n return;\n }\n if (sourceFrame.displayWidth !== this.canvas.width || sourceFrame.displayHeight !== this.canvas.height) {\n this.canvas.width = 
sourceFrame.displayWidth;\n this.canvas.height = sourceFrame.displayHeight;\n }\n this.gl.activeTexture(this.gl.TEXTURE0);\n this.gl.bindTexture(this.gl.TEXTURE_2D, this.sourceTexture);\n this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGBA, this.gl.RGBA, this.gl.UNSIGNED_BYTE, sourceFrame);\n this.lastFrame = sourceFrame;\n }\n extractColor(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.colorProgram);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n return new VideoFrame(this.canvas, {\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? undefined,\n alpha: \"discard\"\n });\n }\n extractAlpha(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform2f(this.alphaResolutionLocation, this.canvas.width, this.canvas.height);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n const { width, height } = this.canvas;\n const chromaSamples = Math.ceil(width / 2) * Math.ceil(height / 2);\n const yuvSize = width * height + chromaSamples * 2;\n const requiredHeight = Math.ceil(yuvSize / (width * 4));\n let yuv = new Uint8Array(4 * width * requiredHeight);\n this.gl.readPixels(0, 0, width, requiredHeight, this.gl.RGBA, this.gl.UNSIGNED_BYTE, yuv);\n yuv = yuv.subarray(0, yuvSize);\n assert(yuv[width * height] === 128);\n assert(yuv[yuv.length - 1] === 128);\n const init = {\n format: \"I420\",\n codedWidth: width,\n codedHeight: height,\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? 
undefined,\n transfer: [yuv.buffer]\n };\n return new VideoFrame(yuv, init);\n }\n close() {\n this.gl.getExtension(\"WEBGL_lose_context\")?.loseContext();\n this.gl = null;\n }\n}\n\nclass VideoSampleSource extends VideoSource {\n constructor(encodingConfig) {\n validateVideoEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new VideoEncoderWrapper(this, encodingConfig);\n }\n add(videoSample, encodeOptions) {\n if (!(videoSample instanceof VideoSample)) {\n throw new TypeError(\"videoSample must be a VideoSample.\");\n }\n return this._encoder.add(videoSample, false, encodeOptions);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass AudioSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!AUDIO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid audio codec '${codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass AudioEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastNumberOfChannels = null;\n this.lastSampleRate = null;\n this.isPcmEncoder = false;\n this.outputSampleSize = null;\n this.writeOutputValue = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.lastEndSampleIndex = null;\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(audioSample, shouldClose) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.lastNumberOfChannels !== null && this.lastSampleRate !== null) {\n if (audioSample.numberOfChannels !== this.lastNumberOfChannels || audioSample.sampleRate !== this.lastSampleRate) {\n throw new Error(`Audio parameters must remain constant. 
Expected ${this.lastNumberOfChannels} channels at` + ` ${this.lastSampleRate} Hz, got ${audioSample.numberOfChannels} channels at` + ` ${audioSample.sampleRate} Hz.`);\n }\n } else {\n this.lastNumberOfChannels = audioSample.numberOfChannels;\n this.lastSampleRate = audioSample.sampleRate;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(audioSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n {\n const startSampleIndex = Math.round(audioSample.timestamp * audioSample.sampleRate);\n const endSampleIndex = Math.round((audioSample.timestamp + audioSample.duration) * audioSample.sampleRate);\n if (this.lastEndSampleIndex === null) {\n this.lastEndSampleIndex = endSampleIndex;\n } else {\n const sampleDiff = startSampleIndex - this.lastEndSampleIndex;\n if (sampleDiff >= 64) {\n const fillSample = new AudioSample({\n data: new Float32Array(sampleDiff * audioSample.numberOfChannels),\n format: \"f32-planar\",\n sampleRate: audioSample.sampleRate,\n numberOfChannels: audioSample.numberOfChannels,\n numberOfFrames: sampleDiff,\n timestamp: this.lastEndSampleIndex / audioSample.sampleRate\n });\n await this.add(fillSample, true);\n }\n this.lastEndSampleIndex += audioSample.numberOfFrames;\n }\n }\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = audioSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n await this.muxer.mutex.currentPromise;\n } else if (this.isPcmEncoder) {\n await this.doPcmEncoding(audioSample, shouldClose);\n } else {\n assert(this.encoder);\n const audioData = audioSample.toAudioData();\n this.encoder.encode(audioData);\n audioData.close();\n if 
(shouldClose) {\n audioSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n await this.muxer.mutex.currentPromise;\n }\n } finally {\n if (shouldClose) {\n audioSample.close();\n }\n }\n }\n async doPcmEncoding(audioSample, shouldClose) {\n assert(this.outputSampleSize);\n assert(this.writeOutputValue);\n const { numberOfChannels, numberOfFrames, sampleRate, timestamp } = audioSample;\n const CHUNK_SIZE = 2048;\n const outputs = [];\n for (let frame = 0;frame < numberOfFrames; frame += CHUNK_SIZE) {\n const frameCount = Math.min(CHUNK_SIZE, audioSample.numberOfFrames - frame);\n const outputSize = frameCount * numberOfChannels * this.outputSampleSize;\n const outputBuffer = new ArrayBuffer(outputSize);\n const outputView = new DataView(outputBuffer);\n outputs.push({ frameCount, view: outputView });\n }\n const allocationSize = audioSample.allocationSize({ planeIndex: 0, format: \"f32-planar\" });\n const floats = new Float32Array(allocationSize / Float32Array.BYTES_PER_ELEMENT);\n for (let i = 0;i < numberOfChannels; i++) {\n audioSample.copyTo(floats, { planeIndex: i, format: \"f32-planar\" });\n for (let j = 0;j < outputs.length; j++) {\n const { frameCount, view: view2 } = outputs[j];\n for (let k = 0;k < frameCount; k++) {\n this.writeOutputValue(view2, (k * numberOfChannels + i) * this.outputSampleSize, floats[j * CHUNK_SIZE + k]);\n }\n }\n }\n if (shouldClose) {\n audioSample.close();\n }\n const meta = {\n decoderConfig: {\n codec: this.encodingConfig.codec,\n numberOfChannels,\n sampleRate\n }\n };\n for (let i = 0;i < outputs.length; i++) {\n const { frameCount, view: view2 } = outputs[i];\n const outputBuffer = view2.buffer;\n const startFrame = i * CHUNK_SIZE;\n const packet = new EncodedPacket(new Uint8Array(outputBuffer), \"key\", timestamp + startFrame / sampleRate, frameCount / sampleRate);\n 
this.encodingConfig.onEncodedPacket?.(packet, meta);\n await this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta);\n }\n }\n ensureEncoder(audioSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const { numberOfChannels, sampleRate } = audioSample;\n const encoderConfig = buildAudioEncoderConfig({\n numberOfChannels,\n sampleRate,\n ...this.encodingConfig\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customAudioEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else if (PCM_AUDIO_CODECS.includes(this.encodingConfig.codec)) {\n this.initPcmEncoder();\n } else {\n if (typeof AudioEncoder === \"undefined\") {\n throw new Error(\"AudioEncoder is not supported by this browser.\");\n }\n const support = await AudioEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.numberOfChannels} channels, ${encoderConfig.sampleRate} Hz) is not` + ` supported by this browser. 
Consider using another codec or changing your audio parameters.`);\n }\n this.encoder = new AudioEncoder({\n output: (chunk, meta) => {\n if (this.encodingConfig.codec === \"aac\" && meta?.decoderConfig) {\n let needsDescriptionOverwrite = false;\n if (!meta.decoderConfig.description || meta.decoderConfig.description.byteLength < 2) {\n needsDescriptionOverwrite = true;\n } else {\n const audioSpecificConfig = parseAacAudioSpecificConfig(toUint8Array(meta.decoderConfig.description));\n needsDescriptionOverwrite = audioSpecificConfig.objectType === 0;\n }\n if (needsDescriptionOverwrite) {\n const objectType = Number(last(encoderConfig.codec.split(\".\")));\n meta.decoderConfig.description = buildAacAudioSpecificConfig({\n objectType,\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate\n });\n }\n }\n const packet = EncodedPacket.fromEncodedChunk(chunk);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n initPcmEncoder() {\n this.isPcmEncoder = true;\n const codec = this.encodingConfig.codec;\n const { dataType, sampleSize, littleEndian } = parsePcmCodec(codec);\n this.outputSampleSize = sampleSize;\n switch (sampleSize) {\n case 1:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint8(byteOffset, clamp((value + 1) * 127.5, 0, 255));\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n view2.setInt8(byteOffset, clamp(Math.round(value * 128), -128, 127));\n };\n } else if 
(dataType === \"ulaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toUlaw(int16));\n };\n } else if (dataType === \"alaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toAlaw(int16));\n };\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 2:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint16(byteOffset, clamp((value + 1) * 32767.5, 0, 65535), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt16(byteOffset, clamp(Math.round(value * 32767), -32768, 32767), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 3:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => setUint24(view2, byteOffset, clamp((value + 1) * 8388607.5, 0, 16777215), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => setInt24(view2, byteOffset, clamp(Math.round(value * 8388607), -8388608, 8388607), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 4:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint32(byteOffset, clamp((value + 1) * 2147483647.5, 0, 4294967295), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt32(byteOffset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), littleEndian);\n } else if (dataType === \"float\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat32(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 8:\n {\n if (dataType === \"float\") {\n this.writeOutputValue = (view2, 
byteOffset, value) => view2.setFloat64(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n default:\n {\n assertNever(sampleSize);\n assert(false);\n }\n ;\n }\n }\n async flushAndClose(forceClose) {\n if (!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else if (this.isPcmEncoder) {\n return 0;\n } else {\n return this.encoder?.encodeQueueSize ?? 0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass AudioSampleSource extends AudioSource {\n constructor(encodingConfig) {\n validateAudioEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new AudioEncoderWrapper(this, encodingConfig);\n }\n add(audioSample) {\n if (!(audioSample instanceof AudioSample)) {\n throw new TypeError(\"audioSample must be an AudioSample.\");\n }\n return this._encoder.add(audioSample, false);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass SubtitleSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!SUBTITLE_CODECS.includes(codec)) {\n throw new TypeError(`Invalid subtitle codec '${codec}'. 
Must be one of: ${SUBTITLE_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar ALL_TRACK_TYPES = [\"video\", \"audio\", \"subtitle\"];\nvar validateBaseTrackMetadata = (metadata) => {\n if (!metadata || typeof metadata !== \"object\") {\n throw new TypeError(\"metadata must be an object.\");\n }\n if (metadata.languageCode !== undefined && !isIso639Dash2LanguageCode(metadata.languageCode)) {\n throw new TypeError(\"metadata.languageCode, when provided, must be a three-letter, ISO 639-2/T language code.\");\n }\n if (metadata.name !== undefined && typeof metadata.name !== \"string\") {\n throw new TypeError(\"metadata.name, when provided, must be a string.\");\n }\n if (metadata.disposition !== undefined) {\n validateTrackDisposition(metadata.disposition);\n }\n if (metadata.maximumPacketCount !== undefined && (!Number.isInteger(metadata.maximumPacketCount) || metadata.maximumPacketCount < 0)) {\n throw new TypeError(\"metadata.maximumPacketCount, when provided, must be a non-negative integer.\");\n }\n};\n\nclass Output {\n constructor(options) {\n this.state = \"pending\";\n this._tracks = [];\n this._startPromise = null;\n this._cancelPromise = null;\n this._finalizePromise = null;\n this._mutex = new AsyncMutex;\n this._metadataTags = {};\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!(options.format instanceof OutputFormat)) {\n throw new TypeError(\"options.format must be an OutputFormat.\");\n }\n if (!(options.target instanceof Target)) {\n throw new TypeError(\"options.target must be a Target.\");\n }\n if (options.target._output) {\n throw 
new Error(\"Target is already used for another output.\");\n }\n options.target._output = this;\n this.format = options.format;\n this.target = options.target;\n this._writer = options.target._createWriter();\n this._muxer = options.format._createMuxer(this);\n }\n addVideoTrack(source, metadata = {}) {\n if (!(source instanceof VideoSource)) {\n throw new TypeError(\"source must be a VideoSource.\");\n }\n validateBaseTrackMetadata(metadata);\n if (metadata.rotation !== undefined && ![0, 90, 180, 270].includes(metadata.rotation)) {\n throw new TypeError(`Invalid video rotation: ${metadata.rotation}. Has to be 0, 90, 180 or 270.`);\n }\n if (!this.format.supportsVideoRotationMetadata && metadata.rotation) {\n throw new Error(`${this.format._name} does not support video rotation metadata.`);\n }\n if (metadata.frameRate !== undefined && (!Number.isFinite(metadata.frameRate) || metadata.frameRate <= 0)) {\n throw new TypeError(`Invalid video frame rate: ${metadata.frameRate}. Must be a positive number.`);\n }\n this._addTrack(\"video\", source, metadata);\n }\n addAudioTrack(source, metadata = {}) {\n if (!(source instanceof AudioSource)) {\n throw new TypeError(\"source must be an AudioSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"audio\", source, metadata);\n }\n addSubtitleTrack(source, metadata = {}) {\n if (!(source instanceof SubtitleSource)) {\n throw new TypeError(\"source must be a SubtitleSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"subtitle\", source, metadata);\n }\n setMetadataTags(tags) {\n validateMetadataTags(tags);\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot set metadata tags after output has been started or canceled.\");\n }\n this._metadataTags = tags;\n }\n _addTrack(type, source, metadata) {\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot add track after output has been started or canceled.\");\n }\n if (source._connectedTrack) {\n throw new Error(\"Source 
is already used for a track.\");\n }\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n const presentTracksOfThisType = this._tracks.reduce((count, track2) => count + (track2.type === type ? 1 : 0), 0);\n const maxCount = supportedTrackCounts[type].max;\n if (presentTracksOfThisType === maxCount) {\n throw new Error(maxCount === 0 ? `${this.format._name} does not support ${type} tracks.` : `${this.format._name} does not support more than ${maxCount} ${type} track` + `${maxCount === 1 ? \"\" : \"s\"}.`);\n }\n const maxTotalCount = supportedTrackCounts.total.max;\n if (this._tracks.length === maxTotalCount) {\n throw new Error(`${this.format._name} does not support more than ${maxTotalCount} tracks` + `${maxTotalCount === 1 ? \"\" : \"s\"} in total.`);\n }\n const track = {\n id: this._tracks.length + 1,\n output: this,\n type,\n source,\n metadata\n };\n if (track.type === \"video\") {\n const supportedVideoCodecs = this.format.getSupportedVideoCodecs();\n if (supportedVideoCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support video tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedVideoCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` video codecs are: ${supportedVideoCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"audio\") {\n const supportedAudioCodecs = this.format.getSupportedAudioCodecs();\n if (supportedAudioCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support audio tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedAudioCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. 
Supported` + ` audio codecs are: ${supportedAudioCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"subtitle\") {\n const supportedSubtitleCodecs = this.format.getSupportedSubtitleCodecs();\n if (supportedSubtitleCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support subtitle tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedSubtitleCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` subtitle codecs are: ${supportedSubtitleCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n }\n this._tracks.push(track);\n source._connectedTrack = track;\n }\n async start() {\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n for (const trackType of ALL_TRACK_TYPES) {\n const presentTracksOfThisType = this._tracks.reduce((count, track) => count + (track.type === trackType ? 1 : 0), 0);\n const minCount = supportedTrackCounts[trackType].min;\n if (presentTracksOfThisType < minCount) {\n throw new Error(minCount === supportedTrackCounts[trackType].max ? `${this.format._name} requires exactly ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.`);\n }\n }\n const totalMinCount = supportedTrackCounts.total.min;\n if (this._tracks.length < totalMinCount) {\n throw new Error(totalMinCount === supportedTrackCounts.total.max ? `${this.format._name} requires exactly ${totalMinCount} track` + `${totalMinCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${totalMinCount} track` + `${totalMinCount === 1 ? 
\"\" : \"s\"}.`);\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._startPromise) {\n console.warn(\"Output has already been started.\");\n return this._startPromise;\n }\n return this._startPromise = (async () => {\n this.state = \"started\";\n this._writer.start();\n const release = await this._mutex.acquire();\n await this._muxer.start();\n const promises = this._tracks.map((track) => track.source._start());\n await Promise.all(promises);\n release();\n })();\n }\n getMimeType() {\n return this._muxer.getMimeType();\n }\n async cancel() {\n if (this._cancelPromise) {\n console.warn(\"Output has already been canceled.\");\n return this._cancelPromise;\n } else if (this.state === \"finalizing\" || this.state === \"finalized\") {\n console.warn(\"Output has already been finalized.\");\n return;\n }\n return this._cancelPromise = (async () => {\n this.state = \"canceled\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(true));\n await Promise.all(promises);\n await this._writer.close();\n release();\n })();\n }\n async finalize() {\n if (this.state === \"pending\") {\n throw new Error(\"Cannot finalize before starting.\");\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Cannot finalize after canceling.\");\n }\n if (this._finalizePromise) {\n console.warn(\"Output has already been finalized.\");\n return this._finalizePromise;\n }\n return this._finalizePromise = (async () => {\n this.state = \"finalizing\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(false));\n await Promise.all(promises);\n await this._muxer.finalize();\n await this._writer.flush();\n await this._writer.finalize();\n this.state = \"finalized\";\n release();\n })();\n }\n}\n// ../../node_modules/mediabunny/dist/modules/src/index.js\n/*!\n * Copyright (c) 2026-present, 
Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MEDIABUNNY_LOADED_SYMBOL = Symbol.for(\"mediabunny loaded\");\nif (globalThis[MEDIABUNNY_LOADED_SYMBOL]) {\n console.error(`[WARNING]\nMediabunny was loaded twice.` + \" This will likely cause Mediabunny not to work correctly.\" + \" Check if multiple dependencies are importing different versions of Mediabunny,\" + \" or if something is being bundled incorrectly.\");\n}\nglobalThis[MEDIABUNNY_LOADED_SYMBOL] = true;\n\n// src/core/utils/error-handler.ts\nfunction extractErrorMessage(error) {\n if (error instanceof Error) {\n return error.message;\n }\n return String(error);\n}\n\n// src/core/utils/logger.ts\nfunction isDebugEnabled() {\n const globalAny = globalThis;\n if (globalAny.__VIDTREO_DEBUG__ === true || globalAny.__VIDTREO_DEV__ === true) {\n return true;\n }\n const envNode = typeof process !== \"undefined\" && process?.env ? 
\"development\" : undefined;\n if (envNode === \"development\" || envNode === \"test\") {\n return true;\n }\n if (typeof localStorage !== \"undefined\") {\n const flag = localStorage.getItem(\"VIDTREO_DEBUG\");\n if (flag === \"true\") {\n return true;\n }\n }\n return false;\n}\nvar isDevelopment = isDebugEnabled();\nvar ANSI_COLORS = {\n reset: \"\\x1B[0m\",\n bright: \"\\x1B[1m\",\n dim: \"\\x1B[2m\",\n red: \"\\x1B[31m\",\n green: \"\\x1B[32m\",\n yellow: \"\\x1B[33m\",\n blue: \"\\x1B[34m\",\n magenta: \"\\x1B[35m\",\n cyan: \"\\x1B[36m\",\n white: \"\\x1B[37m\",\n gray: \"\\x1B[90m\"\n};\nfunction formatMessage(level, message, options) {\n if (!isDevelopment) {\n return \"\";\n }\n const prefix = options?.prefix || `[${level.toUpperCase()}]`;\n const color = options?.color || getDefaultColor(level);\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n return `${colorCode}${prefix}${resetCode} ${message}`;\n}\nfunction getDefaultColor(level) {\n switch (level) {\n case \"error\":\n return \"red\";\n case \"warn\":\n return \"yellow\";\n case \"info\":\n return \"cyan\";\n case \"debug\":\n return \"gray\";\n default:\n return \"white\";\n }\n}\nfunction log(level, message, ...args) {\n if (!isDevelopment) {\n return;\n }\n const formatted = formatMessage(level, message);\n console[level](formatted, ...args);\n}\nvar logger = {\n log: (message, ...args) => {\n log(\"log\", message, ...args);\n },\n info: (message, ...args) => {\n log(\"info\", message, ...args);\n },\n warn: (message, ...args) => {\n log(\"warn\", message, ...args);\n },\n error: (message, ...args) => {\n log(\"error\", message, ...args);\n },\n debug: (message, ...args) => {\n log(\"debug\", message, ...args);\n },\n group: (label, color = \"cyan\") => {\n if (!isDevelopment) {\n return;\n }\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n console.group(`${colorCode}${label}${resetCode}`);\n },\n groupEnd: () => {\n if 
(!isDevelopment) {\n return;\n }\n console.groupEnd();\n }\n};\n\n// src/core/utils/validation.ts\nfunction requireNonNull(value, message) {\n if (value === null || value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireDefined(value, message) {\n if (value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireInitialized(value, componentName) {\n if (value === null || value === undefined) {\n throw new Error(`${componentName} is not initialized`);\n }\n return value;\n}\n\n// src/core/processor/worker/audio-state.ts\nvar MILLISECONDS_PER_SECOND = 1000;\n\nclass AudioState {\n getNowMilliseconds;\n isPaused = false;\n isMuted = false;\n pausedDuration = 0;\n pauseStartedAt = null;\n lastAudioTimestamp = 0;\n isProcessingActive = false;\n constructor(dependencies) {\n this.getNowMilliseconds = dependencies.getNowMilliseconds;\n }\n reset() {\n this.isPaused = false;\n this.isMuted = false;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.lastAudioTimestamp = 0;\n this.isProcessingActive = false;\n }\n setProcessingActive(isActive) {\n this.isProcessingActive = isActive;\n }\n isActive() {\n return this.isProcessingActive;\n }\n toggleMuted() {\n this.isMuted = !this.isMuted;\n return this.isMuted;\n }\n setMuted(isMuted) {\n this.isMuted = isMuted;\n }\n getIsMuted() {\n return this.isMuted;\n }\n getIsPaused() {\n return this.isPaused;\n }\n getPausedDuration() {\n return this.pausedDuration;\n }\n pause() {\n if (this.isPaused) {\n return false;\n }\n this.pauseStartedAt = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND;\n this.isPaused = true;\n return true;\n }\n resume() {\n if (!this.isPaused) {\n return false;\n }\n const now = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND;\n if (this.pauseStartedAt !== null) {\n this.pausedDuration += now - this.pauseStartedAt;\n }\n this.pauseStartedAt = null;\n this.isPaused = false;\n return true;\n }\n getAudioTimestamp(timestamp) 
{\n if (timestamp >= this.lastAudioTimestamp) {\n return timestamp;\n }\n return this.lastAudioTimestamp;\n }\n updateLastAudioTimestamp(timestamp, duration) {\n this.lastAudioTimestamp = timestamp + duration;\n }\n getLastAudioTimestamp() {\n return this.lastAudioTimestamp;\n }\n}\n\n// src/core/processor/worker/buffer-tracker.ts\nvar BUFFER_UPDATE_INTERVAL_MILLISECONDS = 1000;\nvar BYTES_PER_KILOBYTE = 1024;\nvar FILE_SIZE_PRECISION_FACTOR = 100;\nvar FILE_SIZE_UNITS = [\"Bytes\", \"KB\", \"MB\", \"GB\"];\n\nclass BufferTracker {\n intervalId = null;\n dependencies;\n constructor(dependencies) {\n this.dependencies = dependencies;\n }\n start() {\n if (this.intervalId !== null) {\n return;\n }\n this.intervalId = this.dependencies.setInterval(() => {\n const size = this.dependencies.getBufferSize();\n const formatted = formatFileSize(size);\n this.dependencies.onBufferUpdate(size, formatted);\n }, BUFFER_UPDATE_INTERVAL_MILLISECONDS);\n }\n stop() {\n if (this.intervalId === null) {\n return;\n }\n this.dependencies.clearInterval(this.intervalId);\n this.intervalId = null;\n }\n}\nfunction formatFileSize(bytes2) {\n if (bytes2 === 0) {\n return `0 ${FILE_SIZE_UNITS[0]}`;\n }\n const base = BYTES_PER_KILOBYTE;\n const index = Math.floor(Math.log(bytes2) / Math.log(base));\n const size = Math.round(bytes2 / base ** index * FILE_SIZE_PRECISION_FACTOR) / FILE_SIZE_PRECISION_FACTOR;\n return `${size} ${FILE_SIZE_UNITS[index]}`;\n}\n\n// src/core/processor/worker/rotation-utils.ts\nvar ROTATION_DEGREES_0 = 0;\nvar ROTATION_DEGREES_90 = 90;\nvar ROTATION_DEGREES_180 = 180;\nvar ROTATION_DEGREES_270 = 270;\nvar ROTATION_DEGREES_360 = 360;\nfunction calculateFrameRotationDegrees(input) {\n if (!input.isMobileDevice) {\n return ROTATION_DEGREES_0;\n }\n const targetWidth = input.targetWidth;\n const targetHeight = input.targetHeight;\n if (typeof targetWidth !== \"number\") {\n return ROTATION_DEGREES_0;\n }\n if (typeof targetHeight !== \"number\") {\n return 
ROTATION_DEGREES_0;\n }\n const isTargetPortrait = targetHeight > targetWidth;\n const isFramePortrait = input.frameHeight > input.frameWidth;\n if (isTargetPortrait === isFramePortrait) {\n return ROTATION_DEGREES_0;\n }\n const settingsRotation = resolveRotationHint(input.settingsRotation);\n if (settingsRotation !== null) {\n return settingsRotation;\n }\n const orientationRotation = resolveRotationHint(input.orientationAngle);\n if (orientationRotation !== null) {\n return orientationRotation;\n }\n const windowRotation = resolveRotationHint(input.windowOrientation);\n if (windowRotation !== null) {\n return windowRotation;\n }\n return getFallbackRotationDegrees();\n}\nfunction resolveRotationHint(rotationHint) {\n const normalizedRotation = normalizeRotationDegrees(rotationHint);\n if (normalizedRotation === ROTATION_DEGREES_90) {\n return ROTATION_DEGREES_90;\n }\n if (normalizedRotation === ROTATION_DEGREES_270) {\n return ROTATION_DEGREES_270;\n }\n return null;\n}\nfunction normalizeRotationDegrees(rotationDegrees) {\n if (typeof rotationDegrees !== \"number\") {\n return null;\n }\n const normalizedValue = (rotationDegrees % ROTATION_DEGREES_360 + ROTATION_DEGREES_360) % ROTATION_DEGREES_360;\n const remainder = normalizedValue % ROTATION_DEGREES_90;\n if (remainder !== 0) {\n return null;\n }\n if (normalizedValue === ROTATION_DEGREES_0) {\n return ROTATION_DEGREES_0;\n }\n if (normalizedValue === ROTATION_DEGREES_90) {\n return ROTATION_DEGREES_90;\n }\n if (normalizedValue === ROTATION_DEGREES_180) {\n return ROTATION_DEGREES_180;\n }\n if (normalizedValue === ROTATION_DEGREES_270) {\n return ROTATION_DEGREES_270;\n }\n return null;\n}\nfunction getFallbackRotationDegrees() {\n return ROTATION_DEGREES_90;\n}\n\n// src/core/processor/worker/watermark-utils.ts\nfunction calculateWatermarkTargetSize(videoWidth, imageWidth, imageHeight) {\n const targetWidth = Math.round(videoWidth * 0.07);\n const scaleFactor = targetWidth / imageWidth;\n const 
targetHeight = Math.round(imageHeight * scaleFactor);\n return { width: targetWidth, height: targetHeight };\n}\nfunction getWatermarkPosition(options) {\n const { watermarkWidth, watermarkHeight, videoWidth, videoHeight, position } = options;\n const padding = 20;\n switch (position) {\n case \"top-left\":\n return { x: padding, y: padding };\n case \"top-right\":\n return { x: videoWidth - watermarkWidth - padding, y: padding };\n case \"bottom-left\":\n return { x: padding, y: videoHeight - watermarkHeight - padding };\n case \"bottom-right\":\n return {\n x: videoWidth - watermarkWidth - padding,\n y: videoHeight - watermarkHeight - padding\n };\n case \"center\":\n return {\n x: (videoWidth - watermarkWidth) / 2,\n y: (videoHeight - watermarkHeight) / 2\n };\n default:\n return {\n x: videoWidth - watermarkWidth - padding,\n y: videoHeight - watermarkHeight - padding\n };\n }\n}\n\n// src/core/processor/worker/frame-compositor.ts\nvar DOUBLE_VALUE = 2;\nvar DEFAULT_WATERMARK_OPACITY = 1;\nvar DEFAULT_WATERMARK_BASE_WIDTH = 1280;\nvar OVERLAY_BACKGROUND_OPACITY = 0.6;\nvar OVERLAY_PADDING = 16;\nvar OVERLAY_TEXT_COLOR = \"#ffffff\";\nvar OVERLAY_FONT_SIZE = 16;\nvar OVERLAY_FONT_FAMILY = \"Arial, sans-serif\";\nvar OVERLAY_MIN_WIDTH = 200;\nvar OVERLAY_MIN_HEIGHT = 50;\nvar OVERLAY_COLOR_CHANNEL_VALUE = 20;\nvar OVERLAY_BORDER_RADIUS = 50;\nvar COMPOSITION_CONTEXT_ERROR_MESSAGE = \"Failed to get composition canvas context\";\nvar RECORDER_WORKER_LOG_PREFIX = \"[RecorderWorker]\";\nvar ROTATION_RADIANS_90 = Math.PI * ROTATION_DEGREES_90 / ROTATION_DEGREES_180;\nvar ROTATION_RADIANS_270 = Math.PI * ROTATION_DEGREES_270 / ROTATION_DEGREES_180;\n\nclass FrameCompositor {\n overlayCanvas = null;\n compositionCanvas = null;\n compositionContext = null;\n watermarkCanvas = null;\n frameRotationDegrees = null;\n videoSettings = null;\n viewportMetadata = null;\n isMobileDevice = false;\n logger;\n fetchResource;\n createImageBitmap;\n sendDebugLog;\n 
constructor(dependencies) {\n this.logger = dependencies.logger;\n this.fetchResource = dependencies.fetchResource;\n this.createImageBitmap = dependencies.createImageBitmap;\n this.sendDebugLog = dependencies.sendDebugLog;\n }\n reset() {\n this.overlayCanvas = null;\n this.compositionCanvas = null;\n this.compositionContext = null;\n this.watermarkCanvas = null;\n this.frameRotationDegrees = null;\n this.videoSettings = null;\n this.viewportMetadata = null;\n this.isMobileDevice = false;\n }\n setVideoSettings(settings) {\n this.videoSettings = settings;\n this.frameRotationDegrees = null;\n }\n setViewportMetadata(metadata) {\n this.viewportMetadata = metadata;\n this.frameRotationDegrees = null;\n }\n setIsMobileDevice(isMobileDevice) {\n this.isMobileDevice = isMobileDevice;\n this.frameRotationDegrees = null;\n }\n async prepareWatermark(config) {\n const watermarkConfig = config.watermark;\n if (!watermarkConfig) {\n return;\n }\n if (this.watermarkCanvas) {\n return;\n }\n const url2 = watermarkConfig.url;\n let opacity = DEFAULT_WATERMARK_OPACITY;\n if (typeof watermarkConfig.opacity === \"number\") {\n opacity = watermarkConfig.opacity;\n }\n const response = await this.fetchResource(url2, { mode: \"cors\" }).catch((error) => {\n this.logWatermarkError(url2, error);\n return null;\n });\n if (!response) {\n return;\n }\n if (!response.ok) {\n const httpError = new Error(`HTTP error! status: ${response.status}`);\n this.logWatermarkError(url2, httpError);\n return;\n }\n const blob = await response.blob().catch((error) => {\n this.logWatermarkError(url2, error);\n return null;\n });\n if (!blob) {\n return;\n }\n let isVectorImageFormat = false;\n if (url2.toLowerCase().endsWith(\".svg\")) {\n isVectorImageFormat = true;\n }\n if (blob.type === \"image/svg+xml\") {\n isVectorImageFormat = true;\n }\n if (isVectorImageFormat) {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX} Loading SVG watermark. 
Note: Some environments may not support SVG in createImageBitmap inside workers. If the watermark doesn't appear, consider using a PNG or a Data URL.`);\n }\n const imageBitmap = await this.createImageBitmap(blob).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n const bitmapError = new Error(`Failed to create ImageBitmap from blob (${blob.type}). Errors can happen with SVGs in workers or invalid formats: ${errorMessage}`);\n this.logWatermarkError(url2, bitmapError);\n return null;\n });\n if (!imageBitmap) {\n return;\n }\n let videoWidth = DEFAULT_WATERMARK_BASE_WIDTH;\n if (typeof config.width === \"number\") {\n videoWidth = config.width;\n }\n const { width: targetWidth, height: targetHeight } = calculateWatermarkTargetSize(videoWidth, imageBitmap.width, imageBitmap.height);\n const scaleFactor = targetWidth / imageBitmap.width;\n const canvas = new OffscreenCanvas(targetWidth, targetHeight);\n const context = requireNonNull(canvas.getContext(\"2d\", { willReadFrequently: false }), \"Failed to get watermark canvas context\");\n context.globalAlpha = opacity;\n context.drawImage(imageBitmap, 0, 0, targetWidth, targetHeight);\n context.globalAlpha = 1;\n imageBitmap.close();\n this.watermarkCanvas = canvas;\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Watermark prepared with pre-applied opacity`, {\n width: canvas.width,\n height: canvas.height,\n opacity,\n scaleFactor\n });\n }\n composeFrame(parameters) {\n const compositionPlan = this.getCompositionPlan(parameters);\n if (!compositionPlan.needsComposition) {\n return { frameToProcess: parameters.videoFrame, imageBitmap: null };\n }\n const dimensions = this.getValidFrameDimensions(parameters.videoFrame, compositionPlan.rotationDegrees);\n if (!dimensions) {\n return { frameToProcess: parameters.videoFrame, imageBitmap: null };\n }\n const width = dimensions.width;\n const height = dimensions.height;\n const context = this.ensureCompositionCanvas(width, height);\n 
context.clearRect(0, 0, width, height);\n this.drawVideoFrame({\n context,\n videoFrame: parameters.videoFrame,\n rotationDegrees: compositionPlan.rotationDegrees,\n width,\n height\n });\n this.applyOverlayIfNeeded(context, width, compositionPlan.shouldApplyOverlay, parameters.overlayConfig);\n this.applyWatermarkIfNeeded({\n context,\n videoWidth: width,\n videoHeight: height,\n needsWatermark: compositionPlan.needsWatermark,\n config: parameters.config\n });\n return this.buildCompositionResult(parameters.videoFrame);\n }\n getCompositionPlan(parameters) {\n const rotationDegrees = this.getFrameRotationDegrees(parameters.videoFrame, parameters.config);\n const shouldRotateFrame = rotationDegrees !== ROTATION_DEGREES_0;\n let needsWatermark = false;\n if (parameters.config.watermark && this.watermarkCanvas) {\n needsWatermark = true;\n }\n let needsComposition = false;\n if (parameters.shouldApplyOverlay) {\n needsComposition = true;\n }\n if (needsWatermark) {\n needsComposition = true;\n }\n if (shouldRotateFrame) {\n needsComposition = true;\n }\n return {\n rotationDegrees,\n shouldApplyOverlay: parameters.shouldApplyOverlay,\n needsWatermark,\n needsComposition\n };\n }\n getValidFrameDimensions(videoFrame, rotationDegrees) {\n const dimensions = this.getFrameDimensions(videoFrame, rotationDegrees);\n const width = dimensions.width;\n const height = dimensions.height;\n let hasInvalidDimensions = false;\n if (width <= 0) {\n hasInvalidDimensions = true;\n }\n if (height <= 0) {\n hasInvalidDimensions = true;\n }\n if (hasInvalidDimensions) {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX} Invalid video frame dimensions, skipping composition`, { width, height });\n return null;\n }\n return { width, height };\n }\n applyOverlayIfNeeded(context, videoWidth, shouldApplyOverlay, overlayConfig) {\n if (!(shouldApplyOverlay && overlayConfig)) {\n return;\n }\n if (!this.overlayCanvas) {\n this.overlayCanvas = this.createOverlayCanvas(overlayConfig.text);\n 
this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Overlay canvas created`, {\n overlayWidth: this.overlayCanvas.width,\n overlayHeight: this.overlayCanvas.height\n });\n }\n if (!this.overlayCanvas) {\n return;\n }\n const overlayPosition = this.getOverlayPosition(this.overlayCanvas.width, videoWidth);\n context.drawImage(this.overlayCanvas, overlayPosition.horizontal, overlayPosition.vertical);\n }\n applyWatermarkIfNeeded(options) {\n const { context, videoWidth, videoHeight, needsWatermark, config } = options;\n const watermarkCanvas = this.watermarkCanvas;\n if (!(needsWatermark && watermarkCanvas && config.watermark)) {\n return;\n }\n const watermarkPosition = getWatermarkPosition({\n watermarkWidth: watermarkCanvas.width,\n watermarkHeight: watermarkCanvas.height,\n videoWidth,\n videoHeight,\n position: config.watermark.position\n });\n context.drawImage(watermarkCanvas, watermarkPosition.x, watermarkPosition.y);\n }\n buildCompositionResult(videoFrame) {\n const compositionCanvas = requireNonNull(this.compositionCanvas, \"Composition canvas must exist after ensureCompositionCanvas\");\n const imageBitmap = compositionCanvas.transferToImageBitmap();\n let frameInitialization = {};\n if (typeof videoFrame.timestamp === \"number\") {\n frameInitialization = {\n ...frameInitialization,\n timestamp: videoFrame.timestamp\n };\n }\n if (typeof videoFrame.duration === \"number\") {\n frameInitialization = {\n ...frameInitialization,\n duration: videoFrame.duration\n };\n }\n const frameToProcess = new VideoFrame(imageBitmap, frameInitialization);\n return { frameToProcess, imageBitmap };\n }\n createOverlayCanvas(text) {\n requireDefined(text, \"Overlay text is required\");\n const canvas = new OffscreenCanvas(1, 1);\n const context = requireNonNull(canvas.getContext(\"2d\"), \"Failed to get OffscreenCanvas context\");\n context.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n const textMetrics = context.measureText(text);\n const textWidth = 
textMetrics.width;\n const textHeight = OVERLAY_FONT_SIZE;\n const overlayWidth = Math.max(OVERLAY_MIN_WIDTH, textWidth + OVERLAY_PADDING * DOUBLE_VALUE);\n const overlayHeight = Math.max(OVERLAY_MIN_HEIGHT, textHeight + OVERLAY_PADDING * DOUBLE_VALUE);\n canvas.width = overlayWidth;\n canvas.height = overlayHeight;\n const redValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const greenValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const blueValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const borderRadius = OVERLAY_BORDER_RADIUS;\n context.fillStyle = `rgba(${redValue}, ${greenValue}, ${blueValue}, ${OVERLAY_BACKGROUND_OPACITY})`;\n context.beginPath();\n context.roundRect(0, 0, overlayWidth, overlayHeight, borderRadius);\n context.fill();\n context.fillStyle = OVERLAY_TEXT_COLOR;\n context.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n context.textBaseline = \"middle\";\n context.textAlign = \"center\";\n const textHorizontalPosition = overlayWidth / DOUBLE_VALUE;\n const textVerticalPosition = overlayHeight / DOUBLE_VALUE;\n context.fillText(text, textHorizontalPosition, textVerticalPosition);\n return canvas;\n }\n getOverlayPosition(overlayWidth, videoWidth) {\n return {\n horizontal: videoWidth - overlayWidth - OVERLAY_PADDING,\n vertical: OVERLAY_PADDING\n };\n }\n ensureCompositionCanvas(width, height) {\n if (!this.compositionCanvas) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Composition canvas created`, {\n width,\n height\n });\n return this.compositionContext;\n }\n if (!this.compositionContext) {\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n return this.compositionContext;\n }\n const widthChanged = this.compositionCanvas.width !== 
width;\n const heightChanged = this.compositionCanvas.height !== height;\n let shouldResize = false;\n if (widthChanged) {\n shouldResize = true;\n }\n if (heightChanged) {\n shouldResize = true;\n }\n if (shouldResize) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Composition canvas resized`, {\n width,\n height\n });\n return this.compositionContext;\n }\n return this.compositionContext;\n }\n getFrameRotationDegrees(videoFrame, config) {\n if (this.frameRotationDegrees !== null) {\n return this.frameRotationDegrees;\n }\n const rotation = this.determineFrameRotationDegrees(videoFrame, config);\n this.frameRotationDegrees = rotation;\n const rotationLog = JSON.stringify({\n rotationDegrees: rotation,\n configWidth: config.width,\n configHeight: config.height,\n settingsWidth: this.videoSettings?.width,\n settingsHeight: this.videoSettings?.height,\n facingMode: this.videoSettings?.facingMode,\n frameDisplayWidth: videoFrame.displayWidth,\n frameDisplayHeight: videoFrame.displayHeight\n });\n this.sendDebugLog(`${RECORDER_WORKER_LOG_PREFIX} Rotation decision`, rotationLog);\n return rotation;\n }\n determineFrameRotationDegrees(videoFrame, config) {\n const configWidth = config.width;\n const configHeight = config.height;\n let facingMode;\n let settingsRotation;\n if (this.videoSettings) {\n facingMode = this.videoSettings.facingMode;\n settingsRotation = this.videoSettings.rotation;\n }\n let orientationAngle;\n let windowOrientation;\n if (this.viewportMetadata) {\n orientationAngle = this.viewportMetadata.orientationAngle;\n windowOrientation = this.viewportMetadata.windowOrientation;\n }\n return calculateFrameRotationDegrees({\n isMobileDevice: this.isMobileDevice,\n targetWidth: configWidth,\n targetHeight: configHeight,\n frameWidth: 
videoFrame.displayWidth,\n frameHeight: videoFrame.displayHeight,\n facingMode,\n settingsRotation,\n orientationAngle,\n windowOrientation\n });\n }\n getFrameDimensions(videoFrame, rotationDegrees) {\n let width = videoFrame.displayWidth;\n let height = videoFrame.displayHeight;\n let shouldSwapDimensions = false;\n if (rotationDegrees === ROTATION_DEGREES_90) {\n shouldSwapDimensions = true;\n }\n if (rotationDegrees === ROTATION_DEGREES_270) {\n shouldSwapDimensions = true;\n }\n if (shouldSwapDimensions) {\n width = videoFrame.displayHeight;\n height = videoFrame.displayWidth;\n }\n return { width, height };\n }\n drawVideoFrame(parameters) {\n const { context, videoFrame, rotationDegrees, width, height } = parameters;\n const sourceWidth = videoFrame.displayWidth;\n const sourceHeight = videoFrame.displayHeight;\n context.setTransform(1, 0, 0, 1, 0, 0);\n if (rotationDegrees === ROTATION_DEGREES_90) {\n context.translate(width, 0);\n context.rotate(ROTATION_RADIANS_90);\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n context.setTransform(1, 0, 0, 1, 0, 0);\n return;\n }\n if (rotationDegrees === ROTATION_DEGREES_270) {\n context.translate(0, height);\n context.rotate(ROTATION_RADIANS_270);\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n context.setTransform(1, 0, 0, 1, 0, 0);\n return;\n }\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n }\n logWatermarkError(url2, error) {\n const errorMessage = extractErrorMessage(error);\n this.logger.error(`${RECORDER_WORKER_LOG_PREFIX} Failed to load watermark. This is often caused by CORS if the image is on another domain. 
Try using a Data URL (base64) or ensure the server has Access-Control-Allow-Origin: *.`, {\n url: url2,\n error: errorMessage\n });\n }\n}\n\n// src/core/processor/worker/stop-finalization.ts\nvar STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS = 500;\nvar STOP_PENDING_WRITES_POLL_INTERVAL_MILLISECONDS = 10;\nvar ERROR_STOP_PENDING_WRITES_TIMEOUT = \"stop.pending-writes-timeout\";\nfunction createDefaultNowMilliseconds() {\n return () => performance.now();\n}\nfunction createDefaultWaitMilliseconds() {\n return (milliseconds) => new Promise((resolve) => {\n globalThis.setTimeout(resolve, milliseconds);\n });\n}\nasync function waitForPendingWritesToDrain(dependencies) {\n let getNowMilliseconds = dependencies.getNowMilliseconds;\n if (!getNowMilliseconds) {\n getNowMilliseconds = createDefaultNowMilliseconds();\n }\n let waitMilliseconds = dependencies.waitMilliseconds;\n if (!waitMilliseconds) {\n waitMilliseconds = createDefaultWaitMilliseconds();\n }\n let timeoutMilliseconds = dependencies.timeoutMilliseconds;\n if (timeoutMilliseconds === undefined) {\n timeoutMilliseconds = STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS;\n }\n const startedAtMilliseconds = getNowMilliseconds();\n let pendingWriteCount = dependencies.getPendingWriteCount();\n while (pendingWriteCount > 0) {\n const elapsedMilliseconds = getNowMilliseconds() - startedAtMilliseconds;\n if (elapsedMilliseconds >= timeoutMilliseconds) {\n throw new Error(ERROR_STOP_PENDING_WRITES_TIMEOUT);\n }\n await waitMilliseconds(STOP_PENDING_WRITES_POLL_INTERVAL_MILLISECONDS);\n pendingWriteCount = dependencies.getPendingWriteCount();\n }\n}\n\n// src/core/processor/worker/stop-transition.ts\nasync function runStopTransition(dependencies) {\n await dependencies.finalizeStopSequence().then(() => dependencies.completeStop()).catch((error) => {\n return dependencies.recoverStopFailure().then(() => {\n throw error;\n });\n }).finally(() => {\n dependencies.clearStoppingFlag();\n });\n}\n\n// 
src/core/processor/worker/timestamp-manager.ts
var DEFAULT_FRAME_RATE = 30;
var DEFAULT_KEY_FRAME_INTERVAL_SECONDS = 5;
var MILLISECONDS_PER_SECOND2 = 1000;
var MICROSECONDS_PER_SECOND = 1e6;
// Video may lead audio by at most 50 ms and lag it by at most 100 ms.
var MAX_LEAD_SECONDS = 0.05;
var MAX_LAG_SECONDS = 0.1;
var MAX_DRIFT_CORRECTION_SECONDS = MAX_LAG_SECONDS;
// Applied drift correction halves on every prepared frame.
var DRIFT_OFFSET_DECAY_FACTOR = 0.5;
var DRIFT_LOG_FRAME_INTERVAL = 90;
var RECORDER_WORKER_LOG_PREFIX2 = "[RecorderWorker]";

// Owns per-recording frame timing: normalizes raw VideoFrame timestamps to a
// recording-relative clock, keeps video monotonic and within the lead/lag
// bounds relative to audio, decides key-frame placement, and handles the
// timestamp continuation after a source switch.
class TimestampManager {
  frameRate = DEFAULT_FRAME_RATE;
  // Timestamp (seconds, recording-relative) of the last committed frame.
  lastVideoTimestamp = 0;
  // First raw timestamp seen (seconds); null until the first frame arrives.
  baseVideoTimestamp = null;
  frameCount = 0;
  lastKeyFrameTimestamp = 0;
  forceNextKeyFrame = false;
  // Residual A/V drift correction (seconds), decayed each frame.
  driftOffset = 0;
  logger;
  getNowMilliseconds;
  constructor(dependencies) {
    this.logger = dependencies.logger;
    this.getNowMilliseconds = dependencies.getNowMilliseconds;
  }
  // Re-arms all timing state for a new recording. Non-positive or missing
  // frameRate falls back to DEFAULT_FRAME_RATE.
  reset(frameRate) {
    let resolvedFrameRate = DEFAULT_FRAME_RATE;
    if (typeof frameRate === "number" && frameRate > 0) {
      resolvedFrameRate = frameRate;
    }
    this.frameRate = resolvedFrameRate;
    this.lastVideoTimestamp = 0;
    this.baseVideoTimestamp = null;
    this.frameCount = 0;
    this.lastKeyFrameTimestamp = 0;
    this.forceNextKeyFrame = false;
    this.driftOffset = 0;
  }
  setFrameRate(frameRate) {
    this.frameRate = frameRate;
  }
  getFrameRate() {
    return this.frameRate;
  }
  getFrameCount() {
    return this.frameCount;
  }
  getLastVideoTimestamp() {
    return this.lastVideoTimestamp;
  }
  getBaseVideoTimestamp() {
    return this.baseVideoTimestamp;
  }
  // Converts a frame's raw timestamp into a recording-relative timestamp in
  // seconds. The first frame establishes baseVideoTimestamp and flushes
  // queued visibility updates via the supplied callback. The first frame
  // after a source switch (frameCount reset to 0 while lastVideoTimestamp is
  // still positive) instead rebases so the new source continues seamlessly
  // from the previous timeline. Result is clamped non-negative and never
  // moves backwards (backward raw times get previous + one frame period).
  calculateVideoFrameTimestamp(parameters) {
    if (this.frameRate <= 0) {
      throw new Error("Frame rate must be greater than zero");
    }
    // Fallback clock when the frame carries no timestamp of its own.
    let rawTimestamp = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND2;
    const hasTimestamp = typeof parameters.videoFrame.timestamp === "number" && parameters.videoFrame.timestamp !== null;
    if (hasTimestamp) {
      // VideoFrame timestamps are in microseconds; convert to seconds.
      rawTimestamp = parameters.videoFrame.timestamp / MICROSECONDS_PER_SECOND;
    }
    if (this.baseVideoTimestamp === null) {
      this.baseVideoTimestamp = rawTimestamp;
      const logData = {
        baseVideoTimestamp: this.baseVideoTimestamp,
        recordingStartTime: parameters.recordingStartTime,
        difference: this.baseVideoTimestamp - parameters.recordingStartTime,
        pendingUpdates: parameters.pendingVisibilityUpdatesCount
      };
      this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} baseVideoTimestamp set`, logData);
      parameters.processPendingVisibilityUpdates();
    }
    if (this.baseVideoTimestamp === null) {
      throw new Error("Base video timestamp must be set");
    }
    if (this.frameCount === 0 && this.lastVideoTimestamp > 0) {
      // First frame from a NEW source: rebase so this frame maps exactly onto
      // the continuation timestamp chosen by handleSourceSwitch.
      const originalBase = this.baseVideoTimestamp;
      const offset = rawTimestamp - originalBase;
      this.baseVideoTimestamp = rawTimestamp - this.lastVideoTimestamp;
      const frameTimestamp2 = this.lastVideoTimestamp;
      this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} First frame after source switch`, {
        rawTimestamp,
        originalBase,
        offset,
        adjustedBaseVideoTimestamp: this.baseVideoTimestamp,
        continuationTimestamp: this.lastVideoTimestamp,
        frameTimestamp: frameTimestamp2,
        isScreenCapture: parameters.isScreenCapture
      });
      return frameTimestamp2;
    }
    // Paused time is excluded so the output timeline has no gaps.
    const normalizedTimestamp = rawTimestamp - this.baseVideoTimestamp - parameters.pausedDuration;
    let previousTimestamp = 0;
    if (this.lastVideoTimestamp > 0) {
      previousTimestamp = this.lastVideoTimestamp;
    }
    let frameTimestamp = normalizedTimestamp;
    if (normalizedTimestamp < previousTimestamp) {
      // Keep timestamps monotonic: step forward by one frame period.
      frameTimestamp = previousTimestamp + 1 / this.frameRate;
    }
    if (frameTimestamp < 0) {
      this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX2} Negative frame timestamp detected, clamping to zero`, { frameTimestamp, normalizedTimestamp, previousTimestamp });
      return 0;
    }
    if (this.lastVideoTimestamp === 0) {
      this.lastVideoTimestamp = frameTimestamp;
    }
    this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} Frame timestamp calculation`, {
      rawTimestamp,
      baseVideoTimestamp: this.baseVideoTimestamp,
      normalizedTimestamp,
      previousTimestamp,
      frameTimestamp,
      lastVideoTimestamp: this.lastVideoTimestamp,
      isScreenCapture: parameters.isScreenCapture,
      frameCount: this.frameCount
    });
    return frameTimestamp;
  }
  // Produces the final encode timing for one frame: applies the (decaying)
  // drift offset, clamps video to within MAX_LEAD/MAX_LAG of the last audio
  // timestamp, enforces monotonic advance of at least one frame period, and
  // decides whether this frame must be a key frame (forced, elapsed-seconds
  // interval, or frame-count interval — whichever triggers first).
  prepareFrameTiming(parameters) {
    const frameDuration = 1 / this.frameRate;
    let adjustedTimestamp = parameters.frameTimestamp + this.driftOffset;
    if (adjustedTimestamp - parameters.lastAudioTimestamp > MAX_LEAD_SECONDS) {
      adjustedTimestamp = parameters.lastAudioTimestamp + MAX_LEAD_SECONDS;
    }
    if (parameters.lastAudioTimestamp - adjustedTimestamp > MAX_LAG_SECONDS) {
      adjustedTimestamp = parameters.lastAudioTimestamp - MAX_LAG_SECONDS;
    }
    const monotonicTimestamp = this.lastVideoTimestamp + frameDuration;
    let finalTimestamp = adjustedTimestamp;
    if (finalTimestamp < monotonicTimestamp) {
      finalTimestamp = monotonicTimestamp;
    }
    let keyFrameIntervalSeconds = parameters.keyFrameIntervalSeconds;
    // Negated comparison also catches NaN/undefined, not just <= 0.
    if (!(keyFrameIntervalSeconds > 0)) {
      keyFrameIntervalSeconds = DEFAULT_KEY_FRAME_INTERVAL_SECONDS;
    }
    let keyFrameIntervalFrames = Math.round(keyFrameIntervalSeconds * this.frameRate);
    if (keyFrameIntervalFrames < 1) {
      keyFrameIntervalFrames = 1;
    }
    const timeSinceLastKeyFrame = finalTimestamp - this.lastKeyFrameTimestamp;
    let isKeyFrame = false;
    if (this.forceNextKeyFrame) {
      isKeyFrame = true;
    }
    if (timeSinceLastKeyFrame >= keyFrameIntervalSeconds) {
      isKeyFrame = true;
    }
    if (this.frameCount % keyFrameIntervalFrames === 0) {
      isKeyFrame = true;
    }
    // Decay the drift correction so it fades out over subsequent frames.
    this.driftOffset *= DRIFT_OFFSET_DECAY_FACTOR;
    return {
      finalTimestamp,
      frameDuration,
      isKeyFrame
    };
  }
  // Records a successfully encoded frame: advances counters, updates the
  // key-frame bookkeeping, and every DRIFT_LOG_FRAME_INTERVAL frames (while
  // audio is active) reports the current audio/video drift for logging.
  commitFrame(parameters) {
    this.frameCount += 1;
    this.lastVideoTimestamp = parameters.finalTimestamp;
    if (parameters.isKeyFrame) {
      this.lastKeyFrameTimestamp = parameters.finalTimestamp;
      this.forceNextKeyFrame = false;
    }
    let shouldLogDrift = false;
    let audioVideoDrift = 0;
    if (this.frameCount % DRIFT_LOG_FRAME_INTERVAL === 0 && parameters.audioProcessingActive) {
      audioVideoDrift = parameters.lastAudioTimestamp - this.lastVideoTimestamp;
      shouldLogDrift = true;
    }
    return {
      shouldLogDrift,
      audioVideoDrift,
      frameCount: this.frameCount,
      lastVideoTimestamp: this.lastVideoTimestamp
    };
  }
  // Prepares timing state for a camera/screen source switch: captures a
  // clamped drift correction, picks a continuation timestamp one frame period
  // past both streams, zeroes frameCount (which triggers the rebase branch in
  // calculateVideoFrameTimestamp), and forces the next frame to be a key frame.
  handleSourceSwitch(lastAudioTimestamp) {
    if (this.baseVideoTimestamp === null) {
      throw new Error("Base video timestamp must be set for source switch");
    }
    const minFrameDuration = 1 / this.frameRate;
    const rawDrift = lastAudioTimestamp - this.lastVideoTimestamp;
    this.driftOffset = clampValue(rawDrift, -MAX_DRIFT_CORRECTION_SECONDS, MAX_DRIFT_CORRECTION_SECONDS);
    const continuationTimestamp = Math.max(lastAudioTimestamp, this.lastVideoTimestamp) + minFrameDuration;
    const previousVideoTimestamp = this.lastVideoTimestamp;
    this.lastVideoTimestamp = continuationTimestamp;
    this.frameCount = 0;
    this.forceNextKeyFrame = true;
    return {
      continuationTimestamp,
      previousVideoTimestamp,
      minFrameDuration,
      rawDrift,
      driftOffset: this.driftOffset
    };
  }
}
// Clamps value into [min, max].
function clampValue(value, min, max) {
  return Math.max(min, Math.min(max, value));
}

// src/core/processor/worker/types.ts
var WORKER_MESSAGE_TYPE_PROBE = "probe";
var WORKER_MESSAGE_TYPE_AUDIO_CHUNK = "audioChunk";
var WORKER_RESPONSE_TYPE_PROBE_RESULT = "probeResult";
var WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR = "f32-planar";

// src/core/processor/worker/visibility-tracker.ts
var MILLISECONDS_PER_SECOND3 = 1000;
var OVERLAY_LOG_FRAME_INTERVAL = 90;
var RECORDER_WORKER_LOG_PREFIX3 = "[RecorderWorker]";

// Tracks page-hidden intervals (recording-relative seconds) so that frames
// captured while the page was hidden can be marked for an overlay. Updates
// that arrive before the first video frame are queued and flushed once the
// base video timestamp exists.
class VisibilityTracker {
  // Completed {start, end} hidden intervals.
  hiddenIntervals = [];
  // Start of an interval still open (page currently hidden), or null.
  currentHiddenIntervalStart = null;
  // Updates queued until the base video timestamp is established.
  pendingVisibilityUpdates = [];
  recordingStartTime = 0;
  isScreenCapture = false;
  logger;
  constructor(dependencies) {
    this.logger = dependencies.logger;
  }
  // Clears all interval state for a new recording.
  reset(recordingStartTime, isScreenCapture) {
    this.hiddenIntervals = [];
    this.currentHiddenIntervalStart = null;
    this.pendingVisibilityUpdates = [];
this.recordingStartTime = recordingStartTime;
    this.isScreenCapture = isScreenCapture;
  }
  setRecordingStartTime(recordingStartTime) {
    this.recordingStartTime = recordingStartTime;
  }
  setIsScreenCapture(isScreenCapture) {
    this.isScreenCapture = isScreenCapture;
  }
  getPendingUpdatesCount() {
    return this.pendingVisibilityUpdates.length;
  }
  // True when the given frame timestamp falls inside a recorded hidden
  // interval (completed or still open) and the overlay is enabled. Screen
  // captures never get the overlay. Logs a sampled debug line every
  // OVERLAY_LOG_FRAME_INTERVAL frames.
  shouldApplyOverlay(parameters) {
    if (!parameters.overlayEnabled) {
      return false;
    }
    if (this.isScreenCapture) {
      return false;
    }
    const completedIntervalMatch = this.hiddenIntervals.some((interval) => parameters.timestamp >= interval.start && parameters.timestamp <= interval.end);
    const currentIntervalMatch = this.currentHiddenIntervalStart !== null && parameters.timestamp >= this.currentHiddenIntervalStart;
    let shouldApply = false;
    if (completedIntervalMatch) {
      shouldApply = true;
    }
    if (currentIntervalMatch) {
      shouldApply = true;
    }
    if (parameters.frameCount % OVERLAY_LOG_FRAME_INTERVAL === 0) {
      this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Overlay check`, {
        timestamp: parameters.timestamp,
        shouldApply,
        frameCount: parameters.frameCount,
        intervalsCount: this.hiddenIntervals.length
      });
    }
    return shouldApply;
  }
  // Entry point for visibility messages. Before the base video timestamp
  // exists the update is queued; afterwards it is applied immediately.
  handleUpdateVisibility(isHidden, timestamp, hasBaseVideoTimestamp, pausedDuration) {
    if (!hasBaseVideoTimestamp) {
      this.pendingVisibilityUpdates = [
        ...this.pendingVisibilityUpdates,
        { isHidden, timestamp }
      ];
      return;
    }
    this.processVisibilityUpdate(isHidden, timestamp, pausedDuration);
  }
  // Applies every queued update in arrival order, then empties the queue.
  flushPendingUpdates(pausedDuration) {
    if (this.pendingVisibilityUpdates.length === 0) {
      return;
    }
    for (const update of this.pendingVisibilityUpdates) {
      this.processVisibilityUpdate(update.isHidden, update.timestamp, pausedDuration);
    }
    this.pendingVisibilityUpdates = [];
  }
  // Opens a hidden interval on hidden=true (if none is open) and closes it on
  // hidden=false. Timestamps arrive in milliseconds and are normalized to
  // recording-relative seconds minus paused time, clamped at 0. Intervals
  // with end <= start are discarded with a warning.
  processVisibilityUpdate(isHidden, timestamp, pausedDuration) {
    const timestampSeconds = timestamp / MILLISECONDS_PER_SECOND3;
    const normalizedTimestamp = timestampSeconds - this.recordingStartTime - pausedDuration;
    if (isHidden && this.currentHiddenIntervalStart === null) {
      this.currentHiddenIntervalStart = Math.max(0, normalizedTimestamp);
      this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Started hidden interval`, {
        start: this.currentHiddenIntervalStart
      });
    }
    if (isHidden) {
      return;
    }
    if (this.currentHiddenIntervalStart === null) {
      return;
    }
    const endTimestamp = Math.max(0, normalizedTimestamp);
    if (endTimestamp > this.currentHiddenIntervalStart) {
      const interval = {
        start: this.currentHiddenIntervalStart,
        end: endTimestamp
      };
      this.hiddenIntervals = [...this.hiddenIntervals, interval];
      this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Completed hidden interval`, {
        interval,
        duration: endTimestamp - this.currentHiddenIntervalStart,
        totalIntervals: this.hiddenIntervals.length
      });
    } else {
      this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX3} Invalid interval (end <= start), discarding`);
    }
    this.currentHiddenIntervalStart = null;
  }
}

// src/core/processor/worker/recorder-worker.ts
// StreamTarget chunk size for the muxer output (16 MiB).
var CHUNK_SIZE = 16 * 1024 * 1024;
var DEFAULT_OUTPUT_FORMAT = "mp4";
var MILLISECONDS_PER_SECOND4 = 1000;
var ERROR_AUDIO_BITRATE_INVALID = "Audio bitrate must be greater than zero";
var ERROR_AUDIO_SAMPLE_RATE_INVALID = "Audio sample rate must be greater than zero";
var ERROR_AUDIO_CHANNELS_INVALID = "Audio channels must be greater than zero";
var ERROR_AUDIO_FRAMES_INVALID = "Audio frames must be greater than zero";
var STEREO_CHANNEL_COUNT = 2;
var AUDIO_SAMPLE_AVERAGE_SCALE = 0.5;
var STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS2 = 500;
// Passed as the Mp4OutputFormat fastStart option.
var MP4_FAST_START_DISABLED = false;

class RecorderWorker {
  // Muxer output and encoder sources; null until a recording starts.
  output = null;
  videoSource = null;
  audioSource = null;
  // ReadableStream readers for incoming video/audio frames.
  videoProcessor = null;
  audioProcessor = null;
  config = null;
  videoProcessingActive = false;
  // Guard flags: messages are ignored while stopping or after finalization.
  isStopping = false;
  isFinalized = false;
  bufferTracker;
  audioState;
  timestampManager;
frameCompositor;\n overlayConfig = null;\n visibilityTracker;\n recordingStartTime = 0;\n isScreenCapture = false;\n totalSize = 0;\n expectedAudioChannels = null;\n expectedAudioSampleRate = null;\n pendingWriteCount = 0;\n constructor() {\n this.bufferTracker = new BufferTracker({\n getBufferSize: () => this.totalSize,\n onBufferUpdate: (size, formatted) => {\n const response = {\n type: \"bufferUpdate\",\n size,\n formatted\n };\n self.postMessage(response);\n },\n setInterval: (handler, timeout) => self.setInterval(handler, timeout),\n clearInterval: (intervalId) => self.clearInterval(intervalId)\n });\n this.audioState = new AudioState({\n getNowMilliseconds: () => performance.now()\n });\n this.visibilityTracker = new VisibilityTracker({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data)\n }\n });\n this.timestampManager = new TimestampManager({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data)\n },\n getNowMilliseconds: () => performance.now()\n });\n this.frameCompositor = new FrameCompositor({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data),\n error: (message, data) => logger.error(message, data)\n },\n fetchResource: (input, init) => fetch(input, init),\n createImageBitmap: (image) => createImageBitmap(image),\n sendDebugLog: (_message, _payload) => {\n return;\n }\n });\n self.addEventListener(\"message\", this.handleMessage);\n }\n shouldIgnoreMessage() {\n if (this.isStopping) {\n return true;\n }\n if (this.isFinalized) {\n return true;\n }\n return false;\n }\n handleAsyncOperation(operation, context) {\n operation.catch((error) => {\n logger.error(`[RecorderWorker] Error in ${context}:`, error);\n this.sendError(error);\n });\n }\n handleProbe() {\n const response = {\n type: WORKER_RESPONSE_TYPE_PROBE_RESULT,\n 
hasMediaStreamTrackProcessor: typeof MediaStreamTrackProcessor !== \"undefined\",\n hasVideoFrame: typeof VideoFrame !== \"undefined\",\n hasAudioData: typeof AudioData !== \"undefined\",\n hasOffscreenCanvas: typeof OffscreenCanvas !== \"undefined\",\n hasCreateImageBitmap: typeof createImageBitmap !== \"undefined\",\n hasReadableStream: typeof ReadableStream !== \"undefined\"\n };\n self.postMessage(response);\n }\n handleMessage = (event) => {\n const message = event.data;\n logger.debug(\"[RecorderWorker] Received message:\", { type: message.type });\n switch (message.type) {\n case WORKER_MESSAGE_TYPE_PROBE:\n this.handleProbe();\n return;\n case \"start\":\n this.handleStartMessage(message);\n return;\n case \"pause\":\n this.handlePause();\n return;\n case \"resume\":\n this.handleResume();\n return;\n case \"stop\":\n this.handleStopMessage();\n return;\n case \"toggleMute\":\n this.handleToggleMute();\n return;\n case WORKER_MESSAGE_TYPE_AUDIO_CHUNK:\n this.handleAudioChunk(message);\n return;\n case \"switchSource\":\n this.handleSwitchSourceMessage(message);\n return;\n case \"updateFps\":\n this.handleUpdateFps(message.fps);\n return;\n case \"updateVisibility\":\n this.visibilityTracker.handleUpdateVisibility(message.isHidden, message.timestamp, this.timestampManager.getBaseVideoTimestamp() !== null, this.audioState.getPausedDuration());\n return;\n case \"updateSourceType\":\n this.handleUpdateSourceType(message.isScreenCapture);\n return;\n default:\n this.sendError(new Error(`Unknown message type: ${message.type}`));\n }\n };\n handleStartMessage(message) {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] start ignored (stopping/finalized)\");\n return;\n }\n let videoTrack = null;\n if (message.videoTrack) {\n videoTrack = message.videoTrack;\n }\n let videoStream = null;\n if (message.videoStream) {\n videoStream = message.videoStream;\n }\n this.handleAsyncOperation(this.handleStart(message, videoTrack, videoStream), 
\"handleStart\");\n }\n handleStopMessage() {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] stop ignored (stopping/finalized)\");\n return;\n }\n this.handleAsyncOperation(this.handleStop(), \"handleStop\");\n }\n handleSwitchSourceMessage(message) {\n let videoTrack = null;\n if (message.videoTrack) {\n videoTrack = message.videoTrack;\n }\n let videoStream = null;\n if (message.videoStream) {\n videoStream = message.videoStream;\n }\n this.handleAsyncOperation(this.handleSwitchSource(videoTrack, videoStream), \"handleSwitchSource\");\n }\n validateConfig(config) {\n requireDefined(config, \"Transcode config is required\");\n if (config.width !== undefined && config.width <= 0) {\n throw new Error(\"Video width must be greater than zero\");\n }\n if (config.height !== undefined && config.height <= 0) {\n throw new Error(\"Video height must be greater than zero\");\n }\n if (config.fps !== undefined && config.fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (config.bitrate !== undefined && typeof config.bitrate === \"number\" && config.bitrate <= 0) {\n throw new Error(\"Bitrate must be greater than zero\");\n }\n if (config.keyFrameInterval <= 0) {\n throw new Error(\"Key frame interval must be greater than zero\");\n }\n }\n validateFormat(format) {\n if (format !== \"mp4\") {\n throw new Error(`Format ${format} is not yet supported in worker. 
Only MP4 is currently supported.`);\n }\n }\n initializeRecordingState(config) {\n this.config = config;\n this.timestampManager.reset(config.fps);\n this.audioState.reset();\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.pendingWriteCount = 0;\n this.videoProcessingActive = false;\n this.frameCompositor.reset();\n this.recordingStartTime = 0;\n this.visibilityTracker.reset(this.recordingStartTime, this.isScreenCapture);\n }\n setupOverlayConfig(overlayConfig) {\n let nextOverlayConfig = null;\n if (overlayConfig) {\n nextOverlayConfig = {\n enabled: overlayConfig.enabled,\n text: overlayConfig.text\n };\n }\n this.overlayConfig = nextOverlayConfig;\n let recordingStartTimeSeconds = performance.now() / MILLISECONDS_PER_SECOND4;\n if (overlayConfig && overlayConfig.recordingStartTime !== undefined) {\n recordingStartTimeSeconds = overlayConfig.recordingStartTime / MILLISECONDS_PER_SECOND4;\n }\n this.recordingStartTime = recordingStartTimeSeconds;\n this.visibilityTracker.setRecordingStartTime(this.recordingStartTime);\n const logData = {\n hasOverlayConfig: !!this.overlayConfig,\n overlayEnabled: this.overlayConfig?.enabled,\n overlayText: this.overlayConfig?.text,\n recordingStartTime: this.recordingStartTime\n };\n logger.debug(\"[RecorderWorker] Overlay config initialized\", logData);\n }\n createOutput() {\n const writable = new WritableStream({\n write: (chunk) => this.handleOutputChunkWrite(chunk)\n });\n this.output = new Output({\n format: new Mp4OutputFormat({\n fastStart: MP4_FAST_START_DISABLED\n }),\n target: new StreamTarget(writable, {\n chunked: true,\n chunkSize: CHUNK_SIZE\n })\n });\n }\n decrementPendingWriteCount() {\n this.pendingWriteCount -= 1;\n if (this.pendingWriteCount < 0) {\n this.pendingWriteCount = 0;\n }\n }\n handleOutputChunkWrite(chunk) {\n this.pendingWriteCount += 1;\n const writeOperation = Promise.resolve().then(() => {\n this.sendChunk(chunk.data, chunk.position);\n });\n return 
writeOperation.then(() => {\n this.decrementPendingWriteCount();\n }, (error) => {\n this.decrementPendingWriteCount();\n throw error;\n });\n }\n createVideoSource(config) {\n const fps = this.timestampManager.getFrameRate();\n const keyFrameIntervalSeconds = config.keyFrameInterval;\n const videoSourceOptions = {\n codec: config.codec,\n width: config.width,\n height: config.height,\n sizeChangeBehavior: \"contain\",\n alpha: \"discard\",\n bitrateMode: \"variable\",\n latencyMode: \"quality\",\n contentHint: \"detail\",\n hardwareAcceleration: \"no-preference\",\n keyFrameInterval: keyFrameIntervalSeconds,\n bitrate: this.deserializeBitrate(config.bitrate)\n };\n this.videoSource = new VideoSampleSource(videoSourceOptions);\n const output = requireNonNull(this.output, \"Output must be initialized before adding video track\");\n const trackOptions = {};\n if (fps !== undefined) {\n trackOptions.frameRate = fps;\n }\n output.addVideoTrack(this.videoSource, trackOptions);\n }\n setupAudioSource(audioConfig, config) {\n if (!audioConfig) {\n return;\n }\n if (!config.audioBitrate) {\n return;\n }\n if (!config.audioCodec) {\n return;\n }\n if (config.audioBitrate <= 0) {\n throw new Error(ERROR_AUDIO_BITRATE_INVALID);\n }\n if (audioConfig.sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (audioConfig.numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n this.expectedAudioChannels = audioConfig.numberOfChannels;\n this.expectedAudioSampleRate = audioConfig.sampleRate;\n this.audioSource = new AudioSampleSource({\n codec: config.audioCodec,\n bitrate: config.audioBitrate,\n bitrateMode: \"variable\"\n });\n const output = requireNonNull(this.output, \"Output must be initialized before adding audio track\");\n output.addAudioTrack(this.audioSource);\n this.audioState.setProcessingActive(true);\n }\n setupAudioStream(audioStream, config) {\n if (!config.audioBitrate) {\n return;\n }\n if (!config.audioCodec) {\n 
return;\n }\n if (config.audioBitrate <= 0) {\n throw new Error(ERROR_AUDIO_BITRATE_INVALID);\n }\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.audioSource = new AudioSampleSource({\n codec: config.audioCodec,\n bitrate: config.audioBitrate,\n bitrateMode: \"variable\"\n });\n const output = requireNonNull(this.output, \"Output must be initialized before adding audio track\");\n output.addAudioTrack(this.audioSource);\n this.audioProcessor = audioStream.getReader();\n this.audioState.setProcessingActive(true);\n this.processAudioData();\n }\n async handleStart(message, videoTrack, videoStream) {\n const audioConfig = message.audioConfig;\n let audioStream = null;\n if (message.audioStream) {\n audioStream = message.audioStream;\n }\n const config = message.config;\n const overlayConfig = message.overlayConfig;\n this.validateConfig(config);\n logger.debug(\"[RecorderWorker] handleStart called\", {\n hasVideoTrack: !!videoTrack,\n hasVideoStream: !!videoStream,\n hasAudioStream: !!audioStream,\n hasAudioConfig: !!audioConfig,\n config: {\n width: config.width,\n height: config.height,\n fps: config.fps,\n bitrate: config.bitrate\n },\n hasOverlayConfig: !!overlayConfig,\n overlayConfig\n });\n this.isStopping = false;\n this.isFinalized = false;\n if (this.output) {\n logger.debug(\"[RecorderWorker] Cleaning up existing output\");\n await this.cleanup();\n }\n this.initializeRecordingState(config);\n if (message.videoSettings) {\n this.frameCompositor.setVideoSettings(message.videoSettings);\n } else {\n this.frameCompositor.setVideoSettings(null);\n }\n if (message.viewportMetadata) {\n this.frameCompositor.setViewportMetadata(message.viewportMetadata);\n } else {\n this.frameCompositor.setViewportMetadata(null);\n }\n this.frameCompositor.setIsMobileDevice(message.isMobileDevice === true);\n this.setupOverlayConfig(overlayConfig);\n let format = config.format;\n if (!format) {\n format = DEFAULT_OUTPUT_FORMAT;\n }\n 
this.validateFormat(format);\n this.createOutput();\n this.createVideoSource(config);\n if (videoStream) {\n this.setupVideoProcessingFromStream(videoStream);\n }\n if (!videoStream && videoTrack) {\n this.setupVideoProcessing(videoTrack);\n }\n if (audioStream) {\n this.setupAudioStream(audioStream, config);\n } else {\n this.setupAudioSource(audioConfig, config);\n }\n const output = requireNonNull(this.output, \"Output must be initialized before starting\");\n if (this.config?.watermark) {\n this.frameCompositor.prepareWatermark(this.config);\n }\n await output.start();\n this.bufferTracker.start();\n this.sendReady();\n this.sendStateChange(\"recording\");\n }\n setupVideoProcessing(videoTrack) {\n if (!this.videoSource) {\n return;\n }\n if (typeof MediaStreamTrackProcessor === \"undefined\") {\n throw new Error(\"MediaStreamTrackProcessor is not available in worker\");\n }\n const processor = new MediaStreamTrackProcessor({ track: videoTrack });\n this.videoProcessor = processor.readable.getReader();\n this.videoProcessingActive = true;\n this.processVideoFrames();\n }\n setupVideoProcessingFromStream(videoStream) {\n if (!this.videoSource) {\n return;\n }\n this.videoProcessor = videoStream.getReader();\n this.videoProcessingActive = true;\n this.processVideoFrames();\n }\n async handlePausedVideoFrame() {\n if (!this.videoProcessor) {\n return false;\n }\n const pausedResult = await this.videoProcessor.read();\n if (pausedResult.done) {\n return false;\n }\n if (pausedResult.value) {\n pausedResult.value.close();\n }\n return true;\n }\n async processVideoFrame(videoFrame) {\n const videoSource = requireInitialized(this.videoSource, \"Video source\");\n const config = requireInitialized(this.config, \"Transcode config\");\n const pausedDuration = this.audioState.getPausedDuration();\n const frameTimestamp = this.timestampManager.calculateVideoFrameTimestamp({\n videoFrame,\n pausedDuration,\n recordingStartTime: this.recordingStartTime,\n 
pendingVisibilityUpdatesCount: this.visibilityTracker.getPendingUpdatesCount(),\n processPendingVisibilityUpdates: () => {\n this.visibilityTracker.flushPendingUpdates(pausedDuration);\n },\n isScreenCapture: this.isScreenCapture\n });\n let overlayEnabled = false;\n if (this.overlayConfig?.enabled) {\n overlayEnabled = true;\n }\n const shouldApplyOverlay = this.visibilityTracker.shouldApplyOverlay({\n timestamp: frameTimestamp,\n overlayEnabled,\n frameCount: this.timestampManager.getFrameCount()\n });\n const compositionResult = this.frameCompositor.composeFrame({\n videoFrame,\n overlayConfig: this.overlayConfig,\n shouldApplyOverlay,\n config\n });\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n const frameTiming = this.timestampManager.prepareFrameTiming({\n frameTimestamp,\n keyFrameIntervalSeconds: config.keyFrameInterval,\n lastAudioTimestamp\n });\n const sample = new VideoSample(compositionResult.frameToProcess, {\n timestamp: frameTiming.finalTimestamp,\n duration: frameTiming.frameDuration\n });\n let videoSampleOptions;\n if (frameTiming.isKeyFrame) {\n videoSampleOptions = { keyFrame: true };\n }\n const addError = await videoSource.add(sample, videoSampleOptions).then(() => null).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add video frame: ${errorMessage}`));\n return error;\n });\n sample.close();\n if (!addError) {\n const commitResult = this.timestampManager.commitFrame({\n finalTimestamp: frameTiming.finalTimestamp,\n isKeyFrame: frameTiming.isKeyFrame,\n lastAudioTimestamp,\n audioProcessingActive: this.audioState.isActive(),\n isScreenCapture: this.isScreenCapture\n });\n if (commitResult.shouldLogDrift) {\n logger.debug(\"[RecorderWorker] AV drift metrics\", {\n frameCount: commitResult.frameCount,\n lastAudioTimestamp,\n lastVideoTimestamp: commitResult.lastVideoTimestamp,\n audioVideoDrift: commitResult.audioVideoDrift,\n isScreenCapture: 
this.isScreenCapture\n });\n }\n }\n if (compositionResult.imageBitmap) {\n compositionResult.imageBitmap.close();\n }\n if (compositionResult.frameToProcess !== videoFrame) {\n compositionResult.frameToProcess.close();\n }\n videoFrame.close();\n }\n async processVideoFrames() {\n if (!(this.videoProcessor && this.videoSource)) {\n return;\n }\n while (this.videoProcessingActive && !this.isStopping) {\n if (this.audioState.getIsPaused()) {\n const shouldContinue = await this.handlePausedVideoFrame();\n if (!shouldContinue) {\n break;\n }\n continue;\n }\n const result = await this.videoProcessor.read();\n if (result.done) {\n break;\n }\n const videoFrame = result.value;\n if (!videoFrame) {\n continue;\n }\n await this.processVideoFrame(videoFrame).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n logger.error(\"[RecorderWorker] Error processing video frame\", errorMessage);\n videoFrame.close();\n });\n }\n }\n handlePausedAudioData(audioData) {\n audioData.close();\n }\n createAudioBuffer(audioData) {\n const numberOfFrames = audioData.numberOfFrames;\n if (numberOfFrames <= 0) {\n throw new Error(ERROR_AUDIO_FRAMES_INVALID);\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n const audioBuffer = new Float32Array(numberOfFrames * numberOfChannels);\n let channelIndex = 0;\n while (channelIndex < numberOfChannels) {\n const startIndex = channelIndex * numberOfFrames;\n const endIndex = startIndex + numberOfFrames;\n const channelBuffer = audioBuffer.subarray(startIndex, endIndex);\n audioData.copyTo(channelBuffer, { planeIndex: channelIndex });\n channelIndex += 1;\n }\n return audioBuffer;\n }\n createAudioSample(audioBuffer, audioTimestamp, sampleRate, numberOfChannels) {\n if (sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n let 
bufferToWrite = audioBuffer;\n if (this.audioState.getIsMuted()) {\n bufferToWrite = new Float32Array(audioBuffer.length);\n }\n return new AudioSample({\n data: bufferToWrite,\n format: WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR,\n numberOfChannels,\n sampleRate,\n timestamp: audioTimestamp\n });\n }\n async processAudioSample(audioData, audioSample, audioTimestamp, duration) {\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n this.audioState.updateLastAudioTimestamp(audioTimestamp, duration);\n audioSample.close();\n audioData.close();\n }\n async processAudioData() {\n if (!(this.audioProcessor && this.audioSource)) {\n return;\n }\n while (this.audioState.isActive() && !this.isStopping) {\n const result = await this.audioProcessor.read();\n if (result.done) {\n this.audioState.setProcessingActive(false);\n break;\n }\n const audioData = result.value;\n if (this.shouldSkipAudioData(audioData)) {\n continue;\n }\n const audioFormat = this.getAudioDataFormat(audioData);\n if (!audioFormat) {\n continue;\n }\n const audioBuffer = this.createAudioBuffer(audioData);\n const normalized = this.normalizeAudioBufferForFormat(audioBuffer, audioFormat);\n const duration = audioFormat.numberOfFrames / audioFormat.sampleRate;\n const audioTimestamp = this.audioState.getLastAudioTimestamp();\n const audioSample = this.createAudioSample(normalized.buffer, audioTimestamp, audioFormat.sampleRate, normalized.numberOfChannels);\n await this.processAudioSample(audioData, audioSample, audioTimestamp, duration);\n }\n }\n handleAudioChunk(message) {\n this.handleAsyncOperation(this.processAudioChunk(message), \"handleAudioChunk\");\n }\n async processAudioChunk(message) {\n if (this.shouldIgnoreMessage()) {\n return;\n }\n if (!this.audioSource) {\n return;\n }\n if 
(!this.audioState.isActive()) {\n return;\n }\n if (this.audioState.getIsPaused()) {\n return;\n }\n if (message.frames <= 0) {\n throw new Error(ERROR_AUDIO_FRAMES_INVALID);\n }\n if (message.sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (message.numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n this.setExpectedAudioFormat(message.sampleRate, message.numberOfChannels);\n if (this.expectedAudioSampleRate !== null && message.sampleRate !== this.expectedAudioSampleRate) {\n logger.warn(\"[RecorderWorker] Audio sample rate changed\", {\n expectedSampleRate: this.expectedAudioSampleRate,\n receivedSampleRate: message.sampleRate\n });\n return;\n }\n let audioBuffer = message.data;\n let numberOfChannels = message.numberOfChannels;\n if (this.expectedAudioChannels !== null) {\n const normalized = this.normalizeAudioBuffer(audioBuffer, message.frames, numberOfChannels, this.expectedAudioChannels);\n audioBuffer = normalized.buffer;\n numberOfChannels = normalized.numberOfChannels;\n }\n const expectedSamples = message.frames * numberOfChannels;\n if (audioBuffer.length < expectedSamples) {\n throw new Error(\"Audio buffer length is shorter than expected\");\n }\n const sampleRate = message.sampleRate;\n const duration = message.frames / sampleRate;\n const audioTimestamp = this.audioState.getAudioTimestamp(message.timestamp);\n if (this.audioState.getIsMuted()) {\n audioBuffer = new Float32Array(audioBuffer.length);\n }\n const audioSample = new AudioSample({\n data: audioBuffer,\n format: WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR,\n numberOfChannels,\n sampleRate,\n timestamp: audioTimestamp\n });\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n 
this.audioState.updateLastAudioTimestamp(audioTimestamp, duration);\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n logger.debug(\"[RecorderWorker] Audio sample processed\", {\n lastAudioTimestamp,\n duration,\n sampleRate,\n numberOfFrames: message.frames\n });\n audioSample.close();\n }\n shouldSkipAudioData(audioData) {\n if (!audioData) {\n return true;\n }\n if (this.audioState.getIsPaused()) {\n this.handlePausedAudioData(audioData);\n return true;\n }\n return false;\n }\n getAudioDataFormat(audioData) {\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n audioData.close();\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n const numberOfFrames = audioData.numberOfFrames;\n const numberOfChannels = audioData.numberOfChannels;\n this.setExpectedAudioFormat(sampleRate, numberOfChannels);\n if (this.expectedAudioSampleRate !== null && sampleRate !== this.expectedAudioSampleRate) {\n logger.warn(\"[RecorderWorker] Audio sample rate changed\", {\n expectedSampleRate: this.expectedAudioSampleRate,\n receivedSampleRate: sampleRate\n });\n audioData.close();\n return null;\n }\n return {\n sampleRate,\n numberOfFrames,\n numberOfChannels\n };\n }\n normalizeAudioBufferForFormat(audioBuffer, audioFormat) {\n let bufferToWrite = audioBuffer;\n let channelsToWrite = audioFormat.numberOfChannels;\n if (this.expectedAudioChannels !== null) {\n const normalized = this.normalizeAudioBuffer(audioBuffer, audioFormat.numberOfFrames, audioFormat.numberOfChannels, this.expectedAudioChannels);\n bufferToWrite = normalized.buffer;\n channelsToWrite = normalized.numberOfChannels;\n }\n return {\n buffer: bufferToWrite,\n numberOfChannels: channelsToWrite\n };\n }\n handlePause() {\n if (!this.audioState.pause()) {\n return;\n }\n this.sendStateChange(\"paused\");\n }\n handleResume() {\n if (!this.audioState.resume()) {\n return;\n }\n this.sendStateChange(\"recording\");\n }\n handleStop() {\n if (this.isStopping) {\n 
logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return Promise.resolve();\n }\n if (this.isFinalized) {\n logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return Promise.resolve();\n }\n this.isStopping = true;\n this.isFinalized = true;\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n return runStopTransition({\n finalizeStopSequence: () => this.finalizeStopSequence(),\n completeStop: () => this.completeStop(),\n recoverStopFailure: () => {\n if (this.isFinalized) {\n this.resetStopStateAfterFailure();\n }\n return this.cleanup().catch((cleanupError) => {\n logger.error(\"[RecorderWorker] Stop failure cleanup failed\", {\n error: extractErrorMessage(cleanupError)\n });\n });\n },\n clearStoppingFlag: () => {\n this.isStopping = false;\n }\n });\n }\n async completeStop() {\n await this.cleanup();\n this.sendStateChange(\"stopped\");\n }\n async finalizeStopSequence() {\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n if (this.output) {\n await this.output.finalize();\n }\n await waitForPendingWritesToDrain({\n getPendingWriteCount: () => this.pendingWriteCount,\n timeoutMilliseconds: STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS2\n });\n }\n resetStopStateAfterFailure() {\n this.isFinalized = false;\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n }\n handleToggleMute() {\n this.audioState.toggleMuted();\n }\n handleUpdateFps(fps) {\n if (fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n const previousFps = this.timestampManager.getFrameRate();\n logger.debug(\"[RecorderWorker] Updating FPS\", {\n fps,\n previousFps\n });\n this.timestampManager.setFrameRate(fps);\n if (this.config) {\n this.config.fps = fps;\n }\n }\n handleUpdateSourceType(isScreenCapture) 
{\n logger.debug(\"[RecorderWorker] Updating source type\", {\n isScreenCapture,\n previousIsScreenCapture: this.isScreenCapture\n });\n this.isScreenCapture = isScreenCapture;\n this.visibilityTracker.setIsScreenCapture(isScreenCapture);\n }\n async handleSwitchSource(videoTrack, videoStream) {\n if (!(videoTrack || videoStream)) {\n throw new Error(\"Video track or stream is required\");\n }\n const frameRate = this.timestampManager.getFrameRate();\n requireDefined(frameRate, \"Frame rate must be set\");\n if (frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (this.videoProcessor) {\n this.videoProcessingActive = false;\n await this.videoProcessor.cancel();\n let drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n while (!drainResult.done) {\n drainResult.value?.close();\n drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n }\n this.videoProcessor = null;\n }\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n const baseVideoTimestamp = this.timestampManager.getBaseVideoTimestamp();\n requireNonNull(baseVideoTimestamp, \"Base video timestamp must be set for source switch\");\n const switchResult = this.timestampManager.handleSourceSwitch(lastAudioTimestamp);\n logger.debug(\"[RecorderWorker] handleSwitchSource - preserving baseVideoTimestamp\", {\n continuationTimestamp: switchResult.continuationTimestamp,\n lastVideoTimestamp: this.timestampManager.getLastVideoTimestamp(),\n frameRate,\n isScreenCapture: this.isScreenCapture,\n baseVideoTimestamp,\n recordingStartTime: this.recordingStartTime,\n lastAudioTimestamp,\n previousVideoTimestamp: switchResult.previousVideoTimestamp,\n minFrameDuration: switchResult.minFrameDuration,\n rawDrift: switchResult.rawDrift,\n driftOffset: switchResult.driftOffset\n });\n if (videoStream) {\n this.setupVideoProcessingFromStream(videoStream);\n return;\n }\n if (videoTrack) {\n 
this.setupVideoProcessing(videoTrack);\n }\n }\n async cleanup() {\n this.bufferTracker.stop();\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n const videoSource = this.videoSource;\n if (videoSource && !this.isFinalized) {\n videoSource.close();\n }\n if (videoSource) {\n this.videoSource = null;\n }\n const audioSource = this.audioSource;\n if (audioSource && !this.isFinalized) {\n audioSource.close();\n }\n if (audioSource) {\n this.audioSource = null;\n }\n const output = this.output;\n if (output && !this.isFinalized) {\n await output.cancel().catch((error) => {\n logger.warn(\"[RecorderWorker] cancel failed (ignored, possibly finalized)\", error);\n });\n this.isFinalized = true;\n }\n if (output) {\n this.output = null;\n }\n this.timestampManager.reset(undefined);\n this.totalSize = 0;\n this.audioState.reset();\n this.frameCompositor.reset();\n this.overlayConfig = null;\n this.recordingStartTime = 0;\n this.isScreenCapture = false;\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.pendingWriteCount = 0;\n this.visibilityTracker.reset(this.recordingStartTime, this.isScreenCapture);\n }\n setExpectedAudioFormat(sampleRate, numberOfChannels) {\n if (this.expectedAudioSampleRate === null) {\n this.expectedAudioSampleRate = sampleRate;\n }\n if (this.expectedAudioChannels === null) {\n this.expectedAudioChannels = numberOfChannels;\n }\n }\n normalizeAudioBuffer(audioBuffer, frames, actualChannels, expectedChannels) {\n if (actualChannels === expectedChannels) {\n return { buffer: audioBuffer, numberOfChannels: actualChannels };\n }\n if (actualChannels === 1 && expectedChannels === STEREO_CHANNEL_COUNT) {\n const expandedBuffer = new Float32Array(frames * STEREO_CHANNEL_COUNT);\n 
expandedBuffer.set(audioBuffer, 0);\n expandedBuffer.set(audioBuffer, frames);\n return {\n buffer: expandedBuffer,\n numberOfChannels: STEREO_CHANNEL_COUNT\n };\n }\n if (actualChannels === STEREO_CHANNEL_COUNT && expectedChannels === 1) {\n const mixedBuffer = new Float32Array(frames);\n let frameIndex = 0;\n while (frameIndex < frames) {\n const leftSample = audioBuffer[frameIndex];\n const rightSample = audioBuffer[frameIndex + frames];\n mixedBuffer[frameIndex] = (leftSample + rightSample) * AUDIO_SAMPLE_AVERAGE_SCALE;\n frameIndex += 1;\n }\n return { buffer: mixedBuffer, numberOfChannels: 1 };\n }\n logger.warn(\"[RecorderWorker] Audio channel mismatch\", {\n expectedChannels,\n receivedChannels: actualChannels\n });\n return { buffer: audioBuffer, numberOfChannels: actualChannels };\n }\n sendReady() {\n const response = { type: \"ready\" };\n self.postMessage(response);\n }\n sendError(error) {\n const errorMessage = extractErrorMessage(error);\n const response = {\n type: \"error\",\n error: errorMessage\n };\n self.postMessage(response);\n }\n sendChunk(data, position) {\n this.totalSize = Math.max(this.totalSize, position + data.length);\n const response = {\n type: \"chunk\",\n data,\n position\n };\n const buffer = data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength);\n self.postMessage(response, [buffer]);\n }\n sendStateChange(state) {\n const response = {\n type: \"stateChange\",\n state\n };\n self.postMessage(response);\n }\n deserializeBitrate(bitrate) {\n if (typeof bitrate === \"number\") {\n return bitrate;\n }\n if (bitrate === \"low\") {\n return QUALITY_LOW;\n }\n if (bitrate === \"medium\") {\n return QUALITY_MEDIUM;\n }\n if (bitrate === \"high\") {\n return QUALITY_HIGH;\n }\n if (bitrate === \"very-high\") {\n return QUALITY_VERY_HIGH;\n }\n return QUALITY_HIGH;\n }\n}\nnew RecorderWorker;\n";
1763
+
1764
+ import type { WatermarkPosition } from "../types";
1765
+ /**
1766
+ * Calculates the target size for a watermark based on video width.
1767
+ * Target is approximately 7% of video width.
1768
+ */
1769
+ export declare function calculateWatermarkTargetSize(videoWidth: number, imageWidth: number, imageHeight: number): {
1770
+ width: number;
1771
+ height: number;
1164
1772
  };
1165
- export type TelemetryEventDto = {
1166
- event: TelemetryEventName;
1167
- category: TelemetryEventCategory;
1168
- timestamp: number;
1169
- installationId: string;
1170
- fingerprint: TelemetryFingerprintDto;
1171
- sdkVersion: string;
1172
- context?: TelemetryContextDto;
1173
- properties?: Record<string, unknown>;
1174
- error?: TelemetryErrorDto;
1773
+ /**
1774
+ * Options for calculating watermark position.
1775
+ */
1776
+ export type WatermarkPositionOptions = {
1777
+ watermarkWidth: number;
1778
+ watermarkHeight: number;
1779
+ videoWidth: number;
1780
+ videoHeight: number;
1781
+ position: WatermarkPosition;
1175
1782
  };
1176
- export type SendTelemetryRequestDto = {
1177
- events: TelemetryEventDto[];
1783
+ /**
1784
+ * Calculates the (x, y) coordinates for drawing a watermark on a video frame.
1785
+ */
1786
+ export declare function getWatermarkPosition(options: WatermarkPositionOptions): {
1787
+ x: number;
1788
+ y: number;
1178
1789
  };
1179
- export type TelemetryClientConfig = {
1180
- apiKey: string;
1181
- backendUrl: string;
1182
- endpoint?: string;
1183
- sessionId?: string;
1184
- userId?: string;
1185
- environmentId?: string;
1186
- appVersion?: string;
1187
- release?: string;
1188
- pageUrl?: string;
1189
- referrerUrl?: string;
1190
- sdkLocation?: string;
1191
- clientLocation?: string;
1790
+
1791
+ export type AudioStateDependencies = {
1792
+ getNowMilliseconds: () => number;
1192
1793
  };
1193
- export type TelemetryNavigator = Navigator & {
1194
- deviceMemory?: number;
1794
+ export declare class AudioState {
1795
+ private readonly getNowMilliseconds;
1796
+ private isPaused;
1797
+ private isMuted;
1798
+ private pausedDuration;
1799
+ private pauseStartedAt;
1800
+ private lastAudioTimestamp;
1801
+ private isProcessingActive;
1802
+ constructor(dependencies: AudioStateDependencies);
1803
+ reset(): void;
1804
+ setProcessingActive(isActive: boolean): void;
1805
+ isActive(): boolean;
1806
+ toggleMuted(): boolean;
1807
+ setMuted(isMuted: boolean): void;
1808
+ getIsMuted(): boolean;
1809
+ getIsPaused(): boolean;
1810
+ getPausedDuration(): number;
1811
+ pause(): boolean;
1812
+ resume(): boolean;
1813
+ getAudioTimestamp(timestamp: number): number;
1814
+ updateLastAudioTimestamp(timestamp: number, duration: number): void;
1815
+ getLastAudioTimestamp(): number;
1816
+ }
1817
+
1818
+ export type WorkerLifecycleLogger = {
1819
+ debug: (message: string, data?: Record<string, unknown>) => void;
1820
+ error: (message: string, data?: unknown) => void;
1195
1821
  };
1196
- export type TelemetryClientDependencies = {
1197
- fetchFunction: typeof fetch;
1198
- cryptoProvider: Crypto | null;
1199
- storageProvider: Storage | null;
1200
- navigatorProvider: TelemetryNavigator | null;
1201
- locationProvider: Location | null;
1202
- documentProvider: Document | null;
1203
- nowProvider: () => number;
1204
- randomProvider: () => number;
1205
- setTimeoutFunction: (callback: () => void, delay: number) => ReturnType<typeof setTimeout>;
1206
- clearTimeoutFunction: (timeoutId: ReturnType<typeof setTimeout>) => void;
1822
+ export type WorkerLifecycleDependencies = {
1823
+ createWorker: (workerUrl: string) => Worker;
1824
+ workerUrl: string;
1825
+ onMessage: (event: MessageEvent<WorkerResponse>) => void;
1826
+ onError: (event: ErrorEvent) => void;
1827
+ logger: WorkerLifecycleLogger;
1207
1828
  };
1829
+ export declare function createWorkerInstance(dependencies: WorkerLifecycleDependencies): Worker;
1208
1830
 
1209
- export {};
1831
+ export declare function createProbeWorkerUrl(): string;
1832
+ export declare function revokeProbeWorkerUrl(workerUrl: string): void;
1210
1833
 
1211
- export type UploadResult = {
1212
- id: string;
1213
- uploadUrl: string | null;
1214
- };
1215
- export type UploadCallbacks = {
1216
- onProgress: (progress: number) => void;
1217
- onSuccess: (result: UploadResult) => void;
1218
- onError: (error: Error) => void;
1219
- onClearStatus: () => void;
1220
- };
1834
+ export type TimestampManagerDependencies = {
1835
+ logger: {
1836
+ debug: (message: string, data?: Record<string, unknown>) => void;
1837
+ warn: (message: string, data?: Record<string, unknown>) => void;
1838
+ };
1839
+ getNowMilliseconds: () => number;
1840
+ };
1841
+ export type FrameTimingResult = {
1842
+ finalTimestamp: number;
1843
+ frameDuration: number;
1844
+ isKeyFrame: boolean;
1845
+ };
1846
+ export type CommitFrameResult = {
1847
+ shouldLogDrift: boolean;
1848
+ audioVideoDrift: number;
1849
+ frameCount: number;
1850
+ lastVideoTimestamp: number;
1851
+ };
1852
+ export type SourceSwitchResult = {
1853
+ continuationTimestamp: number;
1854
+ previousVideoTimestamp: number;
1855
+ minFrameDuration: number;
1856
+ rawDrift: number;
1857
+ driftOffset: number;
1858
+ };
1859
+ export declare class TimestampManager {
1860
+ private frameRate;
1861
+ private lastVideoTimestamp;
1862
+ private baseVideoTimestamp;
1863
+ private frameCount;
1864
+ private lastKeyFrameTimestamp;
1865
+ private forceNextKeyFrame;
1866
+ private driftOffset;
1867
+ private readonly logger;
1868
+ private readonly getNowMilliseconds;
1869
+ constructor(dependencies: TimestampManagerDependencies);
1870
+ reset(frameRate: number | undefined): void;
1871
+ setFrameRate(frameRate: number): void;
1872
+ getFrameRate(): number;
1873
+ getFrameCount(): number;
1874
+ getLastVideoTimestamp(): number;
1875
+ getBaseVideoTimestamp(): number | null;
1876
+ calculateVideoFrameTimestamp(parameters: {
1877
+ videoFrame: VideoFrame;
1878
+ pausedDuration: number;
1879
+ recordingStartTime: number;
1880
+ pendingVisibilityUpdatesCount: number;
1881
+ processPendingVisibilityUpdates: () => void;
1882
+ isScreenCapture: boolean;
1883
+ }): number;
1884
+ prepareFrameTiming(parameters: {
1885
+ frameTimestamp: number;
1886
+ keyFrameIntervalSeconds: number;
1887
+ lastAudioTimestamp: number;
1888
+ }): FrameTimingResult;
1889
+ commitFrame(parameters: {
1890
+ finalTimestamp: number;
1891
+ isKeyFrame: boolean;
1892
+ lastAudioTimestamp: number;
1893
+ audioProcessingActive: boolean;
1894
+ isScreenCapture: boolean;
1895
+ }): CommitFrameResult;
1896
+ handleSourceSwitch(lastAudioTimestamp: number): SourceSwitchResult;
1897
+ }
1221
1898
 
1222
- import type { PendingUpload, VideoStorageService } from "../storage/video-storage";
1223
- type UploadCallbacks = {
1224
- onUploadProgress?: (id: string, progress: number) => void;
1225
- onUploadComplete?: (id: string, result: VideoUploadResult) => void;
1226
- onUploadError?: (id: string, error: Error) => void;
1899
+ export declare const ROTATION_DEGREES_0 = 0;
1900
+ export declare const ROTATION_DEGREES_90 = 90;
1901
+ export declare const ROTATION_DEGREES_180 = 180;
1902
+ export declare const ROTATION_DEGREES_270 = 270;
1903
+ export type RotationDecisionInput = {
1904
+ isMobileDevice: boolean;
1905
+ targetWidth?: number;
1906
+ targetHeight?: number;
1907
+ frameWidth: number;
1908
+ frameHeight: number;
1909
+ facingMode?: WorkerVideoFacingMode;
1910
+ settingsRotation?: number;
1911
+ orientationAngle?: number;
1912
+ windowOrientation?: number;
1913
+ };
1914
+ export declare function calculateFrameRotationDegrees(input: RotationDecisionInput): number;
1915
+
1916
+ export type StopTransitionDependencies = {
1917
+ finalizeStopSequence: () => Promise<void>;
1918
+ completeStop: () => Promise<void>;
1919
+ recoverStopFailure: () => Promise<void>;
1920
+ clearStoppingFlag: () => void;
1921
+ };
1922
+ export declare function runStopTransition(dependencies: StopTransitionDependencies): Promise<void>;
1923
+
1924
+ export declare const ERROR_STOP_PENDING_WRITES_TIMEOUT = "stop.pending-writes-timeout";
1925
+ type StopFinalizationDependencies = {
1926
+ getPendingWriteCount: () => number;
1927
+ getNowMilliseconds?: () => number;
1928
+ waitMilliseconds?: (milliseconds: number) => Promise<void>;
1929
+ timeoutMilliseconds?: number;
1930
+ };
1931
+ export declare function waitForPendingWritesToDrain(dependencies: StopFinalizationDependencies): Promise<void>;
1932
+ export {};
1933
+
1934
+ export type VisibilityTrackerDependencies = {
1935
+ logger: {
1936
+ debug: (message: string, data?: Record<string, unknown>) => void;
1937
+ warn: (message: string, data?: Record<string, unknown>) => void;
1938
+ };
1227
1939
  };
1228
- export declare class UploadQueueManager {
1229
- private readonly storageService;
1230
- private readonly uploadService;
1231
- private readonly processingIntervalId;
1232
- private readonly networkOnlineHandler;
1233
- private isProcessing;
1234
- private retryTimeoutId;
1235
- private callbacks;
1236
- constructor(storageService: VideoStorageService, uploadService: VideoUploadService);
1237
- destroy(): void;
1238
- setCallbacks(callbacks: UploadCallbacks): void;
1239
- queueUpload(upload: Omit<PendingUpload, "id" | "createdAt" | "updatedAt" | "status" | "retryCount">): Promise<string>;
1240
- processQueue(): Promise<void>;
1241
- getPendingUploads(): Promise<PendingUpload[]>;
1242
- getStats(): Promise<{
1243
- pending: number;
1244
- uploading: number;
1245
- failed: number;
1246
- total: number;
1247
- }>;
1248
- private getOldestUpload;
1249
- private getOldestFailedUpload;
1250
- private processUpload;
1251
- private calculateRetryDelay;
1252
- private scheduleRetry;
1253
- private clearTimer;
1940
+ export declare class VisibilityTracker {
1941
+ private hiddenIntervals;
1942
+ private currentHiddenIntervalStart;
1943
+ private pendingVisibilityUpdates;
1944
+ private recordingStartTime;
1945
+ private isScreenCapture;
1946
+ private readonly logger;
1947
+ constructor(dependencies: VisibilityTrackerDependencies);
1948
+ reset(recordingStartTime: number, isScreenCapture: boolean): void;
1949
+ setRecordingStartTime(recordingStartTime: number): void;
1950
+ setIsScreenCapture(isScreenCapture: boolean): void;
1951
+ getPendingUpdatesCount(): number;
1952
+ shouldApplyOverlay(parameters: {
1953
+ timestamp: number;
1954
+ overlayEnabled: boolean;
1955
+ frameCount: number;
1956
+ }): boolean;
1957
+ handleUpdateVisibility(isHidden: boolean, timestamp: number, hasBaseVideoTimestamp: boolean, pausedDuration: number): void;
1958
+ flushPendingUpdates(pausedDuration: number): void;
1959
+ private processVisibilityUpdate;
1254
1960
  }
1961
+
1255
1962
  export {};
1256
1963
 
1257
- export type VideoUploadOptions = {
1258
- apiKey: string;
1259
- backendUrl: string;
1260
- filename?: string;
1261
- metadata?: Record<string, unknown>;
1262
- userMetadata?: Record<string, unknown>;
1263
- onProgress?: (progress: number) => void;
1264
- };
1265
- export type VideoUploadResult = {
1266
- id: string;
1267
- publicId: string;
1268
- filename: string;
1269
- fileSize: number;
1270
- mimeType: string;
1271
- duration: number | null;
1272
- status: string;
1273
- uploadUrl: string | null;
1274
- createdAt: string;
1275
- };
1276
- export declare class VideoUploadService {
1277
- uploadVideo(blob: Blob, options: VideoUploadOptions): Promise<VideoUploadResult>;
1278
- private uploadVideoFile;
1279
- private parseSuccessResponse;
1280
- private parseErrorResponse;
1281
- private safeParseJsonFromXhr;
1964
+ export {};
1965
+
1966
+ export {};
1967
+
1968
+ import { type AudioWorkletChunk, AudioWorkletController } from "../../audio/audio-worklet-controller";
1969
+ export type AudioWorkletManagerDependencies = {
1970
+ onChunk: (chunk: AudioWorkletChunk) => void;
1971
+ createController?: () => AudioWorkletController;
1972
+ };
1973
+ export declare class AudioWorkletManager {
1974
+ private audioWorkletController;
1975
+ private audioWorkletConfig;
1976
+ private readonly onChunk;
1977
+ private readonly createController;
1978
+ constructor(dependencies: AudioWorkletManagerDependencies);
1979
+ prepareAudioConfig(audioTrack: MediaStreamAudioTrack | null): Promise<WorkerAudioConfig | null>;
1980
+ startProcessing(): Promise<void>;
1981
+ stop(): void;
1982
+ setMuted(isMuted: boolean): void;
1983
+ setPaused(isPaused: boolean): void;
1984
+ private createWorkerAudioConfig;
1282
1985
  }
1283
1986
 
1987
+ import type { BrowserGuardError } from "../../browser-guard/types";
1988
+ export type VideoInputSelectorLogger = {
1989
+ debug: (message: string, data?: Record<string, unknown>) => void;
1990
+ warn: (message: string, data?: Record<string, unknown>) => void;
1991
+ };
1992
+ export type VideoInputSelectorDependencies = {
1993
+ stopCurrentVideoTrack: () => void;
1994
+ cloneVideoTrack: (track: MediaStreamVideoTrack) => MediaStreamVideoTrack;
1995
+ cloneAudioTrack: (track: MediaStreamAudioTrack) => MediaStreamAudioTrack;
1996
+ setCurrentVideoTrack: (track: MediaStreamVideoTrack) => void;
1997
+ canUseMainThreadVideoProcessor: () => boolean;
1998
+ createVideoStreamFromTrack: (track: MediaStreamVideoTrack) => ReadableStream<VideoFrame> | null;
1999
+ createBrowserUnsupportedError: () => BrowserGuardError;
2000
+ getViewportMetadata: () => {
2001
+ innerWidth: number;
2002
+ innerHeight: number;
2003
+ orientation?: string;
2004
+ orientationAngle?: number;
2005
+ windowOrientation?: number;
2006
+ } | null;
2007
+ logger: VideoInputSelectorLogger;
2008
+ isMobileDevice: () => boolean;
2009
+ };
2010
+ export declare function prepareVideoTrack(videoTracks: MediaStreamTrack[], dependencies: VideoInputSelectorDependencies): MediaStreamVideoTrack | null;
2011
+ export declare function prepareAudioTrack(audioTracks: MediaStreamTrack[], dependencies: VideoInputSelectorDependencies): MediaStreamAudioTrack | null;
2012
+ export declare function selectVideoInput(videoTrack: MediaStreamVideoTrack | null, workerProbeResult: WorkerProbeResultResponse, dependencies: VideoInputSelectorDependencies): {
2013
+ videoTrack: MediaStreamVideoTrack | null;
2014
+ videoStream: ReadableStream<VideoFrame> | null;
2015
+ };
2016
+ export declare function buildWorkerVideoSettings(videoTrack: MediaStreamVideoTrack | null): WorkerVideoSettings | undefined;
2017
+ export declare function getIsMobileDeviceDetected(dependencies: VideoInputSelectorDependencies): boolean;
2018
+ export declare function logTrackDetails(videoTrack: MediaStreamVideoTrack | null, audioTrack: MediaStreamAudioTrack | null, videoStream: ReadableStream<VideoFrame> | null, dependencies: VideoInputSelectorDependencies): void;
2019
+ export declare function logVideoTrackMetadata(videoTrack: MediaStreamVideoTrack | null, dependencies: VideoInputSelectorDependencies): void;
2020
+ export declare function logViewportMetadata(dependencies: VideoInputSelectorDependencies): void;
2021
+
2022
+ export declare const FORMAT_DEFAULT_CODECS: Record<OutputFormat, AudioCodec>;
2023
+ export declare function getDefaultAudioCodecForFormat(format: OutputFormat): AudioCodec;
2024
+ export declare function getAudioCodecForFormat(format: OutputFormat, overrideCodec?: AudioCodec): AudioCodec;
2025
+
2026
+ export {};
2027
+
2028
+ declare const VIDEO_PATH_WORKER_TRACK = "worker-track";
2029
+ declare const VIDEO_PATH_MAIN_THREAD_STREAM = "main-thread-stream";
2030
+ declare const VIDEO_PATH_UNAVAILABLE = "unavailable";
2031
+ declare const AUDIO_PATH_MAIN_THREAD_AUDIO_STREAM = "main-thread-audio-stream";
2032
+ declare const AUDIO_PATH_AUDIO_WORKLET_CHUNKS = "audio-worklet-chunks";
2033
+ declare const AUDIO_PATH_NONE_REQUIRED = "none-required";
2034
+ declare const AUDIO_PATH_UNAVAILABLE = "unavailable";
2035
+ export type VideoProcessingPath = typeof VIDEO_PATH_WORKER_TRACK | typeof VIDEO_PATH_MAIN_THREAD_STREAM | typeof VIDEO_PATH_UNAVAILABLE;
2036
+ export type AudioProcessingPath = typeof AUDIO_PATH_MAIN_THREAD_AUDIO_STREAM | typeof AUDIO_PATH_AUDIO_WORKLET_CHUNKS | typeof AUDIO_PATH_NONE_REQUIRED | typeof AUDIO_PATH_UNAVAILABLE;
2037
+ export type SupportCheckOptions = {
2038
+ requiresAudio?: boolean;
2039
+ requiresWatermark?: boolean;
2040
+ };
2041
+ export type SupportReport = {
2042
+ isSupported: boolean;
2043
+ missing: string[];
2044
+ hasWorker: boolean;
2045
+ hasAudioContext: boolean;
2046
+ hasAudioWorklet: boolean;
2047
+ hasMediaStreamTrackProcessor: boolean;
2048
+ hasMainThreadMediaStreamTrackProcessor: boolean;
2049
+ hasVideoFrame: boolean;
2050
+ hasAudioData: boolean;
2051
+ hasOffscreenCanvas: boolean;
2052
+ hasCreateImageBitmap: boolean;
2053
+ hasReadableStream: boolean;
2054
+ requiresAudio: boolean;
2055
+ requiresWatermark: boolean;
2056
+ videoPath: VideoProcessingPath;
2057
+ audioPath: AudioProcessingPath;
2058
+ };
2059
+ export declare function checkRecorderSupport(options?: SupportCheckOptions): Promise<SupportReport>;
2060
+ export {};
2061
+
1284
2062
  export type VidtreoRecorderConfig = {
1285
2063
  apiKey: string;
1286
- apiUrl?: string;
2064
+ apiUrl: string;
1287
2065
  enableSourceSwitching?: boolean;
1288
2066
  enableMute?: boolean;
1289
2067
  enablePause?: boolean;
@@ -1291,8 +2069,6 @@ export type VidtreoRecorderConfig = {
1291
2069
  maxRecordingTime?: number;
1292
2070
  countdownDuration?: number;
1293
2071
  userMetadata?: Record<string, unknown>;
1294
- enableTabVisibilityOverlay?: boolean;
1295
- tabVisibilityOverlayText?: string;
1296
2072
  onUploadComplete?: (result: {
1297
2073
  recordingId: string;
1298
2074
  uploadUrl: string;
@@ -1315,6 +2091,7 @@ export type RecordingStopResult = {
1315
2091
  export declare class VidtreoRecorder {
1316
2092
  private readonly controller;
1317
2093
  private readonly config;
2094
+ private readonly uploadService;
1318
2095
  private isInitialized;
1319
2096
  constructor(config: VidtreoRecorderConfig);
1320
2097
  initialize(): Promise<void>;