@vidtreo/recorder 0.9.1 → 0.9.3

Files changed (3)
  1. package/dist/index.d.ts +75 -21
  2. package/dist/index.js +604 -235
  3. package/package.json +3 -2
package/dist/index.d.ts CHANGED
@@ -138,6 +138,52 @@ export declare class VidtreoRecorder {
  private ensureInitialized;
  }
 
+ export type NativeCameraFile = {
+ file: File;
+ previewUrl: string;
+ duration: number;
+ validated: boolean;
+ };
+ export type FileValidationResult = {
+ valid: boolean;
+ error?: string;
+ };
+ export type NativeCameraConfig = {
+ maxFileSize?: number;
+ maxDuration?: number;
+ allowedFormats?: string[];
+ };
+
+ export declare function validateFile(file: File, config?: {
+ maxFileSize?: number;
+ maxRecordingTime?: number | null;
+ allowedFormats?: string[];
+ }): Promise<FileValidationResult>;
+
+ import type { RecordingStopResult } from "../../vidtreo-recorder";
+ import type { ConfigService } from "../config/config-service";
+ import type { VideoUploadService } from "../upload/upload-service";
+ export type NativeCameraHandlerConfig = {
+ apiKey?: string | null;
+ backendUrl?: string | null;
+ maxRecordingTime?: number | null;
+ maxFileSize?: number;
+ userMetadata?: Record<string, unknown>;
+ };
+ export declare class NativeCameraHandler {
+ private pendingFile;
+ private readonly configService;
+ private readonly uploadService;
+ private readonly config;
+ constructor(config: NativeCameraHandlerConfig, configService: ConfigService | null, uploadService: VideoUploadService);
+ handleFileSelection(file: File): Promise<NativeCameraFile>;
+ processAndUpload(onTranscodeProgress: (progress: number) => void, onUploadProgress: (progress: number) => void): Promise<RecordingStopResult>;
+ cancel(): void;
+ preloadConfig(): Promise<void>;
+ }
+
+ export declare function extractLastFrame(file: File, timeoutMs?: number): Promise<Blob>;
+
  export type VideoUploadOptions = {
  apiKey: string;
  backendUrl: string;
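The native-camera surface added above (NativeCameraHandler, validateFile, extractLastFrame) suggests a pick, validate, upload flow. A minimal usage sketch based only on these declarations; how the handler is constructed (the ConfigService/VideoUploadService wiring), the progress-callback scale, and the failure mode of validation are assumptions, since none of them appear in this diff:

```ts
// Usage sketch inferred from the declarations above. Handler construction
// is taken as given here because it is not shown in this diff.
import type { NativeCameraFile, NativeCameraHandler } from "@vidtreo/recorder";

async function captureWithNativeCamera(
  handler: NativeCameraHandler,
  input: HTMLInputElement, // e.g. <input type="file" accept="video/*" capture>
) {
  const file = input.files?.[0];
  if (!file) return;

  // Validate and probe the picked file (duration, preview URL).
  const pending: NativeCameraFile = await handler.handleFileSelection(file);
  if (!pending.validated) {
    // Assumption: a failed check surfaces via the validated flag
    // rather than a rejected promise.
    handler.cancel();
    return;
  }

  // Transcode and upload, with separate progress callbacks
  // (progress scale, 0-1 vs 0-100, is not specified in the diff).
  return handler.processAndUpload(
    (p) => console.log("transcode progress:", p),
    (p) => console.log("upload progress:", p),
  );
}
```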
@@ -278,6 +324,8 @@ export declare function requireInitialized<T>(value: T | null | undefined, compo
  export declare function requireStream(stream: MediaStream | null, message?: string): MediaStream;
  export declare function requireProcessor<T>(processor: T | null, componentName?: string): T;
 
+ export declare function isMobileDevice(): boolean;
+
  export declare function extractErrorMessage(error: unknown): string;
 
  export declare const FILE_SIZE_UNITS: readonly ["Bytes", "KB", "MB", "GB"];
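The new isMobileDevice() helper pairs naturally with the native-camera path above. One possible policy, purely illustrative (the package does not prescribe this branching, and its detection heuristics are internal):

```ts
import { isMobileDevice } from "@vidtreo/recorder";

// Illustrative policy: hand off to the OS camera app on mobile,
// keep the in-browser recorder elsewhere.
function pickCaptureMode(): "native-camera" | "in-browser" {
  return isMobileDevice() ? "native-camera" : "in-browser";
}
```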
@@ -285,7 +333,7 @@ export declare const FILE_SIZE_BASE = 1024;
  export declare function formatFileSize(bytes: number): string;
  export declare function formatTime(totalSeconds: number): string;
 
- export declare function extractVideoDuration(blob: Blob): Promise<number>;
+ export declare function extractVideoDuration(file: File | Blob): Promise<number>;
 
  export type StorageQuota = {
  usage: number;
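extractVideoDuration is widened from Blob to File | Blob, so the same helper now covers both recorded blobs and user-picked files; since File extends Blob, existing callers keep working. A minimal sketch:

```ts
import { extractVideoDuration } from "@vidtreo/recorder";

// Accepts a MediaRecorder Blob or an <input type="file"> File alike.
async function logDuration(media: File | Blob) {
  const seconds = await extractVideoDuration(media);
  console.log(`duration: ${seconds.toFixed(1)}s`);
}
```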
@@ -463,33 +511,34 @@ export type WorkerStateChangeResponse = {
  type: "stateChange";
  state: "recording" | "paused" | "stopped";
  };
+ import type { Quality, VideoCodec } from "mediabunny";
  export type WorkerTranscodeConfig = {
- width: number;
- height: number;
- fps: number;
- bitrate: number;
+ width?: number;
+ height?: number;
+ fps?: number;
+ bitrate?: number | Quality;
  audioCodec: "aac" | "opus";
  audioBitrate?: number;
- codec: "avc";
+ codec: VideoCodec;
  keyFrameInterval: number;
  format: "mp4" | "mkv" | "mov" | "webm";
  };
 
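WorkerTranscodeConfig loosens considerably here: width/height/fps/bitrate become optional, bitrate additionally accepts a mediabunny Quality preset, and codec widens from the literal "avc" to any mediabunny VideoCodec. A hedged sketch of a config under the new shape (that omitted dimensions fall back to the source stream is an inference, not stated in the diff):

```ts
import { QUALITY_HIGH } from "mediabunny"; // mediabunny quality preset
import type { WorkerTranscodeConfig } from "@vidtreo/recorder";

const config: WorkerTranscodeConfig = {
  codec: "vp9",          // any mediabunny VideoCodec now, not just "avc"
  bitrate: QUALITY_HIGH, // Quality preset accepted alongside raw bit/s numbers
  audioCodec: "opus",
  keyFrameInterval: 2,   // unit (seconds vs frames) not specified in this diff
  format: "webm",
  // width/height/fps omitted: presumably inherited from the source
};
```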
- export declare const workerCode = "// ../../node_modules/mediabunny/dist/modules/src/misc.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nfunction assert(x) {\n if (!x) {\n throw new Error(\"Assertion failed.\");\n }\n}\nvar last = (arr) => {\n return arr && arr[arr.length - 1];\n};\nvar isU32 = (value) => {\n return value >= 0 && value < 2 ** 32;\n};\n\nclass Bitstream {\n constructor(bytes) {\n this.bytes = bytes;\n this.pos = 0;\n }\n seekToByte(byteOffset) {\n this.pos = 8 * byteOffset;\n }\n readBit() {\n const byteIndex = Math.floor(this.pos / 8);\n const byte = this.bytes[byteIndex] ?? 0;\n const bitIndex = 7 - (this.pos & 7);\n const bit = (byte & 1 << bitIndex) >> bitIndex;\n this.pos++;\n return bit;\n }\n readBits(n) {\n if (n === 1) {\n return this.readBit();\n }\n let result = 0;\n for (let i = 0;i < n; i++) {\n result <<= 1;\n result |= this.readBit();\n }\n return result;\n }\n writeBits(n, value) {\n const end = this.pos + n;\n for (let i = this.pos;i < end; i++) {\n const byteIndex = Math.floor(i / 8);\n let byte = this.bytes[byteIndex];\n const bitIndex = 7 - (i & 7);\n byte &= ~(1 << bitIndex);\n byte |= (value & 1 << end - i - 1) >> end - i - 1 << bitIndex;\n this.bytes[byteIndex] = byte;\n }\n this.pos = end;\n }\n readAlignedByte() {\n if (this.pos % 8 !== 0) {\n throw new Error(\"Bitstream is not byte-aligned.\");\n }\n const byteIndex = this.pos / 8;\n const byte = this.bytes[byteIndex] ?? 0;\n this.pos += 8;\n return byte;\n }\n skipBits(n) {\n this.pos += n;\n }\n getBitsLeft() {\n return this.bytes.length * 8 - this.pos;\n }\n clone() {\n const clone = new Bitstream(this.bytes);\n clone.pos = this.pos;\n return clone;\n }\n}\nvar readExpGolomb = (bitstream) => {\n let leadingZeroBits = 0;\n while (bitstream.readBits(1) === 0 && leadingZeroBits < 32) {\n leadingZeroBits++;\n }\n if (leadingZeroBits >= 32) {\n throw new Error(\"Invalid exponential-Golomb code.\");\n }\n const result = (1 << leadingZeroBits) - 1 + bitstream.readBits(leadingZeroBits);\n return result;\n};\nvar readSignedExpGolomb = (bitstream) => {\n const codeNum = readExpGolomb(bitstream);\n return (codeNum & 1) === 0 ? 
-(codeNum >> 1) : codeNum + 1 >> 1;\n};\nvar toUint8Array = (source) => {\n if (source.constructor === Uint8Array) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new Uint8Array(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new Uint8Array(source);\n }\n};\nvar toDataView = (source) => {\n if (source.constructor === DataView) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new DataView(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new DataView(source);\n }\n};\nvar textEncoder = /* @__PURE__ */ new TextEncoder;\nvar COLOR_PRIMARIES_MAP = {\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n bt2020: 9,\n smpte432: 12\n};\nvar TRANSFER_CHARACTERISTICS_MAP = {\n bt709: 1,\n smpte170m: 6,\n linear: 8,\n \"iec61966-2-1\": 13,\n pq: 16,\n hlg: 18\n};\nvar MATRIX_COEFFICIENTS_MAP = {\n rgb: 0,\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n \"bt2020-ncl\": 9\n};\nvar colorSpaceIsComplete = (colorSpace) => {\n return !!colorSpace && !!colorSpace.primaries && !!colorSpace.transfer && !!colorSpace.matrix && colorSpace.fullRange !== undefined;\n};\nvar isAllowSharedBufferSource = (x) => {\n return x instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && x instanceof SharedArrayBuffer || ArrayBuffer.isView(x);\n};\n\nclass AsyncMutex {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n async acquire() {\n let resolver;\n const nextPromise = new Promise((resolve) => {\n resolver = resolve;\n });\n const currentPromiseAlias = this.currentPromise;\n this.currentPromise = nextPromise;\n await currentPromiseAlias;\n return resolver;\n }\n}\nvar promiseWithResolvers = () => {\n let resolve;\n let reject;\n const promise = new Promise((res, rej) => {\n resolve = res;\n reject = rej;\n });\n return { promise, resolve, reject };\n};\nvar assertNever = (x) => {\n throw new Error(`Unexpected value: ${x}`);\n};\nvar setUint24 = (view, byteOffset, value, littleEndian) => {\n value = value >>> 0;\n value = value & 16777215;\n if (littleEndian) {\n view.setUint8(byteOffset, value & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value >>> 16 & 255);\n } else {\n view.setUint8(byteOffset, value >>> 16 & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value & 255);\n }\n};\nvar setInt24 = (view, byteOffset, value, littleEndian) => {\n value = clamp(value, -8388608, 8388607);\n if (value < 0) {\n value = value + 16777216 & 16777215;\n }\n setUint24(view, byteOffset, value, littleEndian);\n};\nvar clamp = (value, min, max) => {\n return Math.max(min, Math.min(max, value));\n};\nvar UNDETERMINED_LANGUAGE = \"und\";\nvar ISO_639_2_REGEX = /^[a-z]{3}$/;\nvar isIso639Dash2LanguageCode = (x) => {\n return ISO_639_2_REGEX.test(x);\n};\nvar SECOND_TO_MICROSECOND_FACTOR = 1e6 * (1 + Number.EPSILON);\nvar computeRationalApproximation = (x, maxDenominator) => {\n const sign = x < 0 ? 
-1 : 1;\n x = Math.abs(x);\n let prevNumerator = 0, prevDenominator = 1;\n let currNumerator = 1, currDenominator = 0;\n let remainder = x;\n while (true) {\n const integer = Math.floor(remainder);\n const nextNumerator = integer * currNumerator + prevNumerator;\n const nextDenominator = integer * currDenominator + prevDenominator;\n if (nextDenominator > maxDenominator) {\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n }\n prevNumerator = currNumerator;\n prevDenominator = currDenominator;\n currNumerator = nextNumerator;\n currDenominator = nextDenominator;\n remainder = 1 / (remainder - integer);\n if (!isFinite(remainder)) {\n break;\n }\n }\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n};\n\nclass CallSerializer {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n call(fn) {\n return this.currentPromise = this.currentPromise.then(fn);\n }\n}\nvar isFirefoxCache = null;\nvar isFirefox = () => {\n if (isFirefoxCache !== null) {\n return isFirefoxCache;\n }\n return isFirefoxCache = typeof navigator !== \"undefined\" && navigator.userAgent?.includes(\"Firefox\");\n};\nvar keyValueIterator = function* (object) {\n for (const key in object) {\n const value = object[key];\n if (value === undefined) {\n continue;\n }\n yield { key, value };\n }\n};\nvar polyfillSymbolDispose = () => {\n Symbol.dispose ??= Symbol(\"Symbol.dispose\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/metadata.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass RichImageData {\n constructor(data, mimeType) {\n this.data = data;\n this.mimeType = mimeType;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType must be a string.\");\n }\n }\n}\n\nclass AttachedFile {\n constructor(data, mimeType, name, description) {\n this.data = data;\n this.mimeType = mimeType;\n this.name = name;\n this.description = description;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (mimeType !== undefined && typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType, when provided, must be a string.\");\n }\n if (name !== undefined && typeof name !== \"string\") {\n throw new TypeError(\"name, when provided, must be a string.\");\n }\n if (description !== undefined && typeof description !== \"string\") {\n throw new TypeError(\"description, when provided, must be a string.\");\n }\n }\n}\nvar validateMetadataTags = (tags) => {\n if (!tags || typeof tags !== \"object\") {\n throw new TypeError(\"tags must be an object.\");\n }\n if (tags.title !== undefined && typeof tags.title !== \"string\") {\n throw new TypeError(\"tags.title, when provided, must be a string.\");\n }\n if (tags.description !== undefined && typeof tags.description !== \"string\") {\n throw new TypeError(\"tags.description, when provided, must be a string.\");\n }\n if (tags.artist !== undefined && typeof tags.artist !== \"string\") {\n throw new TypeError(\"tags.artist, when provided, must be a string.\");\n }\n if (tags.album !== undefined && typeof tags.album !== \"string\") {\n throw new TypeError(\"tags.album, when provided, must be a string.\");\n }\n if 
(tags.albumArtist !== undefined && typeof tags.albumArtist !== \"string\") {\n throw new TypeError(\"tags.albumArtist, when provided, must be a string.\");\n }\n if (tags.trackNumber !== undefined && (!Number.isInteger(tags.trackNumber) || tags.trackNumber <= 0)) {\n throw new TypeError(\"tags.trackNumber, when provided, must be a positive integer.\");\n }\n if (tags.tracksTotal !== undefined && (!Number.isInteger(tags.tracksTotal) || tags.tracksTotal <= 0)) {\n throw new TypeError(\"tags.tracksTotal, when provided, must be a positive integer.\");\n }\n if (tags.discNumber !== undefined && (!Number.isInteger(tags.discNumber) || tags.discNumber <= 0)) {\n throw new TypeError(\"tags.discNumber, when provided, must be a positive integer.\");\n }\n if (tags.discsTotal !== undefined && (!Number.isInteger(tags.discsTotal) || tags.discsTotal <= 0)) {\n throw new TypeError(\"tags.discsTotal, when provided, must be a positive integer.\");\n }\n if (tags.genre !== undefined && typeof tags.genre !== \"string\") {\n throw new TypeError(\"tags.genre, when provided, must be a string.\");\n }\n if (tags.date !== undefined && (!(tags.date instanceof Date) || Number.isNaN(tags.date.getTime()))) {\n throw new TypeError(\"tags.date, when provided, must be a valid Date.\");\n }\n if (tags.lyrics !== undefined && typeof tags.lyrics !== \"string\") {\n throw new TypeError(\"tags.lyrics, when provided, must be a string.\");\n }\n if (tags.images !== undefined) {\n if (!Array.isArray(tags.images)) {\n throw new TypeError(\"tags.images, when provided, must be an array.\");\n }\n for (const image of tags.images) {\n if (!image || typeof image !== \"object\") {\n throw new TypeError(\"Each image in tags.images must be an object.\");\n }\n if (!(image.data instanceof Uint8Array)) {\n throw new TypeError(\"Each image.data must be a Uint8Array.\");\n }\n if (typeof image.mimeType !== \"string\") {\n throw new TypeError(\"Each image.mimeType must be a string.\");\n }\n if (![\"coverFront\", \"coverBack\", \"unknown\"].includes(image.kind)) {\n throw new TypeError(\"Each image.kind must be 'coverFront', 'coverBack', or 'unknown'.\");\n }\n }\n }\n if (tags.comment !== undefined && typeof tags.comment !== \"string\") {\n throw new TypeError(\"tags.comment, when provided, must be a string.\");\n }\n if (tags.raw !== undefined) {\n if (!tags.raw || typeof tags.raw !== \"object\") {\n throw new TypeError(\"tags.raw, when provided, must be an object.\");\n }\n for (const value of Object.values(tags.raw)) {\n if (value !== null && typeof value !== \"string\" && !(value instanceof Uint8Array) && !(value instanceof RichImageData) && !(value instanceof AttachedFile)) {\n throw new TypeError(\"Each value in tags.raw must be a string, Uint8Array, RichImageData, AttachedFile, or null.\");\n }\n }\n }\n};\nvar validateTrackDisposition = (disposition) => {\n if (!disposition || typeof disposition !== \"object\") {\n throw new TypeError(\"disposition must be an object.\");\n }\n if (disposition.default !== undefined && typeof disposition.default !== \"boolean\") {\n throw new TypeError(\"disposition.default must be a boolean.\");\n }\n if (disposition.forced !== undefined && typeof disposition.forced !== \"boolean\") {\n throw new TypeError(\"disposition.forced must be a boolean.\");\n }\n if (disposition.original !== undefined && typeof disposition.original !== \"boolean\") {\n throw new TypeError(\"disposition.original must be a boolean.\");\n }\n if (disposition.commentary !== undefined && typeof disposition.commentary !== 
\"boolean\") {\n throw new TypeError(\"disposition.commentary must be a boolean.\");\n }\n if (disposition.hearingImpaired !== undefined && typeof disposition.hearingImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.hearingImpaired must be a boolean.\");\n }\n if (disposition.visuallyImpaired !== undefined && typeof disposition.visuallyImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.visuallyImpaired must be a boolean.\");\n }\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/codec.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar VIDEO_CODECS = [\n \"avc\",\n \"hevc\",\n \"vp9\",\n \"av1\",\n \"vp8\"\n];\nvar PCM_AUDIO_CODECS = [\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n \"pcm-u8\",\n \"pcm-s8\",\n \"ulaw\",\n \"alaw\"\n];\nvar NON_PCM_AUDIO_CODECS = [\n \"aac\",\n \"opus\",\n \"mp3\",\n \"vorbis\",\n \"flac\"\n];\nvar AUDIO_CODECS = [\n ...NON_PCM_AUDIO_CODECS,\n ...PCM_AUDIO_CODECS\n];\nvar SUBTITLE_CODECS = [\n \"webvtt\"\n];\nvar AVC_LEVEL_TABLE = [\n { maxMacroblocks: 99, maxBitrate: 64000, level: 10 },\n { maxMacroblocks: 396, maxBitrate: 192000, level: 11 },\n { maxMacroblocks: 396, maxBitrate: 384000, level: 12 },\n { maxMacroblocks: 396, maxBitrate: 768000, level: 13 },\n { maxMacroblocks: 396, maxBitrate: 2000000, level: 20 },\n { maxMacroblocks: 792, maxBitrate: 4000000, level: 21 },\n { maxMacroblocks: 1620, maxBitrate: 4000000, level: 22 },\n { maxMacroblocks: 1620, maxBitrate: 1e7, level: 30 },\n { maxMacroblocks: 3600, maxBitrate: 14000000, level: 31 },\n { maxMacroblocks: 5120, maxBitrate: 20000000, level: 32 },\n { maxMacroblocks: 8192, maxBitrate: 20000000, level: 40 },\n { maxMacroblocks: 8192, maxBitrate: 50000000, level: 41 },\n { maxMacroblocks: 8704, maxBitrate: 50000000, level: 42 },\n { maxMacroblocks: 22080, maxBitrate: 135000000, level: 50 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, level: 51 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, level: 52 },\n { maxMacroblocks: 139264, maxBitrate: 240000000, level: 60 },\n { maxMacroblocks: 139264, maxBitrate: 480000000, level: 61 },\n { maxMacroblocks: 139264, maxBitrate: 800000000, level: 62 }\n];\nvar HEVC_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 128000, tier: \"L\", level: 30 },\n { maxPictureSize: 122880, maxBitrate: 1500000, tier: \"L\", level: 60 },\n { maxPictureSize: 245760, maxBitrate: 3000000, tier: \"L\", level: 63 },\n { maxPictureSize: 552960, maxBitrate: 6000000, tier: \"L\", level: 90 },\n { maxPictureSize: 983040, maxBitrate: 1e7, tier: \"L\", level: 93 },\n { maxPictureSize: 2228224, maxBitrate: 12000000, tier: \"L\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, tier: \"H\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 20000000, tier: \"L\", level: 123 },\n { maxPictureSize: 2228224, maxBitrate: 50000000, tier: \"H\", level: 123 },\n { maxPictureSize: 8912896, maxBitrate: 25000000, tier: \"L\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"L\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 153 },\n { maxPictureSize: 
8912896, maxBitrate: 60000000, tier: \"L\", level: 156 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 156 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"L\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 120000000, tier: \"L\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"L\", level: 186 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 186 }\n];\nvar VP9_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 200000, level: 10 },\n { maxPictureSize: 73728, maxBitrate: 800000, level: 11 },\n { maxPictureSize: 122880, maxBitrate: 1800000, level: 20 },\n { maxPictureSize: 245760, maxBitrate: 3600000, level: 21 },\n { maxPictureSize: 552960, maxBitrate: 7200000, level: 30 },\n { maxPictureSize: 983040, maxBitrate: 12000000, level: 31 },\n { maxPictureSize: 2228224, maxBitrate: 18000000, level: 40 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, level: 41 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, level: 50 },\n { maxPictureSize: 8912896, maxBitrate: 120000000, level: 51 },\n { maxPictureSize: 8912896, maxBitrate: 180000000, level: 52 },\n { maxPictureSize: 35651584, maxBitrate: 180000000, level: 60 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, level: 61 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, level: 62 }\n];\nvar AV1_LEVEL_TABLE = [\n { maxPictureSize: 147456, maxBitrate: 1500000, tier: \"M\", level: 0 },\n { maxPictureSize: 278784, maxBitrate: 3000000, tier: \"M\", level: 1 },\n { maxPictureSize: 665856, maxBitrate: 6000000, tier: \"M\", level: 4 },\n { maxPictureSize: 1065024, maxBitrate: 1e7, tier: \"M\", level: 5 },\n { maxPictureSize: 2359296, maxBitrate: 12000000, tier: \"M\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 30000000, tier: \"H\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 20000000, tier: \"M\", level: 9 },\n { maxPictureSize: 2359296, maxBitrate: 50000000, tier: \"H\", level: 9 },\n { maxPictureSize: 8912896, maxBitrate: 30000000, tier: \"M\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"M\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"M\", level: 14 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 14 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 1e8, tier: \"M\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 19 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 19 }\n];\nvar buildVideoCodecString = (codec, width, height, bitrate) => {\n if (codec === \"avc\") {\n const profileIndication = 100;\n const totalMacroblocks = 
Math.ceil(width / 16) * Math.ceil(height / 16);\n const levelInfo = AVC_LEVEL_TABLE.find((level) => totalMacroblocks <= level.maxMacroblocks && bitrate <= level.maxBitrate) ?? last(AVC_LEVEL_TABLE);\n const levelIndication = levelInfo ? levelInfo.level : 0;\n const hexProfileIndication = profileIndication.toString(16).padStart(2, \"0\");\n const hexProfileCompatibility = \"00\";\n const hexLevelIndication = levelIndication.toString(16).padStart(2, \"0\");\n return `avc1.${hexProfileIndication}${hexProfileCompatibility}${hexLevelIndication}`;\n } else if (codec === \"hevc\") {\n const profilePrefix = \"\";\n const profileIdc = 1;\n const compatibilityFlags = \"6\";\n const pictureSize = width * height;\n const levelInfo = HEVC_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? last(HEVC_LEVEL_TABLE);\n const constraintFlags = \"B0\";\n return \"hev1.\" + `${profilePrefix}${profileIdc}.` + `${compatibilityFlags}.` + `${levelInfo.tier}${levelInfo.level}.` + `${constraintFlags}`;\n } else if (codec === \"vp8\") {\n return \"vp8\";\n } else if (codec === \"vp9\") {\n const profile = \"00\";\n const pictureSize = width * height;\n const levelInfo = VP9_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? last(VP9_LEVEL_TABLE);\n const bitDepth = \"08\";\n return `vp09.${profile}.${levelInfo.level.toString().padStart(2, \"0\")}.${bitDepth}`;\n } else if (codec === \"av1\") {\n const profile = 0;\n const pictureSize = width * height;\n const levelInfo = AV1_LEVEL_TABLE.find((level2) => pictureSize <= level2.maxPictureSize && bitrate <= level2.maxBitrate) ?? last(AV1_LEVEL_TABLE);\n const level = levelInfo.level.toString().padStart(2, \"0\");\n const bitDepth = \"08\";\n return `av01.${profile}.${level}${levelInfo.tier}.${bitDepth}`;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar generateAv1CodecConfigurationFromCodecString = (codecString) => {\n const parts = codecString.split(\".\");\n const marker = 1;\n const version = 1;\n const firstByte = (marker << 7) + version;\n const profile = Number(parts[1]);\n const levelAndTier = parts[2];\n const level = Number(levelAndTier.slice(0, -1));\n const secondByte = (profile << 5) + level;\n const tier = levelAndTier.slice(-1) === \"H\" ? 1 : 0;\n const bitDepth = Number(parts[3]);\n const highBitDepth = bitDepth === 8 ? 0 : 1;\n const twelveBit = 0;\n const monochrome = parts[4] ? Number(parts[4]) : 0;\n const chromaSubsamplingX = parts[5] ? Number(parts[5][0]) : 1;\n const chromaSubsamplingY = parts[5] ? Number(parts[5][1]) : 1;\n const chromaSamplePosition = parts[5] ? 
Number(parts[5][2]) : 0;\n const thirdByte = (tier << 7) + (highBitDepth << 6) + (twelveBit << 5) + (monochrome << 4) + (chromaSubsamplingX << 3) + (chromaSubsamplingY << 2) + chromaSamplePosition;\n const initialPresentationDelayPresent = 0;\n const fourthByte = initialPresentationDelayPresent;\n return [firstByte, secondByte, thirdByte, fourthByte];\n};\nvar buildAudioCodecString = (codec, numberOfChannels, sampleRate) => {\n if (codec === \"aac\") {\n if (numberOfChannels >= 2 && sampleRate <= 24000) {\n return \"mp4a.40.29\";\n }\n if (sampleRate <= 24000) {\n return \"mp4a.40.5\";\n }\n return \"mp4a.40.2\";\n } else if (codec === \"mp3\") {\n return \"mp3\";\n } else if (codec === \"opus\") {\n return \"opus\";\n } else if (codec === \"vorbis\") {\n return \"vorbis\";\n } else if (codec === \"flac\") {\n return \"flac\";\n } else if (PCM_AUDIO_CODECS.includes(codec)) {\n return codec;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar aacFrequencyTable = [\n 96000,\n 88200,\n 64000,\n 48000,\n 44100,\n 32000,\n 24000,\n 22050,\n 16000,\n 12000,\n 11025,\n 8000,\n 7350\n];\nvar aacChannelMap = [-1, 1, 2, 3, 4, 5, 6, 8];\nvar parseAacAudioSpecificConfig = (bytes) => {\n if (!bytes || bytes.byteLength < 2) {\n throw new TypeError(\"AAC description must be at least 2 bytes long.\");\n }\n const bitstream = new Bitstream(bytes);\n let objectType = bitstream.readBits(5);\n if (objectType === 31) {\n objectType = 32 + bitstream.readBits(6);\n }\n const frequencyIndex = bitstream.readBits(4);\n let sampleRate = null;\n if (frequencyIndex === 15) {\n sampleRate = bitstream.readBits(24);\n } else {\n if (frequencyIndex < aacFrequencyTable.length) {\n sampleRate = aacFrequencyTable[frequencyIndex];\n }\n }\n const channelConfiguration = bitstream.readBits(4);\n let numberOfChannels = null;\n if (channelConfiguration >= 1 && channelConfiguration <= 7) {\n numberOfChannels = aacChannelMap[channelConfiguration];\n }\n return {\n objectType,\n frequencyIndex,\n sampleRate,\n channelConfiguration,\n numberOfChannels\n };\n};\nvar buildAacAudioSpecificConfig = (config) => {\n let frequencyIndex = aacFrequencyTable.indexOf(config.sampleRate);\n let customSampleRate = null;\n if (frequencyIndex === -1) {\n frequencyIndex = 15;\n customSampleRate = config.sampleRate;\n }\n const channelConfiguration = aacChannelMap.indexOf(config.numberOfChannels);\n if (channelConfiguration === -1) {\n throw new TypeError(`Unsupported number of channels: ${config.numberOfChannels}`);\n }\n let bitCount = 5 + 4 + 4;\n if (config.objectType >= 32) {\n bitCount += 6;\n }\n if (frequencyIndex === 15) {\n bitCount += 24;\n }\n const byteCount = Math.ceil(bitCount / 8);\n const bytes = new Uint8Array(byteCount);\n const bitstream = new Bitstream(bytes);\n if (config.objectType < 32) {\n bitstream.writeBits(5, config.objectType);\n } else {\n bitstream.writeBits(5, 31);\n bitstream.writeBits(6, config.objectType - 32);\n }\n bitstream.writeBits(4, frequencyIndex);\n if (frequencyIndex === 15) {\n bitstream.writeBits(24, customSampleRate);\n }\n bitstream.writeBits(4, channelConfiguration);\n return bytes;\n};\nvar PCM_CODEC_REGEX = /^pcm-([usf])(\\d+)+(be)?$/;\nvar parsePcmCodec = (codec) => {\n assert(PCM_AUDIO_CODECS.includes(codec));\n if (codec === \"ulaw\") {\n return { dataType: \"ulaw\", sampleSize: 1, littleEndian: true, silentValue: 255 };\n } else if (codec === \"alaw\") {\n return { dataType: \"alaw\", sampleSize: 1, littleEndian: true, silentValue: 213 };\n }\n const match = 
PCM_CODEC_REGEX.exec(codec);\n assert(match);\n let dataType;\n if (match[1] === \"u\") {\n dataType = \"unsigned\";\n } else if (match[1] === \"s\") {\n dataType = \"signed\";\n } else {\n dataType = \"float\";\n }\n const sampleSize = Number(match[2]) / 8;\n const littleEndian = match[3] !== \"be\";\n const silentValue = codec === \"pcm-u8\" ? 2 ** 7 : 0;\n return { dataType, sampleSize, littleEndian, silentValue };\n};\nvar inferCodecFromCodecString = (codecString) => {\n if (codecString.startsWith(\"avc1\") || codecString.startsWith(\"avc3\")) {\n return \"avc\";\n } else if (codecString.startsWith(\"hev1\") || codecString.startsWith(\"hvc1\")) {\n return \"hevc\";\n } else if (codecString === \"vp8\") {\n return \"vp8\";\n } else if (codecString.startsWith(\"vp09\")) {\n return \"vp9\";\n } else if (codecString.startsWith(\"av01\")) {\n return \"av1\";\n }\n if (codecString.startsWith(\"mp4a.40\") || codecString === \"mp4a.67\") {\n return \"aac\";\n } else if (codecString === \"mp3\" || codecString === \"mp4a.69\" || codecString === \"mp4a.6B\" || codecString === \"mp4a.6b\") {\n return \"mp3\";\n } else if (codecString === \"opus\") {\n return \"opus\";\n } else if (codecString === \"vorbis\") {\n return \"vorbis\";\n } else if (codecString === \"flac\") {\n return \"flac\";\n } else if (codecString === \"ulaw\") {\n return \"ulaw\";\n } else if (codecString === \"alaw\") {\n return \"alaw\";\n } else if (PCM_CODEC_REGEX.test(codecString)) {\n return codecString;\n }\n if (codecString === \"webvtt\") {\n return \"webvtt\";\n }\n return null;\n};\nvar getVideoEncoderConfigExtension = (codec) => {\n if (codec === \"avc\") {\n return {\n avc: {\n format: \"avc\"\n }\n };\n } else if (codec === \"hevc\") {\n return {\n hevc: {\n format: \"hevc\"\n }\n };\n }\n return {};\n};\nvar getAudioEncoderConfigExtension = (codec) => {\n if (codec === \"aac\") {\n return {\n aac: {\n format: \"aac\"\n }\n };\n } else if (codec === \"opus\") {\n return {\n opus: {\n format: \"opus\"\n }\n };\n }\n return {};\n};\nvar VALID_VIDEO_CODEC_STRING_PREFIXES = [\"avc1\", \"avc3\", \"hev1\", \"hvc1\", \"vp8\", \"vp09\", \"av01\"];\nvar AVC_CODEC_STRING_REGEX = /^(avc1|avc3)\\.[0-9a-fA-F]{6}$/;\nvar HEVC_CODEC_STRING_REGEX = /^(hev1|hvc1)\\.(?:[ABC]?\\d+)\\.[0-9a-fA-F]{1,8}\\.[LH]\\d+(?:\\.[0-9a-fA-F]{1,2}){0,6}$/;\nvar VP9_CODEC_STRING_REGEX = /^vp09(?:\\.\\d{2}){3}(?:(?:\\.\\d{2}){5})?$/;\nvar AV1_CODEC_STRING_REGEX = /^av01\\.\\d\\.\\d{2}[MH]\\.\\d{2}(?:\\.\\d\\.\\d{3}\\.\\d{2}\\.\\d{2}\\.\\d{2}\\.\\d)?$/;\nvar validateVideoChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Video chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Video chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Video chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_VIDEO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string must be a valid video codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if 
(!Number.isInteger(metadata.decoderConfig.codedWidth) || metadata.decoderConfig.codedWidth <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedWidth (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.codedHeight) || metadata.decoderConfig.codedHeight <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedHeight (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Video chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.colorSpace !== undefined) {\n const { colorSpace } = metadata.decoderConfig;\n if (typeof colorSpace !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace, when provided, must be an object.\");\n }\n const primariesValues = Object.keys(COLOR_PRIMARIES_MAP);\n if (colorSpace.primaries != null && !primariesValues.includes(colorSpace.primaries)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace primaries, when defined, must be one of` + ` ${primariesValues.join(\", \")}.`);\n }\n const transferValues = Object.keys(TRANSFER_CHARACTERISTICS_MAP);\n if (colorSpace.transfer != null && !transferValues.includes(colorSpace.transfer)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace transfer, when defined, must be one of` + ` ${transferValues.join(\", \")}.`);\n }\n const matrixValues = Object.keys(MATRIX_COEFFICIENTS_MAP);\n if (colorSpace.matrix != null && !matrixValues.includes(colorSpace.matrix)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace matrix, when defined, must be one of` + ` ${matrixValues.join(\", \")}.`);\n }\n if (colorSpace.fullRange != null && typeof colorSpace.fullRange !== \"boolean\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace fullRange, when defined, must be a boolean.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"avc1\") || metadata.decoderConfig.codec.startsWith(\"avc3\")) {\n if (!AVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AVC must be a valid AVC codec string as\" + \" specified in Section 3.4 of RFC 6381.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"hev1\") || metadata.decoderConfig.codec.startsWith(\"hvc1\")) {\n if (!HEVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for HEVC must be a valid HEVC codec string as\" + \" specified in Section E.3 of ISO 14496-15.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp8\")) {\n if (metadata.decoderConfig.codec !== \"vp8\") {\n throw new TypeError('Video chunk metadata decoder configuration codec string for VP8 must be \"vp8\".');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp09\")) {\n if (!VP9_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for VP9 must be a valid VP9 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://www.webmproject.org/vp9/mp4/.');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"av01\")) {\n if 
(!AV1_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AV1 must be a valid AV1 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://aomediacodec.github.io/av1-isobmff/.');\n }\n }\n};\nvar VALID_AUDIO_CODEC_STRING_PREFIXES = [\"mp4a\", \"mp3\", \"opus\", \"vorbis\", \"flac\", \"ulaw\", \"alaw\", \"pcm\"];\nvar validateAudioChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Audio chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Audio chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Audio chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_AUDIO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string must be a valid audio codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if (!Number.isInteger(metadata.decoderConfig.sampleRate) || metadata.decoderConfig.sampleRate <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid sampleRate (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.numberOfChannels) || metadata.decoderConfig.numberOfChannels <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid numberOfChannels (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"mp4a\") && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n const validStrings = [\"mp4a.40.2\", \"mp4a.40.02\", \"mp4a.40.5\", \"mp4a.40.05\", \"mp4a.40.29\", \"mp4a.67\"];\n if (!validStrings.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for AAC must be a valid AAC codec string as\" + \" specified in https://www.w3.org/TR/webcodecs-aac-codec-registration/.\");\n }\n if (!metadata.decoderConfig.description) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for AAC must include a description, which is expected to be\" + \" an AudioSpecificConfig as specified in ISO 14496-3.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"mp3\") || metadata.decoderConfig.codec.startsWith(\"mp4a\")) {\n if (metadata.decoderConfig.codec !== \"mp3\" && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for MP3 must be \"mp3\", \"mp4a.69\" or' + ' \"mp4a.6B\".');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"opus\")) {\n if 
(metadata.decoderConfig.codec !== \"opus\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Opus must be \"opus\".');\n }\n if (metadata.decoderConfig.description && metadata.decoderConfig.description.byteLength < 18) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when specified, is expected to be an\" + \" Identification Header as specified in Section 5.1 of RFC 7845.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vorbis\")) {\n if (metadata.decoderConfig.codec !== \"vorbis\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Vorbis must be \"vorbis\".');\n }\n if (!metadata.decoderConfig.description) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for Vorbis must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-vorbis-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"flac\")) {\n if (metadata.decoderConfig.codec !== \"flac\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for FLAC must be \"flac\".');\n }\n const minDescriptionSize = 4 + 4 + 34;\n if (!metadata.decoderConfig.description || metadata.decoderConfig.description.byteLength < minDescriptionSize) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for FLAC must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-flac-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"pcm\") || metadata.decoderConfig.codec.startsWith(\"ulaw\") || metadata.decoderConfig.codec.startsWith(\"alaw\")) {\n if (!PCM_AUDIO_CODECS.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for PCM must be one of the supported PCM\" + ` codecs (${PCM_AUDIO_CODECS.join(\", \")}).`);\n }\n }\n};\nvar validateSubtitleMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Subtitle metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Subtitle metadata must be an object.\");\n }\n if (!metadata.config) {\n throw new TypeError(\"Subtitle metadata must include a config object.\");\n }\n if (typeof metadata.config !== \"object\") {\n throw new TypeError(\"Subtitle metadata config must be an object.\");\n }\n if (typeof metadata.config.description !== \"string\") {\n throw new TypeError(\"Subtitle metadata config description must be a string.\");\n }\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/muxer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Muxer {\n constructor(output) {\n this.mutex = new AsyncMutex;\n this.firstMediaStreamTimestamp = null;\n this.trackTimestampInfo = new WeakMap;\n this.output = output;\n }\n onTrackClose(track) {}\n validateAndNormalizeTimestamp(track, timestampInSeconds, isKeyPacket) {\n timestampInSeconds += track.source._timestampOffset;\n let timestampInfo = this.trackTimestampInfo.get(track);\n if (!timestampInfo) {\n if (!isKeyPacket) {\n throw new Error(\"First packet must be a key packet.\");\n }\n timestampInfo = {\n maxTimestamp: timestampInSeconds,\n maxTimestampBeforeLastKeyPacket: timestampInSeconds\n };\n this.trackTimestampInfo.set(track, timestampInfo);\n }\n if (timestampInSeconds < 0) {\n throw new Error(`Timestamps must be non-negative (got ${timestampInSeconds}s).`);\n }\n if (isKeyPacket) {\n timestampInfo.maxTimestampBeforeLastKeyPacket = timestampInfo.maxTimestamp;\n }\n if (timestampInSeconds < timestampInfo.maxTimestampBeforeLastKeyPacket) {\n throw new Error(`Timestamps cannot be smaller than the largest timestamp of the previous GOP (a GOP begins with a key` + ` packet and ends right before the next key packet). Got ${timestampInSeconds}s, but largest` + ` timestamp is ${timestampInfo.maxTimestampBeforeLastKeyPacket}s.`);\n }\n timestampInfo.maxTimestamp = Math.max(timestampInfo.maxTimestamp, timestampInSeconds);\n return timestampInSeconds;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/codec-data.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar AvcNalUnitType;\n(function(AvcNalUnitType2) {\n AvcNalUnitType2[AvcNalUnitType2[\"IDR\"] = 5] = \"IDR\";\n AvcNalUnitType2[AvcNalUnitType2[\"SEI\"] = 6] = \"SEI\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS\"] = 7] = \"SPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"PPS\"] = 8] = \"PPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS_EXT\"] = 13] = \"SPS_EXT\";\n})(AvcNalUnitType || (AvcNalUnitType = {}));\nvar HevcNalUnitType;\n(function(HevcNalUnitType2) {\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_N\"] = 8] = \"RASL_N\";\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_R\"] = 9] = \"RASL_R\";\n HevcNalUnitType2[HevcNalUnitType2[\"BLA_W_LP\"] = 16] = \"BLA_W_LP\";\n HevcNalUnitType2[HevcNalUnitType2[\"RSV_IRAP_VCL23\"] = 23] = \"RSV_IRAP_VCL23\";\n HevcNalUnitType2[HevcNalUnitType2[\"VPS_NUT\"] = 32] = \"VPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SPS_NUT\"] = 33] = \"SPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PPS_NUT\"] = 34] = \"PPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PREFIX_SEI_NUT\"] = 39] = \"PREFIX_SEI_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SUFFIX_SEI_NUT\"] = 40] = \"SUFFIX_SEI_NUT\";\n})(HevcNalUnitType || (HevcNalUnitType = {}));\nvar findNalUnitsInAnnexB = (packetData) => {\n const nalUnits = [];\n let i = 0;\n while (i < packetData.length) {\n let startCodePos = -1;\n let startCodeLength = 0;\n for (let j = i;j < packetData.length - 3; j++) {\n if (packetData[j] === 0 && packetData[j + 1] === 0 && packetData[j + 2] === 1) {\n startCodePos = j;\n startCodeLength = 3;\n break;\n }\n if (j < packetData.length - 4 && packetData[j] === 0 && packetData[j + 1] === 0 && packetData[j + 2] === 0 && packetData[j + 3] === 1) {\n startCodePos = j;\n 
startCodeLength = 4;\n break;\n }\n }\n if (startCodePos === -1) {\n break;\n }\n if (i > 0 && startCodePos > i) {\n const nalData = packetData.subarray(i, startCodePos);\n if (nalData.length > 0) {\n nalUnits.push(nalData);\n }\n }\n i = startCodePos + startCodeLength;\n }\n if (i < packetData.length) {\n const nalData = packetData.subarray(i);\n if (nalData.length > 0) {\n nalUnits.push(nalData);\n }\n }\n return nalUnits;\n};\nvar removeEmulationPreventionBytes = (data) => {\n const result = [];\n const len = data.length;\n for (let i = 0;i < len; i++) {\n if (i + 2 < len && data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 3) {\n result.push(0, 0);\n i += 2;\n } else {\n result.push(data[i]);\n }\n }\n return new Uint8Array(result);\n};\nvar transformAnnexBToLengthPrefixed = (packetData) => {\n const NAL_UNIT_LENGTH_SIZE = 4;\n const nalUnits = findNalUnitsInAnnexB(packetData);\n if (nalUnits.length === 0) {\n return null;\n }\n let totalSize = 0;\n for (const nalUnit of nalUnits) {\n totalSize += NAL_UNIT_LENGTH_SIZE + nalUnit.byteLength;\n }\n const avccData = new Uint8Array(totalSize);\n const dataView = new DataView(avccData.buffer);\n let offset = 0;\n for (const nalUnit of nalUnits) {\n const length = nalUnit.byteLength;\n dataView.setUint32(offset, length, false);\n offset += 4;\n avccData.set(nalUnit, offset);\n offset += nalUnit.byteLength;\n }\n return avccData;\n};\nvar extractNalUnitTypeForAvc = (data) => {\n return data[0] & 31;\n};\nvar extractAvcDecoderConfigurationRecord = (packetData) => {\n try {\n const nalUnits = findNalUnitsInAnnexB(packetData);\n const spsUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.SPS);\n const ppsUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.PPS);\n const spsExtUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.SPS_EXT);\n if (spsUnits.length === 0) {\n return null;\n }\n if (ppsUnits.length === 0) {\n return null;\n }\n const spsData = spsUnits[0];\n const spsInfo = parseAvcSps(spsData);\n assert(spsInfo !== null);\n const hasExtendedData = spsInfo.profileIdc === 100 || spsInfo.profileIdc === 110 || spsInfo.profileIdc === 122 || spsInfo.profileIdc === 144;\n return {\n configurationVersion: 1,\n avcProfileIndication: spsInfo.profileIdc,\n profileCompatibility: spsInfo.constraintFlags,\n avcLevelIndication: spsInfo.levelIdc,\n lengthSizeMinusOne: 3,\n sequenceParameterSets: spsUnits,\n pictureParameterSets: ppsUnits,\n chromaFormat: hasExtendedData ? spsInfo.chromaFormatIdc : null,\n bitDepthLumaMinus8: hasExtendedData ? spsInfo.bitDepthLumaMinus8 : null,\n bitDepthChromaMinus8: hasExtendedData ? spsInfo.bitDepthChromaMinus8 : null,\n sequenceParameterSetExt: hasExtendedData ? 
spsExtUnits : null\n };\n } catch (error) {\n console.error(\"Error building AVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar serializeAvcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push(record.avcProfileIndication);\n bytes.push(record.profileCompatibility);\n bytes.push(record.avcLevelIndication);\n bytes.push(252 | record.lengthSizeMinusOne & 3);\n bytes.push(224 | record.sequenceParameterSets.length & 31);\n for (const sps of record.sequenceParameterSets) {\n const length = sps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(sps[i]);\n }\n }\n bytes.push(record.pictureParameterSets.length);\n for (const pps of record.pictureParameterSets) {\n const length = pps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(pps[i]);\n }\n }\n if (record.avcProfileIndication === 100 || record.avcProfileIndication === 110 || record.avcProfileIndication === 122 || record.avcProfileIndication === 144) {\n assert(record.chromaFormat !== null);\n assert(record.bitDepthLumaMinus8 !== null);\n assert(record.bitDepthChromaMinus8 !== null);\n assert(record.sequenceParameterSetExt !== null);\n bytes.push(252 | record.chromaFormat & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.sequenceParameterSetExt.length);\n for (const spsExt of record.sequenceParameterSetExt) {\n const length = spsExt.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(spsExt[i]);\n }\n }\n }\n return new Uint8Array(bytes);\n};\nvar parseAvcSps = (sps) => {\n try {\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(1);\n bitstream.skipBits(2);\n const nalUnitType = bitstream.readBits(5);\n if (nalUnitType !== 7) {\n return null;\n }\n const profileIdc = bitstream.readAlignedByte();\n const constraintFlags = bitstream.readAlignedByte();\n const levelIdc = bitstream.readAlignedByte();\n readExpGolomb(bitstream);\n let chromaFormatIdc = null;\n let bitDepthLumaMinus8 = null;\n let bitDepthChromaMinus8 = null;\n if (profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244 || profileIdc === 44 || profileIdc === 83 || profileIdc === 86 || profileIdc === 118 || profileIdc === 128) {\n chromaFormatIdc = readExpGolomb(bitstream);\n if (chromaFormatIdc === 3) {\n bitstream.skipBits(1);\n }\n bitDepthLumaMinus8 = readExpGolomb(bitstream);\n bitDepthChromaMinus8 = readExpGolomb(bitstream);\n bitstream.skipBits(1);\n const seqScalingMatrixPresentFlag = bitstream.readBits(1);\n if (seqScalingMatrixPresentFlag) {\n for (let i = 0;i < (chromaFormatIdc !== 3 ? 8 : 12); i++) {\n const seqScalingListPresentFlag = bitstream.readBits(1);\n if (seqScalingListPresentFlag) {\n const sizeOfScalingList = i < 6 ? 16 : 64;\n let lastScale = 8;\n let nextScale = 8;\n for (let j = 0;j < sizeOfScalingList; j++) {\n if (nextScale !== 0) {\n const deltaScale = readSignedExpGolomb(bitstream);\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? 
lastScale : nextScale;\n }\n }\n }\n }\n }\n readExpGolomb(bitstream);\n const picOrderCntType = readExpGolomb(bitstream);\n if (picOrderCntType === 0) {\n readExpGolomb(bitstream);\n } else if (picOrderCntType === 1) {\n bitstream.skipBits(1);\n readSignedExpGolomb(bitstream);\n readSignedExpGolomb(bitstream);\n const numRefFramesInPicOrderCntCycle = readExpGolomb(bitstream);\n for (let i = 0;i < numRefFramesInPicOrderCntCycle; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n const frameMbsOnlyFlag = bitstream.readBits(1);\n return {\n profileIdc,\n constraintFlags,\n levelIdc,\n frameMbsOnlyFlag,\n chromaFormatIdc,\n bitDepthLumaMinus8,\n bitDepthChromaMinus8\n };\n } catch (error) {\n console.error(\"Error parsing AVC SPS:\", error);\n return null;\n }\n};\nvar extractNalUnitTypeForHevc = (data) => {\n return data[0] >> 1 & 63;\n};\nvar extractHevcDecoderConfigurationRecord = (packetData) => {\n try {\n const nalUnits = findNalUnitsInAnnexB(packetData);\n const vpsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.VPS_NUT);\n const spsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.SPS_NUT);\n const ppsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.PPS_NUT);\n const seiUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.PREFIX_SEI_NUT || extractNalUnitTypeForHevc(unit) === HevcNalUnitType.SUFFIX_SEI_NUT);\n if (spsUnits.length === 0 || ppsUnits.length === 0)\n return null;\n const sps = spsUnits[0];\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(16);\n bitstream.readBits(4);\n const sps_max_sub_layers_minus1 = bitstream.readBits(3);\n const sps_temporal_id_nesting_flag = bitstream.readBits(1);\n const { general_profile_space, general_tier_flag, general_profile_idc, general_profile_compatibility_flags, general_constraint_indicator_flags, general_level_idc } = parseProfileTierLevel(bitstream, sps_max_sub_layers_minus1);\n readExpGolomb(bitstream);\n const chroma_format_idc = readExpGolomb(bitstream);\n if (chroma_format_idc === 3)\n bitstream.skipBits(1);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n const bit_depth_luma_minus8 = readExpGolomb(bitstream);\n const bit_depth_chroma_minus8 = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n const sps_sub_layer_ordering_info_present_flag = bitstream.readBits(1);\n const maxNum = sps_sub_layer_ordering_info_present_flag ? 
0 : sps_max_sub_layers_minus1;\n for (let i = maxNum;i <= sps_max_sub_layers_minus1; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n if (bitstream.readBits(1)) {\n skipScalingListData(bitstream);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n if (bitstream.readBits(1)) {\n bitstream.skipBits(4);\n bitstream.skipBits(4);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n const num_short_term_ref_pic_sets = readExpGolomb(bitstream);\n skipAllStRefPicSets(bitstream, num_short_term_ref_pic_sets);\n if (bitstream.readBits(1)) {\n const num_long_term_ref_pics_sps = readExpGolomb(bitstream);\n for (let i = 0;i < num_long_term_ref_pics_sps; i++) {\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n let min_spatial_segmentation_idc = 0;\n if (bitstream.readBits(1)) {\n min_spatial_segmentation_idc = parseVuiForMinSpatialSegmentationIdc(bitstream, sps_max_sub_layers_minus1);\n }\n let parallelismType = 0;\n if (ppsUnits.length > 0) {\n const pps = ppsUnits[0];\n const ppsBitstream = new Bitstream(removeEmulationPreventionBytes(pps));\n ppsBitstream.skipBits(16);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(3);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n if (ppsBitstream.readBits(1)) {\n readExpGolomb(ppsBitstream);\n }\n readSignedExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n const tiles_enabled_flag = ppsBitstream.readBits(1);\n const entropy_coding_sync_enabled_flag = ppsBitstream.readBits(1);\n if (!tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 0;\n else if (tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 2;\n else if (!tiles_enabled_flag && entropy_coding_sync_enabled_flag)\n parallelismType = 3;\n else\n parallelismType = 0;\n }\n const arrays = [\n ...vpsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.VPS_NUT,\n nalUnits: vpsUnits\n }\n ] : [],\n ...spsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.SPS_NUT,\n nalUnits: spsUnits\n }\n ] : [],\n ...ppsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.PPS_NUT,\n nalUnits: ppsUnits\n }\n ] : [],\n ...seiUnits.length ? 
[\n {\n arrayCompleteness: 1,\n nalUnitType: extractNalUnitTypeForHevc(seiUnits[0]),\n nalUnits: seiUnits\n }\n ] : []\n ];\n const record = {\n configurationVersion: 1,\n generalProfileSpace: general_profile_space,\n generalTierFlag: general_tier_flag,\n generalProfileIdc: general_profile_idc,\n generalProfileCompatibilityFlags: general_profile_compatibility_flags,\n generalConstraintIndicatorFlags: general_constraint_indicator_flags,\n generalLevelIdc: general_level_idc,\n minSpatialSegmentationIdc: min_spatial_segmentation_idc,\n parallelismType,\n chromaFormatIdc: chroma_format_idc,\n bitDepthLumaMinus8: bit_depth_luma_minus8,\n bitDepthChromaMinus8: bit_depth_chroma_minus8,\n avgFrameRate: 0,\n constantFrameRate: 0,\n numTemporalLayers: sps_max_sub_layers_minus1 + 1,\n temporalIdNested: sps_temporal_id_nesting_flag,\n lengthSizeMinusOne: 3,\n arrays\n };\n return record;\n } catch (error) {\n console.error(\"Error building HEVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar parseProfileTierLevel = (bitstream, maxNumSubLayersMinus1) => {\n const general_profile_space = bitstream.readBits(2);\n const general_tier_flag = bitstream.readBits(1);\n const general_profile_idc = bitstream.readBits(5);\n let general_profile_compatibility_flags = 0;\n for (let i = 0;i < 32; i++) {\n general_profile_compatibility_flags = general_profile_compatibility_flags << 1 | bitstream.readBits(1);\n }\n const general_constraint_indicator_flags = new Uint8Array(6);\n for (let i = 0;i < 6; i++) {\n general_constraint_indicator_flags[i] = bitstream.readBits(8);\n }\n const general_level_idc = bitstream.readBits(8);\n const sub_layer_profile_present_flag = [];\n const sub_layer_level_present_flag = [];\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n sub_layer_profile_present_flag.push(bitstream.readBits(1));\n sub_layer_level_present_flag.push(bitstream.readBits(1));\n }\n if (maxNumSubLayersMinus1 > 0) {\n for (let i = maxNumSubLayersMinus1;i < 8; i++) {\n bitstream.skipBits(2);\n }\n }\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n if (sub_layer_profile_present_flag[i])\n bitstream.skipBits(88);\n if (sub_layer_level_present_flag[i])\n bitstream.skipBits(8);\n }\n return {\n general_profile_space,\n general_tier_flag,\n general_profile_idc,\n general_profile_compatibility_flags,\n general_constraint_indicator_flags,\n general_level_idc\n };\n};\nvar skipScalingListData = (bitstream) => {\n for (let sizeId = 0;sizeId < 4; sizeId++) {\n for (let matrixId = 0;matrixId < (sizeId === 3 ? 
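/* scaling_list_data() from H.265: the 32x32 size class (sizeId 3) carries 2 matrices, smaller sizes carry 6; coefNum = min(64, 1 << (4 + 2 * sizeId)), with an extra DC coefficient delta for sizeId > 1. Everything is read solely to stay bit-aligned. */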
2 : 6); matrixId++) {\n const scaling_list_pred_mode_flag = bitstream.readBits(1);\n if (!scaling_list_pred_mode_flag) {\n readExpGolomb(bitstream);\n } else {\n const coefNum = Math.min(64, 1 << 4 + (sizeId << 1));\n if (sizeId > 1) {\n readSignedExpGolomb(bitstream);\n }\n for (let i = 0;i < coefNum; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n }\n }\n};\nvar skipAllStRefPicSets = (bitstream, num_short_term_ref_pic_sets) => {\n const NumDeltaPocs = [];\n for (let stRpsIdx = 0;stRpsIdx < num_short_term_ref_pic_sets; stRpsIdx++) {\n NumDeltaPocs[stRpsIdx] = skipStRefPicSet(bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs);\n }\n};\nvar skipStRefPicSet = (bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs) => {\n let NumDeltaPocsThis = 0;\n let inter_ref_pic_set_prediction_flag = 0;\n let RefRpsIdx = 0;\n if (stRpsIdx !== 0) {\n inter_ref_pic_set_prediction_flag = bitstream.readBits(1);\n }\n if (inter_ref_pic_set_prediction_flag) {\n if (stRpsIdx === num_short_term_ref_pic_sets) {\n const delta_idx_minus1 = readExpGolomb(bitstream);\n RefRpsIdx = stRpsIdx - (delta_idx_minus1 + 1);\n } else {\n RefRpsIdx = stRpsIdx - 1;\n }\n bitstream.readBits(1);\n readExpGolomb(bitstream);\n const numDelta = NumDeltaPocs[RefRpsIdx] ?? 0;\n for (let j = 0;j <= numDelta; j++) {\n const used_by_curr_pic_flag = bitstream.readBits(1);\n if (!used_by_curr_pic_flag) {\n bitstream.readBits(1);\n }\n }\n NumDeltaPocsThis = NumDeltaPocs[RefRpsIdx];\n } else {\n const num_negative_pics = readExpGolomb(bitstream);\n const num_positive_pics = readExpGolomb(bitstream);\n for (let i = 0;i < num_negative_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n for (let i = 0;i < num_positive_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n NumDeltaPocsThis = num_negative_pics + num_positive_pics;\n }\n return NumDeltaPocsThis;\n};\nvar parseVuiForMinSpatialSegmentationIdc = (bitstream, sps_max_sub_layers_minus1) => {\n if (bitstream.readBits(1)) {\n const aspect_ratio_idc = bitstream.readBits(8);\n if (aspect_ratio_idc === 255) {\n bitstream.readBits(16);\n bitstream.readBits(16);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(3);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n bitstream.readBits(8);\n bitstream.readBits(8);\n bitstream.readBits(8);\n }\n }\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(32);\n bitstream.readBits(32);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n skipHrdParameters(bitstream, true, sps_max_sub_layers_minus1);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n const min_spatial_segmentation_idc = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n return min_spatial_segmentation_idc;\n }\n return 0;\n};\nvar skipHrdParameters = (bitstream, commonInfPresentFlag, maxNumSubLayersMinus1) => {\n let nal_hrd_parameters_present_flag = false;\n let vcl_hrd_parameters_present_flag = false;\n let sub_pic_hrd_params_present_flag = false;\n 
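/* hrd_parameters() from H.265 Annex E is consumed only to keep the bitstream position in sync while locating min_spatial_segmentation_idc in the VUI; every value read here is discarded. */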
if (commonInfPresentFlag) {\n nal_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n vcl_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) {\n sub_pic_hrd_params_present_flag = bitstream.readBits(1) === 1;\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(8);\n bitstream.readBits(5);\n bitstream.readBits(1);\n bitstream.readBits(5);\n }\n bitstream.readBits(4);\n bitstream.readBits(4);\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(4);\n }\n bitstream.readBits(5);\n bitstream.readBits(5);\n bitstream.readBits(5);\n }\n }\n for (let i = 0;i <= maxNumSubLayersMinus1; i++) {\n const fixed_pic_rate_general_flag = bitstream.readBits(1) === 1;\n let fixed_pic_rate_within_cvs_flag = true;\n if (!fixed_pic_rate_general_flag) {\n fixed_pic_rate_within_cvs_flag = bitstream.readBits(1) === 1;\n }\n let low_delay_hrd_flag = false;\n if (fixed_pic_rate_within_cvs_flag) {\n readExpGolomb(bitstream);\n } else {\n low_delay_hrd_flag = bitstream.readBits(1) === 1;\n }\n let CpbCnt = 1;\n if (!low_delay_hrd_flag) {\n const cpb_cnt_minus1 = readExpGolomb(bitstream);\n CpbCnt = cpb_cnt_minus1 + 1;\n }\n if (nal_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n if (vcl_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n }\n};\nvar skipSubLayerHrdParameters = (bitstream, CpbCnt, sub_pic_hrd_params_present_flag) => {\n for (let i = 0;i < CpbCnt; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (sub_pic_hrd_params_present_flag) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n }\n};\nvar serializeHevcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push((record.generalProfileSpace & 3) << 6 | (record.generalTierFlag & 1) << 5 | record.generalProfileIdc & 31);\n bytes.push(record.generalProfileCompatibilityFlags >>> 24 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 16 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 8 & 255);\n bytes.push(record.generalProfileCompatibilityFlags & 255);\n bytes.push(...record.generalConstraintIndicatorFlags);\n bytes.push(record.generalLevelIdc & 255);\n bytes.push(240 | record.minSpatialSegmentationIdc >> 8 & 15);\n bytes.push(record.minSpatialSegmentationIdc & 255);\n bytes.push(252 | record.parallelismType & 3);\n bytes.push(252 | record.chromaFormatIdc & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.avgFrameRate >> 8 & 255);\n bytes.push(record.avgFrameRate & 255);\n bytes.push((record.constantFrameRate & 3) << 6 | (record.numTemporalLayers & 7) << 3 | (record.temporalIdNested & 1) << 2 | record.lengthSizeMinusOne & 3);\n bytes.push(record.arrays.length & 255);\n for (const arr of record.arrays) {\n bytes.push((arr.arrayCompleteness & 1) << 7 | 0 << 6 | arr.nalUnitType & 63);\n bytes.push(arr.nalUnits.length >> 8 & 255);\n bytes.push(arr.nalUnits.length & 255);\n for (const nal of arr.nalUnits) {\n bytes.push(nal.length >> 8 & 255);\n bytes.push(nal.length & 255);\n for (let i = 0;i < nal.length; i++) {\n bytes.push(nal[i]);\n }\n }\n }\n return new Uint8Array(bytes);\n};\nvar parseOpusIdentificationHeader = (bytes) => {\n const view = toDataView(bytes);\n const outputChannelCount = 
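/*\n * OpusHead layout per RFC 7845: "OpusHead" magic (8 bytes), version (1), channel count (1),\n * pre-skip (2, LE), input sample rate (4, LE), output gain (2, LE, Q7.8 dB), channel mapping\n * family (1), then an optional mapping table (stream counts plus one byte per output channel).\n */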
view.getUint8(9);\n const preSkip = view.getUint16(10, true);\n const inputSampleRate = view.getUint32(12, true);\n const outputGain = view.getInt16(16, true);\n const channelMappingFamily = view.getUint8(18);\n let channelMappingTable = null;\n if (channelMappingFamily) {\n channelMappingTable = bytes.subarray(19, 19 + 2 + outputChannelCount);\n }\n return {\n outputChannelCount,\n preSkip,\n inputSampleRate,\n outputGain,\n channelMappingFamily,\n channelMappingTable\n };\n};\nvar FlacBlockType;\n(function(FlacBlockType2) {\n FlacBlockType2[FlacBlockType2[\"STREAMINFO\"] = 0] = \"STREAMINFO\";\n FlacBlockType2[FlacBlockType2[\"VORBIS_COMMENT\"] = 4] = \"VORBIS_COMMENT\";\n FlacBlockType2[FlacBlockType2[\"PICTURE\"] = 6] = \"PICTURE\";\n})(FlacBlockType || (FlacBlockType = {}));\n\n// ../../node_modules/mediabunny/dist/modules/src/custom-coder.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar customVideoEncoders = [];\nvar customAudioEncoders = [];\n\n// ../../node_modules/mediabunny/dist/modules/src/packet.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar PLACEHOLDER_DATA = /* @__PURE__ */ new Uint8Array(0);\n\nclass EncodedPacket {\n constructor(data, type, timestamp, duration, sequenceNumber = -1, byteLength, sideData) {\n this.data = data;\n this.type = type;\n this.timestamp = timestamp;\n this.duration = duration;\n this.sequenceNumber = sequenceNumber;\n if (data === PLACEHOLDER_DATA && byteLength === undefined) {\n throw new Error(\"Internal error: byteLength must be explicitly provided when constructing metadata-only packets.\");\n }\n if (byteLength === undefined) {\n byteLength = data.byteLength;\n }\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (type !== \"key\" && type !== \"delta\") {\n throw new TypeError('type must be either \"key\" or \"delta\".');\n }\n if (!Number.isFinite(timestamp)) {\n throw new TypeError(\"timestamp must be a number.\");\n }\n if (!Number.isFinite(duration) || duration < 0) {\n throw new TypeError(\"duration must be a non-negative number.\");\n }\n if (!Number.isFinite(sequenceNumber)) {\n throw new TypeError(\"sequenceNumber must be a number.\");\n }\n if (!Number.isInteger(byteLength) || byteLength < 0) {\n throw new TypeError(\"byteLength must be a non-negative integer.\");\n }\n if (sideData !== undefined && (typeof sideData !== \"object\" || !sideData)) {\n throw new TypeError(\"sideData, when provided, must be an object.\");\n }\n if (sideData?.alpha !== undefined && !(sideData.alpha instanceof Uint8Array)) {\n throw new TypeError(\"sideData.alpha, when provided, must be a Uint8Array.\");\n }\n if (sideData?.alphaByteLength !== undefined && (!Number.isInteger(sideData.alphaByteLength) || sideData.alphaByteLength < 0)) {\n throw new TypeError(\"sideData.alphaByteLength, when provided, must be a non-negative integer.\");\n }\n this.byteLength = byteLength;\n this.sideData = sideData ?? 
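/* Packet timestamps and durations are stored as float seconds; fromEncodedChunk() below converts WebCodecs' integer microseconds on the way in, and microsecondTimestamp/microsecondDuration convert back on the way out. */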
{};\n if (this.sideData.alpha && this.sideData.alphaByteLength === undefined) {\n this.sideData.alphaByteLength = this.sideData.alpha.byteLength;\n }\n }\n get isMetadataOnly() {\n return this.data === PLACEHOLDER_DATA;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n toEncodedVideoChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n alphaToEncodedVideoChunk(type = this.type) {\n if (!this.sideData.alpha) {\n throw new TypeError(\"This packet does not contain alpha side data.\");\n }\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.sideData.alpha,\n type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n toEncodedAudioChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to an audio chunk.\");\n }\n if (typeof EncodedAudioChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedAudioChunk.\");\n }\n return new EncodedAudioChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n static fromEncodedChunk(chunk, sideData) {\n if (!(chunk instanceof EncodedVideoChunk || chunk instanceof EncodedAudioChunk)) {\n throw new TypeError(\"chunk must be an EncodedVideoChunk or EncodedAudioChunk.\");\n }\n const data = new Uint8Array(chunk.byteLength);\n chunk.copyTo(data);\n return new EncodedPacket(data, chunk.type, chunk.timestamp / 1e6, (chunk.duration ?? 0) / 1e6, undefined, undefined, sideData);\n }\n clone(options) {\n if (options !== undefined && (typeof options !== \"object\" || options === null)) {\n throw new TypeError(\"options, when provided, must be an object.\");\n }\n if (options?.timestamp !== undefined && !Number.isFinite(options.timestamp)) {\n throw new TypeError(\"options.timestamp, when provided, must be a number.\");\n }\n if (options?.duration !== undefined && !Number.isFinite(options.duration)) {\n throw new TypeError(\"options.duration, when provided, must be a number.\");\n }\n return new EncodedPacket(this.data, this.type, options?.timestamp ?? this.timestamp, options?.duration ?? this.duration, this.sequenceNumber, this.byteLength);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/pcm.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar toUlaw = (s16) => {\n const MULAW_MAX = 8191;\n const MULAW_BIAS = 33;\n let number = s16;\n let mask = 4096;\n let sign = 0;\n let position = 12;\n let lsb = 0;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n number += MULAW_BIAS;\n if (number > MULAW_MAX) {\n number = MULAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> position - 4 & 15;\n return ~(sign | position - 5 << 4 | lsb) & 255;\n};\nvar toAlaw = (s16) => {\n const ALAW_MAX = 4095;\n let mask = 2048;\n let sign = 0;\n let position = 11;\n let lsb = 0;\n let number = s16;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n if (number > ALAW_MAX) {\n number = ALAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> (position === 4 ? 1 : position - 4) & 15;\n return (sign | position - 4 << 4 | lsb) ^ 85;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/sample.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\npolyfillSymbolDispose();\nvar lastVideoGcErrorLog = -Infinity;\nvar lastAudioGcErrorLog = -Infinity;\nvar finalizationRegistry = null;\nif (typeof FinalizationRegistry !== \"undefined\") {\n finalizationRegistry = new FinalizationRegistry((value) => {\n const now = Date.now();\n if (value.type === \"video\") {\n if (now - lastVideoGcErrorLog >= 1000) {\n console.error(`A VideoSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your VideoSamples as soon as you're done using them.`);\n lastVideoGcErrorLog = now;\n }\n if (typeof VideoFrame !== \"undefined\" && value.data instanceof VideoFrame) {\n value.data.close();\n }\n } else {\n if (now - lastAudioGcErrorLog >= 1000) {\n console.error(`An AudioSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your AudioSamples as soon as you're done using them.`);\n lastAudioGcErrorLog = now;\n }\n if (typeof AudioData !== \"undefined\" && value.data instanceof AudioData) {\n value.data.close();\n }\n }\n });\n}\n\nclass VideoSample {\n get displayWidth() {\n return this.rotation % 180 === 0 ? this.codedWidth : this.codedHeight;\n }\n get displayHeight() {\n return this.rotation % 180 === 0 ? 
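/* A rotation of 90 or 270 degrees swaps the reported display dimensions relative to the coded ones. */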
this.codedHeight : this.codedWidth;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n get hasAlpha() {\n return this.format && this.format.includes(\"A\");\n }\n constructor(data, init) {\n this._closed = false;\n if (data instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && data instanceof SharedArrayBuffer || ArrayBuffer.isView(data)) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (!(\"format\" in init) || typeof init.format !== \"string\") {\n throw new TypeError(\"init.format must be a string.\");\n }\n if (!Number.isInteger(init.codedWidth) || init.codedWidth <= 0) {\n throw new TypeError(\"init.codedWidth must be a positive integer.\");\n }\n if (!Number.isInteger(init.codedHeight) || init.codedHeight <= 0) {\n throw new TypeError(\"init.codedHeight must be a positive integer.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = toUint8Array(data).slice();\n this.format = init.format;\n this.codedWidth = init.codedWidth;\n this.codedHeight = init.codedHeight;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 0;\n this.colorSpace = new VideoColorSpace(init.colorSpace);\n } else if (typeof VideoFrame !== \"undefined\" && data instanceof VideoFrame) {\n if (init?.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (init?.timestamp !== undefined && !Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp, when provided, must be a number.\");\n }\n if (init?.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = data;\n this.format = data.format;\n this.codedWidth = data.displayWidth;\n this.codedHeight = data.displayHeight;\n this.rotation = init?.rotation ?? 0;\n this.timestamp = init?.timestamp ?? data.timestamp / 1e6;\n this.duration = init?.duration ?? (data.duration ?? 
0) / 1e6;\n this.colorSpace = data.colorSpace;\n } else if (typeof HTMLImageElement !== \"undefined\" && data instanceof HTMLImageElement || typeof SVGImageElement !== \"undefined\" && data instanceof SVGImageElement || typeof ImageBitmap !== \"undefined\" && data instanceof ImageBitmap || typeof HTMLVideoElement !== \"undefined\" && data instanceof HTMLVideoElement || typeof HTMLCanvasElement !== \"undefined\" && data instanceof HTMLCanvasElement || typeof OffscreenCanvas !== \"undefined\" && data instanceof OffscreenCanvas) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n if (typeof VideoFrame !== \"undefined\") {\n return new VideoSample(new VideoFrame(data, {\n timestamp: Math.trunc(init.timestamp * SECOND_TO_MICROSECOND_FACTOR),\n duration: Math.trunc((init.duration ?? 0) * SECOND_TO_MICROSECOND_FACTOR) || undefined\n }), init);\n }\n let width = 0;\n let height = 0;\n if (\"naturalWidth\" in data) {\n width = data.naturalWidth;\n height = data.naturalHeight;\n } else if (\"videoWidth\" in data) {\n width = data.videoWidth;\n height = data.videoHeight;\n } else if (\"width\" in data) {\n width = Number(data.width);\n height = Number(data.height);\n }\n if (!width || !height) {\n throw new TypeError(\"Could not determine dimensions.\");\n }\n const canvas = new OffscreenCanvas(width, height);\n const context = canvas.getContext(\"2d\", {\n alpha: isFirefox(),\n willReadFrequently: true\n });\n assert(context);\n context.drawImage(data, 0, 0);\n this._data = canvas;\n this.format = \"RGBX\";\n this.codedWidth = width;\n this.codedHeight = height;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
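/* Fallback path for canvas/image/video sources when VideoFrame is unavailable: the source is rasterized into an OffscreenCanvas and treated as RGBX with an sRGB-style color space (set just below). */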
0;\n this.colorSpace = new VideoColorSpace({\n matrix: \"rgb\",\n primaries: \"bt709\",\n transfer: \"iec61966-2-1\",\n fullRange: true\n });\n } else {\n throw new TypeError(\"Invalid data type: Must be a BufferSource or CanvasImageSource.\");\n }\n finalizationRegistry?.register(this, { type: \"video\", data: this._data }, this);\n }\n clone() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return new VideoSample(this._data.clone(), {\n timestamp: this.timestamp,\n duration: this.duration,\n rotation: this.rotation\n });\n } else if (this._data instanceof Uint8Array) {\n return new VideoSample(this._data.slice(), {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n } else {\n return new VideoSample(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isVideoFrame(this._data)) {\n this._data.close();\n } else {\n this._data = null;\n }\n this._closed = true;\n }\n allocationSize() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return this._data.allocationSize();\n } else if (this._data instanceof Uint8Array) {\n return this._data.byteLength;\n } else {\n return this.codedWidth * this.codedHeight * 4;\n }\n }\n async copyTo(destination) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n await this._data.copyTo(destination);\n } else if (this._data instanceof Uint8Array) {\n const dest = toUint8Array(destination);\n dest.set(this._data);\n } else {\n const canvas = this._data;\n const context = canvas.getContext(\"2d\");\n assert(context);\n const imageData = context.getImageData(0, 0, this.codedWidth, this.codedHeight);\n const dest = toUint8Array(destination);\n dest.set(imageData.data);\n }\n }\n toVideoFrame() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n } else if (this._data instanceof Uint8Array) {\n return new VideoFrame(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined,\n colorSpace: this.colorSpace\n });\n } else {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n }\n }\n draw(context, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) {\n let sx = 0;\n let sy = 0;\n let sWidth = this.displayWidth;\n let sHeight = this.displayHeight;\n let dx = 0;\n let dy = 0;\n let dWidth = this.displayWidth;\n let dHeight = this.displayHeight;\n if (arg5 !== undefined) {\n sx = arg1;\n sy = arg2;\n sWidth = arg3;\n 
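/* draw() mirrors the CanvasRenderingContext2D.drawImage() overloads: (dx, dy), (dx, dy, dWidth, dHeight), and (sx, sy, sWidth, sHeight, dx, dy[, dWidth, dHeight]); in the six-argument form the destination size defaults to the source size. */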
sHeight = arg4;\n dx = arg5;\n dy = arg6;\n if (arg7 !== undefined) {\n dWidth = arg7;\n dHeight = arg8;\n } else {\n dWidth = sWidth;\n dHeight = sHeight;\n }\n } else {\n dx = arg1;\n dy = arg2;\n if (arg3 !== undefined) {\n dWidth = arg3;\n dHeight = arg4;\n }\n }\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!Number.isFinite(sx)) {\n throw new TypeError(\"sx must be a number.\");\n }\n if (!Number.isFinite(sy)) {\n throw new TypeError(\"sy must be a number.\");\n }\n if (!Number.isFinite(sWidth) || sWidth < 0) {\n throw new TypeError(\"sWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(sHeight) || sHeight < 0) {\n throw new TypeError(\"sHeight must be a non-negative number.\");\n }\n if (!Number.isFinite(dx)) {\n throw new TypeError(\"dx must be a number.\");\n }\n if (!Number.isFinite(dy)) {\n throw new TypeError(\"dy must be a number.\");\n }\n if (!Number.isFinite(dWidth) || dWidth < 0) {\n throw new TypeError(\"dWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(dHeight) || dHeight < 0) {\n throw new TypeError(\"dHeight must be a non-negative number.\");\n }\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n ({ sx, sy, sWidth, sHeight } = this._rotateSourceRegion(sx, sy, sWidth, sHeight, this.rotation));\n const source = this.toCanvasImageSource();\n context.save();\n const centerX = dx + dWidth / 2;\n const centerY = dy + dHeight / 2;\n context.translate(centerX, centerY);\n context.rotate(this.rotation * Math.PI / 180);\n const aspectRatioChange = this.rotation % 180 === 0 ? 1 : dWidth / dHeight;\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.drawImage(source, sx, sy, sWidth, sHeight, -dWidth / 2, -dHeight / 2, dWidth, dHeight);\n context.restore();\n }\n drawWithFit(context, options) {\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (![\"fill\", \"contain\", \"cover\"].includes(options.fit)) {\n throw new TypeError(\"options.fit must be 'fill', 'contain', or 'cover'.\");\n }\n if (options.rotation !== undefined && ![0, 90, 180, 270].includes(options.rotation)) {\n throw new TypeError(\"options.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (options.crop !== undefined) {\n validateCropRectangle(options.crop, \"options.\");\n }\n const canvasWidth = context.canvas.width;\n const canvasHeight = context.canvas.height;\n const rotation = options.rotation ?? this.rotation;\n const [rotatedWidth, rotatedHeight] = rotation % 180 === 0 ? [this.codedWidth, this.codedHeight] : [this.codedHeight, this.codedWidth];\n if (options.crop) {\n clampCropRectangle(options.crop, rotatedWidth, rotatedHeight);\n }\n let dx;\n let dy;\n let newWidth;\n let newHeight;\n const { sx, sy, sWidth, sHeight } = this._rotateSourceRegion(options.crop?.left ?? 0, options.crop?.top ?? 0, options.crop?.width ?? 
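/* Fit semantics below: "fill" stretches to the canvas, "contain" scales by the smaller of the width/height ratios (letterboxing), "cover" scales by the larger (cropping); the optional crop rectangle is given in rotated-frame coordinates and mapped back via _rotateSourceRegion(). */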
rotatedWidth, options.crop?.height ?? rotatedHeight, rotation);\n if (options.fit === \"fill\") {\n dx = 0;\n dy = 0;\n newWidth = canvasWidth;\n newHeight = canvasHeight;\n } else {\n const [sampleWidth, sampleHeight] = options.crop ? [options.crop.width, options.crop.height] : [rotatedWidth, rotatedHeight];\n const scale = options.fit === \"contain\" ? Math.min(canvasWidth / sampleWidth, canvasHeight / sampleHeight) : Math.max(canvasWidth / sampleWidth, canvasHeight / sampleHeight);\n newWidth = sampleWidth * scale;\n newHeight = sampleHeight * scale;\n dx = (canvasWidth - newWidth) / 2;\n dy = (canvasHeight - newHeight) / 2;\n }\n context.save();\n const aspectRatioChange = rotation % 180 === 0 ? 1 : newWidth / newHeight;\n context.translate(canvasWidth / 2, canvasHeight / 2);\n context.rotate(rotation * Math.PI / 180);\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.translate(-canvasWidth / 2, -canvasHeight / 2);\n context.drawImage(this.toCanvasImageSource(), sx, sy, sWidth, sHeight, dx, dy, newWidth, newHeight);\n context.restore();\n }\n _rotateSourceRegion(sx, sy, sWidth, sHeight, rotation) {\n if (rotation === 90) {\n [sx, sy, sWidth, sHeight] = [\n sy,\n this.codedHeight - sx - sWidth,\n sHeight,\n sWidth\n ];\n } else if (rotation === 180) {\n [sx, sy] = [\n this.codedWidth - sx - sWidth,\n this.codedHeight - sy - sHeight\n ];\n } else if (rotation === 270) {\n [sx, sy, sWidth, sHeight] = [\n this.codedWidth - sy - sHeight,\n sx,\n sHeight,\n sWidth\n ];\n }\n return { sx, sy, sWidth, sHeight };\n }\n toCanvasImageSource() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (this._data instanceof Uint8Array) {\n const videoFrame = this.toVideoFrame();\n queueMicrotask(() => videoFrame.close());\n return videoFrame;\n } else {\n return this._data;\n }\n }\n setRotation(newRotation) {\n if (![0, 90, 180, 270].includes(newRotation)) {\n throw new TypeError(\"newRotation must be 0, 90, 180, or 270.\");\n }\n this.rotation = newRotation;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n setDuration(newDuration) {\n if (!Number.isFinite(newDuration) || newDuration < 0) {\n throw new TypeError(\"newDuration must be a non-negative number.\");\n }\n this.duration = newDuration;\n }\n [Symbol.dispose]() {\n this.close();\n }\n}\nvar isVideoFrame = (x) => {\n return typeof VideoFrame !== \"undefined\" && x instanceof VideoFrame;\n};\nvar clampCropRectangle = (crop, outerWidth, outerHeight) => {\n crop.left = Math.min(crop.left, outerWidth);\n crop.top = Math.min(crop.top, outerHeight);\n crop.width = Math.min(crop.width, outerWidth - crop.left);\n crop.height = Math.min(crop.height, outerHeight - crop.top);\n assert(crop.width >= 0);\n assert(crop.height >= 0);\n};\nvar validateCropRectangle = (crop, prefix) => {\n if (!crop || typeof crop !== \"object\") {\n throw new TypeError(prefix + \"crop, when provided, must be an object.\");\n }\n if (!Number.isInteger(crop.left) || crop.left < 0) {\n throw new TypeError(prefix + \"crop.left must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.top) || crop.top < 0) {\n throw new TypeError(prefix + \"crop.top must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.width) || crop.width < 0) {\n throw new TypeError(prefix + \"crop.width must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.height) || 
crop.height < 0) {\n throw new TypeError(prefix + \"crop.height must be a non-negative integer.\");\n }\n};\nvar AUDIO_SAMPLE_FORMATS = new Set([\"f32\", \"f32-planar\", \"s16\", \"s16-planar\", \"s32\", \"s32-planar\", \"u8\", \"u8-planar\"]);\n\nclass AudioSample {\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n constructor(init) {\n this._closed = false;\n if (isAudioData(init)) {\n if (init.format === null) {\n throw new TypeError(\"AudioData with null format is not supported.\");\n }\n this._data = init;\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = init.numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp / 1e6;\n this.duration = init.numberOfFrames / init.sampleRate;\n } else {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"Invalid AudioDataInit: must be an object.\");\n }\n if (!AUDIO_SAMPLE_FORMATS.has(init.format)) {\n throw new TypeError(\"Invalid AudioDataInit: invalid format.\");\n }\n if (!Number.isFinite(init.sampleRate) || init.sampleRate <= 0) {\n throw new TypeError(\"Invalid AudioDataInit: sampleRate must be > 0.\");\n }\n if (!Number.isInteger(init.numberOfChannels) || init.numberOfChannels === 0) {\n throw new TypeError(\"Invalid AudioDataInit: numberOfChannels must be an integer > 0.\");\n }\n if (!Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n const numberOfFrames = init.data.byteLength / (getBytesPerSample(init.format) * init.numberOfChannels);\n if (!Number.isInteger(numberOfFrames)) {\n throw new TypeError(\"Invalid AudioDataInit: data size is not a multiple of frame size.\");\n }\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp;\n this.duration = numberOfFrames / init.sampleRate;\n let dataBuffer;\n if (init.data instanceof ArrayBuffer) {\n dataBuffer = new Uint8Array(init.data);\n } else if (ArrayBuffer.isView(init.data)) {\n dataBuffer = new Uint8Array(init.data.buffer, init.data.byteOffset, init.data.byteLength);\n } else {\n throw new TypeError(\"Invalid AudioDataInit: data is not a BufferSource.\");\n }\n const expectedSize = this.numberOfFrames * this.numberOfChannels * getBytesPerSample(this.format);\n if (dataBuffer.byteLength < expectedSize) {\n throw new TypeError(\"Invalid AudioDataInit: insufficient data size.\");\n }\n this._data = dataBuffer;\n }\n finalizationRegistry?.register(this, { type: \"audio\", data: this._data }, this);\n }\n allocationSize(options) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a 
non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const destFormat = options.format ?? this.format;\n const frameOffset = options.frameOffset ?? 0;\n if (frameOffset >= this.numberOfFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = options.frameCount !== undefined ? options.frameCount : this.numberOfFrames - frameOffset;\n if (copyFrameCount > this.numberOfFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const bytesPerSample = getBytesPerSample(destFormat);\n const isPlanar = formatIsPlanar(destFormat);\n if (isPlanar && options.planeIndex >= this.numberOfChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!isPlanar && options.planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const elementCount = isPlanar ? copyFrameCount : copyFrameCount * this.numberOfChannels;\n return elementCount * bytesPerSample;\n }\n copyTo(destination, options) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const { planeIndex, format, frameCount: optFrameCount, frameOffset: optFrameOffset } = options;\n const destFormat = format ?? this.format;\n if (!destFormat)\n throw new Error(\"Destination format not determined\");\n const numFrames = this.numberOfFrames;\n const numChannels = this.numberOfChannels;\n const frameOffset = optFrameOffset ?? 0;\n if (frameOffset >= numFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = optFrameCount !== undefined ? optFrameCount : numFrames - frameOffset;\n if (copyFrameCount > numFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const destBytesPerSample = getBytesPerSample(destFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar && planeIndex >= numChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!destIsPlanar && planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const destElementCount = destIsPlanar ? 
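/* Planar destination formats receive one channel per copyTo() call (planeIndex selects the channel); interleaved formats receive all channels at once, so the element count is frames * channels. */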
copyFrameCount : copyFrameCount * numChannels;\n const requiredSize = destElementCount * destBytesPerSample;\n if (destination.byteLength < requiredSize) {\n throw new RangeError(\"Destination buffer is too small\");\n }\n const destView = toDataView(destination);\n const writeFn = getWriteFunction(destFormat);\n if (isAudioData(this._data)) {\n if (destIsPlanar) {\n if (destFormat === \"f32-planar\") {\n this._data.copyTo(destination, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: \"f32-planar\"\n });\n } else {\n const tempBuffer = new ArrayBuffer(copyFrameCount * 4);\n const tempArray = new Float32Array(tempBuffer);\n this._data.copyTo(tempArray, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: \"f32-planar\"\n });\n const tempView = new DataView(tempBuffer);\n for (let i = 0;i < copyFrameCount; i++) {\n const destOffset = i * destBytesPerSample;\n const sample = tempView.getFloat32(i * 4, true);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n const numCh = numChannels;\n const temp = new Float32Array(copyFrameCount);\n for (let ch = 0;ch < numCh; ch++) {\n this._data.copyTo(temp, {\n planeIndex: ch,\n frameOffset,\n frameCount: copyFrameCount,\n format: \"f32-planar\"\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const destIndex = i * numCh + ch;\n const destOffset = destIndex * destBytesPerSample;\n writeFn(destView, destOffset, temp[i]);\n }\n }\n }\n } else {\n const uint8Data = this._data;\n const srcView = toDataView(uint8Data);\n const srcFormat = this.format;\n const readFn = getReadFunction(srcFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n for (let i = 0;i < copyFrameCount; i++) {\n if (destIsPlanar) {\n const destOffset = i * destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (planeIndex * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + planeIndex) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n } else {\n for (let ch = 0;ch < numChannels; ch++) {\n const destIndex = i * numChannels + ch;\n const destOffset = destIndex * destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (ch * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + ch) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n }\n }\n }\n }\n }\n clone() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n const sample = new AudioSample(this._data.clone());\n sample.setTimestamp(this.timestamp);\n return sample;\n } else {\n return new AudioSample({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.timestamp,\n data: this._data\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isAudioData(this._data)) {\n this._data.close();\n } else {\n this._data = new Uint8Array(0);\n }\n this._closed = true;\n }\n toAudioData() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n if (this._data.timestamp === this.microsecondTimestamp) {\n return this._data.clone();\n } else {\n if (formatIsPlanar(this.format)) {\n const size 
= this.allocationSize({ planeIndex: 0, format: this.format });\n const data = new ArrayBuffer(size * this.numberOfChannels);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(new Uint8Array(data, i * size, size), { planeIndex: i, format: this.format });\n }\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n } else {\n const data = new ArrayBuffer(this.allocationSize({ planeIndex: 0, format: this.format }));\n this.copyTo(data, { planeIndex: 0, format: this.format });\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n }\n }\n } else {\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data: this._data.buffer instanceof ArrayBuffer ? this._data.buffer : this._data.slice()\n });\n }\n }\n toAudioBuffer() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const audioBuffer = new AudioBuffer({\n numberOfChannels: this.numberOfChannels,\n length: this.numberOfFrames,\n sampleRate: this.sampleRate\n });\n const dataBytes = new Float32Array(this.allocationSize({ planeIndex: 0, format: \"f32-planar\" }) / 4);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(dataBytes, { planeIndex: i, format: \"f32-planar\" });\n audioBuffer.copyToChannel(dataBytes, i);\n }\n return audioBuffer;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n [Symbol.dispose]() {\n this.close();\n }\n static *_fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n yield new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n }\n static fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 
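/* AudioBuffers are split into chunks of at most MAX_FLOAT_COUNT (240000) samples across all channels, i.e. five seconds of mono at 48 kHz, to bound the size of each resulting AudioSample. */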
0;\n let remainingFrames = totalFrames;\n const result = [];\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n const audioSample = new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n result.push(audioSample);\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n return result;\n }\n}\nvar getBytesPerSample = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return 1;\n case \"s16\":\n case \"s16-planar\":\n return 2;\n case \"s32\":\n case \"s32-planar\":\n return 4;\n case \"f32\":\n case \"f32-planar\":\n return 4;\n default:\n throw new Error(\"Unknown AudioSampleFormat\");\n }\n};\nvar formatIsPlanar = (format) => {\n switch (format) {\n case \"u8-planar\":\n case \"s16-planar\":\n case \"s32-planar\":\n case \"f32-planar\":\n return true;\n default:\n return false;\n }\n};\nvar getReadFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset) => (view.getUint8(offset) - 128) / 128;\n case \"s16\":\n case \"s16-planar\":\n return (view, offset) => view.getInt16(offset, true) / 32768;\n case \"s32\":\n case \"s32-planar\":\n return (view, offset) => view.getInt32(offset, true) / 2147483648;\n case \"f32\":\n case \"f32-planar\":\n return (view, offset) => view.getFloat32(offset, true);\n }\n};\nvar getWriteFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset, value) => view.setUint8(offset, clamp((value + 1) * 127.5, 0, 255));\n case \"s16\":\n case \"s16-planar\":\n return (view, offset, value) => view.setInt16(offset, clamp(Math.round(value * 32767), -32768, 32767), true);\n case \"s32\":\n case \"s32-planar\":\n return (view, offset, value) => view.setInt32(offset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), true);\n case \"f32\":\n case \"f32-planar\":\n return (view, offset, value) => view.setFloat32(offset, value, true);\n }\n};\nvar isAudioData = (x) => {\n return typeof AudioData !== \"undefined\" && x instanceof AudioData;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-misc.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar buildIsobmffMimeType = (info) => {\n const base = info.hasVideo ? \"video/\" : info.hasAudio ? \"audio/\" : \"application/\";\n let string = base + (info.isQuickTime ? \"quicktime\" : \"mp4\");\n if (info.codecStrings.length > 0) {\n const uniqueCodecMimeTypes = [...new Set(info.codecStrings)];\n string += `; codecs=\"${uniqueCodecMimeTypes.join(\", \")}\"`;\n }\n return string;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-reader.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_BOX_HEADER_SIZE = 8;\nvar MAX_BOX_HEADER_SIZE = 16;\n\n// ../../node_modules/mediabunny/dist/modules/src/subtitles.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar inlineTimestampRegex = /<(?:(\\d{2}):)?(\\d{2}):(\\d{2}).(\\d{3})>/g;\nvar formatSubtitleTimestamp = (timestamp) => {\n const hours = Math.floor(timestamp / (60 * 60 * 1000));\n const minutes = Math.floor(timestamp % (60 * 60 * 1000) / (60 * 1000));\n const seconds = Math.floor(timestamp % (60 * 1000) / 1000);\n const milliseconds = timestamp % 1000;\n return hours.toString().padStart(2, \"0\") + \":\" + minutes.toString().padStart(2, \"0\") + \":\" + seconds.toString().padStart(2, \"0\") + \".\" + milliseconds.toString().padStart(3, \"0\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-boxes.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass IsobmffBoxWriter {\n constructor(writer) {\n this.writer = writer;\n this.helper = new Uint8Array(8);\n this.helperView = new DataView(this.helper.buffer);\n this.offsets = new WeakMap;\n }\n writeU32(value) {\n this.helperView.setUint32(0, value, false);\n this.writer.write(this.helper.subarray(0, 4));\n }\n writeU64(value) {\n this.helperView.setUint32(0, Math.floor(value / 2 ** 32), false);\n this.helperView.setUint32(4, value, false);\n this.writer.write(this.helper.subarray(0, 8));\n }\n writeAscii(text) {\n for (let i = 0;i < text.length; i++) {\n this.helperView.setUint8(i % 8, text.charCodeAt(i));\n if (i % 8 === 7)\n this.writer.write(this.helper);\n }\n if (text.length % 8 !== 0) {\n this.writer.write(this.helper.subarray(0, text.length % 8));\n }\n }\n writeBox(box) {\n this.offsets.set(box, this.writer.getPos());\n if (box.contents && !box.children) {\n this.writeBoxHeader(box, box.size ?? box.contents.byteLength + 8);\n this.writer.write(box.contents);\n } else {\n const startPos = this.writer.getPos();\n this.writeBoxHeader(box, 0);\n if (box.contents)\n this.writer.write(box.contents);\n if (box.children) {\n for (const child of box.children)\n if (child)\n this.writeBox(child);\n }\n const endPos = this.writer.getPos();\n const size = box.size ?? endPos - startPos;\n this.writer.seek(startPos);\n this.writeBoxHeader(box, size);\n this.writer.seek(endPos);\n }\n }\n writeBoxHeader(box, size) {\n this.writeU32(box.largeSize ? 1 : size);\n this.writeAscii(box.type);\n if (box.largeSize)\n this.writeU64(size);\n }\n measureBoxHeader(box) {\n return 8 + (box.largeSize ? 
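/* ISOBMFF box header: 32-bit size plus 4-character type; when largeSize is set, the 32-bit size field holds 1 and the true 64-bit size follows, hence the extra 8 bytes. */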
8 : 0);\n }\n patchBox(box) {\n const boxOffset = this.offsets.get(box);\n assert(boxOffset !== undefined);\n const endPos = this.writer.getPos();\n this.writer.seek(boxOffset);\n this.writeBox(box);\n this.writer.seek(endPos);\n }\n measureBox(box) {\n if (box.contents && !box.children) {\n const headerSize = this.measureBoxHeader(box);\n return headerSize + box.contents.byteLength;\n } else {\n let result = this.measureBoxHeader(box);\n if (box.contents)\n result += box.contents.byteLength;\n if (box.children) {\n for (const child of box.children)\n if (child)\n result += this.measureBox(child);\n }\n return result;\n }\n }\n}\nvar bytes = /* @__PURE__ */ new Uint8Array(8);\nvar view = /* @__PURE__ */ new DataView(bytes.buffer);\nvar u8 = (value) => {\n return [(value % 256 + 256) % 256];\n};\nvar u16 = (value) => {\n view.setUint16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar i16 = (value) => {\n view.setInt16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar u24 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[1], bytes[2], bytes[3]];\n};\nvar u32 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar i32 = (value) => {\n view.setInt32(0, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar u64 = (value) => {\n view.setUint32(0, Math.floor(value / 2 ** 32), false);\n view.setUint32(4, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7]];\n};\nvar fixed_8_8 = (value) => {\n view.setInt16(0, 2 ** 8 * value, false);\n return [bytes[0], bytes[1]];\n};\nvar fixed_16_16 = (value) => {\n view.setInt32(0, 2 ** 16 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar fixed_2_30 = (value) => {\n view.setInt32(0, 2 ** 30 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar variableUnsignedInt = (value, byteLength) => {\n const bytes2 = [];\n let remaining = value;\n do {\n let byte = remaining & 127;\n remaining >>= 7;\n if (bytes2.length > 0) {\n byte |= 128;\n }\n bytes2.push(byte);\n if (byteLength !== undefined) {\n byteLength--;\n }\n } while (remaining > 0 || byteLength);\n return bytes2.reverse();\n};\nvar ascii = (text, nullTerminated = false) => {\n const bytes2 = Array(text.length).fill(null).map((_, i) => text.charCodeAt(i));\n if (nullTerminated)\n bytes2.push(0);\n return bytes2;\n};\nvar lastPresentedSample = (samples) => {\n let result = null;\n for (const sample of samples) {\n if (!result || sample.timestamp > result.timestamp) {\n result = sample;\n }\n }\n return result;\n};\nvar rotationMatrix = (rotationInDegrees) => {\n const theta = rotationInDegrees * (Math.PI / 180);\n const cosTheta = Math.round(Math.cos(theta));\n const sinTheta = Math.round(Math.sin(theta));\n return [\n cosTheta,\n sinTheta,\n 0,\n -sinTheta,\n cosTheta,\n 0,\n 0,\n 0,\n 1\n ];\n};\nvar IDENTITY_MATRIX = /* @__PURE__ */ rotationMatrix(0);\nvar matrixToBytes = (matrix) => {\n return [\n fixed_16_16(matrix[0]),\n fixed_16_16(matrix[1]),\n fixed_2_30(matrix[2]),\n fixed_16_16(matrix[3]),\n fixed_16_16(matrix[4]),\n fixed_2_30(matrix[5]),\n fixed_16_16(matrix[6]),\n fixed_16_16(matrix[7]),\n fixed_2_30(matrix[8])\n ];\n};\nvar box = (type, contents, children) => ({\n type,\n contents: contents && new Uint8Array(contents.flat(10)),\n children\n});\nvar fullBox = (type, version, flags, contents, children) => box(type, [u8(version), u24(flags), contents ?? 
[]], children);\nvar ftyp = (details) => {\n const minorVersion = 512;\n if (details.isQuickTime) {\n return box(\"ftyp\", [\n ascii(\"qt \"),\n u32(minorVersion),\n ascii(\"qt \")\n ]);\n }\n if (details.fragmented) {\n return box(\"ftyp\", [\n ascii(\"iso5\"),\n u32(minorVersion),\n ascii(\"iso5\"),\n ascii(\"iso6\"),\n ascii(\"mp41\")\n ]);\n }\n return box(\"ftyp\", [\n ascii(\"isom\"),\n u32(minorVersion),\n ascii(\"isom\"),\n details.holdsAvc ? ascii(\"avc1\") : [],\n ascii(\"mp41\")\n ]);\n};\nvar mdat = (reserveLargeSize) => ({ type: \"mdat\", largeSize: reserveLargeSize });\nvar free = (size) => ({ type: \"free\", size });\nvar moov = (muxer) => box(\"moov\", undefined, [\n mvhd(muxer.creationTime, muxer.trackDatas),\n ...muxer.trackDatas.map((x) => trak(x, muxer.creationTime)),\n muxer.isFragmented ? mvex(muxer.trackDatas) : null,\n udta(muxer)\n]);\nvar mvhd = (creationTime, trackDatas) => {\n const duration = intoTimescale(Math.max(0, ...trackDatas.filter((x) => x.samples.length > 0).map((x) => {\n const lastSample = lastPresentedSample(x.samples);\n return lastSample.timestamp + lastSample.duration;\n })), GLOBAL_TIMESCALE);\n const nextTrackId = Math.max(0, ...trackDatas.map((x) => x.track.id)) + 1;\n const needsU64 = !isU32(creationTime) || !isU32(duration);\n const u32OrU64 = needsU64 ? u64 : u32;\n return fullBox(\"mvhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(GLOBAL_TIMESCALE),\n u32OrU64(duration),\n fixed_16_16(1),\n fixed_8_8(1),\n Array(10).fill(0),\n matrixToBytes(IDENTITY_MATRIX),\n Array(24).fill(0),\n u32(nextTrackId)\n ]);\n};\nvar trak = (trackData, creationTime) => {\n const trackMetadata = getTrackMetadata(trackData);\n return box(\"trak\", undefined, [\n tkhd(trackData, creationTime),\n mdia(trackData, creationTime),\n trackMetadata.name !== undefined ? box(\"udta\", undefined, [\n box(\"name\", [\n ...textEncoder.encode(trackMetadata.name)\n ])\n ]) : null\n ]);\n};\nvar tkhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const durationInGlobalTimescale = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, GLOBAL_TIMESCALE);\n const needsU64 = !isU32(creationTime) || !isU32(durationInGlobalTimescale);\n const u32OrU64 = needsU64 ? u64 : u32;\n let matrix;\n if (trackData.type === \"video\") {\n const rotation = trackData.track.metadata.rotation;\n matrix = rotationMatrix(rotation ?? 0);\n } else {\n matrix = IDENTITY_MATRIX;\n }\n let flags = 2;\n if (trackData.track.metadata.disposition?.default !== false) {\n flags |= 1;\n }\n return fullBox(\"tkhd\", +needsU64, flags, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.track.id),\n u32(0),\n u32OrU64(durationInGlobalTimescale),\n Array(8).fill(0),\n u16(0),\n u16(trackData.track.id),\n fixed_8_8(trackData.type === \"audio\" ? 1 : 0),\n u16(0),\n matrixToBytes(matrix),\n fixed_16_16(trackData.type === \"video\" ? trackData.info.width : 0),\n fixed_16_16(trackData.type === \"video\" ? trackData.info.height : 0)\n ]);\n};\nvar mdia = (trackData, creationTime) => box(\"mdia\", undefined, [\n mdhd(trackData, creationTime),\n hdlr(true, TRACK_TYPE_TO_COMPONENT_SUBTYPE[trackData.type], TRACK_TYPE_TO_HANDLER_NAME[trackData.type]),\n minf(trackData)\n]);\nvar mdhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const localDuration = intoTimescale(lastSample ? 
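// Unlike mvhd and tkhd, which express durations in GLOBAL_TIMESCALE, mdhd uses the track's own timescale.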
lastSample.timestamp + lastSample.duration : 0, trackData.timescale);\n const needsU64 = !isU32(creationTime) || !isU32(localDuration);\n const u32OrU64 = needsU64 ? u64 : u32;\n return fullBox(\"mdhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.timescale),\n u32OrU64(localDuration),\n u16(getLanguageCodeInt(trackData.track.metadata.languageCode ?? UNDETERMINED_LANGUAGE)),\n u16(0)\n ]);\n};\nvar TRACK_TYPE_TO_COMPONENT_SUBTYPE = {\n video: \"vide\",\n audio: \"soun\",\n subtitle: \"text\"\n};\nvar TRACK_TYPE_TO_HANDLER_NAME = {\n video: \"MediabunnyVideoHandler\",\n audio: \"MediabunnySoundHandler\",\n subtitle: \"MediabunnyTextHandler\"\n};\nvar hdlr = (hasComponentType, handlerType, name, manufacturer = \"\\x00\\x00\\x00\\x00\") => fullBox(\"hdlr\", 0, 0, [\n hasComponentType ? ascii(\"mhlr\") : u32(0),\n ascii(handlerType),\n ascii(manufacturer),\n u32(0),\n u32(0),\n ascii(name, true)\n]);\nvar minf = (trackData) => box(\"minf\", undefined, [\n TRACK_TYPE_TO_HEADER_BOX[trackData.type](),\n dinf(),\n stbl(trackData)\n]);\nvar vmhd = () => fullBox(\"vmhd\", 0, 1, [\n u16(0),\n u16(0),\n u16(0),\n u16(0)\n]);\nvar smhd = () => fullBox(\"smhd\", 0, 0, [\n u16(0),\n u16(0)\n]);\nvar nmhd = () => fullBox(\"nmhd\", 0, 0);\nvar TRACK_TYPE_TO_HEADER_BOX = {\n video: vmhd,\n audio: smhd,\n subtitle: nmhd\n};\nvar dinf = () => box(\"dinf\", undefined, [\n dref()\n]);\nvar dref = () => fullBox(\"dref\", 0, 0, [\n u32(1)\n], [\n url()\n]);\nvar url = () => fullBox(\"url \", 0, 1);\nvar stbl = (trackData) => {\n const needsCtts = trackData.compositionTimeOffsetTable.length > 1 || trackData.compositionTimeOffsetTable.some((x) => x.sampleCompositionTimeOffset !== 0);\n return box(\"stbl\", undefined, [\n stsd(trackData),\n stts(trackData),\n needsCtts ? ctts(trackData) : null,\n needsCtts ? cslg(trackData) : null,\n stsc(trackData),\n stsz(trackData),\n stco(trackData),\n stss(trackData)\n ]);\n};\nvar stsd = (trackData) => {\n let sampleDescription;\n if (trackData.type === \"video\") {\n sampleDescription = videoSampleDescription(videoCodecToBoxName(trackData.track.source._codec, trackData.info.decoderConfig.codec), trackData);\n } else if (trackData.type === \"audio\") {\n const boxName = audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime);\n assert(boxName);\n sampleDescription = soundSampleDescription(boxName, trackData);\n } else if (trackData.type === \"subtitle\") {\n sampleDescription = subtitleSampleDescription(SUBTITLE_CODEC_TO_BOX_NAME[trackData.track.source._codec], trackData);\n }\n assert(sampleDescription);\n return fullBox(\"stsd\", 0, 0, [\n u32(1)\n ], [\n sampleDescription\n ]);\n};\nvar videoSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1),\n u16(0),\n u16(0),\n Array(12).fill(0),\n u16(trackData.info.width),\n u16(trackData.info.height),\n u32(4718592),\n u32(4718592),\n u32(0),\n u16(1),\n Array(32).fill(0),\n u16(24),\n i16(65535)\n], [\n VIDEO_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData),\n colorSpaceIsComplete(trackData.info.decoderConfig.colorSpace) ? 
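// A colr (nclx) box is appended only when the decoder config carries a complete color space.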
colr(trackData) : null\n]);\nvar colr = (trackData) => box(\"colr\", [\n ascii(\"nclx\"),\n u16(COLOR_PRIMARIES_MAP[trackData.info.decoderConfig.colorSpace.primaries]),\n u16(TRANSFER_CHARACTERISTICS_MAP[trackData.info.decoderConfig.colorSpace.transfer]),\n u16(MATRIX_COEFFICIENTS_MAP[trackData.info.decoderConfig.colorSpace.matrix]),\n u8((trackData.info.decoderConfig.colorSpace.fullRange ? 1 : 0) << 7)\n]);\nvar avcC = (trackData) => trackData.info.decoderConfig && box(\"avcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar hvcC = (trackData) => trackData.info.decoderConfig && box(\"hvcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar vpcC = (trackData) => {\n if (!trackData.info.decoderConfig) {\n return null;\n }\n const decoderConfig = trackData.info.decoderConfig;\n const parts = decoderConfig.codec.split(\".\");\n const profile = Number(parts[1]);\n const level = Number(parts[2]);\n const bitDepth = Number(parts[3]);\n const chromaSubsampling = parts[4] ? Number(parts[4]) : 1;\n const videoFullRangeFlag = parts[8] ? Number(parts[8]) : Number(decoderConfig.colorSpace?.fullRange ?? 0);\n const thirdByte = (bitDepth << 4) + (chromaSubsampling << 1) + videoFullRangeFlag;\n const colourPrimaries = parts[5] ? Number(parts[5]) : decoderConfig.colorSpace?.primaries ? COLOR_PRIMARIES_MAP[decoderConfig.colorSpace.primaries] : 2;\n const transferCharacteristics = parts[6] ? Number(parts[6]) : decoderConfig.colorSpace?.transfer ? TRANSFER_CHARACTERISTICS_MAP[decoderConfig.colorSpace.transfer] : 2;\n const matrixCoefficients = parts[7] ? Number(parts[7]) : decoderConfig.colorSpace?.matrix ? MATRIX_COEFFICIENTS_MAP[decoderConfig.colorSpace.matrix] : 2;\n return fullBox(\"vpcC\", 1, 0, [\n u8(profile),\n u8(level),\n u8(thirdByte),\n u8(colourPrimaries),\n u8(transferCharacteristics),\n u8(matrixCoefficients),\n u16(0)\n ]);\n};\nvar av1C = (trackData) => {\n return box(\"av1C\", generateAv1CodecConfigurationFromCodecString(trackData.info.decoderConfig.codec));\n};\nvar soundSampleDescription = (compressionType, trackData) => {\n let version = 0;\n let contents;\n let sampleSizeInBits = 16;\n if (PCM_AUDIO_CODECS.includes(trackData.track.source._codec)) {\n const codec = trackData.track.source._codec;\n const { sampleSize } = parsePcmCodec(codec);\n sampleSizeInBits = 8 * sampleSize;\n if (sampleSizeInBits > 16) {\n version = 1;\n }\n }\n if (version === 0) {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(sampleSizeInBits),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0)\n ];\n } else {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(Math.min(sampleSizeInBits, 16)),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0),\n u32(1),\n u32(sampleSizeInBits / 8),\n u32(trackData.info.numberOfChannels * sampleSizeInBits / 8),\n u32(2)\n ];\n }\n return box(compressionType, contents, [\n audioCodecToConfigurationBox(trackData.track.source._codec, trackData.muxer.isQuickTime)?.(trackData) ?? 
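// Appends the codec-specific configuration child box (esds, dOps, dfLa, wave, or pcmC); some PCM variants need none.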
null\n ]);\n};\nvar esds = (trackData) => {\n let objectTypeIndication;\n switch (trackData.track.source._codec) {\n case \"aac\":\n {\n objectTypeIndication = 64;\n }\n ;\n break;\n case \"mp3\":\n {\n objectTypeIndication = 107;\n }\n ;\n break;\n case \"vorbis\":\n {\n objectTypeIndication = 221;\n }\n ;\n break;\n default:\n throw new Error(`Unhandled audio codec: ${trackData.track.source._codec}`);\n }\n let bytes2 = [\n ...u8(objectTypeIndication),\n ...u8(21),\n ...u24(0),\n ...u32(0),\n ...u32(0)\n ];\n if (trackData.info.decoderConfig.description) {\n const description = toUint8Array(trackData.info.decoderConfig.description);\n bytes2 = [\n ...bytes2,\n ...u8(5),\n ...variableUnsignedInt(description.byteLength),\n ...description\n ];\n }\n bytes2 = [\n ...u16(1),\n ...u8(0),\n ...u8(4),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2,\n ...u8(6),\n ...u8(1),\n ...u8(2)\n ];\n bytes2 = [\n ...u8(3),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2\n ];\n return fullBox(\"esds\", 0, 0, bytes2);\n};\nvar wave = (trackData) => {\n return box(\"wave\", undefined, [\n frma(trackData),\n enda(trackData),\n box(\"\\x00\\x00\\x00\\x00\")\n ]);\n};\nvar frma = (trackData) => {\n return box(\"frma\", [\n ascii(audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime))\n ]);\n};\nvar enda = (trackData) => {\n const { littleEndian } = parsePcmCodec(trackData.track.source._codec);\n return box(\"enda\", [\n u16(+littleEndian)\n ]);\n};\nvar dOps = (trackData) => {\n let outputChannelCount = trackData.info.numberOfChannels;\n let preSkip = 3840;\n let inputSampleRate = trackData.info.sampleRate;\n let outputGain = 0;\n let channelMappingFamily = 0;\n let channelMappingTable = new Uint8Array(0);\n const description = trackData.info.decoderConfig?.description;\n if (description) {\n assert(description.byteLength >= 18);\n const bytes2 = toUint8Array(description);\n const header = parseOpusIdentificationHeader(bytes2);\n outputChannelCount = header.outputChannelCount;\n preSkip = header.preSkip;\n inputSampleRate = header.inputSampleRate;\n outputGain = header.outputGain;\n channelMappingFamily = header.channelMappingFamily;\n if (header.channelMappingTable) {\n channelMappingTable = header.channelMappingTable;\n }\n }\n return box(\"dOps\", [\n u8(0),\n u8(outputChannelCount),\n u16(preSkip),\n u32(inputSampleRate),\n i16(outputGain),\n u8(channelMappingFamily),\n ...channelMappingTable\n ]);\n};\nvar dfLa = (trackData) => {\n const description = trackData.info.decoderConfig?.description;\n assert(description);\n const bytes2 = toUint8Array(description);\n return fullBox(\"dfLa\", 0, 0, [\n ...bytes2.subarray(4)\n ]);\n};\nvar pcmC = (trackData) => {\n const { littleEndian, sampleSize } = parsePcmCodec(trackData.track.source._codec);\n const formatFlags = +littleEndian;\n return fullBox(\"pcmC\", 0, 0, [\n u8(formatFlags),\n u8(8 * sampleSize)\n ]);\n};\nvar subtitleSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1)\n], [\n SUBTITLE_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData)\n]);\nvar vttC = (trackData) => box(\"vttC\", [\n ...textEncoder.encode(trackData.info.config.description)\n]);\nvar stts = (trackData) => {\n return fullBox(\"stts\", 0, 0, [\n u32(trackData.timeToSampleTable.length),\n trackData.timeToSampleTable.map((x) => [\n u32(x.sampleCount),\n u32(x.sampleDelta)\n ])\n ]);\n};\nvar stss = (trackData) => {\n if (trackData.samples.every((x) => x.type === \"key\"))\n return 
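// Per ISO 14496-12, omitting stss marks every sample as a sync sample, so the box is skipped when all samples are key frames.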
null;\n const keySamples = [...trackData.samples.entries()].filter(([, sample]) => sample.type === \"key\");\n return fullBox(\"stss\", 0, 0, [\n u32(keySamples.length),\n keySamples.map(([index]) => u32(index + 1))\n ]);\n};\nvar stsc = (trackData) => {\n return fullBox(\"stsc\", 0, 0, [\n u32(trackData.compactlyCodedChunkTable.length),\n trackData.compactlyCodedChunkTable.map((x) => [\n u32(x.firstChunk),\n u32(x.samplesPerChunk),\n u32(1)\n ])\n ]);\n};\nvar stsz = (trackData) => {\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n const { sampleSize } = parsePcmCodec(trackData.track.source._codec);\n return fullBox(\"stsz\", 0, 0, [\n u32(sampleSize * trackData.info.numberOfChannels),\n u32(trackData.samples.reduce((acc, x) => acc + intoTimescale(x.duration, trackData.timescale), 0))\n ]);\n }\n return fullBox(\"stsz\", 0, 0, [\n u32(0),\n u32(trackData.samples.length),\n trackData.samples.map((x) => u32(x.size))\n ]);\n};\nvar stco = (trackData) => {\n if (trackData.finalizedChunks.length > 0 && last(trackData.finalizedChunks).offset >= 2 ** 32) {\n return fullBox(\"co64\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u64(x.offset))\n ]);\n }\n return fullBox(\"stco\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u32(x.offset))\n ]);\n};\nvar ctts = (trackData) => {\n return fullBox(\"ctts\", 1, 0, [\n u32(trackData.compositionTimeOffsetTable.length),\n trackData.compositionTimeOffsetTable.map((x) => [\n u32(x.sampleCount),\n i32(x.sampleCompositionTimeOffset)\n ])\n ]);\n};\nvar cslg = (trackData) => {\n let leastDecodeToDisplayDelta = Infinity;\n let greatestDecodeToDisplayDelta = -Infinity;\n let compositionStartTime = Infinity;\n let compositionEndTime = -Infinity;\n assert(trackData.compositionTimeOffsetTable.length > 0);\n assert(trackData.samples.length > 0);\n for (let i = 0;i < trackData.compositionTimeOffsetTable.length; i++) {\n const entry = trackData.compositionTimeOffsetTable[i];\n leastDecodeToDisplayDelta = Math.min(leastDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n greatestDecodeToDisplayDelta = Math.max(greatestDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n }\n for (let i = 0;i < trackData.samples.length; i++) {\n const sample = trackData.samples[i];\n compositionStartTime = Math.min(compositionStartTime, intoTimescale(sample.timestamp, trackData.timescale));\n compositionEndTime = Math.max(compositionEndTime, intoTimescale(sample.timestamp + sample.duration, trackData.timescale));\n }\n const compositionToDtsShift = Math.max(-leastDecodeToDisplayDelta, 0);\n if (compositionEndTime >= 2 ** 31) {\n return null;\n }\n return fullBox(\"cslg\", 0, 0, [\n i32(compositionToDtsShift),\n i32(leastDecodeToDisplayDelta),\n i32(greatestDecodeToDisplayDelta),\n i32(compositionStartTime),\n i32(compositionEndTime)\n ]);\n};\nvar mvex = (trackDatas) => {\n return box(\"mvex\", undefined, trackDatas.map(trex));\n};\nvar trex = (trackData) => {\n return fullBox(\"trex\", 0, 0, [\n u32(trackData.track.id),\n u32(1),\n u32(0),\n u32(0),\n u32(0)\n ]);\n};\nvar moof = (sequenceNumber, trackDatas) => {\n return box(\"moof\", undefined, [\n mfhd(sequenceNumber),\n ...trackDatas.map(traf)\n ]);\n};\nvar mfhd = (sequenceNumber) => {\n return fullBox(\"mfhd\", 0, 0, [\n u32(sequenceNumber)\n ]);\n};\nvar fragmentSampleFlags = (sample) => {\n let byte1 = 0;\n let byte2 = 0;\n const byte3 = 0;\n const byte4 = 0;\n const sampleIsDifferenceSample = 
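// Builds the ISO 14496-12 sample flags word: sample_depends_on (1 = delta, 2 = key) in the first byte, the non-sync-sample bit in the second.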
sample.type === \"delta\";\n byte2 |= +sampleIsDifferenceSample;\n if (sampleIsDifferenceSample) {\n byte1 |= 1;\n } else {\n byte1 |= 2;\n }\n return byte1 << 24 | byte2 << 16 | byte3 << 8 | byte4;\n};\nvar traf = (trackData) => {\n return box(\"traf\", undefined, [\n tfhd(trackData),\n tfdt(trackData),\n trun(trackData)\n ]);\n};\nvar tfhd = (trackData) => {\n assert(trackData.currentChunk);\n let tfFlags = 0;\n tfFlags |= 8;\n tfFlags |= 16;\n tfFlags |= 32;\n tfFlags |= 131072;\n const referenceSample = trackData.currentChunk.samples[1] ?? trackData.currentChunk.samples[0];\n const referenceSampleInfo = {\n duration: referenceSample.timescaleUnitsToNextSample,\n size: referenceSample.size,\n flags: fragmentSampleFlags(referenceSample)\n };\n return fullBox(\"tfhd\", 0, tfFlags, [\n u32(trackData.track.id),\n u32(referenceSampleInfo.duration),\n u32(referenceSampleInfo.size),\n u32(referenceSampleInfo.flags)\n ]);\n};\nvar tfdt = (trackData) => {\n assert(trackData.currentChunk);\n return fullBox(\"tfdt\", 1, 0, [\n u64(intoTimescale(trackData.currentChunk.startTimestamp, trackData.timescale))\n ]);\n};\nvar trun = (trackData) => {\n assert(trackData.currentChunk);\n const allSampleDurations = trackData.currentChunk.samples.map((x) => x.timescaleUnitsToNextSample);\n const allSampleSizes = trackData.currentChunk.samples.map((x) => x.size);\n const allSampleFlags = trackData.currentChunk.samples.map(fragmentSampleFlags);\n const allSampleCompositionTimeOffsets = trackData.currentChunk.samples.map((x) => intoTimescale(x.timestamp - x.decodeTimestamp, trackData.timescale));\n const uniqueSampleDurations = new Set(allSampleDurations);\n const uniqueSampleSizes = new Set(allSampleSizes);\n const uniqueSampleFlags = new Set(allSampleFlags);\n const uniqueSampleCompositionTimeOffsets = new Set(allSampleCompositionTimeOffsets);\n const firstSampleFlagsPresent = uniqueSampleFlags.size === 2 && allSampleFlags[0] !== allSampleFlags[1];\n const sampleDurationPresent = uniqueSampleDurations.size > 1;\n const sampleSizePresent = uniqueSampleSizes.size > 1;\n const sampleFlagsPresent = !firstSampleFlagsPresent && uniqueSampleFlags.size > 1;\n const sampleCompositionTimeOffsetsPresent = uniqueSampleCompositionTimeOffsets.size > 1 || [...uniqueSampleCompositionTimeOffsets].some((x) => x !== 0);\n let flags = 0;\n flags |= 1;\n flags |= 4 * +firstSampleFlagsPresent;\n flags |= 256 * +sampleDurationPresent;\n flags |= 512 * +sampleSizePresent;\n flags |= 1024 * +sampleFlagsPresent;\n flags |= 2048 * +sampleCompositionTimeOffsetsPresent;\n return fullBox(\"trun\", 1, flags, [\n u32(trackData.currentChunk.samples.length),\n u32(trackData.currentChunk.offset - trackData.currentChunk.moofOffset || 0),\n firstSampleFlagsPresent ? u32(allSampleFlags[0]) : [],\n trackData.currentChunk.samples.map((_, i) => [\n sampleDurationPresent ? u32(allSampleDurations[i]) : [],\n sampleSizePresent ? u32(allSampleSizes[i]) : [],\n sampleFlagsPresent ? u32(allSampleFlags[i]) : [],\n sampleCompositionTimeOffsetsPresent ? 
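// trun only materializes the per-sample columns whose values actually vary; uniform values fall back to the defaults written in tfhd.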
i32(allSampleCompositionTimeOffsets[i]) : []\n ])\n ]);\n};\nvar mfra = (trackDatas) => {\n return box(\"mfra\", undefined, [\n ...trackDatas.map(tfra),\n mfro()\n ]);\n};\nvar tfra = (trackData, trackIndex) => {\n const version = 1;\n return fullBox(\"tfra\", version, 0, [\n u32(trackData.track.id),\n u32(63),\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((chunk) => [\n u64(intoTimescale(chunk.samples[0].timestamp, trackData.timescale)),\n u64(chunk.moofOffset),\n u32(trackIndex + 1),\n u32(1),\n u32(1)\n ])\n ]);\n};\nvar mfro = () => {\n return fullBox(\"mfro\", 0, 0, [\n u32(0)\n ]);\n};\nvar vtte = () => box(\"vtte\");\nvar vttc = (payload, timestamp, identifier, settings, sourceId) => box(\"vttc\", undefined, [\n sourceId !== null ? box(\"vsid\", [i32(sourceId)]) : null,\n identifier !== null ? box(\"iden\", [...textEncoder.encode(identifier)]) : null,\n timestamp !== null ? box(\"ctim\", [...textEncoder.encode(formatSubtitleTimestamp(timestamp))]) : null,\n settings !== null ? box(\"sttg\", [...textEncoder.encode(settings)]) : null,\n box(\"payl\", [...textEncoder.encode(payload)])\n]);\nvar vtta = (notes) => box(\"vtta\", [...textEncoder.encode(notes)]);\nvar udta = (muxer) => {\n const boxes = [];\n const metadataFormat = muxer.format._options.metadataFormat ?? \"auto\";\n const metadataTags = muxer.output._metadataTags;\n if (metadataFormat === \"mdir\" || metadataFormat === \"auto\" && !muxer.isQuickTime) {\n const metaBox = metaMdir(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"mdta\") {\n const metaBox = metaMdta(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"udta\" || metadataFormat === \"auto\" && muxer.isQuickTime) {\n addQuickTimeMetadataTagBoxes(boxes, muxer.output._metadataTags);\n }\n if (boxes.length === 0) {\n return null;\n }\n return box(\"udta\", undefined, boxes);\n};\nvar addQuickTimeMetadataTagBoxes = (boxes, tags) => {\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9nam\", value));\n }\n ;\n break;\n case \"description\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9des\", value));\n }\n ;\n break;\n case \"artist\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9ART\", value));\n }\n ;\n break;\n case \"album\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9alb\", value));\n }\n ;\n break;\n case \"albumArtist\":\n {\n boxes.push(metadataTagStringBoxShort(\"albr\", value));\n }\n ;\n break;\n case \"genre\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9gen\", value));\n }\n ;\n break;\n case \"date\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9day\", value.toISOString().slice(0, 10)));\n }\n ;\n break;\n case \"comment\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9cmt\", value));\n }\n ;\n break;\n case \"lyrics\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9lyr\", value));\n }\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n case \"discNumber\":\n case \"discsTotal\":\n case \"trackNumber\":\n case \"tracksTotal\":\n case \"images\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == null || key.length !== 4 || boxes.some((x) => x.type === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n boxes.push(metadataTagStringBoxShort(key, value));\n } else if (value instanceof Uint8Array) {\n boxes.push(box(key, 
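// Binary raw values pass through verbatim, keyed by their 4-character box type.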
Array.from(value)));\n }\n }\n }\n};\nvar metadataTagStringBoxShort = (name, value) => {\n const encoded = textEncoder.encode(value);\n return box(name, [\n u16(encoded.length),\n u16(getLanguageCodeInt(\"und\")),\n Array.from(encoded)\n ]);\n};\nvar DATA_BOX_MIME_TYPE_MAP = {\n \"image/jpeg\": 13,\n \"image/png\": 14,\n \"image/bmp\": 27\n};\nvar generateMetadataPairs = (tags, isMdta) => {\n const pairs = [];\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n pairs.push({ key: isMdta ? \"title\" : \"\u00A9nam\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"description\":\n {\n pairs.push({ key: isMdta ? \"description\" : \"\u00A9des\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"artist\":\n {\n pairs.push({ key: isMdta ? \"artist\" : \"\u00A9ART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"album\":\n {\n pairs.push({ key: isMdta ? \"album\" : \"\u00A9alb\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"albumArtist\":\n {\n pairs.push({ key: isMdta ? \"album_artist\" : \"aART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"comment\":\n {\n pairs.push({ key: isMdta ? \"comment\" : \"\u00A9cmt\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"genre\":\n {\n pairs.push({ key: isMdta ? \"genre\" : \"\u00A9gen\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"lyrics\":\n {\n pairs.push({ key: isMdta ? \"lyrics\" : \"\u00A9lyr\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"date\":\n {\n pairs.push({\n key: isMdta ? \"date\" : \"\u00A9day\",\n value: dataStringBoxLong(value.toISOString().slice(0, 10))\n });\n }\n ;\n break;\n case \"images\":\n {\n for (const image of value) {\n if (image.kind !== \"coverFront\") {\n continue;\n }\n pairs.push({ key: \"covr\", value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[image.mimeType] ?? 0),\n u32(0),\n Array.from(image.data)\n ]) });\n }\n }\n ;\n break;\n case \"trackNumber\":\n {\n if (isMdta) {\n const string = tags.tracksTotal !== undefined ? `${value}/${tags.tracksTotal}` : value.toString();\n pairs.push({ key: \"track\", value: dataStringBoxLong(string) });\n } else {\n pairs.push({ key: \"trkn\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.tracksTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"discNumber\":\n {\n if (!isMdta) {\n pairs.push({ key: \"disc\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.discsTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"tracksTotal\":\n case \"discsTotal\":\n {}\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == null || !isMdta && key.length !== 4 || pairs.some((x) => x.key === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n pairs.push({ key, value: dataStringBoxLong(value) });\n } else if (value instanceof Uint8Array) {\n pairs.push({ key, value: box(\"data\", [\n u32(0),\n u32(0),\n Array.from(value)\n ]) });\n } else if (value instanceof RichImageData) {\n pairs.push({ key, value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[value.mimeType] ?? 
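// iTunes-style data atoms start with a type indicator: 1 = UTF-8 text (see dataStringBoxLong), 13/14/27 = JPEG/PNG/BMP per DATA_BOX_MIME_TYPE_MAP.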
0),\n u32(0),\n Array.from(value.data)\n ]) });\n }\n }\n }\n return pairs;\n};\nvar metaMdir = (tags) => {\n const pairs = generateMetadataPairs(tags, false);\n if (pairs.length === 0) {\n return null;\n }\n return fullBox(\"meta\", 0, 0, undefined, [\n hdlr(false, \"mdir\", \"\", \"appl\"),\n box(\"ilst\", undefined, pairs.map((pair) => box(pair.key, undefined, [pair.value])))\n ]);\n};\nvar metaMdta = (tags) => {\n const pairs = generateMetadataPairs(tags, true);\n if (pairs.length === 0) {\n return null;\n }\n return box(\"meta\", undefined, [\n hdlr(false, \"mdta\", \"\"),\n fullBox(\"keys\", 0, 0, [\n u32(pairs.length)\n ], pairs.map((pair) => box(\"mdta\", [\n ...textEncoder.encode(pair.key)\n ]))),\n box(\"ilst\", undefined, pairs.map((pair, i) => {\n const boxName = String.fromCharCode(...u32(i + 1));\n return box(boxName, undefined, [pair.value]);\n }))\n ]);\n};\nvar dataStringBoxLong = (value) => {\n return box(\"data\", [\n u32(1),\n u32(0),\n ...textEncoder.encode(value)\n ]);\n};\nvar videoCodecToBoxName = (codec, fullCodecString) => {\n switch (codec) {\n case \"avc\":\n return fullCodecString.startsWith(\"avc3\") ? \"avc3\" : \"avc1\";\n case \"hevc\":\n return \"hvc1\";\n case \"vp8\":\n return \"vp08\";\n case \"vp9\":\n return \"vp09\";\n case \"av1\":\n return \"av01\";\n }\n};\nvar VIDEO_CODEC_TO_CONFIGURATION_BOX = {\n avc: avcC,\n hevc: hvcC,\n vp8: vpcC,\n vp9: vpcC,\n av1: av1C\n};\nvar audioCodecToBoxName = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return \"mp4a\";\n case \"mp3\":\n return \"mp4a\";\n case \"opus\":\n return \"Opus\";\n case \"vorbis\":\n return \"mp4a\";\n case \"flac\":\n return \"fLaC\";\n case \"ulaw\":\n return \"ulaw\";\n case \"alaw\":\n return \"alaw\";\n case \"pcm-u8\":\n return \"raw \";\n case \"pcm-s8\":\n return \"sowt\";\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s16\":\n return \"sowt\";\n case \"pcm-s16be\":\n return \"twos\";\n case \"pcm-s24\":\n return \"in24\";\n case \"pcm-s24be\":\n return \"in24\";\n case \"pcm-s32\":\n return \"in32\";\n case \"pcm-s32be\":\n return \"in32\";\n case \"pcm-f32\":\n return \"fl32\";\n case \"pcm-f32be\":\n return \"fl32\";\n case \"pcm-f64\":\n return \"fl64\";\n case \"pcm-f64be\":\n return \"fl64\";\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return \"ipcm\";\n case \"pcm-s16be\":\n return \"ipcm\";\n case \"pcm-s24\":\n return \"ipcm\";\n case \"pcm-s24be\":\n return \"ipcm\";\n case \"pcm-s32\":\n return \"ipcm\";\n case \"pcm-s32be\":\n return \"ipcm\";\n case \"pcm-f32\":\n return \"fpcm\";\n case \"pcm-f32be\":\n return \"fpcm\";\n case \"pcm-f64\":\n return \"fpcm\";\n case \"pcm-f64be\":\n return \"fpcm\";\n }\n }\n};\nvar audioCodecToConfigurationBox = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return esds;\n case \"mp3\":\n return esds;\n case \"opus\":\n return dOps;\n case \"vorbis\":\n return esds;\n case \"flac\":\n return dfLa;\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s24\":\n return wave;\n case \"pcm-s24be\":\n return wave;\n case \"pcm-s32\":\n return wave;\n case \"pcm-s32be\":\n return wave;\n case \"pcm-f32\":\n return wave;\n case \"pcm-f32be\":\n return wave;\n case \"pcm-f64\":\n return wave;\n case \"pcm-f64be\":\n return wave;\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return pcmC;\n case \"pcm-s16be\":\n return pcmC;\n case \"pcm-s24\":\n return pcmC;\n case \"pcm-s24be\":\n return pcmC;\n case \"pcm-s32\":\n return pcmC;\n case \"pcm-s32be\":\n return pcmC;\n case 
\"pcm-f32\":\n return pcmC;\n case \"pcm-f32be\":\n return pcmC;\n case \"pcm-f64\":\n return pcmC;\n case \"pcm-f64be\":\n return pcmC;\n }\n }\n return null;\n};\nvar SUBTITLE_CODEC_TO_BOX_NAME = {\n webvtt: \"wvtt\"\n};\nvar SUBTITLE_CODEC_TO_CONFIGURATION_BOX = {\n webvtt: vttC\n};\nvar getLanguageCodeInt = (code) => {\n assert(code.length === 3);\n let language = 0;\n for (let i = 0;i < 3; i++) {\n language <<= 5;\n language += code.charCodeAt(i) - 96;\n }\n return language;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/writer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Writer {\n constructor() {\n this.ensureMonotonicity = false;\n this.trackedWrites = null;\n this.trackedStart = -1;\n this.trackedEnd = -1;\n }\n start() {}\n maybeTrackWrites(data) {\n if (!this.trackedWrites) {\n return;\n }\n let pos = this.getPos();\n if (pos < this.trackedStart) {\n if (pos + data.byteLength <= this.trackedStart) {\n return;\n }\n data = data.subarray(this.trackedStart - pos);\n pos = 0;\n }\n const neededSize = pos + data.byteLength - this.trackedStart;\n let newLength = this.trackedWrites.byteLength;\n while (newLength < neededSize) {\n newLength *= 2;\n }\n if (newLength !== this.trackedWrites.byteLength) {\n const copy = new Uint8Array(newLength);\n copy.set(this.trackedWrites, 0);\n this.trackedWrites = copy;\n }\n this.trackedWrites.set(data, pos - this.trackedStart);\n this.trackedEnd = Math.max(this.trackedEnd, pos + data.byteLength);\n }\n startTrackingWrites() {\n this.trackedWrites = new Uint8Array(2 ** 10);\n this.trackedStart = this.getPos();\n this.trackedEnd = this.trackedStart;\n }\n stopTrackingWrites() {\n if (!this.trackedWrites) {\n throw new Error(\"Internal error: Can't get tracked writes since nothing was tracked.\");\n }\n const slice = this.trackedWrites.subarray(0, this.trackedEnd - this.trackedStart);\n const result = {\n data: slice,\n start: this.trackedStart,\n end: this.trackedEnd\n };\n this.trackedWrites = null;\n return result;\n }\n}\nvar ARRAY_BUFFER_INITIAL_SIZE = 2 ** 16;\nvar ARRAY_BUFFER_MAX_SIZE = 2 ** 32;\n\nclass BufferTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.maxPos = 0;\n this.target = target;\n this.supportsResize = \"resize\" in new ArrayBuffer(0);\n if (this.supportsResize) {\n try {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE, { maxByteLength: ARRAY_BUFFER_MAX_SIZE });\n } catch {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n this.supportsResize = false;\n }\n } else {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n }\n this.bytes = new Uint8Array(this.buffer);\n }\n ensureSize(size) {\n let newLength = this.buffer.byteLength;\n while (newLength < size)\n newLength *= 2;\n if (newLength === this.buffer.byteLength)\n return;\n if (newLength > ARRAY_BUFFER_MAX_SIZE) {\n throw new Error(`ArrayBuffer exceeded maximum size of ${ARRAY_BUFFER_MAX_SIZE} bytes. 
Please consider using another` + ` target.`);\n }\n if (this.supportsResize) {\n this.buffer.resize(newLength);\n } else {\n const newBuffer = new ArrayBuffer(newLength);\n const newBytes = new Uint8Array(newBuffer);\n newBytes.set(this.bytes, 0);\n this.buffer = newBuffer;\n this.bytes = newBytes;\n }\n }\n write(data) {\n this.maybeTrackWrites(data);\n this.ensureSize(this.pos + data.byteLength);\n this.bytes.set(data, this.pos);\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.maxPos = Math.max(this.maxPos, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {}\n async finalize() {\n this.ensureSize(this.pos);\n this.target.buffer = this.buffer.slice(0, Math.max(this.maxPos, this.pos));\n }\n async close() {}\n getSlice(start, end) {\n return this.bytes.slice(start, end);\n }\n}\nvar DEFAULT_CHUNK_SIZE = 2 ** 24;\nvar MAX_CHUNKS_AT_ONCE = 2;\n\nclass StreamTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.sections = [];\n this.lastWriteEnd = 0;\n this.lastFlushEnd = 0;\n this.writer = null;\n this.chunks = [];\n this.target = target;\n this.chunked = target._options.chunked ?? false;\n this.chunkSize = target._options.chunkSize ?? DEFAULT_CHUNK_SIZE;\n }\n start() {\n this.writer = this.target._writable.getWriter();\n }\n write(data) {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n this.maybeTrackWrites(data);\n this.sections.push({\n data: data.slice(),\n start: this.pos\n });\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.lastWriteEnd = Math.max(this.lastWriteEnd, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n assert(this.writer);\n if (this.sections.length === 0)\n return;\n const chunks = [];\n const sorted = [...this.sections].sort((a, b) => a.start - b.start);\n chunks.push({\n start: sorted[0].start,\n size: sorted[0].data.byteLength\n });\n for (let i = 1;i < sorted.length; i++) {\n const lastChunk = chunks[chunks.length - 1];\n const section = sorted[i];\n if (section.start <= lastChunk.start + lastChunk.size) {\n lastChunk.size = Math.max(lastChunk.size, section.start + section.data.byteLength - lastChunk.start);\n } else {\n chunks.push({\n start: section.start,\n size: section.data.byteLength\n });\n }\n }\n for (const chunk of chunks) {\n chunk.data = new Uint8Array(chunk.size);\n for (const section of this.sections) {\n if (chunk.start <= section.start && section.start < chunk.start + chunk.size) {\n chunk.data.set(section.data, section.start - chunk.start);\n }\n }\n if (this.writer.desiredSize !== null && this.writer.desiredSize <= 0) {\n await this.writer.ready;\n }\n if (this.chunked) {\n this.writeDataIntoChunks(chunk.data, chunk.start);\n this.tryToFlushChunks();\n } else {\n if (this.ensureMonotonicity && chunk.start !== this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data,\n position: chunk.start\n });\n this.lastFlushEnd = chunk.start + chunk.data.byteLength;\n }\n }\n this.sections.length = 
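// Every queued section has been flushed above; reset the section buffer.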
0;\n }\n writeDataIntoChunks(data, position) {\n let chunkIndex = this.chunks.findIndex((x) => x.start <= position && position < x.start + this.chunkSize);\n if (chunkIndex === -1)\n chunkIndex = this.createChunk(position);\n const chunk = this.chunks[chunkIndex];\n const relativePosition = position - chunk.start;\n const toWrite = data.subarray(0, Math.min(this.chunkSize - relativePosition, data.byteLength));\n chunk.data.set(toWrite, relativePosition);\n const section = {\n start: relativePosition,\n end: relativePosition + toWrite.byteLength\n };\n this.insertSectionIntoChunk(chunk, section);\n if (chunk.written[0].start === 0 && chunk.written[0].end === this.chunkSize) {\n chunk.shouldFlush = true;\n }\n if (this.chunks.length > MAX_CHUNKS_AT_ONCE) {\n for (let i = 0;i < this.chunks.length - 1; i++) {\n this.chunks[i].shouldFlush = true;\n }\n this.tryToFlushChunks();\n }\n if (toWrite.byteLength < data.byteLength) {\n this.writeDataIntoChunks(data.subarray(toWrite.byteLength), position + toWrite.byteLength);\n }\n }\n insertSectionIntoChunk(chunk, section) {\n let low = 0;\n let high = chunk.written.length - 1;\n let index = -1;\n while (low <= high) {\n const mid = Math.floor(low + (high - low + 1) / 2);\n if (chunk.written[mid].start <= section.start) {\n low = mid + 1;\n index = mid;\n } else {\n high = mid - 1;\n }\n }\n chunk.written.splice(index + 1, 0, section);\n if (index === -1 || chunk.written[index].end < section.start)\n index++;\n while (index < chunk.written.length - 1 && chunk.written[index].end >= chunk.written[index + 1].start) {\n chunk.written[index].end = Math.max(chunk.written[index].end, chunk.written[index + 1].end);\n chunk.written.splice(index + 1, 1);\n }\n }\n createChunk(includesPosition) {\n const start = Math.floor(includesPosition / this.chunkSize) * this.chunkSize;\n const chunk = {\n start,\n data: new Uint8Array(this.chunkSize),\n written: [],\n shouldFlush: false\n };\n this.chunks.push(chunk);\n this.chunks.sort((a, b) => a.start - b.start);\n return this.chunks.indexOf(chunk);\n }\n tryToFlushChunks(force = false) {\n assert(this.writer);\n for (let i = 0;i < this.chunks.length; i++) {\n const chunk = this.chunks[i];\n if (!chunk.shouldFlush && !force)\n continue;\n for (const section of chunk.written) {\n const position = chunk.start + section.start;\n if (this.ensureMonotonicity && position !== this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data.subarray(section.start, section.end),\n position\n });\n this.lastFlushEnd = chunk.start + section.end;\n }\n this.chunks.splice(i--, 1);\n }\n }\n finalize() {\n if (this.chunked) {\n this.tryToFlushChunks(true);\n }\n assert(this.writer);\n return this.writer.close();\n }\n async close() {\n return this.writer?.close();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/target.js\nvar nodeAlias = (() => ({}));\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass Target {\n constructor() {\n this._output = null;\n this.onwrite = null;\n }\n}\n\nclass BufferTarget extends Target {\n constructor() {\n super(...arguments);\n this.buffer = null;\n }\n _createWriter() {\n return new BufferTargetWriter(this);\n }\n}\n\nclass StreamTarget extends Target {\n constructor(writable, options = {}) {\n super();\n if (!(writable instanceof WritableStream)) {\n throw new TypeError(\"StreamTarget requires a WritableStream instance.\");\n }\n if (options != null && typeof options !== \"object\") {\n throw new TypeError(\"StreamTarget options, when provided, must be an object.\");\n }\n if (options.chunked !== undefined && typeof options.chunked !== \"boolean\") {\n throw new TypeError(\"options.chunked, when provided, must be a boolean.\");\n }\n if (options.chunkSize !== undefined && (!Number.isInteger(options.chunkSize) || options.chunkSize < 1024)) {\n throw new TypeError(\"options.chunkSize, when provided, must be an integer and not smaller than 1024.\");\n }\n this._writable = writable;\n this._options = options;\n }\n _createWriter() {\n return new StreamTargetWriter(this);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-muxer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar GLOBAL_TIMESCALE = 1000;\nvar TIMESTAMP_OFFSET = 2082844800;\nvar getTrackMetadata = (trackData) => {\n const metadata = {};\n const track = trackData.track;\n if (track.metadata.name !== undefined) {\n metadata.name = track.metadata.name;\n }\n return metadata;\n};\nvar intoTimescale = (timeInSeconds, timescale, round = true) => {\n const value = timeInSeconds * timescale;\n return round ? Math.round(value) : value;\n};\n\nclass IsobmffMuxer extends Muxer {\n constructor(output, format) {\n super(output);\n this.auxTarget = new BufferTarget;\n this.auxWriter = this.auxTarget._createWriter();\n this.auxBoxWriter = new IsobmffBoxWriter(this.auxWriter);\n this.mdat = null;\n this.ftypSize = null;\n this.trackDatas = [];\n this.allTracksKnown = promiseWithResolvers();\n this.creationTime = Math.floor(Date.now() / 1000) + TIMESTAMP_OFFSET;\n this.finalizedChunks = [];\n this.nextFragmentNumber = 1;\n this.maxWrittenTimestamp = -Infinity;\n this.format = format;\n this.writer = output._writer;\n this.boxWriter = new IsobmffBoxWriter(this.writer);\n this.isQuickTime = format instanceof MovOutputFormat;\n const fastStartDefault = this.writer instanceof BufferTargetWriter ? \"in-memory\" : false;\n this.fastStart = format._options.fastStart ?? fastStartDefault;\n this.isFragmented = this.fastStart === \"fragmented\";\n if (this.fastStart === \"in-memory\" || this.isFragmented) {\n this.writer.ensureMonotonicity = true;\n }\n this.minimumFragmentDuration = format._options.minimumFragmentDuration ?? 
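// Defaults to 1 second; a fragment is finalized only once it spans at least this duration and a key frame is queued on every track (see addSampleToTrack).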
1;\n }\n async start() {\n const release = await this.mutex.acquire();\n const holdsAvc = this.output._tracks.some((x) => x.type === \"video\" && x.source._codec === \"avc\");\n {\n if (this.format._options.onFtyp) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(ftyp({\n isQuickTime: this.isQuickTime,\n holdsAvc,\n fragmented: this.isFragmented\n }));\n if (this.format._options.onFtyp) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onFtyp(data, start);\n }\n }\n this.ftypSize = this.writer.getPos();\n if (this.fastStart === \"in-memory\") {} else if (this.fastStart === \"reserve\") {\n for (const track of this.output._tracks) {\n if (track.metadata.maximumPacketCount === undefined) {\n throw new Error(\"All tracks must specify maximumPacketCount in their metadata when using\" + \" fastStart: 'reserve'.\");\n }\n }\n } else if (this.isFragmented) {} else {\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n }\n await this.writer.flush();\n release();\n }\n allTracksAreKnown() {\n for (const track of this.output._tracks) {\n if (!track.source._closed && !this.trackDatas.some((x) => x.track === track)) {\n return false;\n }\n }\n return true;\n }\n async getMimeType() {\n await this.allTracksKnown.promise;\n const codecStrings = this.trackDatas.map((trackData) => {\n if (trackData.type === \"video\") {\n return trackData.info.decoderConfig.codec;\n } else if (trackData.type === \"audio\") {\n return trackData.info.decoderConfig.codec;\n } else {\n const map = {\n webvtt: \"wvtt\"\n };\n return map[trackData.track.source._codec];\n }\n });\n return buildIsobmffMimeType({\n isQuickTime: this.isQuickTime,\n hasVideo: this.trackDatas.some((x) => x.type === \"video\"),\n hasAudio: this.trackDatas.some((x) => x.type === \"audio\"),\n codecStrings\n });\n }\n getVideoTrackData(track, packet, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateVideoChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const decoderConfig = { ...meta.decoderConfig };\n assert(decoderConfig.codedWidth !== undefined);\n assert(decoderConfig.codedHeight !== undefined);\n let requiresAnnexBTransformation = false;\n if (track.source._codec === \"avc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractAvcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an AVCDecoderConfigurationRecord from the AVC packet. Make sure the packets are\" + \" in Annex B format (as specified in ITU-T-REC-H.264) when not providing a description, or\" + \" provide a description (must be an AVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in AVCC format.\");\n }\n decoderConfig.description = serializeAvcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n } else if (track.source._codec === \"hevc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractHevcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an HEVCDecoderConfigurationRecord from the HEVC packet. 
Make sure the packets\" + \" are in Annex B format (as specified in ITU-T-REC-H.265) when not providing a description, or\" + \" provide a description (must be an HEVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in HEVC format.\");\n }\n decoderConfig.description = serializeHevcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n }\n const timescale = computeRationalApproximation(1 / (track.metadata.frameRate ?? 57600), 1e6).denominator;\n const newTrackData = {\n muxer: this,\n track,\n type: \"video\",\n info: {\n width: decoderConfig.codedWidth,\n height: decoderConfig.codedHeight,\n decoderConfig,\n requiresAnnexBTransformation\n },\n timescale,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getAudioTrackData(track, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateAudioChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const newTrackData = {\n muxer: this,\n track,\n type: \"audio\",\n info: {\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate,\n decoderConfig: meta.decoderConfig,\n requiresPcmTransformation: !this.isFragmented && PCM_AUDIO_CODECS.includes(track.source._codec)\n },\n timescale: meta.decoderConfig.sampleRate,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getSubtitleTrackData(track, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateSubtitleMetadata(meta);\n assert(meta);\n assert(meta.config);\n const newTrackData = {\n muxer: this,\n track,\n type: \"subtitle\",\n info: {\n config: meta.config\n },\n timescale: 1000,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: [],\n lastCueEndTimestamp: 0,\n cueQueue: [],\n nextSourceId: 0,\n cueToSourceId: new WeakMap\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n async addEncodedVideoPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getVideoTrackData(track, packet, meta);\n let packetData = packet.data;\n if (trackData.info.requiresAnnexBTransformation) {\n const transformedData = transformAnnexBToLengthPrefixed(packetData);\n if (!transformedData) {\n throw new 
Error(\"Failed to transform packet data. Make sure all packets are provided in Annex B format, as\" + \" specified in ITU-T-REC-H.264 and ITU-T-REC-H.265.\");\n }\n packetData = transformedData;\n }\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async addEncodedAudioPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getAudioTrackData(track, meta);\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packet.data, timestamp, packet.duration, packet.type);\n if (trackData.info.requiresPcmTransformation) {\n await this.maybePadWithSilence(trackData, timestamp);\n }\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async maybePadWithSilence(trackData, untilTimestamp) {\n const lastSample = last(trackData.samples);\n const lastEndTimestamp = lastSample ? lastSample.timestamp + lastSample.duration : 0;\n const delta = untilTimestamp - lastEndTimestamp;\n const deltaInTimescale = intoTimescale(delta, trackData.timescale);\n if (deltaInTimescale > 0) {\n const { sampleSize, silentValue } = parsePcmCodec(trackData.info.decoderConfig.codec);\n const samplesNeeded = deltaInTimescale * trackData.info.numberOfChannels;\n const data = new Uint8Array(sampleSize * samplesNeeded).fill(silentValue);\n const paddingSample = this.createSampleForTrack(trackData, new Uint8Array(data.buffer), lastEndTimestamp, delta, \"key\");\n await this.registerSample(trackData, paddingSample);\n }\n }\n async addSubtitleCue(track, cue, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getSubtitleTrackData(track, meta);\n this.validateAndNormalizeTimestamp(trackData.track, cue.timestamp, true);\n if (track.source._codec === \"webvtt\") {\n trackData.cueQueue.push(cue);\n await this.processWebVTTCues(trackData, cue.timestamp);\n } else {}\n } finally {\n release();\n }\n }\n async processWebVTTCues(trackData, until) {\n while (trackData.cueQueue.length > 0) {\n const timestamps = new Set([]);\n for (const cue of trackData.cueQueue) {\n assert(cue.timestamp <= until);\n assert(trackData.lastCueEndTimestamp <= cue.timestamp + cue.duration);\n timestamps.add(Math.max(cue.timestamp, trackData.lastCueEndTimestamp));\n timestamps.add(cue.timestamp + cue.duration);\n }\n const sortedTimestamps = [...timestamps].sort((a, b) => a - b);\n const sampleStart = sortedTimestamps[0];\n const sampleEnd = sortedTimestamps[1] ?? 
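// Cues are re-chunked into non-overlapping samples between consecutive boundary timestamps; gaps are padded with empty vtte samples.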
sampleStart;\n if (until < sampleEnd) {\n break;\n }\n if (trackData.lastCueEndTimestamp < sampleStart) {\n this.auxWriter.seek(0);\n const box2 = vtte();\n this.auxBoxWriter.writeBox(box2);\n const body2 = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample2 = this.createSampleForTrack(trackData, body2, trackData.lastCueEndTimestamp, sampleStart - trackData.lastCueEndTimestamp, \"key\");\n await this.registerSample(trackData, sample2);\n trackData.lastCueEndTimestamp = sampleStart;\n }\n this.auxWriter.seek(0);\n for (let i = 0;i < trackData.cueQueue.length; i++) {\n const cue = trackData.cueQueue[i];\n if (cue.timestamp >= sampleEnd) {\n break;\n }\n inlineTimestampRegex.lastIndex = 0;\n const containsTimestamp = inlineTimestampRegex.test(cue.text);\n const endTimestamp = cue.timestamp + cue.duration;\n let sourceId = trackData.cueToSourceId.get(cue);\n if (sourceId === undefined && sampleEnd < endTimestamp) {\n sourceId = trackData.nextSourceId++;\n trackData.cueToSourceId.set(cue, sourceId);\n }\n if (cue.notes) {\n const box3 = vtta(cue.notes);\n this.auxBoxWriter.writeBox(box3);\n }\n const box2 = vttc(cue.text, containsTimestamp ? sampleStart : null, cue.identifier ?? null, cue.settings ?? null, sourceId ?? null);\n this.auxBoxWriter.writeBox(box2);\n if (endTimestamp === sampleEnd) {\n trackData.cueQueue.splice(i--, 1);\n }\n }\n const body = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample = this.createSampleForTrack(trackData, body, sampleStart, sampleEnd - sampleStart, \"key\");\n await this.registerSample(trackData, sample);\n trackData.lastCueEndTimestamp = sampleEnd;\n }\n }\n createSampleForTrack(trackData, data, timestamp, duration, type) {\n const sample = {\n timestamp,\n decodeTimestamp: timestamp,\n duration,\n data,\n size: data.byteLength,\n type,\n timescaleUnitsToNextSample: intoTimescale(duration, trackData.timescale)\n };\n return sample;\n }\n processTimestamps(trackData, nextSample) {\n if (trackData.timestampProcessingQueue.length === 0) {\n return;\n }\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n let totalDuration = 0;\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n const duration = intoTimescale(sample.duration, trackData.timescale);\n totalDuration += duration;\n }\n if (trackData.timeToSampleTable.length === 0) {\n trackData.timeToSampleTable.push({\n sampleCount: totalDuration,\n sampleDelta: 1\n });\n } else {\n const lastEntry = last(trackData.timeToSampleTable);\n lastEntry.sampleCount += totalDuration;\n }\n trackData.timestampProcessingQueue.length = 0;\n return;\n }\n const sortedTimestamps = trackData.timestampProcessingQueue.map((x) => x.timestamp).sort((a, b) => a - b);\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n sample.decodeTimestamp = sortedTimestamps[i];\n if (!this.isFragmented && trackData.lastTimescaleUnits === null) {\n sample.decodeTimestamp = 0;\n }\n const sampleCompositionTimeOffset = intoTimescale(sample.timestamp - sample.decodeTimestamp, trackData.timescale);\n const durationInTimescale = intoTimescale(sample.duration, trackData.timescale);\n if (trackData.lastTimescaleUnits !== null) {\n assert(trackData.lastSample);\n const timescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n assert(delta 
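// Decode timestamps come from sorting the queued presentation timestamps; the resulting deltas feed the run-length-encoded stts and ctts tables below.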
>= 0);\n trackData.lastTimescaleUnits += delta;\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n if (!this.isFragmented) {\n let lastTableEntry = last(trackData.timeToSampleTable);\n assert(lastTableEntry);\n if (lastTableEntry.sampleCount === 1) {\n lastTableEntry.sampleDelta = delta;\n const entryBefore = trackData.timeToSampleTable[trackData.timeToSampleTable.length - 2];\n if (entryBefore && entryBefore.sampleDelta === delta) {\n entryBefore.sampleCount++;\n trackData.timeToSampleTable.pop();\n lastTableEntry = entryBefore;\n }\n } else if (lastTableEntry.sampleDelta !== delta) {\n lastTableEntry.sampleCount--;\n trackData.timeToSampleTable.push(lastTableEntry = {\n sampleCount: 1,\n sampleDelta: delta\n });\n }\n if (lastTableEntry.sampleDelta === durationInTimescale) {\n lastTableEntry.sampleCount++;\n } else {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n }\n const lastCompositionTimeOffsetTableEntry = last(trackData.compositionTimeOffsetTable);\n assert(lastCompositionTimeOffsetTableEntry);\n if (lastCompositionTimeOffsetTableEntry.sampleCompositionTimeOffset === sampleCompositionTimeOffset) {\n lastCompositionTimeOffsetTableEntry.sampleCount++;\n } else {\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n });\n }\n }\n } else {\n trackData.lastTimescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n if (!this.isFragmented) {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n });\n }\n }\n trackData.lastSample = sample;\n }\n trackData.timestampProcessingQueue.length = 0;\n assert(trackData.lastSample);\n assert(trackData.lastTimescaleUnits !== null);\n if (nextSample !== undefined && trackData.lastSample.timescaleUnitsToNextSample === 0) {\n assert(nextSample.type === \"key\");\n const timescaleUnits = intoTimescale(nextSample.timestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n }\n }\n async registerSample(trackData, sample) {\n if (sample.type === \"key\") {\n this.processTimestamps(trackData, sample);\n }\n trackData.timestampProcessingQueue.push(sample);\n if (this.isFragmented) {\n trackData.sampleQueue.push(sample);\n await this.interleaveSamples();\n } else if (this.fastStart === \"reserve\") {\n await this.registerSampleFastStartReserve(trackData, sample);\n } else {\n await this.addSampleToTrack(trackData, sample);\n }\n }\n async addSampleToTrack(trackData, sample) {\n if (!this.isFragmented) {\n trackData.samples.push(sample);\n if (this.fastStart === \"reserve\") {\n const maximumPacketCount = trackData.track.metadata.maximumPacketCount;\n assert(maximumPacketCount !== undefined);\n if (trackData.samples.length > maximumPacketCount) {\n throw new Error(`Track #${trackData.track.id} has already reached the maximum packet count` + ` (${maximumPacketCount}). 
Either add fewer packets or increase the maximum packet count.`);\n }\n }\n let beginNewChunk = false;\n if (!trackData.currentChunk) {\n beginNewChunk = true;\n } else {\n trackData.currentChunk.startTimestamp = Math.min(trackData.currentChunk.startTimestamp, sample.timestamp);\n const currentChunkDuration = sample.timestamp - trackData.currentChunk.startTimestamp;\n if (this.isFragmented) {\n const keyFrameQueuedEverywhere = this.trackDatas.every((otherTrackData) => {\n if (trackData === otherTrackData) {\n return sample.type === \"key\";\n }\n const firstQueuedSample = otherTrackData.sampleQueue[0];\n if (firstQueuedSample) {\n return firstQueuedSample.type === \"key\";\n }\n return otherTrackData.track.source._closed;\n });\n if (currentChunkDuration >= this.minimumFragmentDuration && keyFrameQueuedEverywhere && sample.timestamp > this.maxWrittenTimestamp) {\n beginNewChunk = true;\n await this.finalizeFragment();\n }\n } else {\n beginNewChunk = currentChunkDuration >= 0.5;\n }\n }\n if (beginNewChunk) {\n if (trackData.currentChunk) {\n await this.finalizeCurrentChunk(trackData);\n }\n trackData.currentChunk = {\n startTimestamp: sample.timestamp,\n samples: [],\n offset: null,\n moofOffset: null\n };\n }\n assert(trackData.currentChunk);\n trackData.currentChunk.samples.push(sample);\n if (this.isFragmented) {\n this.maxWrittenTimestamp = Math.max(this.maxWrittenTimestamp, sample.timestamp);\n }\n }\n async finalizeCurrentChunk(trackData) {\n assert(!this.isFragmented);\n if (!trackData.currentChunk)\n return;\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n let sampleCount = trackData.currentChunk.samples.length;\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n sampleCount = trackData.currentChunk.samples.reduce((acc, sample) => acc + intoTimescale(sample.duration, trackData.timescale), 0);\n }\n if (trackData.compactlyCodedChunkTable.length === 0 || last(trackData.compactlyCodedChunkTable).samplesPerChunk !== sampleCount) {\n trackData.compactlyCodedChunkTable.push({\n firstChunk: trackData.finalizedChunks.length,\n samplesPerChunk: sampleCount\n });\n }\n if (this.fastStart === \"in-memory\") {\n trackData.currentChunk.offset = 0;\n return;\n }\n trackData.currentChunk.offset = this.writer.getPos();\n for (const sample of trackData.currentChunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n await this.writer.flush();\n }\n async interleaveSamples(isFinalCall = false) {\n assert(this.isFragmented);\n if (!isFinalCall && !this.allTracksAreKnown()) {\n return;\n }\n outer:\n while (true) {\n let trackWithMinTimestamp = null;\n let minTimestamp = Infinity;\n for (const trackData of this.trackDatas) {\n if (!isFinalCall && trackData.sampleQueue.length === 0 && !trackData.track.source._closed) {\n break outer;\n }\n if (trackData.sampleQueue.length > 0 && trackData.sampleQueue[0].timestamp < minTimestamp) {\n trackWithMinTimestamp = trackData;\n minTimestamp = trackData.sampleQueue[0].timestamp;\n }\n }\n if (!trackWithMinTimestamp) {\n break;\n }\n const sample = trackWithMinTimestamp.sampleQueue.shift();\n await this.addSampleToTrack(trackWithMinTimestamp, sample);\n }\n }\n async finalizeFragment(flushWriter = true) {\n assert(this.isFragmented);\n const fragmentNumber = this.nextFragmentNumber++;\n if (fragmentNumber === 1) {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = 
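/* the first fragment is preceded by a moov box that only carries track metadata; in fragmented files, all sample addressing lives in the per-fragment moof boxes written below */ 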
moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n const tracksInFragment = this.trackDatas.filter((x) => x.currentChunk);\n const moofBox = moof(fragmentNumber, tracksInFragment);\n const moofOffset = this.writer.getPos();\n const mdatStartPos = moofOffset + this.boxWriter.measureBox(moofBox);\n let currentPos = mdatStartPos + MIN_BOX_HEADER_SIZE;\n let fragmentStartTimestamp = Infinity;\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset = currentPos;\n trackData.currentChunk.moofOffset = moofOffset;\n for (const sample of trackData.currentChunk.samples) {\n currentPos += sample.size;\n }\n fragmentStartTimestamp = Math.min(fragmentStartTimestamp, trackData.currentChunk.startTimestamp);\n }\n const mdatSize = currentPos - mdatStartPos;\n const needsLargeMdatSize = mdatSize >= 2 ** 32;\n if (needsLargeMdatSize) {\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset += MAX_BOX_HEADER_SIZE - MIN_BOX_HEADER_SIZE;\n }\n }\n if (this.format._options.onMoof) {\n this.writer.startTrackingWrites();\n }\n const newMoofBox = moof(fragmentNumber, tracksInFragment);\n this.boxWriter.writeBox(newMoofBox);\n if (this.format._options.onMoof) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoof(data, start, fragmentStartTimestamp);\n }\n assert(this.writer.getPos() === mdatStartPos);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n const mdatBox = mdat(needsLargeMdatSize);\n mdatBox.size = mdatSize;\n this.boxWriter.writeBox(mdatBox);\n this.writer.seek(mdatStartPos + (needsLargeMdatSize ? MAX_BOX_HEADER_SIZE : MIN_BOX_HEADER_SIZE));\n for (const trackData of tracksInFragment) {\n for (const sample of trackData.currentChunk.samples) {\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n for (const trackData of tracksInFragment) {\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n trackData.currentChunk = null;\n }\n if (flushWriter) {\n await this.writer.flush();\n }\n }\n async registerSampleFastStartReserve(trackData, sample) {\n if (this.allTracksAreKnown()) {\n if (!this.mdat) {\n const moovBox = moov(this);\n const moovSize = this.boxWriter.measureBox(moovBox);\n const reservedSize = moovSize + this.computeSampleTableSizeUpperBound() + 4096;\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize + reservedSize);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n for (const trackData2 of this.trackDatas) {\n for (const sample2 of trackData2.sampleQueue) {\n await this.addSampleToTrack(trackData2, sample2);\n }\n trackData2.sampleQueue.length = 0;\n }\n }\n await this.addSampleToTrack(trackData, sample);\n } else {\n trackData.sampleQueue.push(sample);\n }\n }\n computeSampleTableSizeUpperBound() {\n assert(this.fastStart === \"reserve\");\n let upperBound = 0;\n for (const trackData of this.trackDatas) {\n const n = trackData.track.metadata.maximumPacketCount;\n assert(n !== undefined);\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound 
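/* worst-case sizing of the sample tables; 'reserve' mode uses this bound (plus slack) to leave room in front of the mdat for the moov box written during finalize() */ 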
+= (4 + 4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += 8 * n;\n }\n return upperBound;\n }\n async onTrackClose(track) {\n const release = await this.mutex.acquire();\n if (track.type === \"subtitle\" && track.source._codec === \"webvtt\") {\n const trackData = this.trackDatas.find((x) => x.track === track);\n if (trackData) {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n if (this.isFragmented) {\n await this.interleaveSamples();\n }\n release();\n }\n async finalize() {\n const release = await this.mutex.acquire();\n this.allTracksKnown.resolve();\n for (const trackData of this.trackDatas) {\n if (trackData.type === \"subtitle\" && trackData.track.source._codec === \"webvtt\") {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.isFragmented) {\n await this.interleaveSamples(true);\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n }\n await this.finalizeFragment(false);\n } else {\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n await this.finalizeCurrentChunk(trackData);\n }\n }\n if (this.fastStart === \"in-memory\") {\n this.mdat = mdat(false);\n let mdatSize;\n for (let i = 0;i < 2; i++) {\n const movieBox2 = moov(this);\n const movieBoxSize = this.boxWriter.measureBox(movieBox2);\n mdatSize = this.boxWriter.measureBox(this.mdat);\n let currentChunkPos = this.writer.getPos() + movieBoxSize + mdatSize;\n for (const chunk of this.finalizedChunks) {\n chunk.offset = currentChunkPos;\n for (const { data } of chunk.samples) {\n assert(data);\n currentChunkPos += data.byteLength;\n mdatSize += data.byteLength;\n }\n }\n if (currentChunkPos < 2 ** 32)\n break;\n if (mdatSize >= 2 ** 32)\n this.mdat.largeSize = true;\n }\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat.size = mdatSize;\n this.boxWriter.writeBox(this.mdat);\n for (const chunk of this.finalizedChunks) {\n for (const sample of chunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n } else if (this.isFragmented) {\n const startPos = this.writer.getPos();\n const mfraBox = mfra(this.trackDatas);\n this.boxWriter.writeBox(mfraBox);\n const mfraBoxSize = this.writer.getPos() - startPos;\n this.writer.seek(this.writer.getPos() - 4);\n this.boxWriter.writeU32(mfraBoxSize);\n } else {\n assert(this.mdat);\n const mdatPos = this.boxWriter.offsets.get(this.mdat);\n assert(mdatPos !== undefined);\n const mdatSize = this.writer.getPos() - mdatPos;\n this.mdat.size = mdatSize;\n this.mdat.largeSize = mdatSize >= 2 ** 32;\n this.boxWriter.patchBox(this.mdat);\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n const movieBox = moov(this);\n if (this.fastStart === \"reserve\") {\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize);\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n 
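/* 'reserve' mode: the finished moov is written back into the space reserved right after the ftyp, and any leftover bytes are covered with a free box just below */\n 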
this.boxWriter.writeBox(movieBox);\n const remainingSpace = this.boxWriter.offsets.get(this.mdat) - this.writer.getPos();\n this.boxWriter.writeBox(free(remainingSpace));\n } else {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(movieBox);\n }\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n release();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output-format.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass OutputFormat {\n getSupportedVideoCodecs() {\n return this.getSupportedCodecs().filter((codec) => VIDEO_CODECS.includes(codec));\n }\n getSupportedAudioCodecs() {\n return this.getSupportedCodecs().filter((codec) => AUDIO_CODECS.includes(codec));\n }\n getSupportedSubtitleCodecs() {\n return this.getSupportedCodecs().filter((codec) => SUBTITLE_CODECS.includes(codec));\n }\n _codecUnsupportedHint(codec) {\n return \"\";\n }\n}\n\nclass IsobmffOutputFormat extends OutputFormat {\n constructor(options = {}) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.fastStart !== undefined && ![false, \"in-memory\", \"reserve\", \"fragmented\"].includes(options.fastStart)) {\n throw new TypeError(\"options.fastStart, when provided, must be false, 'in-memory', 'reserve', or 'fragmented'.\");\n }\n if (options.minimumFragmentDuration !== undefined && (!Number.isFinite(options.minimumFragmentDuration) || options.minimumFragmentDuration < 0)) {\n throw new TypeError(\"options.minimumFragmentDuration, when provided, must be a non-negative number.\");\n }\n if (options.onFtyp !== undefined && typeof options.onFtyp !== \"function\") {\n throw new TypeError(\"options.onFtyp, when provided, must be a function.\");\n }\n if (options.onMoov !== undefined && typeof options.onMoov !== \"function\") {\n throw new TypeError(\"options.onMoov, when provided, must be a function.\");\n }\n if (options.onMdat !== undefined && typeof options.onMdat !== \"function\") {\n throw new TypeError(\"options.onMdat, when provided, must be a function.\");\n }\n if (options.onMoof !== undefined && typeof options.onMoof !== \"function\") {\n throw new TypeError(\"options.onMoof, when provided, must be a function.\");\n }\n if (options.metadataFormat !== undefined && ![\"mdir\", \"mdta\", \"udta\", \"auto\"].includes(options.metadataFormat)) {\n throw new TypeError(\"options.metadataFormat, when provided, must be either 'auto', 'mdir', 'mdta', or 'udta'.\");\n }\n super();\n this._options = options;\n }\n getSupportedTrackCounts() {\n return {\n video: { min: 0, max: Infinity },\n audio: { min: 0, max: Infinity },\n subtitle: { min: 0, max: Infinity },\n total: { min: 1, max: 2 ** 32 - 1 }\n };\n }\n get supportsVideoRotationMetadata() {\n return true;\n }\n _createMuxer(output) {\n return new IsobmffMuxer(output, this);\n }\n}\n\nclass Mp4OutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MP4\";\n }\n get fileExtension() {\n return \".mp4\";\n }\n get mimeType() {\n return \"video/mp4\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...NON_PCM_AUDIO_CODECS,\n 
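/* MP4 additionally accepts the specific PCM flavors listed below (unsuffixed = little-endian, 'be' = big-endian), while MOV accepts every AUDIO_CODECS entry. Minimal usage sketch, assuming the Output and BufferTarget exports of this same bundle: new Output({ format: new Mp4OutputFormat({ fastStart: 'in-memory' }), target: new BufferTarget() }). */\n 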
\"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n ...SUBTITLE_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new MovOutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MOV will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\nclass MovOutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MOV\";\n }\n get fileExtension() {\n return \".mov\";\n }\n get mimeType() {\n return \"video/quicktime\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...AUDIO_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new Mp4OutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MP4 will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/encode.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar validateVideoEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!VIDEO_CODECS.includes(config.codec)) {\n throw new TypeError(\`Invalid video codec '${config.codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.\`);\n }\n if (!(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate must be a positive integer or a quality.\");\n }\n if (config.keyFrameInterval !== undefined && (!Number.isFinite(config.keyFrameInterval) || config.keyFrameInterval < 0)) {\n throw new TypeError(\"config.keyFrameInterval, when provided, must be a non-negative number.\");\n }\n if (config.sizeChangeBehavior !== undefined && ![\"deny\", \"passThrough\", \"fill\", \"contain\", \"cover\"].includes(config.sizeChangeBehavior)) {\n throw new TypeError(\"config.sizeChangeBehavior, when provided, must be 'deny', 'passThrough', 'fill', 'contain'\" + \" or 'cover'.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedPacket, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateVideoEncodingAdditionalOptions(config.codec, config);\n};\nvar validateVideoEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.alpha !== undefined && ![\"discard\", \"keep\"].includes(options.alpha)) {\n throw new TypeError(\"options.alpha, when provided, must be 'discard' or 'keep'.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.latencyMode !== undefined && ![\"quality\", \"realtime\"].includes(options.latencyMode)) {\n throw new TypeError(\"latencyMode, when provided, must be 'quality' or 'realtime'.\");\n }\n if (options.fullCodecString !== undefined && typeof 
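/* fullCodecString lets callers pin the exact WebCodecs codec string (for example a specific AVC profile/level) instead of the one derived from codec, dimensions and bitrate; the check below still requires it to parse back to the same codec */ 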
options.fullCodecString !== \"string\") {\n throw new TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n if (options.hardwareAcceleration !== undefined && ![\"no-preference\", \"prefer-hardware\", \"prefer-software\"].includes(options.hardwareAcceleration)) {\n throw new TypeError(\"hardwareAcceleration, when provided, must be 'no-preference', 'prefer-hardware' or\" + \" 'prefer-software'.\");\n }\n if (options.scalabilityMode !== undefined && typeof options.scalabilityMode !== \"string\") {\n throw new TypeError(\"scalabilityMode, when provided, must be a string.\");\n }\n if (options.contentHint !== undefined && typeof options.contentHint !== \"string\") {\n throw new TypeError(\"contentHint, when provided, must be a string.\");\n }\n};\nvar buildVideoEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toVideoBitrate(options.codec, options.width, options.height) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildVideoCodecString(options.codec, options.width, options.height, resolvedBitrate),\n width: options.width,\n height: options.height,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n alpha: options.alpha ?? \"discard\",\n framerate: options.framerate,\n latencyMode: options.latencyMode,\n hardwareAcceleration: options.hardwareAcceleration,\n scalabilityMode: options.scalabilityMode,\n contentHint: options.contentHint,\n ...getVideoEncoderConfigExtension(options.codec)\n };\n};\nvar validateAudioEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!AUDIO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid audio codec '${config.codec}'. 
Must be one of: ${AUDIO_CODECS.join(\", \")}.\`);\n }\n if (config.bitrate === undefined && (!PCM_AUDIO_CODECS.includes(config.codec) || config.codec === \"flac\")) {\n throw new TypeError(\"config.bitrate must be provided for compressed audio codecs.\");\n }\n if (config.bitrate !== undefined && !(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate, when provided, must be a positive integer or a quality.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedPacket, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateAudioEncodingAdditionalOptions(config.codec, config);\n};\nvar validateAudioEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.fullCodecString !== undefined && typeof options.fullCodecString !== \"string\") {\n throw new TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(\`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).\`);\n }\n};\nvar buildAudioEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toAudioBitrate(options.codec) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildAudioCodecString(options.codec, options.numberOfChannels, options.sampleRate),\n numberOfChannels: options.numberOfChannels,\n sampleRate: options.sampleRate,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n ...getAudioEncoderConfigExtension(options.codec)\n };\n};\n\nclass Quality {\n constructor(factor) {\n this._factor = factor;\n }\n _toVideoBitrate(codec, width, height) {\n const pixels = width * height;\n const codecEfficiencyFactors = {\n avc: 1,\n hevc: 0.6,\n vp9: 0.6,\n av1: 0.4,\n vp8: 1.2\n };\n const referencePixels = 1920 * 1080;\n const referenceBitrate = 3000000;\n const scaleFactor = Math.pow(pixels / referencePixels, 0.95);\n const baseBitrate = referenceBitrate * scaleFactor;\n const codecAdjustedBitrate = baseBitrate * codecEfficiencyFactors[codec];\n const finalBitrate = codecAdjustedBitrate * this._factor;\n return Math.ceil(finalBitrate / 1000) * 1000;\n }\n _toAudioBitrate(codec) {\n if (PCM_AUDIO_CODECS.includes(codec) || codec === \"flac\") {\n return;\n }\n const baseRates = {\n aac: 128000,\n opus: 64000,\n mp3: 160000,\n vorbis: 64000\n };\n const baseBitrate = baseRates[codec];\n if (!baseBitrate) {\n throw new Error(\`Unhandled codec: ${codec}\`);\n }\n let finalBitrate = baseBitrate * this._factor;\n if (codec === \"aac\") {\n const validRates = [96000, 128000, 160000, 192000];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? 
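/* picks the nearest valid AAC rate: for example, a 0.5 quality factor targets 128000 * 0.5 = 64000 bps, which snaps to 96000 */ 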
curr : prev);\n } else if (codec === \"opus\" || codec === \"vorbis\") {\n finalBitrate = Math.max(6000, finalBitrate);\n } else if (codec === \"mp3\") {\n const validRates = [\n 8000,\n 16000,\n 24000,\n 32000,\n 40000,\n 48000,\n 64000,\n 80000,\n 96000,\n 112000,\n 128000,\n 160000,\n 192000,\n 224000,\n 256000,\n 320000\n ];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? curr : prev);\n }\n return Math.round(finalBitrate / 1000) * 1000;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/media-source.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass MediaSource {\n constructor() {\n this._connectedTrack = null;\n this._closingPromise = null;\n this._closed = false;\n this._timestampOffset = 0;\n }\n _ensureValidAdd() {\n if (!this._connectedTrack) {\n throw new Error(\"Source is not connected to an output track.\");\n }\n if (this._connectedTrack.output.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._connectedTrack.output.state === \"finalizing\" || this._connectedTrack.output.state === \"finalized\") {\n throw new Error(\"Output has been finalized.\");\n }\n if (this._connectedTrack.output.state === \"pending\") {\n throw new Error(\"Output has not started.\");\n }\n if (this._closed) {\n throw new Error(\"Source is closed.\");\n }\n }\n async _start() {}\n async _flushAndClose(forceClose) {}\n close() {\n if (this._closingPromise) {\n return;\n }\n const connectedTrack = this._connectedTrack;\n if (!connectedTrack) {\n throw new Error(\"Cannot call close without connecting the source to an output track.\");\n }\n if (connectedTrack.output.state === \"pending\") {\n throw new Error(\"Cannot call close before output has been started.\");\n }\n this._closingPromise = (async () => {\n await this._flushAndClose(false);\n this._closed = true;\n if (connectedTrack.output.state === \"finalizing\" || connectedTrack.output.state === \"finalized\") {\n return;\n }\n connectedTrack.output._muxer.onTrackClose(connectedTrack);\n })();\n }\n async _flushOrWaitForOngoingClose(forceClose) {\n if (this._closingPromise) {\n return this._closingPromise;\n } else {\n return this._flushAndClose(forceClose);\n }\n }\n}\n\nclass VideoSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!VIDEO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid video codec '${codec}'. 
Must be one of: ${VIDEO_CODECS.join(\", \")}.\`);\n }\n this._codec = codec;\n }\n}\nclass VideoEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastMultipleOfKeyFrameInterval = -1;\n this.codedWidth = null;\n this.codedHeight = null;\n this.resizeCanvas = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.alphaEncoder = null;\n this.splitter = null;\n this.splitterCreationFailed = false;\n this.alphaFrameQueue = [];\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(videoSample, shouldClose, encodeOptions) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.codedWidth !== null && this.codedHeight !== null) {\n if (videoSample.codedWidth !== this.codedWidth || videoSample.codedHeight !== this.codedHeight) {\n const sizeChangeBehavior = this.encodingConfig.sizeChangeBehavior ?? \"deny\";\n if (sizeChangeBehavior === \"passThrough\") {} else if (sizeChangeBehavior === \"deny\") {\n throw new Error(\`Video sample size must remain constant. Expected ${this.codedWidth}x${this.codedHeight},\` + \` got ${videoSample.codedWidth}x${videoSample.codedHeight}. To allow the sample size to\` + \` change over time, set \\\`sizeChangeBehavior\\\` to a value other than 'deny' in the\` + \` encoding options.\`);\n } else {\n let canvasIsNew = false;\n if (!this.resizeCanvas) {\n if (typeof document !== \"undefined\") {\n this.resizeCanvas = document.createElement(\"canvas\");\n this.resizeCanvas.width = this.codedWidth;\n this.resizeCanvas.height = this.codedHeight;\n } else {\n this.resizeCanvas = new OffscreenCanvas(this.codedWidth, this.codedHeight);\n }\n canvasIsNew = true;\n }\n const context = this.resizeCanvas.getContext(\"2d\", {\n alpha: isFirefox()\n });\n assert(context);\n if (!canvasIsNew) {\n if (isFirefox()) {\n context.fillStyle = \"black\";\n context.fillRect(0, 0, this.codedWidth, this.codedHeight);\n } else {\n context.clearRect(0, 0, this.codedWidth, this.codedHeight);\n }\n }\n videoSample.drawWithFit(context, { fit: sizeChangeBehavior });\n if (shouldClose) {\n videoSample.close();\n }\n videoSample = new VideoSample(this.resizeCanvas, {\n timestamp: videoSample.timestamp,\n duration: videoSample.duration,\n rotation: videoSample.rotation\n });\n shouldClose = true;\n }\n }\n } else {\n this.codedWidth = videoSample.codedWidth;\n this.codedHeight = videoSample.codedHeight;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(videoSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n const keyFrameInterval = this.encodingConfig.keyFrameInterval ?? 
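/* default key-frame interval of 5 seconds; the floor() below forces a key frame whenever a sample's timestamp crosses into a new interval */ 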
5;\n const multipleOfKeyFrameInterval = Math.floor(videoSample.timestamp / keyFrameInterval);\n const finalEncodeOptions = {\n ...encodeOptions,\n keyFrame: encodeOptions?.keyFrame || keyFrameInterval === 0 || multipleOfKeyFrameInterval !== this.lastMultipleOfKeyFrameInterval\n };\n this.lastMultipleOfKeyFrameInterval = multipleOfKeyFrameInterval;\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = videoSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample, finalEncodeOptions)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n } else {\n assert(this.encoder);\n const videoFrame = videoSample.toVideoFrame();\n if (!this.alphaEncoder) {\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const frameDefinitelyHasNoAlpha = !!videoFrame.format && !videoFrame.format.includes(\"A\");\n if (frameDefinitelyHasNoAlpha || this.splitterCreationFailed) {\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (!this.splitter) {\n try {\n this.splitter = new ColorAlphaSplitter(width, height);\n } catch (error) {\n console.error(\"Due to an error, only color data will be encoded.\", error);\n this.splitterCreationFailed = true;\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n }\n }\n if (this.splitter) {\n const colorFrame = this.splitter.extractColor(videoFrame);\n const alphaFrame = this.splitter.extractAlpha(videoFrame);\n this.alphaFrameQueue.push(alphaFrame);\n this.encoder.encode(colorFrame, finalEncodeOptions);\n colorFrame.close();\n videoFrame.close();\n }\n }\n }\n if (shouldClose) {\n videoSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n }\n await this.muxer.mutex.currentPromise;\n } finally {\n if (shouldClose) {\n videoSample.close();\n }\n }\n }\n ensureEncoder(videoSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const encoderConfig = buildVideoEncoderConfig({\n width: videoSample.codedWidth,\n height: videoSample.codedHeight,\n ...this.encodingConfig,\n framerate: this.source._connectedTrack?.metadata.frameRate\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customVideoEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n 
};\n await this.customEncoder.init();\n } else {\n if (typeof VideoEncoder === \"undefined\") {\n throw new Error(\"VideoEncoder is not supported by this browser.\");\n }\n encoderConfig.alpha = \"discard\";\n if (this.encodingConfig.alpha === \"keep\") {\n encoderConfig.latencyMode = \"quality\";\n }\n const hasOddDimension = encoderConfig.width % 2 === 1 || encoderConfig.height % 2 === 1;\n if (hasOddDimension && (this.encodingConfig.codec === \"avc\" || this.encodingConfig.codec === \"hevc\")) {\n throw new Error(`The dimensions ${encoderConfig.width}x${encoderConfig.height} are not supported for codec` + ` '${this.encodingConfig.codec}'; both width and height must be even numbers. Make sure to` + ` round your dimensions to the nearest even number.`);\n }\n const support = await VideoEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.width}x${encoderConfig.height}, hardware acceleration:` + ` ${encoderConfig.hardwareAcceleration ?? \"no-preference\"}) is not supported by this browser.` + ` Consider using another codec or changing your video parameters.`);\n }\n const colorChunkQueue = [];\n const nullAlphaChunkQueue = [];\n let encodedAlphaChunkCount = 0;\n let alphaEncoderQueue = 0;\n const addPacket = (colorChunk, alphaChunk, meta) => {\n const sideData = {};\n if (alphaChunk) {\n const alphaData = new Uint8Array(alphaChunk.byteLength);\n alphaChunk.copyTo(alphaData);\n sideData.alpha = alphaData;\n }\n const packet = EncodedPacket.fromEncodedChunk(colorChunk, sideData);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n this.encoder = new VideoEncoder({\n output: (chunk, meta) => {\n if (!this.alphaEncoder) {\n addPacket(chunk, null, meta);\n return;\n }\n const alphaFrame = this.alphaFrameQueue.shift();\n assert(alphaFrame !== undefined);\n if (alphaFrame) {\n this.alphaEncoder.encode(alphaFrame, {\n keyFrame: chunk.type === \"key\"\n });\n alphaEncoderQueue++;\n alphaFrame.close();\n colorChunkQueue.push({ chunk, meta });\n } else {\n if (alphaEncoderQueue === 0) {\n addPacket(chunk, null, meta);\n } else {\n nullAlphaChunkQueue.push(encodedAlphaChunkCount + alphaEncoderQueue);\n colorChunkQueue.push({ chunk, meta });\n }\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n if (this.encodingConfig.alpha === \"keep\") {\n this.alphaEncoder = new VideoEncoder({\n output: (chunk, meta) => {\n alphaEncoderQueue--;\n const colorChunk = colorChunkQueue.shift();\n assert(colorChunk !== undefined);\n addPacket(colorChunk.chunk, chunk, colorChunk.meta);\n encodedAlphaChunkCount++;\n while (nullAlphaChunkQueue.length > 0 && nullAlphaChunkQueue[0] === encodedAlphaChunkCount) {\n nullAlphaChunkQueue.shift();\n const colorChunk2 = colorChunkQueue.shift();\n assert(colorChunk2 !== undefined);\n addPacket(colorChunk2.chunk, null, colorChunk2.meta);\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.alphaEncoder.configure(encoderConfig);\n }\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n async flushAndClose(forceClose) {\n if 
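/* on a forced close (canceled output) pending encodes are abandoned instead of flushed */ 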
(!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n await this.alphaEncoder?.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n if (this.alphaEncoder && this.alphaEncoder.state !== \"closed\") {\n this.alphaEncoder.close();\n }\n this.alphaFrameQueue.forEach((x) => x?.close());\n this.splitter?.close();\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else {\n return this.encoder?.encodeQueueSize ?? 0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass ColorAlphaSplitter {\n constructor(initialWidth, initialHeight) {\n this.lastFrame = null;\n if (typeof OffscreenCanvas !== \"undefined\") {\n this.canvas = new OffscreenCanvas(initialWidth, initialHeight);\n } else {\n this.canvas = document.createElement(\"canvas\");\n this.canvas.width = initialWidth;\n this.canvas.height = initialHeight;\n }\n const gl = this.canvas.getContext(\"webgl2\", {\n alpha: true\n });\n if (!gl) {\n throw new Error(\"Couldn't acquire WebGL 2 context.\");\n }\n this.gl = gl;\n this.colorProgram = this.createColorProgram();\n this.alphaProgram = this.createAlphaProgram();\n this.vao = this.createVAO();\n this.sourceTexture = this.createTexture();\n this.alphaResolutionLocation = this.gl.getUniformLocation(this.alphaProgram, \"u_resolution\");\n this.gl.useProgram(this.colorProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.colorProgram, \"u_sourceTexture\"), 0);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.alphaProgram, \"u_sourceTexture\"), 0);\n }\n createVertexShader() {\n return this.createShader(this.gl.VERTEX_SHADER, `#version 300 es\n\t\t\tin vec2 a_position;\n\t\t\tin vec2 a_texCoord;\n\t\t\tout vec2 v_texCoord;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tgl_Position = vec4(a_position, 0.0, 1.0);\n\t\t\t\tv_texCoord = a_texCoord;\n\t\t\t}\n\t\t`);\n }\n createColorProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tvec4 source = texture(u_sourceTexture, v_texCoord);\n\t\t\t\tfragColor = vec4(source.rgb, 1.0);\n\t\t\t}\n\t\t`);\n const program = this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createAlphaProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tuniform vec2 u_resolution; // The width and height of the canvas\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\n\t\t\t// This function determines the value for a single byte in the YUV stream\n\t\t\tfloat getByteValue(float byteOffset) {\n\t\t\t\tfloat width = u_resolution.x;\n\t\t\t\tfloat height = 
u_resolution.y;\n\n\t\t\t\tfloat yPlaneSize = width * height;\n\n\t\t\t\tif (byteOffset < yPlaneSize) {\n\t\t\t\t\t// This byte is in the luma plane. Find the corresponding pixel coordinates to sample from\n\t\t\t\t\tfloat y = floor(byteOffset / width);\n\t\t\t\t\tfloat x = mod(byteOffset, width);\n\t\t\t\t\t\n\t\t\t\t\t// Add 0.5 to sample the center of the texel\n\t\t\t\t\tvec2 sampleCoord = (vec2(x, y) + 0.5) / u_resolution;\n\t\t\t\t\t\n\t\t\t\t\t// The luma value is the alpha from the source texture\n\t\t\t\t\treturn texture(u_sourceTexture, sampleCoord).a;\n\t\t\t\t} else {\n\t\t\t\t\t// Write a fixed value for chroma and beyond\n\t\t\t\t\treturn 128.0 / 255.0;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\t// Each fragment writes 4 bytes (R, G, B, A)\n\t\t\t\tfloat pixelIndex = floor(gl_FragCoord.y) * u_resolution.x + floor(gl_FragCoord.x);\n\t\t\t\tfloat baseByteOffset = pixelIndex * 4.0;\n\n\t\t\t\tvec4 result;\n\t\t\t\tfor (int i = 0; i < 4; i++) {\n\t\t\t\t\tfloat currentByteOffset = baseByteOffset + float(i);\n\t\t\t\t\tresult[i] = getByteValue(currentByteOffset);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfragColor = result;\n\t\t\t}\n\t\t`);\n const program = this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createShader(type, source) {\n const shader = this.gl.createShader(type);\n this.gl.shaderSource(shader, source);\n this.gl.compileShader(shader);\n if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {\n console.error(\"Shader compile error:\", this.gl.getShaderInfoLog(shader));\n }\n return shader;\n }\n createVAO() {\n const vao = this.gl.createVertexArray();\n this.gl.bindVertexArray(vao);\n const vertices = new Float32Array([\n -1,\n -1,\n 0,\n 1,\n 1,\n -1,\n 1,\n 1,\n -1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0\n ]);\n const buffer = this.gl.createBuffer();\n this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);\n this.gl.bufferData(this.gl.ARRAY_BUFFER, vertices, this.gl.STATIC_DRAW);\n const positionLocation = this.gl.getAttribLocation(this.colorProgram, \"a_position\");\n const texCoordLocation = this.gl.getAttribLocation(this.colorProgram, \"a_texCoord\");\n this.gl.enableVertexAttribArray(positionLocation);\n this.gl.vertexAttribPointer(positionLocation, 2, this.gl.FLOAT, false, 16, 0);\n this.gl.enableVertexAttribArray(texCoordLocation);\n this.gl.vertexAttribPointer(texCoordLocation, 2, this.gl.FLOAT, false, 16, 8);\n return vao;\n }\n createTexture() {\n const texture = this.gl.createTexture();\n this.gl.bindTexture(this.gl.TEXTURE_2D, texture);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.LINEAR);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.LINEAR);\n return texture;\n }\n updateTexture(sourceFrame) {\n if (this.lastFrame === sourceFrame) {\n return;\n }\n if (sourceFrame.displayWidth !== this.canvas.width || sourceFrame.displayHeight !== this.canvas.height) {\n this.canvas.width = sourceFrame.displayWidth;\n this.canvas.height = sourceFrame.displayHeight;\n }\n this.gl.activeTexture(this.gl.TEXTURE0);\n this.gl.bindTexture(this.gl.TEXTURE_2D, this.sourceTexture);\n this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGBA, this.gl.RGBA, this.gl.UNSIGNED_BYTE, sourceFrame);\n 
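/* caching the last uploaded frame lets extractColor() and extractAlpha() share one texture upload per source frame */\n 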
this.lastFrame = sourceFrame;\n }\n extractColor(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.colorProgram);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n return new VideoFrame(this.canvas, {\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? undefined,\n alpha: \"discard\"\n });\n }\n extractAlpha(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform2f(this.alphaResolutionLocation, this.canvas.width, this.canvas.height);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n const { width, height } = this.canvas;\n const chromaSamples = Math.ceil(width / 2) * Math.ceil(height / 2);\n const yuvSize = width * height + chromaSamples * 2;\n const requiredHeight = Math.ceil(yuvSize / (width * 4));\n let yuv = new Uint8Array(4 * width * requiredHeight);\n this.gl.readPixels(0, 0, width, requiredHeight, this.gl.RGBA, this.gl.UNSIGNED_BYTE, yuv);\n yuv = yuv.subarray(0, yuvSize);\n assert(yuv[width * height] === 128);\n assert(yuv[yuv.length - 1] === 128);\n const init = {\n format: \"I420\",\n codedWidth: width,\n codedHeight: height,\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? undefined,\n transfer: [yuv.buffer]\n };\n return new VideoFrame(yuv, init);\n }\n close() {\n this.gl.getExtension(\"WEBGL_lose_context\")?.loseContext();\n this.gl = null;\n }\n}\n\nclass VideoSampleSource extends VideoSource {\n constructor(encodingConfig) {\n validateVideoEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new VideoEncoderWrapper(this, encodingConfig);\n }\n add(videoSample, encodeOptions) {\n if (!(videoSample instanceof VideoSample)) {\n throw new TypeError(\"videoSample must be a VideoSample.\");\n }\n return this._encoder.add(videoSample, false, encodeOptions);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass AudioSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!AUDIO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid audio codec '${codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass AudioEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastNumberOfChannels = null;\n this.lastSampleRate = null;\n this.isPcmEncoder = false;\n this.outputSampleSize = null;\n this.writeOutputValue = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.lastEndSampleIndex = null;\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(audioSample, shouldClose) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.lastNumberOfChannels !== null && this.lastSampleRate !== null) {\n if (audioSample.numberOfChannels !== this.lastNumberOfChannels || audioSample.sampleRate !== this.lastSampleRate) {\n throw new Error(`Audio parameters must remain constant. 
Expected ${this.lastNumberOfChannels} channels at` + ` ${this.lastSampleRate} Hz, got ${audioSample.numberOfChannels} channels at` + ` ${audioSample.sampleRate} Hz.`);\n }\n } else {\n this.lastNumberOfChannels = audioSample.numberOfChannels;\n this.lastSampleRate = audioSample.sampleRate;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(audioSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n {\n const startSampleIndex = Math.round(audioSample.timestamp * audioSample.sampleRate);\n const endSampleIndex = Math.round((audioSample.timestamp + audioSample.duration) * audioSample.sampleRate);\n if (this.lastEndSampleIndex === null) {\n this.lastEndSampleIndex = endSampleIndex;\n } else {\n const sampleDiff = startSampleIndex - this.lastEndSampleIndex;\n if (sampleDiff >= 64) {\n const fillSample = new AudioSample({\n data: new Float32Array(sampleDiff * audioSample.numberOfChannels),\n format: \"f32-planar\",\n sampleRate: audioSample.sampleRate,\n numberOfChannels: audioSample.numberOfChannels,\n numberOfFrames: sampleDiff,\n timestamp: this.lastEndSampleIndex / audioSample.sampleRate\n });\n await this.add(fillSample, true);\n }\n this.lastEndSampleIndex += audioSample.numberOfFrames;\n }\n }\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = audioSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n await this.muxer.mutex.currentPromise;\n } else if (this.isPcmEncoder) {\n await this.doPcmEncoding(audioSample, shouldClose);\n } else {\n assert(this.encoder);\n const audioData = audioSample.toAudioData();\n this.encoder.encode(audioData);\n audioData.close();\n if (shouldClose) {\n audioSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n await this.muxer.mutex.currentPromise;\n }\n } finally {\n if (shouldClose) {\n audioSample.close();\n }\n }\n }\n async doPcmEncoding(audioSample, shouldClose) {\n assert(this.outputSampleSize);\n assert(this.writeOutputValue);\n const { numberOfChannels, numberOfFrames, sampleRate, timestamp } = audioSample;\n const CHUNK_SIZE = 2048;\n const outputs = [];\n for (let frame = 0;frame < numberOfFrames; frame += CHUNK_SIZE) {\n const frameCount = Math.min(CHUNK_SIZE, audioSample.numberOfFrames - frame);\n const outputSize = frameCount * numberOfChannels * this.outputSampleSize;\n const outputBuffer = new ArrayBuffer(outputSize);\n const outputView = new DataView(outputBuffer);\n outputs.push({ frameCount, view: outputView });\n }\n const allocationSize = audioSample.allocationSize({ planeIndex: 0, format: \"f32-planar\" });\n const floats = new Float32Array(allocationSize / Float32Array.BYTES_PER_ELEMENT);\n for (let i = 0;i < numberOfChannels; i++) {\n audioSample.copyTo(floats, { planeIndex: i, format: \"f32-planar\" });\n for (let j = 0;j < outputs.length; j++) {\n const { frameCount, view: view2 } = outputs[j];\n for (let k = 0;k < frameCount; k++) {\n this.writeOutputValue(view2, (k * numberOfChannels + i) * this.outputSampleSize, floats[j * CHUNK_SIZE + k]);\n }\n }\n }\n if (shouldClose) {\n audioSample.close();\n }\n const meta = 
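/* PCM needs no encoder: every packet is a key frame, and the decoder config is just codec, channel count and sample rate */ 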
{\n decoderConfig: {\n codec: this.encodingConfig.codec,\n numberOfChannels,\n sampleRate\n }\n };\n for (let i = 0;i < outputs.length; i++) {\n const { frameCount, view: view2 } = outputs[i];\n const outputBuffer = view2.buffer;\n const startFrame = i * CHUNK_SIZE;\n const packet = new EncodedPacket(new Uint8Array(outputBuffer), \"key\", timestamp + startFrame / sampleRate, frameCount / sampleRate);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n await this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta);\n }\n }\n ensureEncoder(audioSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const { numberOfChannels, sampleRate } = audioSample;\n const encoderConfig = buildAudioEncoderConfig({\n numberOfChannels,\n sampleRate,\n ...this.encodingConfig\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customAudioEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else if (PCM_AUDIO_CODECS.includes(this.encodingConfig.codec)) {\n this.initPcmEncoder();\n } else {\n if (typeof AudioEncoder === \"undefined\") {\n throw new Error(\"AudioEncoder is not supported by this browser.\");\n }\n const support = await AudioEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.numberOfChannels} channels, ${encoderConfig.sampleRate} Hz) is not` + ` supported by this browser. 
Consider using another codec or changing your audio parameters.`);\n }\n this.encoder = new AudioEncoder({\n output: (chunk, meta) => {\n if (this.encodingConfig.codec === \"aac\" && meta?.decoderConfig) {\n let needsDescriptionOverwrite = false;\n if (!meta.decoderConfig.description || meta.decoderConfig.description.byteLength < 2) {\n needsDescriptionOverwrite = true;\n } else {\n const audioSpecificConfig = parseAacAudioSpecificConfig(toUint8Array(meta.decoderConfig.description));\n needsDescriptionOverwrite = audioSpecificConfig.objectType === 0;\n }\n if (needsDescriptionOverwrite) {\n const objectType = Number(last(encoderConfig.codec.split(\".\")));\n meta.decoderConfig.description = buildAacAudioSpecificConfig({\n objectType,\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate\n });\n }\n }\n const packet = EncodedPacket.fromEncodedChunk(chunk);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n initPcmEncoder() {\n this.isPcmEncoder = true;\n const codec = this.encodingConfig.codec;\n const { dataType, sampleSize, littleEndian } = parsePcmCodec(codec);\n this.outputSampleSize = sampleSize;\n switch (sampleSize) {\n case 1:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint8(byteOffset, clamp((value + 1) * 127.5, 0, 255));\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n view2.setInt8(byteOffset, clamp(Math.round(value * 128), -128, 127));\n };\n } else if (dataType === \"ulaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toUlaw(int16));\n };\n } else if (dataType === \"alaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toAlaw(int16));\n };\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 2:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint16(byteOffset, clamp((value + 1) * 32767.5, 0, 65535), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt16(byteOffset, clamp(Math.round(value * 32767), -32768, 32767), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 3:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => setUint24(view2, byteOffset, clamp((value + 1) * 8388607.5, 0, 16777215), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => setInt24(view2, byteOffset, clamp(Math.round(value * 8388607), -8388608, 8388607), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 4:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint32(byteOffset, clamp((value + 1) * 2147483647.5, 0, 4294967295), littleEndian);\n } else if (dataType === \"signed\") 
{\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt32(byteOffset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), littleEndian);\n } else if (dataType === \"float\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat32(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 8:\n {\n if (dataType === \"float\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat64(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n default:\n {\n assertNever(sampleSize);\n assert(false);\n }\n ;\n }\n }\n async flushAndClose(forceClose) {\n if (!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else if (this.isPcmEncoder) {\n return 0;\n } else {\n return this.encoder?.encodeQueueSize ?? 0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass AudioSampleSource extends AudioSource {\n constructor(encodingConfig) {\n validateAudioEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new AudioEncoderWrapper(this, encodingConfig);\n }\n add(audioSample) {\n if (!(audioSample instanceof AudioSample)) {\n throw new TypeError(\"audioSample must be an AudioSample.\");\n }\n return this._encoder.add(audioSample, false);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass SubtitleSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!SUBTITLE_CODECS.includes(codec)) {\n throw new TypeError(`Invalid subtitle codec '${codec}'. Must be one of: ${SUBTITLE_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar ALL_TRACK_TYPES = [\"video\", \"audio\", \"subtitle\"];\nvar validateBaseTrackMetadata = (metadata) => {\n if (!metadata || typeof metadata !== \"object\") {\n throw new TypeError(\"metadata must be an object.\");\n }\n if (metadata.languageCode !== undefined && !isIso639Dash2LanguageCode(metadata.languageCode)) {\n throw new TypeError(\"metadata.languageCode, when provided, must be a three-letter, ISO 639-2/T language code.\");\n }\n if (metadata.name !== undefined && typeof metadata.name !== \"string\") {\n throw new TypeError(\"metadata.name, when provided, must be a string.\");\n }\n if (metadata.disposition !== undefined) {\n validateTrackDisposition(metadata.disposition);\n }\n if (metadata.maximumPacketCount !== undefined && (!Number.isInteger(metadata.maximumPacketCount) || metadata.maximumPacketCount < 0)) {\n throw new TypeError(\"metadata.maximumPacketCount, when provided, must be a non-negative integer.\");\n }\n};\n\nclass Output {\n constructor(options) {\n this.state = \"pending\";\n this._tracks = [];\n this._startPromise = null;\n this._cancelPromise = null;\n this._finalizePromise = null;\n this._mutex = new AsyncMutex;\n this._metadataTags = {};\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!(options.format instanceof OutputFormat)) {\n throw new TypeError(\"options.format must be an OutputFormat.\");\n }\n if (!(options.target instanceof Target)) {\n throw new TypeError(\"options.target must be a Target.\");\n }\n if (options.target._output) {\n throw new Error(\"Target is already used for another output.\");\n }\n options.target._output = this;\n this.format = options.format;\n this.target = options.target;\n this._writer = options.target._createWriter();\n this._muxer = options.format._createMuxer(this);\n }\n addVideoTrack(source, metadata = {}) {\n if (!(source instanceof VideoSource)) {\n throw new TypeError(\"source must be a VideoSource.\");\n }\n validateBaseTrackMetadata(metadata);\n if (metadata.rotation !== undefined && ![0, 90, 180, 270].includes(metadata.rotation)) {\n throw new TypeError(`Invalid video rotation: ${metadata.rotation}. Has to be 0, 90, 180 or 270.`);\n }\n if (!this.format.supportsVideoRotationMetadata && metadata.rotation) {\n throw new Error(`${this.format._name} does not support video rotation metadata.`);\n }\n if (metadata.frameRate !== undefined && (!Number.isFinite(metadata.frameRate) || metadata.frameRate <= 0)) {\n throw new TypeError(`Invalid video frame rate: ${metadata.frameRate}. 
Must be a positive number.`);\n }\n this._addTrack(\"video\", source, metadata);\n }\n addAudioTrack(source, metadata = {}) {\n if (!(source instanceof AudioSource)) {\n throw new TypeError(\"source must be an AudioSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"audio\", source, metadata);\n }\n addSubtitleTrack(source, metadata = {}) {\n if (!(source instanceof SubtitleSource)) {\n throw new TypeError(\"source must be a SubtitleSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"subtitle\", source, metadata);\n }\n setMetadataTags(tags) {\n validateMetadataTags(tags);\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot set metadata tags after output has been started or canceled.\");\n }\n this._metadataTags = tags;\n }\n _addTrack(type, source, metadata) {\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot add track after output has been started or canceled.\");\n }\n if (source._connectedTrack) {\n throw new Error(\"Source is already used for a track.\");\n }\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n const presentTracksOfThisType = this._tracks.reduce((count, track2) => count + (track2.type === type ? 1 : 0), 0);\n const maxCount = supportedTrackCounts[type].max;\n if (presentTracksOfThisType === maxCount) {\n throw new Error(maxCount === 0 ? `${this.format._name} does not support ${type} tracks.` : `${this.format._name} does not support more than ${maxCount} ${type} track` + `${maxCount === 1 ? \"\" : \"s\"}.`);\n }\n const maxTotalCount = supportedTrackCounts.total.max;\n if (this._tracks.length === maxTotalCount) {\n throw new Error(`${this.format._name} does not support more than ${maxTotalCount} tracks` + `${maxTotalCount === 1 ? \"\" : \"s\"} in total.`);\n }\n const track = {\n id: this._tracks.length + 1,\n output: this,\n type,\n source,\n metadata\n };\n if (track.type === \"video\") {\n const supportedVideoCodecs = this.format.getSupportedVideoCodecs();\n if (supportedVideoCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support video tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedVideoCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` video codecs are: ${supportedVideoCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"audio\") {\n const supportedAudioCodecs = this.format.getSupportedAudioCodecs();\n if (supportedAudioCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support audio tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedAudioCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. 
Supported` + ` audio codecs are: ${supportedAudioCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"subtitle\") {\n const supportedSubtitleCodecs = this.format.getSupportedSubtitleCodecs();\n if (supportedSubtitleCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support subtitle tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedSubtitleCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` subtitle codecs are: ${supportedSubtitleCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n }\n this._tracks.push(track);\n source._connectedTrack = track;\n }\n async start() {\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n for (const trackType of ALL_TRACK_TYPES) {\n const presentTracksOfThisType = this._tracks.reduce((count, track) => count + (track.type === trackType ? 1 : 0), 0);\n const minCount = supportedTrackCounts[trackType].min;\n if (presentTracksOfThisType < minCount) {\n throw new Error(minCount === supportedTrackCounts[trackType].max ? `${this.format._name} requires exactly ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.`);\n }\n }\n const totalMinCount = supportedTrackCounts.total.min;\n if (this._tracks.length < totalMinCount) {\n throw new Error(totalMinCount === supportedTrackCounts.total.max ? `${this.format._name} requires exactly ${totalMinCount} track` + `${totalMinCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${totalMinCount} track` + `${totalMinCount === 1 ? 
\"\" : \"s\"}.`);\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._startPromise) {\n console.warn(\"Output has already been started.\");\n return this._startPromise;\n }\n return this._startPromise = (async () => {\n this.state = \"started\";\n this._writer.start();\n const release = await this._mutex.acquire();\n await this._muxer.start();\n const promises = this._tracks.map((track) => track.source._start());\n await Promise.all(promises);\n release();\n })();\n }\n getMimeType() {\n return this._muxer.getMimeType();\n }\n async cancel() {\n if (this._cancelPromise) {\n console.warn(\"Output has already been canceled.\");\n return this._cancelPromise;\n } else if (this.state === \"finalizing\" || this.state === \"finalized\") {\n console.warn(\"Output has already been finalized.\");\n return;\n }\n return this._cancelPromise = (async () => {\n this.state = \"canceled\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(true));\n await Promise.all(promises);\n await this._writer.close();\n release();\n })();\n }\n async finalize() {\n if (this.state === \"pending\") {\n throw new Error(\"Cannot finalize before starting.\");\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Cannot finalize after canceling.\");\n }\n if (this._finalizePromise) {\n console.warn(\"Output has already been finalized.\");\n return this._finalizePromise;\n }\n return this._finalizePromise = (async () => {\n this.state = \"finalizing\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(false));\n await Promise.all(promises);\n await this._muxer.finalize();\n await this._writer.flush();\n await this._writer.finalize();\n this.state = \"finalized\";\n release();\n })();\n }\n}\n// ../../node_modules/mediabunny/dist/modules/src/index.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\n// src/core/utils/error-handler.ts\nfunction extractErrorMessage(error) {\n if (error instanceof Error) {\n return error.message;\n }\n return String(error);\n}\n\n// src/core/utils/logger.ts\nfunction isDebugEnabled() {\n const globalAny = globalThis;\n if (globalAny.__VIDTREO_DEBUG__ === true || globalAny.__VIDTREO_DEV__ === true) {\n return true;\n }\n const envNode = typeof process !== \"undefined\" && process?.env ? 
\"development\" : undefined;\n if (envNode === \"development\" || envNode === \"test\") {\n return true;\n }\n if (typeof localStorage !== \"undefined\") {\n const flag = localStorage.getItem(\"VIDTREO_DEBUG\");\n if (flag === \"true\") {\n return true;\n }\n }\n return false;\n}\nvar isDevelopment = isDebugEnabled();\nvar ANSI_COLORS = {\n reset: \"\\x1B[0m\",\n bright: \"\\x1B[1m\",\n dim: \"\\x1B[2m\",\n red: \"\\x1B[31m\",\n green: \"\\x1B[32m\",\n yellow: \"\\x1B[33m\",\n blue: \"\\x1B[34m\",\n magenta: \"\\x1B[35m\",\n cyan: \"\\x1B[36m\",\n white: \"\\x1B[37m\",\n gray: \"\\x1B[90m\"\n};\nfunction formatMessage(level, message, options) {\n if (!isDevelopment) {\n return \"\";\n }\n const prefix = options?.prefix || `[${level.toUpperCase()}]`;\n const color = options?.color || getDefaultColor(level);\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n return `${colorCode}${prefix}${resetCode} ${message}`;\n}\nfunction getDefaultColor(level) {\n switch (level) {\n case \"error\":\n return \"red\";\n case \"warn\":\n return \"yellow\";\n case \"info\":\n return \"cyan\";\n case \"debug\":\n return \"gray\";\n default:\n return \"white\";\n }\n}\nfunction log(level, message, ...args) {\n if (!isDevelopment) {\n return;\n }\n const formatted = formatMessage(level, message);\n console[level](formatted, ...args);\n}\nvar logger = {\n log: (message, ...args) => {\n log(\"log\", message, ...args);\n },\n info: (message, ...args) => {\n log(\"info\", message, ...args);\n },\n warn: (message, ...args) => {\n log(\"warn\", message, ...args);\n },\n error: (message, ...args) => {\n log(\"error\", message, ...args);\n },\n debug: (message, ...args) => {\n log(\"debug\", message, ...args);\n },\n group: (label, color = \"cyan\") => {\n if (!isDevelopment) {\n return;\n }\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n console.group(`${colorCode}${label}${resetCode}`);\n },\n groupEnd: () => {\n if (!isDevelopment) {\n return;\n }\n console.groupEnd();\n }\n};\n\n// src/core/utils/validation.ts\nfunction requireNonNull(value, message) {\n if (value === null || value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireDefined(value, message) {\n if (value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireInitialized(value, componentName) {\n if (value === null || value === undefined) {\n throw new Error(`${componentName} is not initialized`);\n }\n return value;\n}\n\n// src/core/processor/worker/recorder-worker.ts\nvar CHUNK_SIZE = 16 * 1024 * 1024;\nvar OVERLAY_BACKGROUND_OPACITY = 0.6;\nvar OVERLAY_PADDING = 16;\nvar OVERLAY_TEXT_COLOR = \"#ffffff\";\nvar OVERLAY_FONT_SIZE = 16;\nvar OVERLAY_FONT_FAMILY = \"Arial, sans-serif\";\nvar OVERLAY_MIN_WIDTH = 200;\nvar OVERLAY_MIN_HEIGHT = 50;\n\nclass RecorderWorker {\n output = null;\n videoSource = null;\n audioSource = null;\n videoProcessor = null;\n audioProcessor = null;\n isPaused = false;\n isMuted = false;\n frameRate = 30;\n lastVideoTimestamp = 0;\n lastAudioTimestamp = 0;\n baseVideoTimestamp = null;\n frameCount = 0;\n config = null;\n videoProcessingActive = false;\n audioProcessingActive = false;\n isStopping = false;\n isFinalized = false;\n bufferUpdateInterval = null;\n pausedDuration = 0;\n pauseStartedAt = null;\n overlayConfig = null;\n overlayCanvas = null;\n compositionCanvas = null;\n compositionCtx = null;\n hiddenIntervals = [];\n currentHiddenIntervalStart = null;\n recordingStartTime = 0;\n 
pendingVisibilityUpdates = [];\n isScreenCapture = false;\n driftOffset = 0;\n constructor() {\n self.addEventListener(\"message\", this.handleMessage);\n }\n formatFileSize(bytes2) {\n if (bytes2 === 0) {\n return \"0 Bytes\";\n }\n const units = [\"Bytes\", \"KB\", \"MB\", \"GB\"];\n const base = 1024;\n const index = Math.floor(Math.log(bytes2) / Math.log(base));\n const size = Math.round(bytes2 / base ** index * 100) / 100;\n return `${size} ${units[index]}`;\n }\n shouldIgnoreMessage() {\n return this.isStopping || this.isFinalized;\n }\n handleAsyncOperation(operation, context) {\n operation.catch((error) => {\n logger.error(`[RecorderWorker] Error in ${context}:`, error);\n this.sendError(error);\n });\n }\n handleMessage = (event) => {\n const message = event.data;\n logger.debug(\"[RecorderWorker] Received message:\", { type: message.type });\n if (message.type === \"start\") {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] start ignored (stopping/finalized)\");\n return;\n }\n this.handleAsyncOperation(this.handleStart(message.videoStream, message.audioStream, message.config, message.overlayConfig), \"handleStart\");\n return;\n }\n if (message.type === \"pause\") {\n this.handlePause();\n return;\n }\n if (message.type === \"resume\") {\n this.handleResume();\n return;\n }\n if (message.type === \"stop\") {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] stop ignored (stopping/finalized)\");\n return;\n }\n this.handleAsyncOperation(this.handleStop(), \"handleStop\");\n return;\n }\n if (message.type === \"toggleMute\") {\n this.handleToggleMute();\n return;\n }\n if (message.type === \"switchSource\") {\n this.handleAsyncOperation(this.handleSwitchSource(message.videoStream), \"handleSwitchSource\");\n return;\n }\n if (message.type === \"updateFps\") {\n this.handleUpdateFps(message.fps);\n return;\n }\n if (message.type === \"updateVisibility\") {\n this.handleUpdateVisibility(message.isHidden, message.timestamp);\n return;\n }\n if (message.type === \"updateSourceType\") {\n this.handleUpdateSourceType(message.isScreenCapture);\n return;\n }\n this.sendError(new Error(`Unknown message type: ${message.type}`));\n };\n async handleStart(videoStream, audioStream, config, overlayConfig) {\n requireDefined(config, \"Transcode config is required\");\n if (config.width <= 0 || config.height <= 0) {\n throw new Error(\"Video dimensions must be greater than zero\");\n }\n if (config.fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (config.bitrate <= 0) {\n throw new Error(\"Bitrate must be greater than zero\");\n }\n if (config.keyFrameInterval <= 0) {\n throw new Error(\"Key frame interval must be greater than zero\");\n }\n logger.debug(\"[RecorderWorker] handleStart called\", {\n hasVideoStream: !!videoStream,\n hasAudioStream: !!audioStream,\n config: {\n width: config.width,\n height: config.height,\n fps: config.fps,\n bitrate: config.bitrate\n },\n hasOverlayConfig: !!overlayConfig,\n overlayConfig\n });\n this.isStopping = false;\n this.isFinalized = false;\n if (this.output) {\n logger.debug(\"[RecorderWorker] Cleaning up existing output\");\n await this.cleanup();\n }\n this.config = config;\n this.frameRate = config.fps;\n this.isPaused = false;\n this.isMuted = false;\n this.lastVideoTimestamp = 0;\n this.lastAudioTimestamp = 0;\n this.baseVideoTimestamp = null;\n this.frameCount = 0;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.overlayConfig = overlayConfig ? 
{ enabled: overlayConfig.enabled, text: overlayConfig.text } : null;\n this.overlayCanvas = null;\n this.hiddenIntervals = [];\n this.currentHiddenIntervalStart = null;\n this.recordingStartTime = overlayConfig?.recordingStartTime !== undefined ? overlayConfig.recordingStartTime / 1000 : performance.now() / 1000;\n this.pendingVisibilityUpdates = [];\n const logData = {\n hasOverlayConfig: !!this.overlayConfig,\n overlayEnabled: this.overlayConfig?.enabled,\n overlayText: this.overlayConfig?.text,\n recordingStartTime: this.recordingStartTime\n };\n logger.debug(\"[RecorderWorker] Overlay config initialized\", logData);\n const writable = new WritableStream({\n write: (chunk) => {\n this.sendChunk(chunk.data, chunk.position);\n }\n });\n const format = config.format || \"mp4\";\n if (format !== \"mp4\") {\n throw new Error(`Format ${format} is not yet supported in worker. Only MP4 is currently supported.`);\n }\n this.output = new Output({\n format: new Mp4OutputFormat({\n fastStart: \"fragmented\"\n }),\n target: new StreamTarget(writable, {\n chunked: true,\n chunkSize: CHUNK_SIZE\n })\n });\n this.videoSource = new VideoSampleSource({\n codec: config.codec,\n bitrate: config.bitrate,\n sizeChangeBehavior: \"passThrough\"\n });\n this.output.addVideoTrack(this.videoSource);\n if (videoStream) {\n this.setupVideoProcessing(videoStream);\n }\n if (audioStream && config.audioBitrate && config.audioCodec) {\n if (config.audioBitrate <= 0) {\n throw new Error(\"Audio bitrate must be greater than zero\");\n }\n this.audioSource = new AudioSampleSource({\n codec: config.audioCodec,\n bitrate: config.audioBitrate\n });\n this.output.addAudioTrack(this.audioSource);\n this.setupAudioProcessing(audioStream);\n }\n await this.output.start();\n this.startBufferUpdates();\n this.sendReady();\n this.sendStateChange(\"recording\");\n }\n startBufferUpdates() {\n if (this.bufferUpdateInterval !== null) {\n return;\n }\n this.bufferUpdateInterval = self.setInterval(() => {\n if (this.output) {\n const size = this.getBufferSize();\n const formatted = this.formatFileSize(size);\n this.sendBufferUpdate(size, formatted);\n }\n }, 1000);\n }\n stopBufferUpdates() {\n if (this.bufferUpdateInterval !== null) {\n self.clearInterval(this.bufferUpdateInterval);\n this.bufferUpdateInterval = null;\n }\n }\n getBufferSize() {\n return this.totalSize;\n }\n totalSize = 0;\n setupVideoProcessing(videoStream) {\n if (!this.videoSource) {\n return;\n }\n this.videoProcessor = videoStream.getReader();\n this.videoProcessingActive = true;\n this.processVideoFrames();\n }\n async handlePausedVideoFrame() {\n if (!this.videoProcessor) {\n return false;\n }\n const pausedResult = await this.videoProcessor.read();\n if (pausedResult.done) {\n return false;\n }\n if (pausedResult.value) {\n pausedResult.value.close();\n }\n return true;\n }\n calculateVideoFrameTimestamp(videoFrame) {\n requireDefined(this.frameRate, \"Frame rate must be set\");\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n const rawTs = typeof videoFrame.timestamp === \"number\" && videoFrame.timestamp !== null ? 
videoFrame.timestamp / 1e6 : performance.now() / 1000;\n if (this.baseVideoTimestamp === null) {\n this.baseVideoTimestamp = rawTs;\n const logData = {\n baseVideoTimestamp: this.baseVideoTimestamp,\n recordingStartTime: this.recordingStartTime,\n difference: this.baseVideoTimestamp - this.recordingStartTime,\n pendingUpdates: this.pendingVisibilityUpdates.length\n };\n logger.debug(\"[RecorderWorker] baseVideoTimestamp set\", logData);\n for (const update of this.pendingVisibilityUpdates) {\n this.processVisibilityUpdate(update.isHidden, update.timestamp);\n }\n this.pendingVisibilityUpdates = [];\n }\n requireNonNull(this.baseVideoTimestamp, \"Base video timestamp must be set\");\n if (this.frameCount === 0 && this.lastVideoTimestamp > 0) {\n const originalBase = this.baseVideoTimestamp;\n const offset = rawTs - originalBase;\n this.baseVideoTimestamp = rawTs - this.lastVideoTimestamp;\n const frameTimestamp2 = this.lastVideoTimestamp;\n logger.debug(\"[RecorderWorker] First frame after source switch\", {\n rawTs,\n originalBase,\n offset,\n adjustedBaseVideoTimestamp: this.baseVideoTimestamp,\n continuationTimestamp: this.lastVideoTimestamp,\n frameTimestamp: frameTimestamp2,\n isScreenCapture: this.isScreenCapture\n });\n return frameTimestamp2;\n }\n const normalizedTs = rawTs - this.baseVideoTimestamp - this.pausedDuration;\n const prevTs = this.lastVideoTimestamp > 0 ? this.lastVideoTimestamp : 0;\n const frameTimestamp = normalizedTs >= prevTs ? normalizedTs : prevTs + 1 / this.frameRate;\n if (frameTimestamp < 0) {\n logger.warn(\"[RecorderWorker] Negative frame timestamp detected, clamping to zero\", { frameTimestamp, normalizedTs, prevTs });\n return 0;\n }\n if (this.lastVideoTimestamp === 0) {\n this.lastVideoTimestamp = frameTimestamp;\n }\n logger.debug(\"[RecorderWorker] Frame timestamp calculation\", {\n rawTs,\n baseVideoTimestamp: this.baseVideoTimestamp,\n normalizedTs,\n prevTs,\n frameTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n isScreenCapture: this.isScreenCapture,\n frameCount: this.frameCount\n });\n return frameTimestamp;\n }\n createOverlayCanvas(text) {\n requireDefined(text, \"Overlay text is required\");\n const canvas = new OffscreenCanvas(1, 1);\n const ctx = requireNonNull(canvas.getContext(\"2d\"), \"Failed to get OffscreenCanvas context\");\n ctx.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n const textMetrics = ctx.measureText(text);\n const textWidth = textMetrics.width;\n const textHeight = OVERLAY_FONT_SIZE;\n const overlayWidth = Math.max(OVERLAY_MIN_WIDTH, textWidth + OVERLAY_PADDING * 2);\n const overlayHeight = Math.max(OVERLAY_MIN_HEIGHT, textHeight + OVERLAY_PADDING * 2);\n canvas.width = overlayWidth;\n canvas.height = overlayHeight;\n const r = 20;\n const g = 20;\n const b = 20;\n const borderRadius = 50;\n ctx.fillStyle = `rgba(${r}, ${g}, ${b}, ${OVERLAY_BACKGROUND_OPACITY})`;\n ctx.beginPath();\n ctx.roundRect(0, 0, overlayWidth, overlayHeight, borderRadius);\n ctx.fill();\n ctx.fillStyle = OVERLAY_TEXT_COLOR;\n ctx.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n ctx.textBaseline = \"middle\";\n ctx.textAlign = \"center\";\n const textX = overlayWidth / 2;\n const textY = overlayHeight / 2;\n ctx.fillText(text, textX, textY);\n return canvas;\n }\n getOverlayPosition(overlayWidth, videoWidth) {\n const padding = OVERLAY_PADDING;\n return {\n x: videoWidth - overlayWidth - padding,\n y: padding\n };\n }\n shouldApplyOverlay(timestamp) {\n if (!this.overlayConfig?.enabled) {\n return false;\n }\n if 
(this.isScreenCapture) {\n return false;\n }\n const completedIntervalMatch = this.hiddenIntervals.some((interval) => timestamp >= interval.start && timestamp <= interval.end);\n const currentIntervalMatch = this.currentHiddenIntervalStart !== null && timestamp >= this.currentHiddenIntervalStart;\n const shouldApply = completedIntervalMatch || currentIntervalMatch;\n if (this.frameCount % 90 === 0) {\n logger.debug(\"[RecorderWorker] Overlay check\", {\n timestamp,\n shouldApply,\n frameCount: this.frameCount,\n intervalsCount: this.hiddenIntervals.length\n });\n }\n return shouldApply;\n }\n handleUpdateVisibility(isHidden, timestamp) {\n if (this.baseVideoTimestamp === null) {\n this.pendingVisibilityUpdates.push({ isHidden, timestamp });\n return;\n }\n this.processVisibilityUpdate(isHidden, timestamp);\n }\n processVisibilityUpdate(isHidden, timestamp) {\n const timestampSeconds = timestamp / 1000;\n const normalizedTimestamp = timestampSeconds - this.recordingStartTime - this.pausedDuration;\n if (isHidden) {\n if (this.currentHiddenIntervalStart === null) {\n this.currentHiddenIntervalStart = Math.max(0, normalizedTimestamp);\n logger.debug(\"[RecorderWorker] Started hidden interval\", {\n start: this.currentHiddenIntervalStart\n });\n }\n } else if (this.currentHiddenIntervalStart !== null) {\n const endTimestamp = Math.max(0, normalizedTimestamp);\n if (endTimestamp > this.currentHiddenIntervalStart) {\n const interval = {\n start: this.currentHiddenIntervalStart,\n end: endTimestamp\n };\n this.hiddenIntervals.push(interval);\n logger.debug(\"[RecorderWorker] Completed hidden interval\", {\n interval,\n duration: endTimestamp - this.currentHiddenIntervalStart,\n totalIntervals: this.hiddenIntervals.length\n });\n } else {\n logger.warn(\"[RecorderWorker] Invalid interval (end <= start), discarding\");\n }\n this.currentHiddenIntervalStart = null;\n }\n }\n async processVideoFrame(videoFrame) {\n const videoSource = requireInitialized(this.videoSource, \"Video source\");\n const config = requireInitialized(this.config, \"Transcode config\");\n const frameTimestamp = this.calculateVideoFrameTimestamp(videoFrame);\n requireDefined(this.frameRate, \"Frame rate must be set\");\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n const frameDuration = 1 / this.frameRate;\n let frameToProcess = videoFrame;\n let imageBitmap = null;\n if (this.shouldApplyOverlay(frameTimestamp)) {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (width <= 0 || height <= 0) {\n logger.warn(\"[RecorderWorker] Invalid video frame dimensions, skipping overlay\", { width, height });\n } else if (this.overlayConfig) {\n if (!this.overlayCanvas) {\n this.overlayCanvas = this.createOverlayCanvas(this.overlayConfig.text);\n logger.debug(\"[RecorderWorker] Overlay canvas created\", {\n overlayWidth: this.overlayCanvas.width,\n overlayHeight: this.overlayCanvas.height\n });\n }\n if (this.overlayCanvas) {\n if (!(this.compositionCanvas && this.compositionCtx) || this.compositionCanvas.width !== width || this.compositionCanvas.height !== height) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionCtx = requireNonNull(this.compositionCanvas.getContext(\"2d\"), \"Failed to get composition canvas context\");\n }\n requireNonNull(this.compositionCtx, \"Composition context must be available\");\n this.compositionCtx.clearRect(0, 0, width, height);\n this.compositionCtx.drawImage(videoFrame, 0, 0, width, 
height);\n const position = this.getOverlayPosition(this.overlayCanvas.width, width);\n this.compositionCtx.drawImage(this.overlayCanvas, position.x, position.y);\n imageBitmap = this.compositionCanvas.transferToImageBitmap();\n const frameInit = {};\n if (typeof videoFrame.timestamp === \"number\" && videoFrame.timestamp !== null) {\n frameInit.timestamp = videoFrame.timestamp;\n }\n if (typeof videoFrame.duration === \"number\" && videoFrame.duration !== null) {\n frameInit.duration = videoFrame.duration;\n }\n frameToProcess = new VideoFrame(imageBitmap, frameInit);\n }\n }\n }\n const keyFrameInterval = config.keyFrameInterval > 0 ? config.keyFrameInterval : 5;\n const isKeyFrame = this.frameCount % keyFrameInterval === 0;\n const maxLead = 0.05;\n const maxLag = 0.1;\n const targetAudio = this.lastAudioTimestamp;\n let adjustedTimestamp = frameTimestamp + this.driftOffset;\n if (adjustedTimestamp - targetAudio > maxLead) {\n adjustedTimestamp = targetAudio + maxLead;\n } else if (targetAudio - adjustedTimestamp > maxLag) {\n adjustedTimestamp = targetAudio - maxLag;\n }\n const monotonicTimestamp = this.lastVideoTimestamp + frameDuration;\n const finalTimestamp = adjustedTimestamp >= monotonicTimestamp ? adjustedTimestamp : monotonicTimestamp;\n this.driftOffset *= 0.5;\n const sample = new VideoSample(frameToProcess, {\n timestamp: finalTimestamp,\n duration: frameDuration\n });\n const addError = await videoSource.add(sample, isKeyFrame ? { keyFrame: true } : undefined).then(() => null).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add video frame: ${errorMessage}`));\n return error;\n });\n sample.close();\n if (!addError) {\n this.frameCount += 1;\n this.lastVideoTimestamp = finalTimestamp;\n if (this.frameCount % 90 === 0 && this.audioProcessingActive) {\n const avDrift = this.lastAudioTimestamp - this.lastVideoTimestamp;\n logger.debug(\"[RecorderWorker] AV drift metrics\", {\n frameCount: this.frameCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n avDrift,\n isScreenCapture: this.isScreenCapture\n });\n }\n }\n if (imageBitmap) {\n imageBitmap.close();\n imageBitmap = null;\n }\n if (frameToProcess !== videoFrame) {\n frameToProcess.close();\n }\n videoFrame.close();\n }\n async processVideoFrames() {\n if (!(this.videoProcessor && this.videoSource)) {\n return;\n }\n while (this.videoProcessingActive && !this.isStopping) {\n if (this.isPaused) {\n const shouldContinue = await this.handlePausedVideoFrame();\n if (!shouldContinue) {\n break;\n }\n continue;\n }\n const result = await this.videoProcessor.read();\n if (result.done) {\n break;\n }\n const videoFrame = result.value;\n if (!videoFrame) {\n continue;\n }\n await this.processVideoFrame(videoFrame).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n logger.error(\"[RecorderWorker] Error processing video frame\", errorMessage);\n videoFrame.close();\n });\n }\n }\n setupAudioProcessing(audioStream) {\n if (!this.audioSource) {\n logger.warn(\"[RecorderWorker] setupAudioProcessing called but audioSource is null\");\n return;\n }\n logger.debug(\"[RecorderWorker] setupAudioProcessing\", {\n hasAudioSource: !!this.audioSource,\n hasAudioStream: !!audioStream,\n audioProcessingActive: this.audioProcessingActive,\n lastAudioTimestamp: this.lastAudioTimestamp\n });\n this.audioProcessor = audioStream.getReader();\n this.audioProcessingActive = true;\n logger.debug(\"[RecorderWorker] Audio 
processing started\", {\n hasAudioProcessor: !!this.audioProcessor,\n audioProcessingActive: this.audioProcessingActive\n });\n this.processAudioData();\n }\n handlePausedAudioData(audioData) {\n audioData.close();\n }\n createAudioBuffer(audioData) {\n const numberOfFrames = audioData.numberOfFrames;\n if (numberOfFrames <= 0) {\n throw new Error(\"Number of frames must be greater than zero\");\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(\"Number of channels must be greater than zero\");\n }\n const audioBuffer = new Float32Array(numberOfFrames * numberOfChannels);\n audioData.copyTo(audioBuffer, { planeIndex: 0 });\n return audioBuffer;\n }\n createAudioSample(audioData, audioBuffer) {\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n throw new Error(\"Sample rate must be greater than zero\");\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(\"Number of channels must be greater than zero\");\n }\n const shouldWriteSilence = this.isMuted;\n return new AudioSample({\n data: shouldWriteSilence ? new Float32Array(audioBuffer.length) : audioBuffer,\n format: \"f32-planar\",\n numberOfChannels,\n sampleRate,\n timestamp: this.lastAudioTimestamp\n });\n }\n async processAudioSample(audioData, audioSample) {\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n throw new Error(\"Sample rate must be greater than zero\");\n }\n const numberOfFrames = audioData.numberOfFrames;\n const duration = numberOfFrames / sampleRate;\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n logger.debug(\"[RecorderWorker] Audio sample processed\", {\n lastAudioTimestamp: this.lastAudioTimestamp,\n duration,\n newLastAudioTimestamp: this.lastAudioTimestamp + duration,\n sampleRate: audioData.sampleRate,\n numberOfFrames: audioData.numberOfFrames\n });\n this.lastAudioTimestamp += duration;\n audioSample.close();\n audioData.close();\n }\n async processAudioData() {\n if (!(this.audioProcessor && this.audioSource)) {\n logger.warn(\"[RecorderWorker] processAudioData called but processor or source is null\", {\n hasAudioProcessor: !!this.audioProcessor,\n hasAudioSource: !!this.audioSource\n });\n return;\n }\n logger.debug(\"[RecorderWorker] processAudioData loop started\", {\n hasAudioProcessor: !!this.audioProcessor,\n hasAudioSource: !!this.audioSource,\n audioProcessingActive: this.audioProcessingActive,\n isPaused: this.isPaused,\n isMuted: this.isMuted,\n lastAudioTimestamp: this.lastAudioTimestamp\n });\n let audioSampleCount = 0;\n while (this.audioProcessingActive) {\n const result = await this.audioProcessor.read();\n if (result.done) {\n logger.debug(\"[RecorderWorker] Audio processor stream ended\", {\n audioSampleCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n audioProcessingActive: this.audioProcessingActive\n });\n this.audioProcessingActive = false;\n break;\n }\n const audioData = result.value;\n if (!audioData) {\n logger.warn(\"[RecorderWorker] Received null audioData from processor\");\n continue;\n }\n audioSampleCount += 1;\n if (audioSampleCount % 100 === 0) {\n logger.debug(\"[RecorderWorker] Processing audio sample\", {\n sampleCount: audioSampleCount,\n numberOfFrames: audioData.numberOfFrames,\n sampleRate: 
audioData.sampleRate,\n numberOfChannels: audioData.numberOfChannels,\n lastAudioTimestamp: this.lastAudioTimestamp,\n isPaused: this.isPaused,\n isMuted: this.isMuted\n });\n }\n if (this.isPaused) {\n this.handlePausedAudioData(audioData);\n continue;\n }\n const audioBuffer = this.createAudioBuffer(audioData);\n const audioSample = this.createAudioSample(audioData, audioBuffer);\n await this.processAudioSample(audioData, audioSample);\n }\n logger.debug(\"[RecorderWorker] processAudioData loop ended\", {\n audioSampleCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n audioProcessingActive: this.audioProcessingActive\n });\n }\n handlePause() {\n if (this.isPaused) {\n return;\n }\n this.pauseStartedAt = performance.now() / 1000;\n this.isPaused = true;\n this.sendStateChange(\"paused\");\n }\n handleResume() {\n if (!this.isPaused) {\n return;\n }\n const now = performance.now() / 1000;\n if (this.pauseStartedAt !== null) {\n this.pausedDuration += now - this.pauseStartedAt;\n }\n this.pauseStartedAt = null;\n this.isPaused = false;\n this.sendStateChange(\"recording\");\n }\n async handleStop() {\n if (this.isStopping || this.isFinalized) {\n logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return;\n }\n this.isStopping = true;\n this.isFinalized = true;\n this.videoProcessingActive = false;\n this.audioProcessingActive = false;\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n if (this.output) {\n await this.output.finalize().catch((error) => {\n logger.warn(\"[RecorderWorker] finalize failed (ignored, already finalized?)\", error);\n });\n }\n await this.cleanup();\n this.sendStateChange(\"stopped\");\n this.isStopping = false;\n }\n handleToggleMute() {\n this.isMuted = !this.isMuted;\n }\n handleUpdateFps(fps) {\n if (fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n logger.debug(\"[RecorderWorker] Updating FPS\", {\n fps,\n previousFps: this.frameRate\n });\n this.frameRate = fps;\n if (this.config) {\n this.config.fps = fps;\n }\n }\n handleUpdateSourceType(isScreenCapture) {\n logger.debug(\"[RecorderWorker] Updating source type\", {\n isScreenCapture,\n previousIsScreenCapture: this.isScreenCapture\n });\n this.isScreenCapture = isScreenCapture;\n }\n async handleSwitchSource(videoStream) {\n requireDefined(videoStream, \"Video stream is required\");\n requireDefined(this.frameRate, \"Frame rate must be set\");\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (this.videoProcessor) {\n this.videoProcessingActive = false;\n await this.videoProcessor.cancel();\n let drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n while (!drainResult.done) {\n drainResult.value?.close();\n drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n }\n this.videoProcessor = null;\n }\n requireNonNull(this.baseVideoTimestamp, \"Base video timestamp must be set for source switch\");\n const minFrameDuration = 1 / this.frameRate;\n const rawDrift = this.lastAudioTimestamp - this.lastVideoTimestamp;\n const maxDriftCorrection = 0.1;\n this.driftOffset = Math.max(-maxDriftCorrection, Math.min(maxDriftCorrection, rawDrift));\n const continuationTimestamp = Math.max(this.lastAudioTimestamp, this.lastVideoTimestamp) + minFrameDuration;\n const previousVideoTimestamp = 
this.lastVideoTimestamp;\n this.lastVideoTimestamp = continuationTimestamp;\n this.frameCount = 0;\n logger.debug(\"[RecorderWorker] handleSwitchSource - preserving baseVideoTimestamp\", {\n continuationTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n frameRate: this.frameRate,\n isScreenCapture: this.isScreenCapture,\n baseVideoTimestamp: this.baseVideoTimestamp,\n recordingStartTime: this.recordingStartTime,\n lastAudioTimestamp: this.lastAudioTimestamp,\n previousVideoTimestamp,\n minFrameDuration,\n rawDrift,\n driftOffset: this.driftOffset\n });\n this.setupVideoProcessing(videoStream);\n }\n async cleanup() {\n this.stopBufferUpdates();\n this.videoProcessingActive = false;\n this.audioProcessingActive = false;\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n if (this.videoSource) {\n if (!this.isFinalized) {\n this.videoSource.close();\n }\n this.videoSource = null;\n }\n if (this.audioSource) {\n if (!this.isFinalized) {\n this.audioSource.close();\n }\n this.audioSource = null;\n }\n if (this.output) {\n if (!this.isFinalized) {\n await this.output.cancel().catch((error) => {\n logger.warn(\"[RecorderWorker] cancel failed (ignored, possibly finalized)\", error);\n });\n this.isFinalized = true;\n }\n this.output = null;\n }\n this.lastVideoTimestamp = 0;\n this.lastAudioTimestamp = 0;\n this.baseVideoTimestamp = null;\n this.frameCount = 0;\n this.totalSize = 0;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.overlayCanvas = null;\n this.overlayConfig = null;\n this.hiddenIntervals = [];\n this.currentHiddenIntervalStart = null;\n this.recordingStartTime = 0;\n this.pendingVisibilityUpdates = [];\n this.isScreenCapture = false;\n }\n sendReady() {\n const response = { type: \"ready\" };\n self.postMessage(response);\n }\n sendError(error) {\n const errorMessage = extractErrorMessage(error);\n const response = {\n type: \"error\",\n error: errorMessage\n };\n self.postMessage(response);\n }\n sendChunk(data, position) {\n this.totalSize = Math.max(this.totalSize, position + data.length);\n const response = {\n type: \"chunk\",\n data,\n position\n };\n const buffer = data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength);\n self.postMessage(response, [buffer]);\n }\n sendBufferUpdate(size, formatted) {\n const response = {\n type: \"bufferUpdate\",\n size,\n formatted\n };\n self.postMessage(response);\n }\n sendStateChange(state) {\n const response = {\n type: \"stateChange\",\n state\n };\n self.postMessage(response);\n }\n}\nnew RecorderWorker;\n";
527
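[Editor's note: the `RecorderWorker` above communicates with the host page purely over `postMessage`. Inbound messages are discriminated on `type` ("start", "pause", "resume", "stop", "toggleMute", "switchSource", "updateFps", "updateVisibility", "updateSourceType"); outbound responses are "ready", "error", "chunk" (a Uint8Array plus its absolute byte position, with the buffer transferred), "bufferUpdate", and "stateChange". The sketch below is a minimal, hypothetical host-side driver, not the package's own integration code: instantiating the worker from the exported `workerCode` string via a Blob URL is an assumption, and the "start" message is omitted because it requires transferring ReadableStreams of VideoFrame/AudioData plus a transcode config.]

// Hypothetical host-side sketch (TypeScript). Message shapes mirror the
// handleMessage switch and send* helpers in the worker source above.
type WorkerResponse =
  | { type: "ready" }
  | { type: "error"; error: string }
  | { type: "chunk"; data: Uint8Array; position: number }
  | { type: "bufferUpdate"; size: number; formatted: string }
  | { type: "stateChange"; state: string };

// Assumption: the workerCode string is loaded as a classic worker via a Blob URL.
const blobUrl = URL.createObjectURL(
  new Blob([workerCode], { type: "text/javascript" })
);
const worker = new Worker(blobUrl);

const chunks: { data: Uint8Array; position: number }[] = [];
worker.addEventListener("message", (event: MessageEvent<WorkerResponse>) => {
  const msg = event.data;
  if (msg.type === "chunk") {
    // Chunks arrive with absolute byte positions; collect them for reassembly.
    chunks.push({ data: msg.data, position: msg.position });
  } else if (msg.type === "stateChange" && msg.state === "stopped") {
    // Stitch the fragmented MP4 back together in position order, sizing the
    // buffer the same way the worker tracks totalSize (max of position + length).
    const total = chunks.reduce(
      (n, c) => Math.max(n, c.position + c.data.length),
      0
    );
    const file = new Uint8Array(total);
    for (const c of chunks) file.set(c.data, c.position);
    // `file` now holds the finalized recording bytes.
  } else if (msg.type === "error") {
    console.error("Recorder worker failed:", msg.error);
  }
});

// Control messages carry only a `type` plus the fields the worker reads.
worker.postMessage({ type: "updateFps", fps: 24 });
worker.postMessage({ type: "pause" });
worker.postMessage({ type: "resume" });
worker.postMessage({ type: "stop" });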
+ export declare const workerCode = "// ../../node_modules/mediabunny/dist/modules/src/misc.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nfunction assert(x) {\n if (!x) {\n throw new Error(\"Assertion failed.\");\n }\n}\nvar last = (arr) => {\n return arr && arr[arr.length - 1];\n};\nvar isU32 = (value) => {\n return value >= 0 && value < 2 ** 32;\n};\n\nclass Bitstream {\n constructor(bytes) {\n this.bytes = bytes;\n this.pos = 0;\n }\n seekToByte(byteOffset) {\n this.pos = 8 * byteOffset;\n }\n readBit() {\n const byteIndex = Math.floor(this.pos / 8);\n const byte = this.bytes[byteIndex] ?? 0;\n const bitIndex = 7 - (this.pos & 7);\n const bit = (byte & 1 << bitIndex) >> bitIndex;\n this.pos++;\n return bit;\n }\n readBits(n) {\n if (n === 1) {\n return this.readBit();\n }\n let result = 0;\n for (let i = 0;i < n; i++) {\n result <<= 1;\n result |= this.readBit();\n }\n return result;\n }\n writeBits(n, value) {\n const end = this.pos + n;\n for (let i = this.pos;i < end; i++) {\n const byteIndex = Math.floor(i / 8);\n let byte = this.bytes[byteIndex];\n const bitIndex = 7 - (i & 7);\n byte &= ~(1 << bitIndex);\n byte |= (value & 1 << end - i - 1) >> end - i - 1 << bitIndex;\n this.bytes[byteIndex] = byte;\n }\n this.pos = end;\n }\n readAlignedByte() {\n if (this.pos % 8 !== 0) {\n throw new Error(\"Bitstream is not byte-aligned.\");\n }\n const byteIndex = this.pos / 8;\n const byte = this.bytes[byteIndex] ?? 0;\n this.pos += 8;\n return byte;\n }\n skipBits(n) {\n this.pos += n;\n }\n getBitsLeft() {\n return this.bytes.length * 8 - this.pos;\n }\n clone() {\n const clone = new Bitstream(this.bytes);\n clone.pos = this.pos;\n return clone;\n }\n}\nvar readExpGolomb = (bitstream) => {\n let leadingZeroBits = 0;\n while (bitstream.readBits(1) === 0 && leadingZeroBits < 32) {\n leadingZeroBits++;\n }\n if (leadingZeroBits >= 32) {\n throw new Error(\"Invalid exponential-Golomb code.\");\n }\n const result = (1 << leadingZeroBits) - 1 + bitstream.readBits(leadingZeroBits);\n return result;\n};\nvar readSignedExpGolomb = (bitstream) => {\n const codeNum = readExpGolomb(bitstream);\n return (codeNum & 1) === 0 ? 
-(codeNum >> 1) : codeNum + 1 >> 1;\n};\nvar toUint8Array = (source) => {\n if (source.constructor === Uint8Array) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new Uint8Array(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new Uint8Array(source);\n }\n};\nvar toDataView = (source) => {\n if (source.constructor === DataView) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new DataView(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new DataView(source);\n }\n};\nvar textEncoder = /* @__PURE__ */ new TextEncoder;\nvar COLOR_PRIMARIES_MAP = {\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n bt2020: 9,\n smpte432: 12\n};\nvar TRANSFER_CHARACTERISTICS_MAP = {\n bt709: 1,\n smpte170m: 6,\n linear: 8,\n \"iec61966-2-1\": 13,\n pq: 16,\n hlg: 18\n};\nvar MATRIX_COEFFICIENTS_MAP = {\n rgb: 0,\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n \"bt2020-ncl\": 9\n};\nvar colorSpaceIsComplete = (colorSpace) => {\n return !!colorSpace && !!colorSpace.primaries && !!colorSpace.transfer && !!colorSpace.matrix && colorSpace.fullRange !== undefined;\n};\nvar isAllowSharedBufferSource = (x) => {\n return x instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && x instanceof SharedArrayBuffer || ArrayBuffer.isView(x);\n};\n\nclass AsyncMutex {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n async acquire() {\n let resolver;\n const nextPromise = new Promise((resolve) => {\n resolver = resolve;\n });\n const currentPromiseAlias = this.currentPromise;\n this.currentPromise = nextPromise;\n await currentPromiseAlias;\n return resolver;\n }\n}\nvar promiseWithResolvers = () => {\n let resolve;\n let reject;\n const promise = new Promise((res, rej) => {\n resolve = res;\n reject = rej;\n });\n return { promise, resolve, reject };\n};\nvar assertNever = (x) => {\n throw new Error(`Unexpected value: ${x}`);\n};\nvar setUint24 = (view, byteOffset, value, littleEndian) => {\n value = value >>> 0;\n value = value & 16777215;\n if (littleEndian) {\n view.setUint8(byteOffset, value & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value >>> 16 & 255);\n } else {\n view.setUint8(byteOffset, value >>> 16 & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value & 255);\n }\n};\nvar setInt24 = (view, byteOffset, value, littleEndian) => {\n value = clamp(value, -8388608, 8388607);\n if (value < 0) {\n value = value + 16777216 & 16777215;\n }\n setUint24(view, byteOffset, value, littleEndian);\n};\nvar clamp = (value, min, max) => {\n return Math.max(min, Math.min(max, value));\n};\nvar UNDETERMINED_LANGUAGE = \"und\";\nvar ISO_639_2_REGEX = /^[a-z]{3}$/;\nvar isIso639Dash2LanguageCode = (x) => {\n return ISO_639_2_REGEX.test(x);\n};\nvar SECOND_TO_MICROSECOND_FACTOR = 1e6 * (1 + Number.EPSILON);\nvar computeRationalApproximation = (x, maxDenominator) => {\n const sign = x < 0 ? 
-1 : 1;\n x = Math.abs(x);\n let prevNumerator = 0, prevDenominator = 1;\n let currNumerator = 1, currDenominator = 0;\n let remainder = x;\n while (true) {\n const integer = Math.floor(remainder);\n const nextNumerator = integer * currNumerator + prevNumerator;\n const nextDenominator = integer * currDenominator + prevDenominator;\n if (nextDenominator > maxDenominator) {\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n }\n prevNumerator = currNumerator;\n prevDenominator = currDenominator;\n currNumerator = nextNumerator;\n currDenominator = nextDenominator;\n remainder = 1 / (remainder - integer);\n if (!isFinite(remainder)) {\n break;\n }\n }\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n};\n\nclass CallSerializer {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n call(fn) {\n return this.currentPromise = this.currentPromise.then(fn);\n }\n}\nvar isFirefoxCache = null;\nvar isFirefox = () => {\n if (isFirefoxCache !== null) {\n return isFirefoxCache;\n }\n return isFirefoxCache = typeof navigator !== \"undefined\" && navigator.userAgent?.includes(\"Firefox\");\n};\nvar keyValueIterator = function* (object) {\n for (const key in object) {\n const value = object[key];\n if (value === undefined) {\n continue;\n }\n yield { key, value };\n }\n};\nvar polyfillSymbolDispose = () => {\n Symbol.dispose ??= Symbol(\"Symbol.dispose\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/metadata.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass RichImageData {\n constructor(data, mimeType) {\n this.data = data;\n this.mimeType = mimeType;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType must be a string.\");\n }\n }\n}\n\nclass AttachedFile {\n constructor(data, mimeType, name, description) {\n this.data = data;\n this.mimeType = mimeType;\n this.name = name;\n this.description = description;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (mimeType !== undefined && typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType, when provided, must be a string.\");\n }\n if (name !== undefined && typeof name !== \"string\") {\n throw new TypeError(\"name, when provided, must be a string.\");\n }\n if (description !== undefined && typeof description !== \"string\") {\n throw new TypeError(\"description, when provided, must be a string.\");\n }\n }\n}\nvar validateMetadataTags = (tags) => {\n if (!tags || typeof tags !== \"object\") {\n throw new TypeError(\"tags must be an object.\");\n }\n if (tags.title !== undefined && typeof tags.title !== \"string\") {\n throw new TypeError(\"tags.title, when provided, must be a string.\");\n }\n if (tags.description !== undefined && typeof tags.description !== \"string\") {\n throw new TypeError(\"tags.description, when provided, must be a string.\");\n }\n if (tags.artist !== undefined && typeof tags.artist !== \"string\") {\n throw new TypeError(\"tags.artist, when provided, must be a string.\");\n }\n if (tags.album !== undefined && typeof tags.album !== \"string\") {\n throw new TypeError(\"tags.album, when provided, must be a string.\");\n }\n if 
(tags.albumArtist !== undefined && typeof tags.albumArtist !== \"string\") {\n throw new TypeError(\"tags.albumArtist, when provided, must be a string.\");\n }\n if (tags.trackNumber !== undefined && (!Number.isInteger(tags.trackNumber) || tags.trackNumber <= 0)) {\n throw new TypeError(\"tags.trackNumber, when provided, must be a positive integer.\");\n }\n if (tags.tracksTotal !== undefined && (!Number.isInteger(tags.tracksTotal) || tags.tracksTotal <= 0)) {\n throw new TypeError(\"tags.tracksTotal, when provided, must be a positive integer.\");\n }\n if (tags.discNumber !== undefined && (!Number.isInteger(tags.discNumber) || tags.discNumber <= 0)) {\n throw new TypeError(\"tags.discNumber, when provided, must be a positive integer.\");\n }\n if (tags.discsTotal !== undefined && (!Number.isInteger(tags.discsTotal) || tags.discsTotal <= 0)) {\n throw new TypeError(\"tags.discsTotal, when provided, must be a positive integer.\");\n }\n if (tags.genre !== undefined && typeof tags.genre !== \"string\") {\n throw new TypeError(\"tags.genre, when provided, must be a string.\");\n }\n if (tags.date !== undefined && (!(tags.date instanceof Date) || Number.isNaN(tags.date.getTime()))) {\n throw new TypeError(\"tags.date, when provided, must be a valid Date.\");\n }\n if (tags.lyrics !== undefined && typeof tags.lyrics !== \"string\") {\n throw new TypeError(\"tags.lyrics, when provided, must be a string.\");\n }\n if (tags.images !== undefined) {\n if (!Array.isArray(tags.images)) {\n throw new TypeError(\"tags.images, when provided, must be an array.\");\n }\n for (const image of tags.images) {\n if (!image || typeof image !== \"object\") {\n throw new TypeError(\"Each image in tags.images must be an object.\");\n }\n if (!(image.data instanceof Uint8Array)) {\n throw new TypeError(\"Each image.data must be a Uint8Array.\");\n }\n if (typeof image.mimeType !== \"string\") {\n throw new TypeError(\"Each image.mimeType must be a string.\");\n }\n if (![\"coverFront\", \"coverBack\", \"unknown\"].includes(image.kind)) {\n throw new TypeError(\"Each image.kind must be 'coverFront', 'coverBack', or 'unknown'.\");\n }\n }\n }\n if (tags.comment !== undefined && typeof tags.comment !== \"string\") {\n throw new TypeError(\"tags.comment, when provided, must be a string.\");\n }\n if (tags.raw !== undefined) {\n if (!tags.raw || typeof tags.raw !== \"object\") {\n throw new TypeError(\"tags.raw, when provided, must be an object.\");\n }\n for (const value of Object.values(tags.raw)) {\n if (value !== null && typeof value !== \"string\" && !(value instanceof Uint8Array) && !(value instanceof RichImageData) && !(value instanceof AttachedFile)) {\n throw new TypeError(\"Each value in tags.raw must be a string, Uint8Array, RichImageData, AttachedFile, or null.\");\n }\n }\n }\n};\nvar validateTrackDisposition = (disposition) => {\n if (!disposition || typeof disposition !== \"object\") {\n throw new TypeError(\"disposition must be an object.\");\n }\n if (disposition.default !== undefined && typeof disposition.default !== \"boolean\") {\n throw new TypeError(\"disposition.default must be a boolean.\");\n }\n if (disposition.forced !== undefined && typeof disposition.forced !== \"boolean\") {\n throw new TypeError(\"disposition.forced must be a boolean.\");\n }\n if (disposition.original !== undefined && typeof disposition.original !== \"boolean\") {\n throw new TypeError(\"disposition.original must be a boolean.\");\n }\n if (disposition.commentary !== undefined && typeof disposition.commentary !== 
\"boolean\") {\n throw new TypeError(\"disposition.commentary must be a boolean.\");\n }\n if (disposition.hearingImpaired !== undefined && typeof disposition.hearingImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.hearingImpaired must be a boolean.\");\n }\n if (disposition.visuallyImpaired !== undefined && typeof disposition.visuallyImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.visuallyImpaired must be a boolean.\");\n }\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/codec.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar VIDEO_CODECS = [\n \"avc\",\n \"hevc\",\n \"vp9\",\n \"av1\",\n \"vp8\"\n];\nvar PCM_AUDIO_CODECS = [\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n \"pcm-u8\",\n \"pcm-s8\",\n \"ulaw\",\n \"alaw\"\n];\nvar NON_PCM_AUDIO_CODECS = [\n \"aac\",\n \"opus\",\n \"mp3\",\n \"vorbis\",\n \"flac\"\n];\nvar AUDIO_CODECS = [\n ...NON_PCM_AUDIO_CODECS,\n ...PCM_AUDIO_CODECS\n];\nvar SUBTITLE_CODECS = [\n \"webvtt\"\n];\nvar AVC_LEVEL_TABLE = [\n { maxMacroblocks: 99, maxBitrate: 64000, level: 10 },\n { maxMacroblocks: 396, maxBitrate: 192000, level: 11 },\n { maxMacroblocks: 396, maxBitrate: 384000, level: 12 },\n { maxMacroblocks: 396, maxBitrate: 768000, level: 13 },\n { maxMacroblocks: 396, maxBitrate: 2000000, level: 20 },\n { maxMacroblocks: 792, maxBitrate: 4000000, level: 21 },\n { maxMacroblocks: 1620, maxBitrate: 4000000, level: 22 },\n { maxMacroblocks: 1620, maxBitrate: 1e7, level: 30 },\n { maxMacroblocks: 3600, maxBitrate: 14000000, level: 31 },\n { maxMacroblocks: 5120, maxBitrate: 20000000, level: 32 },\n { maxMacroblocks: 8192, maxBitrate: 20000000, level: 40 },\n { maxMacroblocks: 8192, maxBitrate: 50000000, level: 41 },\n { maxMacroblocks: 8704, maxBitrate: 50000000, level: 42 },\n { maxMacroblocks: 22080, maxBitrate: 135000000, level: 50 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, level: 51 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, level: 52 },\n { maxMacroblocks: 139264, maxBitrate: 240000000, level: 60 },\n { maxMacroblocks: 139264, maxBitrate: 480000000, level: 61 },\n { maxMacroblocks: 139264, maxBitrate: 800000000, level: 62 }\n];\nvar HEVC_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 128000, tier: \"L\", level: 30 },\n { maxPictureSize: 122880, maxBitrate: 1500000, tier: \"L\", level: 60 },\n { maxPictureSize: 245760, maxBitrate: 3000000, tier: \"L\", level: 63 },\n { maxPictureSize: 552960, maxBitrate: 6000000, tier: \"L\", level: 90 },\n { maxPictureSize: 983040, maxBitrate: 1e7, tier: \"L\", level: 93 },\n { maxPictureSize: 2228224, maxBitrate: 12000000, tier: \"L\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, tier: \"H\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 20000000, tier: \"L\", level: 123 },\n { maxPictureSize: 2228224, maxBitrate: 50000000, tier: \"H\", level: 123 },\n { maxPictureSize: 8912896, maxBitrate: 25000000, tier: \"L\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"L\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 153 },\n { maxPictureSize: 
8912896, maxBitrate: 60000000, tier: \"L\", level: 156 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 156 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"L\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 120000000, tier: \"L\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"L\", level: 186 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 186 }\n];\nvar VP9_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 200000, level: 10 },\n { maxPictureSize: 73728, maxBitrate: 800000, level: 11 },\n { maxPictureSize: 122880, maxBitrate: 1800000, level: 20 },\n { maxPictureSize: 245760, maxBitrate: 3600000, level: 21 },\n { maxPictureSize: 552960, maxBitrate: 7200000, level: 30 },\n { maxPictureSize: 983040, maxBitrate: 12000000, level: 31 },\n { maxPictureSize: 2228224, maxBitrate: 18000000, level: 40 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, level: 41 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, level: 50 },\n { maxPictureSize: 8912896, maxBitrate: 120000000, level: 51 },\n { maxPictureSize: 8912896, maxBitrate: 180000000, level: 52 },\n { maxPictureSize: 35651584, maxBitrate: 180000000, level: 60 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, level: 61 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, level: 62 }\n];\nvar AV1_LEVEL_TABLE = [\n { maxPictureSize: 147456, maxBitrate: 1500000, tier: \"M\", level: 0 },\n { maxPictureSize: 278784, maxBitrate: 3000000, tier: \"M\", level: 1 },\n { maxPictureSize: 665856, maxBitrate: 6000000, tier: \"M\", level: 4 },\n { maxPictureSize: 1065024, maxBitrate: 1e7, tier: \"M\", level: 5 },\n { maxPictureSize: 2359296, maxBitrate: 12000000, tier: \"M\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 30000000, tier: \"H\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 20000000, tier: \"M\", level: 9 },\n { maxPictureSize: 2359296, maxBitrate: 50000000, tier: \"H\", level: 9 },\n { maxPictureSize: 8912896, maxBitrate: 30000000, tier: \"M\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"M\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"M\", level: 14 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 14 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 1e8, tier: \"M\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 19 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 19 }\n];\nvar buildVideoCodecString = (codec, width, height, bitrate) => {\n if (codec === \"avc\") {\n const profileIndication = 100;\n const totalMacroblocks = 
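// AVC levels (H.264 Table A-1) are specified in 16x16 macroblocks, so partial
// blocks are rounded up; e.g. 1920x1080 -> 120 * 68 = 8160 macroblocks, which
// lands on level 4.0 (limit 8192) at moderate bitrates.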
Math.ceil(width / 16) * Math.ceil(height / 16);\n const levelInfo = AVC_LEVEL_TABLE.find((level) => totalMacroblocks <= level.maxMacroblocks && bitrate <= level.maxBitrate) ?? last(AVC_LEVEL_TABLE);\n const levelIndication = levelInfo ? levelInfo.level : 0;\n const hexProfileIndication = profileIndication.toString(16).padStart(2, \"0\");\n const hexProfileCompatibility = \"00\";\n const hexLevelIndication = levelIndication.toString(16).padStart(2, \"0\");\n return `avc1.${hexProfileIndication}${hexProfileCompatibility}${hexLevelIndication}`;\n } else if (codec === \"hevc\") {\n const profilePrefix = \"\";\n const profileIdc = 1;\n const compatibilityFlags = \"6\";\n const pictureSize = width * height;\n const levelInfo = HEVC_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? last(HEVC_LEVEL_TABLE);\n const constraintFlags = \"B0\";\n return \"hev1.\" + `${profilePrefix}${profileIdc}.` + `${compatibilityFlags}.` + `${levelInfo.tier}${levelInfo.level}.` + `${constraintFlags}`;\n } else if (codec === \"vp8\") {\n return \"vp8\";\n } else if (codec === \"vp9\") {\n const profile = \"00\";\n const pictureSize = width * height;\n const levelInfo = VP9_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? last(VP9_LEVEL_TABLE);\n const bitDepth = \"08\";\n return `vp09.${profile}.${levelInfo.level.toString().padStart(2, \"0\")}.${bitDepth}`;\n } else if (codec === \"av1\") {\n const profile = 0;\n const pictureSize = width * height;\n const levelInfo = AV1_LEVEL_TABLE.find((level2) => pictureSize <= level2.maxPictureSize && bitrate <= level2.maxBitrate) ?? last(AV1_LEVEL_TABLE);\n const level = levelInfo.level.toString().padStart(2, \"0\");\n const bitDepth = \"08\";\n return `av01.${profile}.${level}${levelInfo.tier}.${bitDepth}`;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar generateAv1CodecConfigurationFromCodecString = (codecString) => {\n const parts = codecString.split(\".\");\n const marker = 1;\n const version = 1;\n const firstByte = (marker << 7) + version;\n const profile = Number(parts[1]);\n const levelAndTier = parts[2];\n const level = Number(levelAndTier.slice(0, -1));\n const secondByte = (profile << 5) + level;\n const tier = levelAndTier.slice(-1) === \"H\" ? 1 : 0;\n const bitDepth = Number(parts[3]);\n const highBitDepth = bitDepth === 8 ? 0 : 1;\n const twelveBit = 0;\n const monochrome = parts[4] ? Number(parts[4]) : 0;\n const chromaSubsamplingX = parts[5] ? Number(parts[5][0]) : 1;\n const chromaSubsamplingY = parts[5] ? Number(parts[5][1]) : 1;\n const chromaSamplePosition = parts[5] ? 
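// the optional trailing av01 codec-string fields may be absent; default to
// 4:2:0 chroma subsampling (1, 1) and an unknown chroma sample position (0)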
Number(parts[5][2]) : 0;\n const thirdByte = (tier << 7) + (highBitDepth << 6) + (twelveBit << 5) + (monochrome << 4) + (chromaSubsamplingX << 3) + (chromaSubsamplingY << 2) + chromaSamplePosition;\n const initialPresentationDelayPresent = 0;\n const fourthByte = initialPresentationDelayPresent;\n return [firstByte, secondByte, thirdByte, fourthByte];\n};\nvar buildAudioCodecString = (codec, numberOfChannels, sampleRate) => {\n if (codec === \"aac\") {\n if (numberOfChannels >= 2 && sampleRate <= 24000) {\n return \"mp4a.40.29\";\n }\n if (sampleRate <= 24000) {\n return \"mp4a.40.5\";\n }\n return \"mp4a.40.2\";\n } else if (codec === \"mp3\") {\n return \"mp3\";\n } else if (codec === \"opus\") {\n return \"opus\";\n } else if (codec === \"vorbis\") {\n return \"vorbis\";\n } else if (codec === \"flac\") {\n return \"flac\";\n } else if (PCM_AUDIO_CODECS.includes(codec)) {\n return codec;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar aacFrequencyTable = [\n 96000,\n 88200,\n 64000,\n 48000,\n 44100,\n 32000,\n 24000,\n 22050,\n 16000,\n 12000,\n 11025,\n 8000,\n 7350\n];\nvar aacChannelMap = [-1, 1, 2, 3, 4, 5, 6, 8];\nvar parseAacAudioSpecificConfig = (bytes) => {\n if (!bytes || bytes.byteLength < 2) {\n throw new TypeError(\"AAC description must be at least 2 bytes long.\");\n }\n const bitstream = new Bitstream(bytes);\n let objectType = bitstream.readBits(5);\n if (objectType === 31) {\n objectType = 32 + bitstream.readBits(6);\n }\n const frequencyIndex = bitstream.readBits(4);\n let sampleRate = null;\n if (frequencyIndex === 15) {\n sampleRate = bitstream.readBits(24);\n } else {\n if (frequencyIndex < aacFrequencyTable.length) {\n sampleRate = aacFrequencyTable[frequencyIndex];\n }\n }\n const channelConfiguration = bitstream.readBits(4);\n let numberOfChannels = null;\n if (channelConfiguration >= 1 && channelConfiguration <= 7) {\n numberOfChannels = aacChannelMap[channelConfiguration];\n }\n return {\n objectType,\n frequencyIndex,\n sampleRate,\n channelConfiguration,\n numberOfChannels\n };\n};\nvar buildAacAudioSpecificConfig = (config) => {\n let frequencyIndex = aacFrequencyTable.indexOf(config.sampleRate);\n let customSampleRate = null;\n if (frequencyIndex === -1) {\n frequencyIndex = 15;\n customSampleRate = config.sampleRate;\n }\n const channelConfiguration = aacChannelMap.indexOf(config.numberOfChannels);\n if (channelConfiguration === -1) {\n throw new TypeError(`Unsupported number of channels: ${config.numberOfChannels}`);\n }\n let bitCount = 5 + 4 + 4;\n if (config.objectType >= 32) {\n bitCount += 6;\n }\n if (frequencyIndex === 15) {\n bitCount += 24;\n }\n const byteCount = Math.ceil(bitCount / 8);\n const bytes = new Uint8Array(byteCount);\n const bitstream = new Bitstream(bytes);\n if (config.objectType < 32) {\n bitstream.writeBits(5, config.objectType);\n } else {\n bitstream.writeBits(5, 31);\n bitstream.writeBits(6, config.objectType - 32);\n }\n bitstream.writeBits(4, frequencyIndex);\n if (frequencyIndex === 15) {\n bitstream.writeBits(24, customSampleRate);\n }\n bitstream.writeBits(4, channelConfiguration);\n return bytes;\n};\nvar PCM_CODEC_REGEX = /^pcm-([usf])(\\d+)+(be)?$/;\nvar parsePcmCodec = (codec) => {\n assert(PCM_AUDIO_CODECS.includes(codec));\n if (codec === \"ulaw\") {\n return { dataType: \"ulaw\", sampleSize: 1, littleEndian: true, silentValue: 255 };\n } else if (codec === \"alaw\") {\n return { dataType: \"alaw\", sampleSize: 1, littleEndian: true, silentValue: 213 };\n }\n const match = 
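// decomposes strings like pcm-s24be into sample type, bit depth and endianness,
// e.g. PCM_CODEC_REGEX.exec('pcm-s24be') -> ['pcm-s24be', 's', '24', 'be']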
PCM_CODEC_REGEX.exec(codec);\n assert(match);\n let dataType;\n if (match[1] === \"u\") {\n dataType = \"unsigned\";\n } else if (match[1] === \"s\") {\n dataType = \"signed\";\n } else {\n dataType = \"float\";\n }\n const sampleSize = Number(match[2]) / 8;\n const littleEndian = match[3] !== \"be\";\n const silentValue = codec === \"pcm-u8\" ? 2 ** 7 : 0;\n return { dataType, sampleSize, littleEndian, silentValue };\n};\nvar inferCodecFromCodecString = (codecString) => {\n if (codecString.startsWith(\"avc1\") || codecString.startsWith(\"avc3\")) {\n return \"avc\";\n } else if (codecString.startsWith(\"hev1\") || codecString.startsWith(\"hvc1\")) {\n return \"hevc\";\n } else if (codecString === \"vp8\") {\n return \"vp8\";\n } else if (codecString.startsWith(\"vp09\")) {\n return \"vp9\";\n } else if (codecString.startsWith(\"av01\")) {\n return \"av1\";\n }\n if (codecString.startsWith(\"mp4a.40\") || codecString === \"mp4a.67\") {\n return \"aac\";\n } else if (codecString === \"mp3\" || codecString === \"mp4a.69\" || codecString === \"mp4a.6B\" || codecString === \"mp4a.6b\") {\n return \"mp3\";\n } else if (codecString === \"opus\") {\n return \"opus\";\n } else if (codecString === \"vorbis\") {\n return \"vorbis\";\n } else if (codecString === \"flac\") {\n return \"flac\";\n } else if (codecString === \"ulaw\") {\n return \"ulaw\";\n } else if (codecString === \"alaw\") {\n return \"alaw\";\n } else if (PCM_CODEC_REGEX.test(codecString)) {\n return codecString;\n }\n if (codecString === \"webvtt\") {\n return \"webvtt\";\n }\n return null;\n};\nvar getVideoEncoderConfigExtension = (codec) => {\n if (codec === \"avc\") {\n return {\n avc: {\n format: \"avc\"\n }\n };\n } else if (codec === \"hevc\") {\n return {\n hevc: {\n format: \"hevc\"\n }\n };\n }\n return {};\n};\nvar getAudioEncoderConfigExtension = (codec) => {\n if (codec === \"aac\") {\n return {\n aac: {\n format: \"aac\"\n }\n };\n } else if (codec === \"opus\") {\n return {\n opus: {\n format: \"opus\"\n }\n };\n }\n return {};\n};\nvar VALID_VIDEO_CODEC_STRING_PREFIXES = [\"avc1\", \"avc3\", \"hev1\", \"hvc1\", \"vp8\", \"vp09\", \"av01\"];\nvar AVC_CODEC_STRING_REGEX = /^(avc1|avc3)\\.[0-9a-fA-F]{6}$/;\nvar HEVC_CODEC_STRING_REGEX = /^(hev1|hvc1)\\.(?:[ABC]?\\d+)\\.[0-9a-fA-F]{1,8}\\.[LH]\\d+(?:\\.[0-9a-fA-F]{1,2}){0,6}$/;\nvar VP9_CODEC_STRING_REGEX = /^vp09(?:\\.\\d{2}){3}(?:(?:\\.\\d{2}){5})?$/;\nvar AV1_CODEC_STRING_REGEX = /^av01\\.\\d\\.\\d{2}[MH]\\.\\d{2}(?:\\.\\d\\.\\d{3}\\.\\d{2}\\.\\d{2}\\.\\d{2}\\.\\d)?$/;\nvar validateVideoChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Video chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Video chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Video chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_VIDEO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string must be a valid video codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if 
(!Number.isInteger(metadata.decoderConfig.codedWidth) || metadata.decoderConfig.codedWidth <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedWidth (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.codedHeight) || metadata.decoderConfig.codedHeight <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedHeight (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Video chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.colorSpace !== undefined) {\n const { colorSpace } = metadata.decoderConfig;\n if (typeof colorSpace !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace, when provided, must be an object.\");\n }\n const primariesValues = Object.keys(COLOR_PRIMARIES_MAP);\n if (colorSpace.primaries != null && !primariesValues.includes(colorSpace.primaries)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace primaries, when defined, must be one of` + ` ${primariesValues.join(\", \")}.`);\n }\n const transferValues = Object.keys(TRANSFER_CHARACTERISTICS_MAP);\n if (colorSpace.transfer != null && !transferValues.includes(colorSpace.transfer)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace transfer, when defined, must be one of` + ` ${transferValues.join(\", \")}.`);\n }\n const matrixValues = Object.keys(MATRIX_COEFFICIENTS_MAP);\n if (colorSpace.matrix != null && !matrixValues.includes(colorSpace.matrix)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace matrix, when defined, must be one of` + ` ${matrixValues.join(\", \")}.`);\n }\n if (colorSpace.fullRange != null && typeof colorSpace.fullRange !== \"boolean\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace fullRange, when defined, must be a boolean.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"avc1\") || metadata.decoderConfig.codec.startsWith(\"avc3\")) {\n if (!AVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AVC must be a valid AVC codec string as\" + \" specified in Section 3.4 of RFC 6381.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"hev1\") || metadata.decoderConfig.codec.startsWith(\"hvc1\")) {\n if (!HEVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for HEVC must be a valid HEVC codec string as\" + \" specified in Section E.3 of ISO 14496-15.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp8\")) {\n if (metadata.decoderConfig.codec !== \"vp8\") {\n throw new TypeError('Video chunk metadata decoder configuration codec string for VP8 must be \"vp8\".');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp09\")) {\n if (!VP9_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for VP9 must be a valid VP9 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://www.webmproject.org/vp9/mp4/.');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"av01\")) {\n if 
(!AV1_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AV1 must be a valid AV1 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://aomediacodec.github.io/av1-isobmff/.');\n }\n }\n};\nvar VALID_AUDIO_CODEC_STRING_PREFIXES = [\"mp4a\", \"mp3\", \"opus\", \"vorbis\", \"flac\", \"ulaw\", \"alaw\", \"pcm\"];\nvar validateAudioChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Audio chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Audio chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Audio chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_AUDIO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string must be a valid audio codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if (!Number.isInteger(metadata.decoderConfig.sampleRate) || metadata.decoderConfig.sampleRate <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid sampleRate (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.numberOfChannels) || metadata.decoderConfig.numberOfChannels <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid numberOfChannels (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"mp4a\") && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n const validStrings = [\"mp4a.40.2\", \"mp4a.40.02\", \"mp4a.40.5\", \"mp4a.40.05\", \"mp4a.40.29\", \"mp4a.67\"];\n if (!validStrings.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for AAC must be a valid AAC codec string as\" + \" specified in https://www.w3.org/TR/webcodecs-aac-codec-registration/.\");\n }\n if (!metadata.decoderConfig.description) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for AAC must include a description, which is expected to be\" + \" an AudioSpecificConfig as specified in ISO 14496-3.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"mp3\") || metadata.decoderConfig.codec.startsWith(\"mp4a\")) {\n if (metadata.decoderConfig.codec !== \"mp3\" && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for MP3 must be \"mp3\", \"mp4a.69\" or' + ' \"mp4a.6B\".');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"opus\")) {\n if 
(metadata.decoderConfig.codec !== \"opus\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Opus must be \"opus\".');\n }\n if (metadata.decoderConfig.description && metadata.decoderConfig.description.byteLength < 18) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when specified, is expected to be an\" + \" Identification Header as specified in Section 5.1 of RFC 7845.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vorbis\")) {\n if (metadata.decoderConfig.codec !== \"vorbis\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Vorbis must be \"vorbis\".');\n }\n if (!metadata.decoderConfig.description) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for Vorbis must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-vorbis-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"flac\")) {\n if (metadata.decoderConfig.codec !== \"flac\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for FLAC must be \"flac\".');\n }\n const minDescriptionSize = 4 + 4 + 34;\n if (!metadata.decoderConfig.description || metadata.decoderConfig.description.byteLength < minDescriptionSize) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for FLAC must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-flac-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"pcm\") || metadata.decoderConfig.codec.startsWith(\"ulaw\") || metadata.decoderConfig.codec.startsWith(\"alaw\")) {\n if (!PCM_AUDIO_CODECS.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for PCM must be one of the supported PCM\" + ` codecs (${PCM_AUDIO_CODECS.join(\", \")}).`);\n }\n }\n};\nvar validateSubtitleMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Subtitle metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Subtitle metadata must be an object.\");\n }\n if (!metadata.config) {\n throw new TypeError(\"Subtitle metadata must include a config object.\");\n }\n if (typeof metadata.config !== \"object\") {\n throw new TypeError(\"Subtitle metadata config must be an object.\");\n }\n if (typeof metadata.config.description !== \"string\") {\n throw new TypeError(\"Subtitle metadata config description must be a string.\");\n }\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/muxer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Muxer {\n constructor(output) {\n this.mutex = new AsyncMutex;\n this.firstMediaStreamTimestamp = null;\n this.trackTimestampInfo = new WeakMap;\n this.output = output;\n }\n onTrackClose(track) {}\n validateAndNormalizeTimestamp(track, timestampInSeconds, isKeyPacket) {\n timestampInSeconds += track.source._timestampOffset;\n let timestampInfo = this.trackTimestampInfo.get(track);\n if (!timestampInfo) {\n if (!isKeyPacket) {\n throw new Error(\"First packet must be a key packet.\");\n }\n timestampInfo = {\n maxTimestamp: timestampInSeconds,\n maxTimestampBeforeLastKeyPacket: timestampInSeconds\n };\n this.trackTimestampInfo.set(track, timestampInfo);\n }\n if (timestampInSeconds < 0) {\n throw new Error(`Timestamps must be non-negative (got ${timestampInSeconds}s).`);\n }\n if (isKeyPacket) {\n timestampInfo.maxTimestampBeforeLastKeyPacket = timestampInfo.maxTimestamp;\n }\n if (timestampInSeconds < timestampInfo.maxTimestampBeforeLastKeyPacket) {\n throw new Error(`Timestamps cannot be smaller than the largest timestamp of the previous GOP (a GOP begins with a key` + ` packet and ends right before the next key packet). Got ${timestampInSeconds}s, but largest` + ` timestamp is ${timestampInfo.maxTimestampBeforeLastKeyPacket}s.`);\n }\n timestampInfo.maxTimestamp = Math.max(timestampInfo.maxTimestamp, timestampInSeconds);\n return timestampInSeconds;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/codec-data.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar AvcNalUnitType;\n(function(AvcNalUnitType2) {\n AvcNalUnitType2[AvcNalUnitType2[\"IDR\"] = 5] = \"IDR\";\n AvcNalUnitType2[AvcNalUnitType2[\"SEI\"] = 6] = \"SEI\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS\"] = 7] = \"SPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"PPS\"] = 8] = \"PPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS_EXT\"] = 13] = \"SPS_EXT\";\n})(AvcNalUnitType || (AvcNalUnitType = {}));\nvar HevcNalUnitType;\n(function(HevcNalUnitType2) {\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_N\"] = 8] = \"RASL_N\";\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_R\"] = 9] = \"RASL_R\";\n HevcNalUnitType2[HevcNalUnitType2[\"BLA_W_LP\"] = 16] = \"BLA_W_LP\";\n HevcNalUnitType2[HevcNalUnitType2[\"RSV_IRAP_VCL23\"] = 23] = \"RSV_IRAP_VCL23\";\n HevcNalUnitType2[HevcNalUnitType2[\"VPS_NUT\"] = 32] = \"VPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SPS_NUT\"] = 33] = \"SPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PPS_NUT\"] = 34] = \"PPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PREFIX_SEI_NUT\"] = 39] = \"PREFIX_SEI_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SUFFIX_SEI_NUT\"] = 40] = \"SUFFIX_SEI_NUT\";\n})(HevcNalUnitType || (HevcNalUnitType = {}));\nvar findNalUnitsInAnnexB = (packetData) => {\n const nalUnits = [];\n let i = 0;\n while (i < packetData.length) {\n let startCodePos = -1;\n let startCodeLength = 0;\n for (let j = i;j < packetData.length - 3; j++) {\n if (packetData[j] === 0 && packetData[j + 1] === 0 && packetData[j + 2] === 1) {\n startCodePos = j;\n startCodeLength = 3;\n break;\n }\n if (j < packetData.length - 4 && packetData[j] === 0 && packetData[j + 1] === 0 && packetData[j + 2] === 0 && packetData[j + 3] === 1) {\n startCodePos = j;\n 
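// a 4-byte 00 00 00 01 start code matched; record its length so the scan
// resumes at the first byte of the NAL unit payload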
startCodeLength = 4;\n break;\n }\n }\n if (startCodePos === -1) {\n break;\n }\n if (i > 0 && startCodePos > i) {\n const nalData = packetData.subarray(i, startCodePos);\n if (nalData.length > 0) {\n nalUnits.push(nalData);\n }\n }\n i = startCodePos + startCodeLength;\n }\n if (i < packetData.length) {\n const nalData = packetData.subarray(i);\n if (nalData.length > 0) {\n nalUnits.push(nalData);\n }\n }\n return nalUnits;\n};\nvar removeEmulationPreventionBytes = (data) => {\n const result = [];\n const len = data.length;\n for (let i = 0;i < len; i++) {\n if (i + 2 < len && data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 3) {\n result.push(0, 0);\n i += 2;\n } else {\n result.push(data[i]);\n }\n }\n return new Uint8Array(result);\n};\nvar transformAnnexBToLengthPrefixed = (packetData) => {\n const NAL_UNIT_LENGTH_SIZE = 4;\n const nalUnits = findNalUnitsInAnnexB(packetData);\n if (nalUnits.length === 0) {\n return null;\n }\n let totalSize = 0;\n for (const nalUnit of nalUnits) {\n totalSize += NAL_UNIT_LENGTH_SIZE + nalUnit.byteLength;\n }\n const avccData = new Uint8Array(totalSize);\n const dataView = new DataView(avccData.buffer);\n let offset = 0;\n for (const nalUnit of nalUnits) {\n const length = nalUnit.byteLength;\n dataView.setUint32(offset, length, false);\n offset += 4;\n avccData.set(nalUnit, offset);\n offset += nalUnit.byteLength;\n }\n return avccData;\n};\nvar extractNalUnitTypeForAvc = (data) => {\n return data[0] & 31;\n};\nvar extractAvcDecoderConfigurationRecord = (packetData) => {\n try {\n const nalUnits = findNalUnitsInAnnexB(packetData);\n const spsUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.SPS);\n const ppsUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.PPS);\n const spsExtUnits = nalUnits.filter((unit) => extractNalUnitTypeForAvc(unit) === AvcNalUnitType.SPS_EXT);\n if (spsUnits.length === 0) {\n return null;\n }\n if (ppsUnits.length === 0) {\n return null;\n }\n const spsData = spsUnits[0];\n const spsInfo = parseAvcSps(spsData);\n assert(spsInfo !== null);\n const hasExtendedData = spsInfo.profileIdc === 100 || spsInfo.profileIdc === 110 || spsInfo.profileIdc === 122 || spsInfo.profileIdc === 144;\n return {\n configurationVersion: 1,\n avcProfileIndication: spsInfo.profileIdc,\n profileCompatibility: spsInfo.constraintFlags,\n avcLevelIndication: spsInfo.levelIdc,\n lengthSizeMinusOne: 3,\n sequenceParameterSets: spsUnits,\n pictureParameterSets: ppsUnits,\n chromaFormat: hasExtendedData ? spsInfo.chromaFormatIdc : null,\n bitDepthLumaMinus8: hasExtendedData ? spsInfo.bitDepthLumaMinus8 : null,\n bitDepthChromaMinus8: hasExtendedData ? spsInfo.bitDepthChromaMinus8 : null,\n sequenceParameterSetExt: hasExtendedData ? 
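// only the High-profile family (profile_idc 100, 110, 122, 144) carries the
// chroma-format / bit-depth extension block at the end of the avcC record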
spsExtUnits : null\n };\n } catch (error) {\n console.error(\"Error building AVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar serializeAvcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push(record.avcProfileIndication);\n bytes.push(record.profileCompatibility);\n bytes.push(record.avcLevelIndication);\n bytes.push(252 | record.lengthSizeMinusOne & 3);\n bytes.push(224 | record.sequenceParameterSets.length & 31);\n for (const sps of record.sequenceParameterSets) {\n const length = sps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(sps[i]);\n }\n }\n bytes.push(record.pictureParameterSets.length);\n for (const pps of record.pictureParameterSets) {\n const length = pps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(pps[i]);\n }\n }\n if (record.avcProfileIndication === 100 || record.avcProfileIndication === 110 || record.avcProfileIndication === 122 || record.avcProfileIndication === 144) {\n assert(record.chromaFormat !== null);\n assert(record.bitDepthLumaMinus8 !== null);\n assert(record.bitDepthChromaMinus8 !== null);\n assert(record.sequenceParameterSetExt !== null);\n bytes.push(252 | record.chromaFormat & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.sequenceParameterSetExt.length);\n for (const spsExt of record.sequenceParameterSetExt) {\n const length = spsExt.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(spsExt[i]);\n }\n }\n }\n return new Uint8Array(bytes);\n};\nvar parseAvcSps = (sps) => {\n try {\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(1);\n bitstream.skipBits(2);\n const nalUnitType = bitstream.readBits(5);\n if (nalUnitType !== 7) {\n return null;\n }\n const profileIdc = bitstream.readAlignedByte();\n const constraintFlags = bitstream.readAlignedByte();\n const levelIdc = bitstream.readAlignedByte();\n readExpGolomb(bitstream);\n let chromaFormatIdc = null;\n let bitDepthLumaMinus8 = null;\n let bitDepthChromaMinus8 = null;\n if (profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244 || profileIdc === 44 || profileIdc === 83 || profileIdc === 86 || profileIdc === 118 || profileIdc === 128) {\n chromaFormatIdc = readExpGolomb(bitstream);\n if (chromaFormatIdc === 3) {\n bitstream.skipBits(1);\n }\n bitDepthLumaMinus8 = readExpGolomb(bitstream);\n bitDepthChromaMinus8 = readExpGolomb(bitstream);\n bitstream.skipBits(1);\n const seqScalingMatrixPresentFlag = bitstream.readBits(1);\n if (seqScalingMatrixPresentFlag) {\n for (let i = 0;i < (chromaFormatIdc !== 3 ? 8 : 12); i++) {\n const seqScalingListPresentFlag = bitstream.readBits(1);\n if (seqScalingListPresentFlag) {\n const sizeOfScalingList = i < 6 ? 16 : 64;\n let lastScale = 8;\n let nextScale = 8;\n for (let j = 0;j < sizeOfScalingList; j++) {\n if (nextScale !== 0) {\n const deltaScale = readSignedExpGolomb(bitstream);\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? 
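// per the H.264 scaling-list syntax, once next_scale becomes zero the previous
// scale value repeats for the remainder of the list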
lastScale : nextScale;\n }\n }\n }\n }\n }\n readExpGolomb(bitstream);\n const picOrderCntType = readExpGolomb(bitstream);\n if (picOrderCntType === 0) {\n readExpGolomb(bitstream);\n } else if (picOrderCntType === 1) {\n bitstream.skipBits(1);\n readSignedExpGolomb(bitstream);\n readSignedExpGolomb(bitstream);\n const numRefFramesInPicOrderCntCycle = readExpGolomb(bitstream);\n for (let i = 0;i < numRefFramesInPicOrderCntCycle; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n const frameMbsOnlyFlag = bitstream.readBits(1);\n return {\n profileIdc,\n constraintFlags,\n levelIdc,\n frameMbsOnlyFlag,\n chromaFormatIdc,\n bitDepthLumaMinus8,\n bitDepthChromaMinus8\n };\n } catch (error) {\n console.error(\"Error parsing AVC SPS:\", error);\n return null;\n }\n};\nvar extractNalUnitTypeForHevc = (data) => {\n return data[0] >> 1 & 63;\n};\nvar extractHevcDecoderConfigurationRecord = (packetData) => {\n try {\n const nalUnits = findNalUnitsInAnnexB(packetData);\n const vpsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.VPS_NUT);\n const spsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.SPS_NUT);\n const ppsUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.PPS_NUT);\n const seiUnits = nalUnits.filter((unit) => extractNalUnitTypeForHevc(unit) === HevcNalUnitType.PREFIX_SEI_NUT || extractNalUnitTypeForHevc(unit) === HevcNalUnitType.SUFFIX_SEI_NUT);\n if (spsUnits.length === 0 || ppsUnits.length === 0)\n return null;\n const sps = spsUnits[0];\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(16);\n bitstream.readBits(4);\n const sps_max_sub_layers_minus1 = bitstream.readBits(3);\n const sps_temporal_id_nesting_flag = bitstream.readBits(1);\n const { general_profile_space, general_tier_flag, general_profile_idc, general_profile_compatibility_flags, general_constraint_indicator_flags, general_level_idc } = parseProfileTierLevel(bitstream, sps_max_sub_layers_minus1);\n readExpGolomb(bitstream);\n const chroma_format_idc = readExpGolomb(bitstream);\n if (chroma_format_idc === 3)\n bitstream.skipBits(1);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n const bit_depth_luma_minus8 = readExpGolomb(bitstream);\n const bit_depth_chroma_minus8 = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n const sps_sub_layer_ordering_info_present_flag = bitstream.readBits(1);\n const maxNum = sps_sub_layer_ordering_info_present_flag ? 
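// when the flag is set, every sub-layer (0..max) carries its own ordering info;
// otherwise only the values for the highest sub-layer are coded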
0 : sps_max_sub_layers_minus1;\n for (let i = maxNum;i <= sps_max_sub_layers_minus1; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n if (bitstream.readBits(1)) {\n skipScalingListData(bitstream);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n if (bitstream.readBits(1)) {\n bitstream.skipBits(4);\n bitstream.skipBits(4);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n const num_short_term_ref_pic_sets = readExpGolomb(bitstream);\n skipAllStRefPicSets(bitstream, num_short_term_ref_pic_sets);\n if (bitstream.readBits(1)) {\n const num_long_term_ref_pics_sps = readExpGolomb(bitstream);\n for (let i = 0;i < num_long_term_ref_pics_sps; i++) {\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n let min_spatial_segmentation_idc = 0;\n if (bitstream.readBits(1)) {\n min_spatial_segmentation_idc = parseVuiForMinSpatialSegmentationIdc(bitstream, sps_max_sub_layers_minus1);\n }\n let parallelismType = 0;\n if (ppsUnits.length > 0) {\n const pps = ppsUnits[0];\n const ppsBitstream = new Bitstream(removeEmulationPreventionBytes(pps));\n ppsBitstream.skipBits(16);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(3);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n if (ppsBitstream.readBits(1)) {\n readExpGolomb(ppsBitstream);\n }\n readSignedExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n const tiles_enabled_flag = ppsBitstream.readBits(1);\n const entropy_coding_sync_enabled_flag = ppsBitstream.readBits(1);\n if (!tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 0;\n else if (tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 2;\n else if (!tiles_enabled_flag && entropy_coding_sync_enabled_flag)\n parallelismType = 3;\n else\n parallelismType = 0;\n }\n const arrays = [\n ...vpsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.VPS_NUT,\n nalUnits: vpsUnits\n }\n ] : [],\n ...spsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.SPS_NUT,\n nalUnits: spsUnits\n }\n ] : [],\n ...ppsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.PPS_NUT,\n nalUnits: ppsUnits\n }\n ] : [],\n ...seiUnits.length ? 
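// hvcC groups parameter-set NAL units into per-type arrays (VPS, SPS, PPS and
// any SEI), each marked as complete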
[\n {\n arrayCompleteness: 1,\n nalUnitType: extractNalUnitTypeForHevc(seiUnits[0]),\n nalUnits: seiUnits\n }\n ] : []\n ];\n const record = {\n configurationVersion: 1,\n generalProfileSpace: general_profile_space,\n generalTierFlag: general_tier_flag,\n generalProfileIdc: general_profile_idc,\n generalProfileCompatibilityFlags: general_profile_compatibility_flags,\n generalConstraintIndicatorFlags: general_constraint_indicator_flags,\n generalLevelIdc: general_level_idc,\n minSpatialSegmentationIdc: min_spatial_segmentation_idc,\n parallelismType,\n chromaFormatIdc: chroma_format_idc,\n bitDepthLumaMinus8: bit_depth_luma_minus8,\n bitDepthChromaMinus8: bit_depth_chroma_minus8,\n avgFrameRate: 0,\n constantFrameRate: 0,\n numTemporalLayers: sps_max_sub_layers_minus1 + 1,\n temporalIdNested: sps_temporal_id_nesting_flag,\n lengthSizeMinusOne: 3,\n arrays\n };\n return record;\n } catch (error) {\n console.error(\"Error building HEVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar parseProfileTierLevel = (bitstream, maxNumSubLayersMinus1) => {\n const general_profile_space = bitstream.readBits(2);\n const general_tier_flag = bitstream.readBits(1);\n const general_profile_idc = bitstream.readBits(5);\n let general_profile_compatibility_flags = 0;\n for (let i = 0;i < 32; i++) {\n general_profile_compatibility_flags = general_profile_compatibility_flags << 1 | bitstream.readBits(1);\n }\n const general_constraint_indicator_flags = new Uint8Array(6);\n for (let i = 0;i < 6; i++) {\n general_constraint_indicator_flags[i] = bitstream.readBits(8);\n }\n const general_level_idc = bitstream.readBits(8);\n const sub_layer_profile_present_flag = [];\n const sub_layer_level_present_flag = [];\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n sub_layer_profile_present_flag.push(bitstream.readBits(1));\n sub_layer_level_present_flag.push(bitstream.readBits(1));\n }\n if (maxNumSubLayersMinus1 > 0) {\n for (let i = maxNumSubLayersMinus1;i < 8; i++) {\n bitstream.skipBits(2);\n }\n }\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n if (sub_layer_profile_present_flag[i])\n bitstream.skipBits(88);\n if (sub_layer_level_present_flag[i])\n bitstream.skipBits(8);\n }\n return {\n general_profile_space,\n general_tier_flag,\n general_profile_idc,\n general_profile_compatibility_flags,\n general_constraint_indicator_flags,\n general_level_idc\n };\n};\nvar skipScalingListData = (bitstream) => {\n for (let sizeId = 0;sizeId < 4; sizeId++) {\n for (let matrixId = 0;matrixId < (sizeId === 3 ? 
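// HEVC scaling lists: the 32x32 size class (sizeId 3) has 2 matrices, the
// smaller size classes have 6 each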
2 : 6); matrixId++) {\n const scaling_list_pred_mode_flag = bitstream.readBits(1);\n if (!scaling_list_pred_mode_flag) {\n readExpGolomb(bitstream);\n } else {\n const coefNum = Math.min(64, 1 << 4 + (sizeId << 1));\n if (sizeId > 1) {\n readSignedExpGolomb(bitstream);\n }\n for (let i = 0;i < coefNum; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n }\n }\n};\nvar skipAllStRefPicSets = (bitstream, num_short_term_ref_pic_sets) => {\n const NumDeltaPocs = [];\n for (let stRpsIdx = 0;stRpsIdx < num_short_term_ref_pic_sets; stRpsIdx++) {\n NumDeltaPocs[stRpsIdx] = skipStRefPicSet(bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs);\n }\n};\nvar skipStRefPicSet = (bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs) => {\n let NumDeltaPocsThis = 0;\n let inter_ref_pic_set_prediction_flag = 0;\n let RefRpsIdx = 0;\n if (stRpsIdx !== 0) {\n inter_ref_pic_set_prediction_flag = bitstream.readBits(1);\n }\n if (inter_ref_pic_set_prediction_flag) {\n if (stRpsIdx === num_short_term_ref_pic_sets) {\n const delta_idx_minus1 = readExpGolomb(bitstream);\n RefRpsIdx = stRpsIdx - (delta_idx_minus1 + 1);\n } else {\n RefRpsIdx = stRpsIdx - 1;\n }\n bitstream.readBits(1);\n readExpGolomb(bitstream);\n const numDelta = NumDeltaPocs[RefRpsIdx] ?? 0;\n for (let j = 0;j <= numDelta; j++) {\n const used_by_curr_pic_flag = bitstream.readBits(1);\n if (!used_by_curr_pic_flag) {\n bitstream.readBits(1);\n }\n }\n NumDeltaPocsThis = NumDeltaPocs[RefRpsIdx];\n } else {\n const num_negative_pics = readExpGolomb(bitstream);\n const num_positive_pics = readExpGolomb(bitstream);\n for (let i = 0;i < num_negative_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n for (let i = 0;i < num_positive_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n NumDeltaPocsThis = num_negative_pics + num_positive_pics;\n }\n return NumDeltaPocsThis;\n};\nvar parseVuiForMinSpatialSegmentationIdc = (bitstream, sps_max_sub_layers_minus1) => {\n if (bitstream.readBits(1)) {\n const aspect_ratio_idc = bitstream.readBits(8);\n if (aspect_ratio_idc === 255) {\n bitstream.readBits(16);\n bitstream.readBits(16);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(3);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n bitstream.readBits(8);\n bitstream.readBits(8);\n bitstream.readBits(8);\n }\n }\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(32);\n bitstream.readBits(32);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n skipHrdParameters(bitstream, true, sps_max_sub_layers_minus1);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n const min_spatial_segmentation_idc = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n return min_spatial_segmentation_idc;\n }\n return 0;\n};\nvar skipHrdParameters = (bitstream, commonInfPresentFlag, maxNumSubLayersMinus1) => {\n let nal_hrd_parameters_present_flag = false;\n let vcl_hrd_parameters_present_flag = false;\n let sub_pic_hrd_params_present_flag = false;\n 
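// walk the HRD (hypothetical reference decoder) syntax purely to keep the bit
// position aligned; none of the timing values are retained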
if (commonInfPresentFlag) {\n nal_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n vcl_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) {\n sub_pic_hrd_params_present_flag = bitstream.readBits(1) === 1;\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(8);\n bitstream.readBits(5);\n bitstream.readBits(1);\n bitstream.readBits(5);\n }\n bitstream.readBits(4);\n bitstream.readBits(4);\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(4);\n }\n bitstream.readBits(5);\n bitstream.readBits(5);\n bitstream.readBits(5);\n }\n }\n for (let i = 0;i <= maxNumSubLayersMinus1; i++) {\n const fixed_pic_rate_general_flag = bitstream.readBits(1) === 1;\n let fixed_pic_rate_within_cvs_flag = true;\n if (!fixed_pic_rate_general_flag) {\n fixed_pic_rate_within_cvs_flag = bitstream.readBits(1) === 1;\n }\n let low_delay_hrd_flag = false;\n if (fixed_pic_rate_within_cvs_flag) {\n readExpGolomb(bitstream);\n } else {\n low_delay_hrd_flag = bitstream.readBits(1) === 1;\n }\n let CpbCnt = 1;\n if (!low_delay_hrd_flag) {\n const cpb_cnt_minus1 = readExpGolomb(bitstream);\n CpbCnt = cpb_cnt_minus1 + 1;\n }\n if (nal_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n if (vcl_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n }\n};\nvar skipSubLayerHrdParameters = (bitstream, CpbCnt, sub_pic_hrd_params_present_flag) => {\n for (let i = 0;i < CpbCnt; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (sub_pic_hrd_params_present_flag) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n }\n};\nvar serializeHevcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push((record.generalProfileSpace & 3) << 6 | (record.generalTierFlag & 1) << 5 | record.generalProfileIdc & 31);\n bytes.push(record.generalProfileCompatibilityFlags >>> 24 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 16 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 8 & 255);\n bytes.push(record.generalProfileCompatibilityFlags & 255);\n bytes.push(...record.generalConstraintIndicatorFlags);\n bytes.push(record.generalLevelIdc & 255);\n bytes.push(240 | record.minSpatialSegmentationIdc >> 8 & 15);\n bytes.push(record.minSpatialSegmentationIdc & 255);\n bytes.push(252 | record.parallelismType & 3);\n bytes.push(252 | record.chromaFormatIdc & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.avgFrameRate >> 8 & 255);\n bytes.push(record.avgFrameRate & 255);\n bytes.push((record.constantFrameRate & 3) << 6 | (record.numTemporalLayers & 7) << 3 | (record.temporalIdNested & 1) << 2 | record.lengthSizeMinusOne & 3);\n bytes.push(record.arrays.length & 255);\n for (const arr of record.arrays) {\n bytes.push((arr.arrayCompleteness & 1) << 7 | 0 << 6 | arr.nalUnitType & 63);\n bytes.push(arr.nalUnits.length >> 8 & 255);\n bytes.push(arr.nalUnits.length & 255);\n for (const nal of arr.nalUnits) {\n bytes.push(nal.length >> 8 & 255);\n bytes.push(nal.length & 255);\n for (let i = 0;i < nal.length; i++) {\n bytes.push(nal[i]);\n }\n }\n }\n return new Uint8Array(bytes);\n};\nvar parseOpusIdentificationHeader = (bytes) => {\n const view = toDataView(bytes);\n const outputChannelCount = 
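// OpusHead layout per RFC 7845 section 5.1: bytes 0-7 magic, 8 version,
// 9 channel count, 10-11 pre-skip, 12-15 input sample rate, 16-17 output gain,
// 18 channel mapping family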
view.getUint8(9);\n const preSkip = view.getUint16(10, true);\n const inputSampleRate = view.getUint32(12, true);\n const outputGain = view.getInt16(16, true);\n const channelMappingFamily = view.getUint8(18);\n let channelMappingTable = null;\n if (channelMappingFamily) {\n channelMappingTable = bytes.subarray(19, 19 + 2 + outputChannelCount);\n }\n return {\n outputChannelCount,\n preSkip,\n inputSampleRate,\n outputGain,\n channelMappingFamily,\n channelMappingTable\n };\n};\nvar FlacBlockType;\n(function(FlacBlockType2) {\n FlacBlockType2[FlacBlockType2[\"STREAMINFO\"] = 0] = \"STREAMINFO\";\n FlacBlockType2[FlacBlockType2[\"VORBIS_COMMENT\"] = 4] = \"VORBIS_COMMENT\";\n FlacBlockType2[FlacBlockType2[\"PICTURE\"] = 6] = \"PICTURE\";\n})(FlacBlockType || (FlacBlockType = {}));\n\n// ../../node_modules/mediabunny/dist/modules/src/custom-coder.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar customVideoEncoders = [];\nvar customAudioEncoders = [];\n\n// ../../node_modules/mediabunny/dist/modules/src/packet.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar PLACEHOLDER_DATA = /* @__PURE__ */ new Uint8Array(0);\n\nclass EncodedPacket {\n constructor(data, type, timestamp, duration, sequenceNumber = -1, byteLength, sideData) {\n this.data = data;\n this.type = type;\n this.timestamp = timestamp;\n this.duration = duration;\n this.sequenceNumber = sequenceNumber;\n if (data === PLACEHOLDER_DATA && byteLength === undefined) {\n throw new Error(\"Internal error: byteLength must be explicitly provided when constructing metadata-only packets.\");\n }\n if (byteLength === undefined) {\n byteLength = data.byteLength;\n }\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (type !== \"key\" && type !== \"delta\") {\n throw new TypeError('type must be either \"key\" or \"delta\".');\n }\n if (!Number.isFinite(timestamp)) {\n throw new TypeError(\"timestamp must be a number.\");\n }\n if (!Number.isFinite(duration) || duration < 0) {\n throw new TypeError(\"duration must be a non-negative number.\");\n }\n if (!Number.isFinite(sequenceNumber)) {\n throw new TypeError(\"sequenceNumber must be a number.\");\n }\n if (!Number.isInteger(byteLength) || byteLength < 0) {\n throw new TypeError(\"byteLength must be a non-negative integer.\");\n }\n if (sideData !== undefined && (typeof sideData !== \"object\" || !sideData)) {\n throw new TypeError(\"sideData, when provided, must be an object.\");\n }\n if (sideData?.alpha !== undefined && !(sideData.alpha instanceof Uint8Array)) {\n throw new TypeError(\"sideData.alpha, when provided, must be a Uint8Array.\");\n }\n if (sideData?.alphaByteLength !== undefined && (!Number.isInteger(sideData.alphaByteLength) || sideData.alphaByteLength < 0)) {\n throw new TypeError(\"sideData.alphaByteLength, when provided, must be a non-negative integer.\");\n }\n this.byteLength = byteLength;\n this.sideData = sideData ?? 
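// default to an empty object so the alpha side-data checks below stay safe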
{};\n if (this.sideData.alpha && this.sideData.alphaByteLength === undefined) {\n this.sideData.alphaByteLength = this.sideData.alpha.byteLength;\n }\n }\n get isMetadataOnly() {\n return this.data === PLACEHOLDER_DATA;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n toEncodedVideoChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n alphaToEncodedVideoChunk(type = this.type) {\n if (!this.sideData.alpha) {\n throw new TypeError(\"This packet does not contain alpha side data.\");\n }\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.sideData.alpha,\n type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n toEncodedAudioChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to an audio chunk.\");\n }\n if (typeof EncodedAudioChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedAudioChunk.\");\n }\n return new EncodedAudioChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n static fromEncodedChunk(chunk, sideData) {\n if (!(chunk instanceof EncodedVideoChunk || chunk instanceof EncodedAudioChunk)) {\n throw new TypeError(\"chunk must be an EncodedVideoChunk or EncodedAudioChunk.\");\n }\n const data = new Uint8Array(chunk.byteLength);\n chunk.copyTo(data);\n return new EncodedPacket(data, chunk.type, chunk.timestamp / 1e6, (chunk.duration ?? 0) / 1e6, undefined, undefined, sideData);\n }\n clone(options) {\n if (options !== undefined && (typeof options !== \"object\" || options === null)) {\n throw new TypeError(\"options, when provided, must be an object.\");\n }\n if (options?.timestamp !== undefined && !Number.isFinite(options.timestamp)) {\n throw new TypeError(\"options.timestamp, when provided, must be a number.\");\n }\n if (options?.duration !== undefined && !Number.isFinite(options.duration)) {\n throw new TypeError(\"options.duration, when provided, must be a number.\");\n }\n return new EncodedPacket(this.data, this.type, options?.timestamp ?? this.timestamp, options?.duration ?? this.duration, this.sequenceNumber, this.byteLength);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/pcm.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar toUlaw = (s16) => {\n const MULAW_MAX = 8191;\n const MULAW_BIAS = 33;\n let number = s16;\n let mask = 4096;\n let sign = 0;\n let position = 12;\n let lsb = 0;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n number += MULAW_BIAS;\n if (number > MULAW_MAX) {\n number = MULAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> position - 4 & 15;\n return ~(sign | position - 5 << 4 | lsb) & 255;\n};\nvar toAlaw = (s16) => {\n const ALAW_MAX = 4095;\n let mask = 2048;\n let sign = 0;\n let position = 11;\n let lsb = 0;\n let number = s16;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n if (number > ALAW_MAX) {\n number = ALAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> (position === 4 ? 1 : position - 4) & 15;\n return (sign | position - 4 << 4 | lsb) ^ 85;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/sample.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\npolyfillSymbolDispose();\nvar lastVideoGcErrorLog = -Infinity;\nvar lastAudioGcErrorLog = -Infinity;\nvar finalizationRegistry = null;\nif (typeof FinalizationRegistry !== \"undefined\") {\n finalizationRegistry = new FinalizationRegistry((value) => {\n const now = Date.now();\n if (value.type === \"video\") {\n if (now - lastVideoGcErrorLog >= 1000) {\n console.error(`A VideoSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your VideoSamples as soon as you're done using them.`);\n lastVideoGcErrorLog = now;\n }\n if (typeof VideoFrame !== \"undefined\" && value.data instanceof VideoFrame) {\n value.data.close();\n }\n } else {\n if (now - lastAudioGcErrorLog >= 1000) {\n console.error(`An AudioSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your AudioSamples as soon as you're done using them.`);\n lastAudioGcErrorLog = now;\n }\n if (typeof AudioData !== \"undefined\" && value.data instanceof AudioData) {\n value.data.close();\n }\n }\n });\n}\n\nclass VideoSample {\n get displayWidth() {\n return this.rotation % 180 === 0 ? this.codedWidth : this.codedHeight;\n }\n get displayHeight() {\n return this.rotation % 180 === 0 ? 
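// a 90 or 270 degree rotation swaps the displayed width and height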
this.codedHeight : this.codedWidth;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n get hasAlpha() {\n return this.format && this.format.includes(\"A\");\n }\n constructor(data, init) {\n this._closed = false;\n if (data instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && data instanceof SharedArrayBuffer || ArrayBuffer.isView(data)) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (!(\"format\" in init) || typeof init.format !== \"string\") {\n throw new TypeError(\"init.format must be a string.\");\n }\n if (!Number.isInteger(init.codedWidth) || init.codedWidth <= 0) {\n throw new TypeError(\"init.codedWidth must be a positive integer.\");\n }\n if (!Number.isInteger(init.codedHeight) || init.codedHeight <= 0) {\n throw new TypeError(\"init.codedHeight must be a positive integer.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = toUint8Array(data).slice();\n this.format = init.format;\n this.codedWidth = init.codedWidth;\n this.codedHeight = init.codedHeight;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 0;\n this.colorSpace = new VideoColorSpace(init.colorSpace);\n } else if (typeof VideoFrame !== \"undefined\" && data instanceof VideoFrame) {\n if (init?.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (init?.timestamp !== undefined && !Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp, when provided, must be a number.\");\n }\n if (init?.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = data;\n this.format = data.format;\n this.codedWidth = data.displayWidth;\n this.codedHeight = data.displayHeight;\n this.rotation = init?.rotation ?? 0;\n this.timestamp = init?.timestamp ?? data.timestamp / 1e6;\n this.duration = init?.duration ?? (data.duration ?? 
0) / 1e6;\n this.colorSpace = data.colorSpace;\n } else if (typeof HTMLImageElement !== \"undefined\" && data instanceof HTMLImageElement || typeof SVGImageElement !== \"undefined\" && data instanceof SVGImageElement || typeof ImageBitmap !== \"undefined\" && data instanceof ImageBitmap || typeof HTMLVideoElement !== \"undefined\" && data instanceof HTMLVideoElement || typeof HTMLCanvasElement !== \"undefined\" && data instanceof HTMLCanvasElement || typeof OffscreenCanvas !== \"undefined\" && data instanceof OffscreenCanvas) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n if (typeof VideoFrame !== \"undefined\") {\n return new VideoSample(new VideoFrame(data, {\n timestamp: Math.trunc(init.timestamp * SECOND_TO_MICROSECOND_FACTOR),\n duration: Math.trunc((init.duration ?? 0) * SECOND_TO_MICROSECOND_FACTOR) || undefined\n }), init);\n }\n let width = 0;\n let height = 0;\n if (\"naturalWidth\" in data) {\n width = data.naturalWidth;\n height = data.naturalHeight;\n } else if (\"videoWidth\" in data) {\n width = data.videoWidth;\n height = data.videoHeight;\n } else if (\"width\" in data) {\n width = Number(data.width);\n height = Number(data.height);\n }\n if (!width || !height) {\n throw new TypeError(\"Could not determine dimensions.\");\n }\n const canvas = new OffscreenCanvas(width, height);\n const context = canvas.getContext(\"2d\", {\n alpha: isFirefox(),\n willReadFrequently: true\n });\n assert(context);\n context.drawImage(data, 0, 0);\n this._data = canvas;\n this.format = \"RGBX\";\n this.codedWidth = width;\n this.codedHeight = height;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoColorSpace({\n matrix: \"rgb\",\n primaries: \"bt709\",\n transfer: \"iec61966-2-1\",\n fullRange: true\n });\n } else {\n throw new TypeError(\"Invalid data type: Must be a BufferSource or CanvasImageSource.\");\n }\n finalizationRegistry?.register(this, { type: \"video\", data: this._data }, this);\n }\n clone() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return new VideoSample(this._data.clone(), {\n timestamp: this.timestamp,\n duration: this.duration,\n rotation: this.rotation\n });\n } else if (this._data instanceof Uint8Array) {\n return new VideoSample(this._data.slice(), {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n } else {\n return new VideoSample(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isVideoFrame(this._data)) {\n this._data.close();\n } else {\n this._data = null;\n }\n this._closed = true;\n }\n allocationSize() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return this._data.allocationSize();\n } else if (this._data instanceof Uint8Array) {\n return this._data.byteLength;\n } else {\n return this.codedWidth * this.codedHeight * 4;\n }\n }\n async copyTo(destination) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n await this._data.copyTo(destination);\n } else if (this._data instanceof Uint8Array) {\n const dest = toUint8Array(destination);\n dest.set(this._data);\n } else {\n const canvas = this._data;\n const context = canvas.getContext(\"2d\");\n assert(context);\n const imageData = context.getImageData(0, 0, this.codedWidth, this.codedHeight);\n const dest = toUint8Array(destination);\n dest.set(imageData.data);\n }\n }\n toVideoFrame() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n } else if (this._data instanceof Uint8Array) {\n return new VideoFrame(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined,\n colorSpace: this.colorSpace\n });\n } else {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n }\n }\n draw(context, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) {\n let sx = 0;\n let sy = 0;\n let sWidth = this.displayWidth;\n let sHeight = this.displayHeight;\n let dx = 0;\n let dy = 0;\n let dWidth = this.displayWidth;\n let dHeight = this.displayHeight;\n if (arg5 !== undefined) {\n sx = arg1;\n sy = arg2;\n sWidth = arg3;\n 
sHeight = arg4;\n dx = arg5;\n dy = arg6;\n if (arg7 !== undefined) {\n dWidth = arg7;\n dHeight = arg8;\n } else {\n dWidth = sWidth;\n dHeight = sHeight;\n }\n } else {\n dx = arg1;\n dy = arg2;\n if (arg3 !== undefined) {\n dWidth = arg3;\n dHeight = arg4;\n }\n }\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!Number.isFinite(sx)) {\n throw new TypeError(\"sx must be a number.\");\n }\n if (!Number.isFinite(sy)) {\n throw new TypeError(\"sy must be a number.\");\n }\n if (!Number.isFinite(sWidth) || sWidth < 0) {\n throw new TypeError(\"sWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(sHeight) || sHeight < 0) {\n throw new TypeError(\"sHeight must be a non-negative number.\");\n }\n if (!Number.isFinite(dx)) {\n throw new TypeError(\"dx must be a number.\");\n }\n if (!Number.isFinite(dy)) {\n throw new TypeError(\"dy must be a number.\");\n }\n if (!Number.isFinite(dWidth) || dWidth < 0) {\n throw new TypeError(\"dWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(dHeight) || dHeight < 0) {\n throw new TypeError(\"dHeight must be a non-negative number.\");\n }\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n ({ sx, sy, sWidth, sHeight } = this._rotateSourceRegion(sx, sy, sWidth, sHeight, this.rotation));\n const source = this.toCanvasImageSource();\n context.save();\n const centerX = dx + dWidth / 2;\n const centerY = dy + dHeight / 2;\n context.translate(centerX, centerY);\n context.rotate(this.rotation * Math.PI / 180);\n const aspectRatioChange = this.rotation % 180 === 0 ? 1 : dWidth / dHeight;\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.drawImage(source, sx, sy, sWidth, sHeight, -dWidth / 2, -dHeight / 2, dWidth, dHeight);\n context.restore();\n }\n drawWithFit(context, options) {\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (![\"fill\", \"contain\", \"cover\"].includes(options.fit)) {\n throw new TypeError(\"options.fit must be 'fill', 'contain', or 'cover'.\");\n }\n if (options.rotation !== undefined && ![0, 90, 180, 270].includes(options.rotation)) {\n throw new TypeError(\"options.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (options.crop !== undefined) {\n validateCropRectangle(options.crop, \"options.\");\n }\n const canvasWidth = context.canvas.width;\n const canvasHeight = context.canvas.height;\n const rotation = options.rotation ?? this.rotation;\n const [rotatedWidth, rotatedHeight] = rotation % 180 === 0 ? [this.codedWidth, this.codedHeight] : [this.codedHeight, this.codedWidth];\n if (options.crop) {\n clampCropRectangle(options.crop, rotatedWidth, rotatedHeight);\n }\n let dx;\n let dy;\n let newWidth;\n let newHeight;\n const { sx, sy, sWidth, sHeight } = this._rotateSourceRegion(options.crop?.left ?? 0, options.crop?.top ?? 0, options.crop?.width ?? 
rotatedWidth, options.crop?.height ?? rotatedHeight, rotation);\n if (options.fit === \"fill\") {\n dx = 0;\n dy = 0;\n newWidth = canvasWidth;\n newHeight = canvasHeight;\n } else {\n const [sampleWidth, sampleHeight] = options.crop ? [options.crop.width, options.crop.height] : [rotatedWidth, rotatedHeight];\n const scale = options.fit === \"contain\" ? Math.min(canvasWidth / sampleWidth, canvasHeight / sampleHeight) : Math.max(canvasWidth / sampleWidth, canvasHeight / sampleHeight);\n newWidth = sampleWidth * scale;\n newHeight = sampleHeight * scale;\n dx = (canvasWidth - newWidth) / 2;\n dy = (canvasHeight - newHeight) / 2;\n }\n context.save();\n const aspectRatioChange = rotation % 180 === 0 ? 1 : newWidth / newHeight;\n context.translate(canvasWidth / 2, canvasHeight / 2);\n context.rotate(rotation * Math.PI / 180);\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.translate(-canvasWidth / 2, -canvasHeight / 2);\n context.drawImage(this.toCanvasImageSource(), sx, sy, sWidth, sHeight, dx, dy, newWidth, newHeight);\n context.restore();\n }\n _rotateSourceRegion(sx, sy, sWidth, sHeight, rotation) {\n if (rotation === 90) {\n [sx, sy, sWidth, sHeight] = [\n sy,\n this.codedHeight - sx - sWidth,\n sHeight,\n sWidth\n ];\n } else if (rotation === 180) {\n [sx, sy] = [\n this.codedWidth - sx - sWidth,\n this.codedHeight - sy - sHeight\n ];\n } else if (rotation === 270) {\n [sx, sy, sWidth, sHeight] = [\n this.codedWidth - sy - sHeight,\n sx,\n sHeight,\n sWidth\n ];\n }\n return { sx, sy, sWidth, sHeight };\n }\n toCanvasImageSource() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (this._data instanceof Uint8Array) {\n const videoFrame = this.toVideoFrame();\n queueMicrotask(() => videoFrame.close());\n return videoFrame;\n } else {\n return this._data;\n }\n }\n setRotation(newRotation) {\n if (![0, 90, 180, 270].includes(newRotation)) {\n throw new TypeError(\"newRotation must be 0, 90, 180, or 270.\");\n }\n this.rotation = newRotation;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n setDuration(newDuration) {\n if (!Number.isFinite(newDuration) || newDuration < 0) {\n throw new TypeError(\"newDuration must be a non-negative number.\");\n }\n this.duration = newDuration;\n }\n [Symbol.dispose]() {\n this.close();\n }\n}\nvar isVideoFrame = (x) => {\n return typeof VideoFrame !== \"undefined\" && x instanceof VideoFrame;\n};\nvar clampCropRectangle = (crop, outerWidth, outerHeight) => {\n crop.left = Math.min(crop.left, outerWidth);\n crop.top = Math.min(crop.top, outerHeight);\n crop.width = Math.min(crop.width, outerWidth - crop.left);\n crop.height = Math.min(crop.height, outerHeight - crop.top);\n assert(crop.width >= 0);\n assert(crop.height >= 0);\n};\nvar validateCropRectangle = (crop, prefix) => {\n if (!crop || typeof crop !== \"object\") {\n throw new TypeError(prefix + \"crop, when provided, must be an object.\");\n }\n if (!Number.isInteger(crop.left) || crop.left < 0) {\n throw new TypeError(prefix + \"crop.left must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.top) || crop.top < 0) {\n throw new TypeError(prefix + \"crop.top must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.width) || crop.width < 0) {\n throw new TypeError(prefix + \"crop.width must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.height) || 
crop.height < 0) {\n throw new TypeError(prefix + \"crop.height must be a non-negative integer.\");\n }\n};\nvar AUDIO_SAMPLE_FORMATS = new Set([\"f32\", \"f32-planar\", \"s16\", \"s16-planar\", \"s32\", \"s32-planar\", \"u8\", \"u8-planar\"]);\n\nclass AudioSample {\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n constructor(init) {\n this._closed = false;\n if (isAudioData(init)) {\n if (init.format === null) {\n throw new TypeError(\"AudioData with null format is not supported.\");\n }\n this._data = init;\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = init.numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp / 1e6;\n this.duration = init.numberOfFrames / init.sampleRate;\n } else {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"Invalid AudioDataInit: must be an object.\");\n }\n if (!AUDIO_SAMPLE_FORMATS.has(init.format)) {\n throw new TypeError(\"Invalid AudioDataInit: invalid format.\");\n }\n if (!Number.isFinite(init.sampleRate) || init.sampleRate <= 0) {\n throw new TypeError(\"Invalid AudioDataInit: sampleRate must be > 0.\");\n }\n if (!Number.isInteger(init.numberOfChannels) || init.numberOfChannels === 0) {\n throw new TypeError(\"Invalid AudioDataInit: numberOfChannels must be an integer > 0.\");\n }\n if (!Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n const numberOfFrames = init.data.byteLength / (getBytesPerSample(init.format) * init.numberOfChannels);\n if (!Number.isInteger(numberOfFrames)) {\n throw new TypeError(\"Invalid AudioDataInit: data size is not a multiple of frame size.\");\n }\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp;\n this.duration = numberOfFrames / init.sampleRate;\n let dataBuffer;\n if (init.data instanceof ArrayBuffer) {\n dataBuffer = new Uint8Array(init.data);\n } else if (ArrayBuffer.isView(init.data)) {\n dataBuffer = new Uint8Array(init.data.buffer, init.data.byteOffset, init.data.byteLength);\n } else {\n throw new TypeError(\"Invalid AudioDataInit: data is not a BufferSource.\");\n }\n const expectedSize = this.numberOfFrames * this.numberOfChannels * getBytesPerSample(this.format);\n if (dataBuffer.byteLength < expectedSize) {\n throw new TypeError(\"Invalid AudioDataInit: insufficient data size.\");\n }\n this._data = dataBuffer;\n }\n finalizationRegistry?.register(this, { type: \"audio\", data: this._data }, this);\n }\n allocationSize(options) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a 
non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const destFormat = options.format ?? this.format;\n const frameOffset = options.frameOffset ?? 0;\n if (frameOffset >= this.numberOfFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = options.frameCount !== undefined ? options.frameCount : this.numberOfFrames - frameOffset;\n if (copyFrameCount > this.numberOfFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const bytesPerSample = getBytesPerSample(destFormat);\n const isPlanar = formatIsPlanar(destFormat);\n if (isPlanar && options.planeIndex >= this.numberOfChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!isPlanar && options.planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const elementCount = isPlanar ? copyFrameCount : copyFrameCount * this.numberOfChannels;\n return elementCount * bytesPerSample;\n }\n copyTo(destination, options) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const { planeIndex, format, frameCount: optFrameCount, frameOffset: optFrameOffset } = options;\n const destFormat = format ?? this.format;\n if (!destFormat)\n throw new Error(\"Destination format not determined\");\n const numFrames = this.numberOfFrames;\n const numChannels = this.numberOfChannels;\n const frameOffset = optFrameOffset ?? 0;\n if (frameOffset >= numFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = optFrameCount !== undefined ? optFrameCount : numFrames - frameOffset;\n if (copyFrameCount > numFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const destBytesPerSample = getBytesPerSample(destFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar && planeIndex >= numChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!destIsPlanar && planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const destElementCount = destIsPlanar ? 
copyFrameCount : copyFrameCount * numChannels;\n const requiredSize = destElementCount * destBytesPerSample;\n if (destination.byteLength < requiredSize) {\n throw new RangeError(\"Destination buffer is too small\");\n }\n const destView = toDataView(destination);\n const writeFn = getWriteFunction(destFormat);\n if (isAudioData(this._data)) {\n if (destIsPlanar) {\n if (destFormat === \"f32-planar\") {\n this._data.copyTo(destination, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: \"f32-planar\"\n });\n } else {\n const tempBuffer = new ArrayBuffer(copyFrameCount * 4);\n const tempArray = new Float32Array(tempBuffer);\n this._data.copyTo(tempArray, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: \"f32-planar\"\n });\n const tempView = new DataView(tempBuffer);\n for (let i = 0;i < copyFrameCount; i++) {\n const destOffset = i * destBytesPerSample;\n const sample = tempView.getFloat32(i * 4, true);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n const numCh = numChannels;\n const temp = new Float32Array(copyFrameCount);\n for (let ch = 0;ch < numCh; ch++) {\n this._data.copyTo(temp, {\n planeIndex: ch,\n frameOffset,\n frameCount: copyFrameCount,\n format: \"f32-planar\"\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const destIndex = i * numCh + ch;\n const destOffset = destIndex * destBytesPerSample;\n writeFn(destView, destOffset, temp[i]);\n }\n }\n }\n } else {\n const uint8Data = this._data;\n const srcView = toDataView(uint8Data);\n const srcFormat = this.format;\n const readFn = getReadFunction(srcFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n for (let i = 0;i < copyFrameCount; i++) {\n if (destIsPlanar) {\n const destOffset = i * destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (planeIndex * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + planeIndex) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n } else {\n for (let ch = 0;ch < numChannels; ch++) {\n const destIndex = i * numChannels + ch;\n const destOffset = destIndex * destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (ch * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + ch) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n }\n }\n }\n }\n }\n clone() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n const sample = new AudioSample(this._data.clone());\n sample.setTimestamp(this.timestamp);\n return sample;\n } else {\n return new AudioSample({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.timestamp,\n data: this._data\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isAudioData(this._data)) {\n this._data.close();\n } else {\n this._data = new Uint8Array(0);\n }\n this._closed = true;\n }\n toAudioData() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n if (this._data.timestamp === this.microsecondTimestamp) {\n return this._data.clone();\n } else {\n if (formatIsPlanar(this.format)) {\n const size 
= this.allocationSize({ planeIndex: 0, format: this.format });\n const data = new ArrayBuffer(size * this.numberOfChannels);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(new Uint8Array(data, i * size, size), { planeIndex: i, format: this.format });\n }\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n } else {\n const data = new ArrayBuffer(this.allocationSize({ planeIndex: 0, format: this.format }));\n this.copyTo(data, { planeIndex: 0, format: this.format });\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n }\n }\n } else {\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data: this._data.buffer instanceof ArrayBuffer ? this._data.buffer : this._data.slice()\n });\n }\n }\n toAudioBuffer() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const audioBuffer = new AudioBuffer({\n numberOfChannels: this.numberOfChannels,\n length: this.numberOfFrames,\n sampleRate: this.sampleRate\n });\n const dataBytes = new Float32Array(this.allocationSize({ planeIndex: 0, format: \"f32-planar\" }) / 4);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(dataBytes, { planeIndex: i, format: \"f32-planar\" });\n audioBuffer.copyToChannel(dataBytes, i);\n }\n return audioBuffer;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n [Symbol.dispose]() {\n this.close();\n }\n static *_fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n yield new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n }\n static fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 
0;\n let remainingFrames = totalFrames;\n const result = [];\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n const audioSample = new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n result.push(audioSample);\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n return result;\n }\n}\nvar getBytesPerSample = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return 1;\n case \"s16\":\n case \"s16-planar\":\n return 2;\n case \"s32\":\n case \"s32-planar\":\n return 4;\n case \"f32\":\n case \"f32-planar\":\n return 4;\n default:\n throw new Error(\"Unknown AudioSampleFormat\");\n }\n};\nvar formatIsPlanar = (format) => {\n switch (format) {\n case \"u8-planar\":\n case \"s16-planar\":\n case \"s32-planar\":\n case \"f32-planar\":\n return true;\n default:\n return false;\n }\n};\nvar getReadFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset) => (view.getUint8(offset) - 128) / 128;\n case \"s16\":\n case \"s16-planar\":\n return (view, offset) => view.getInt16(offset, true) / 32768;\n case \"s32\":\n case \"s32-planar\":\n return (view, offset) => view.getInt32(offset, true) / 2147483648;\n case \"f32\":\n case \"f32-planar\":\n return (view, offset) => view.getFloat32(offset, true);\n }\n};\nvar getWriteFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset, value) => view.setUint8(offset, clamp((value + 1) * 127.5, 0, 255));\n case \"s16\":\n case \"s16-planar\":\n return (view, offset, value) => view.setInt16(offset, clamp(Math.round(value * 32767), -32768, 32767), true);\n case \"s32\":\n case \"s32-planar\":\n return (view, offset, value) => view.setInt32(offset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), true);\n case \"f32\":\n case \"f32-planar\":\n return (view, offset, value) => view.setFloat32(offset, value, true);\n }\n};\nvar isAudioData = (x) => {\n return typeof AudioData !== \"undefined\" && x instanceof AudioData;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-misc.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar buildIsobmffMimeType = (info) => {\n const base = info.hasVideo ? \"video/\" : info.hasAudio ? \"audio/\" : \"application/\";\n let string = base + (info.isQuickTime ? \"quicktime\" : \"mp4\");\n if (info.codecStrings.length > 0) {\n const uniqueCodecMimeTypes = [...new Set(info.codecStrings)];\n string += `; codecs=\"${uniqueCodecMimeTypes.join(\", \")}\"`;\n }\n return string;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-reader.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_BOX_HEADER_SIZE = 8;\nvar MAX_BOX_HEADER_SIZE = 16;\n\n// ../../node_modules/mediabunny/dist/modules/src/subtitles.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar inlineTimestampRegex = /<(?:(\\d{2}):)?(\\d{2}):(\\d{2}).(\\d{3})>/g;\nvar formatSubtitleTimestamp = (timestamp) => {\n const hours = Math.floor(timestamp / (60 * 60 * 1000));\n const minutes = Math.floor(timestamp % (60 * 60 * 1000) / (60 * 1000));\n const seconds = Math.floor(timestamp % (60 * 1000) / 1000);\n const milliseconds = timestamp % 1000;\n return hours.toString().padStart(2, \"0\") + \":\" + minutes.toString().padStart(2, \"0\") + \":\" + seconds.toString().padStart(2, \"0\") + \".\" + milliseconds.toString().padStart(3, \"0\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-boxes.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass IsobmffBoxWriter {\n constructor(writer) {\n this.writer = writer;\n this.helper = new Uint8Array(8);\n this.helperView = new DataView(this.helper.buffer);\n this.offsets = new WeakMap;\n }\n writeU32(value) {\n this.helperView.setUint32(0, value, false);\n this.writer.write(this.helper.subarray(0, 4));\n }\n writeU64(value) {\n this.helperView.setUint32(0, Math.floor(value / 2 ** 32), false);\n this.helperView.setUint32(4, value, false);\n this.writer.write(this.helper.subarray(0, 8));\n }\n writeAscii(text) {\n for (let i = 0;i < text.length; i++) {\n this.helperView.setUint8(i % 8, text.charCodeAt(i));\n if (i % 8 === 7)\n this.writer.write(this.helper);\n }\n if (text.length % 8 !== 0) {\n this.writer.write(this.helper.subarray(0, text.length % 8));\n }\n }\n writeBox(box) {\n this.offsets.set(box, this.writer.getPos());\n if (box.contents && !box.children) {\n this.writeBoxHeader(box, box.size ?? box.contents.byteLength + 8);\n this.writer.write(box.contents);\n } else {\n const startPos = this.writer.getPos();\n this.writeBoxHeader(box, 0);\n if (box.contents)\n this.writer.write(box.contents);\n if (box.children) {\n for (const child of box.children)\n if (child)\n this.writeBox(child);\n }\n const endPos = this.writer.getPos();\n const size = box.size ?? endPos - startPos;\n this.writer.seek(startPos);\n this.writeBoxHeader(box, size);\n this.writer.seek(endPos);\n }\n }\n writeBoxHeader(box, size) {\n this.writeU32(box.largeSize ? 1 : size);\n this.writeAscii(box.type);\n if (box.largeSize)\n this.writeU64(size);\n }\n measureBoxHeader(box) {\n return 8 + (box.largeSize ? 
8 : 0);\n }\n patchBox(box) {\n const boxOffset = this.offsets.get(box);\n assert(boxOffset !== undefined);\n const endPos = this.writer.getPos();\n this.writer.seek(boxOffset);\n this.writeBox(box);\n this.writer.seek(endPos);\n }\n measureBox(box) {\n if (box.contents && !box.children) {\n const headerSize = this.measureBoxHeader(box);\n return headerSize + box.contents.byteLength;\n } else {\n let result = this.measureBoxHeader(box);\n if (box.contents)\n result += box.contents.byteLength;\n if (box.children) {\n for (const child of box.children)\n if (child)\n result += this.measureBox(child);\n }\n return result;\n }\n }\n}\nvar bytes = /* @__PURE__ */ new Uint8Array(8);\nvar view = /* @__PURE__ */ new DataView(bytes.buffer);\nvar u8 = (value) => {\n return [(value % 256 + 256) % 256];\n};\nvar u16 = (value) => {\n view.setUint16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar i16 = (value) => {\n view.setInt16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar u24 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[1], bytes[2], bytes[3]];\n};\nvar u32 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar i32 = (value) => {\n view.setInt32(0, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar u64 = (value) => {\n view.setUint32(0, Math.floor(value / 2 ** 32), false);\n view.setUint32(4, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7]];\n};\nvar fixed_8_8 = (value) => {\n view.setInt16(0, 2 ** 8 * value, false);\n return [bytes[0], bytes[1]];\n};\nvar fixed_16_16 = (value) => {\n view.setInt32(0, 2 ** 16 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar fixed_2_30 = (value) => {\n view.setInt32(0, 2 ** 30 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar variableUnsignedInt = (value, byteLength) => {\n const bytes2 = [];\n let remaining = value;\n do {\n let byte = remaining & 127;\n remaining >>= 7;\n if (bytes2.length > 0) {\n byte |= 128;\n }\n bytes2.push(byte);\n if (byteLength !== undefined) {\n byteLength--;\n }\n } while (remaining > 0 || byteLength);\n return bytes2.reverse();\n};\nvar ascii = (text, nullTerminated = false) => {\n const bytes2 = Array(text.length).fill(null).map((_, i) => text.charCodeAt(i));\n if (nullTerminated)\n bytes2.push(0);\n return bytes2;\n};\nvar lastPresentedSample = (samples) => {\n let result = null;\n for (const sample of samples) {\n if (!result || sample.timestamp > result.timestamp) {\n result = sample;\n }\n }\n return result;\n};\nvar rotationMatrix = (rotationInDegrees) => {\n const theta = rotationInDegrees * (Math.PI / 180);\n const cosTheta = Math.round(Math.cos(theta));\n const sinTheta = Math.round(Math.sin(theta));\n return [\n cosTheta,\n sinTheta,\n 0,\n -sinTheta,\n cosTheta,\n 0,\n 0,\n 0,\n 1\n ];\n};\nvar IDENTITY_MATRIX = /* @__PURE__ */ rotationMatrix(0);\nvar matrixToBytes = (matrix) => {\n return [\n fixed_16_16(matrix[0]),\n fixed_16_16(matrix[1]),\n fixed_2_30(matrix[2]),\n fixed_16_16(matrix[3]),\n fixed_16_16(matrix[4]),\n fixed_2_30(matrix[5]),\n fixed_16_16(matrix[6]),\n fixed_16_16(matrix[7]),\n fixed_2_30(matrix[8])\n ];\n};\nvar box = (type, contents, children) => ({\n type,\n contents: contents && new Uint8Array(contents.flat(10)),\n children\n});\nvar fullBox = (type, version, flags, contents, children) => box(type, [u8(version), u24(flags), contents ?? 
[]], children);\nvar ftyp = (details) => {\n const minorVersion = 512;\n if (details.isQuickTime) {\n return box(\"ftyp\", [\n ascii(\"qt \"),\n u32(minorVersion),\n ascii(\"qt \")\n ]);\n }\n if (details.fragmented) {\n return box(\"ftyp\", [\n ascii(\"iso5\"),\n u32(minorVersion),\n ascii(\"iso5\"),\n ascii(\"iso6\"),\n ascii(\"mp41\")\n ]);\n }\n return box(\"ftyp\", [\n ascii(\"isom\"),\n u32(minorVersion),\n ascii(\"isom\"),\n details.holdsAvc ? ascii(\"avc1\") : [],\n ascii(\"mp41\")\n ]);\n};\nvar mdat = (reserveLargeSize) => ({ type: \"mdat\", largeSize: reserveLargeSize });\nvar free = (size) => ({ type: \"free\", size });\nvar moov = (muxer) => box(\"moov\", undefined, [\n mvhd(muxer.creationTime, muxer.trackDatas),\n ...muxer.trackDatas.map((x) => trak(x, muxer.creationTime)),\n muxer.isFragmented ? mvex(muxer.trackDatas) : null,\n udta(muxer)\n]);\nvar mvhd = (creationTime, trackDatas) => {\n const duration = intoTimescale(Math.max(0, ...trackDatas.filter((x) => x.samples.length > 0).map((x) => {\n const lastSample = lastPresentedSample(x.samples);\n return lastSample.timestamp + lastSample.duration;\n })), GLOBAL_TIMESCALE);\n const nextTrackId = Math.max(0, ...trackDatas.map((x) => x.track.id)) + 1;\n const needsU64 = !isU32(creationTime) || !isU32(duration);\n const u32OrU64 = needsU64 ? u64 : u32;\n return fullBox(\"mvhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(GLOBAL_TIMESCALE),\n u32OrU64(duration),\n fixed_16_16(1),\n fixed_8_8(1),\n Array(10).fill(0),\n matrixToBytes(IDENTITY_MATRIX),\n Array(24).fill(0),\n u32(nextTrackId)\n ]);\n};\nvar trak = (trackData, creationTime) => {\n const trackMetadata = getTrackMetadata(trackData);\n return box(\"trak\", undefined, [\n tkhd(trackData, creationTime),\n mdia(trackData, creationTime),\n trackMetadata.name !== undefined ? box(\"udta\", undefined, [\n box(\"name\", [\n ...textEncoder.encode(trackMetadata.name)\n ])\n ]) : null\n ]);\n};\nvar tkhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const durationInGlobalTimescale = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, GLOBAL_TIMESCALE);\n const needsU64 = !isU32(creationTime) || !isU32(durationInGlobalTimescale);\n const u32OrU64 = needsU64 ? u64 : u32;\n let matrix;\n if (trackData.type === \"video\") {\n const rotation = trackData.track.metadata.rotation;\n matrix = rotationMatrix(rotation ?? 0);\n } else {\n matrix = IDENTITY_MATRIX;\n }\n let flags = 2;\n if (trackData.track.metadata.disposition?.default !== false) {\n flags |= 1;\n }\n return fullBox(\"tkhd\", +needsU64, flags, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.track.id),\n u32(0),\n u32OrU64(durationInGlobalTimescale),\n Array(8).fill(0),\n u16(0),\n u16(trackData.track.id),\n fixed_8_8(trackData.type === \"audio\" ? 1 : 0),\n u16(0),\n matrixToBytes(matrix),\n fixed_16_16(trackData.type === \"video\" ? trackData.info.width : 0),\n fixed_16_16(trackData.type === \"video\" ? trackData.info.height : 0)\n ]);\n};\nvar mdia = (trackData, creationTime) => box(\"mdia\", undefined, [\n mdhd(trackData, creationTime),\n hdlr(true, TRACK_TYPE_TO_COMPONENT_SUBTYPE[trackData.type], TRACK_TYPE_TO_HANDLER_NAME[trackData.type]),\n minf(trackData)\n]);\nvar mdhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const localDuration = intoTimescale(lastSample ? 
lastSample.timestamp + lastSample.duration : 0, trackData.timescale);\n const needsU64 = !isU32(creationTime) || !isU32(localDuration);\n const u32OrU64 = needsU64 ? u64 : u32;\n return fullBox(\"mdhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.timescale),\n u32OrU64(localDuration),\n u16(getLanguageCodeInt(trackData.track.metadata.languageCode ?? UNDETERMINED_LANGUAGE)),\n u16(0)\n ]);\n};\nvar TRACK_TYPE_TO_COMPONENT_SUBTYPE = {\n video: \"vide\",\n audio: \"soun\",\n subtitle: \"text\"\n};\nvar TRACK_TYPE_TO_HANDLER_NAME = {\n video: \"MediabunnyVideoHandler\",\n audio: \"MediabunnySoundHandler\",\n subtitle: \"MediabunnyTextHandler\"\n};\nvar hdlr = (hasComponentType, handlerType, name, manufacturer = \"\\x00\\x00\\x00\\x00\") => fullBox(\"hdlr\", 0, 0, [\n hasComponentType ? ascii(\"mhlr\") : u32(0),\n ascii(handlerType),\n ascii(manufacturer),\n u32(0),\n u32(0),\n ascii(name, true)\n]);\nvar minf = (trackData) => box(\"minf\", undefined, [\n TRACK_TYPE_TO_HEADER_BOX[trackData.type](),\n dinf(),\n stbl(trackData)\n]);\nvar vmhd = () => fullBox(\"vmhd\", 0, 1, [\n u16(0),\n u16(0),\n u16(0),\n u16(0)\n]);\nvar smhd = () => fullBox(\"smhd\", 0, 0, [\n u16(0),\n u16(0)\n]);\nvar nmhd = () => fullBox(\"nmhd\", 0, 0);\nvar TRACK_TYPE_TO_HEADER_BOX = {\n video: vmhd,\n audio: smhd,\n subtitle: nmhd\n};\nvar dinf = () => box(\"dinf\", undefined, [\n dref()\n]);\nvar dref = () => fullBox(\"dref\", 0, 0, [\n u32(1)\n], [\n url()\n]);\nvar url = () => fullBox(\"url \", 0, 1);\nvar stbl = (trackData) => {\n const needsCtts = trackData.compositionTimeOffsetTable.length > 1 || trackData.compositionTimeOffsetTable.some((x) => x.sampleCompositionTimeOffset !== 0);\n return box(\"stbl\", undefined, [\n stsd(trackData),\n stts(trackData),\n needsCtts ? ctts(trackData) : null,\n needsCtts ? cslg(trackData) : null,\n stsc(trackData),\n stsz(trackData),\n stco(trackData),\n stss(trackData)\n ]);\n};\nvar stsd = (trackData) => {\n let sampleDescription;\n if (trackData.type === \"video\") {\n sampleDescription = videoSampleDescription(videoCodecToBoxName(trackData.track.source._codec, trackData.info.decoderConfig.codec), trackData);\n } else if (trackData.type === \"audio\") {\n const boxName = audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime);\n assert(boxName);\n sampleDescription = soundSampleDescription(boxName, trackData);\n } else if (trackData.type === \"subtitle\") {\n sampleDescription = subtitleSampleDescription(SUBTITLE_CODEC_TO_BOX_NAME[trackData.track.source._codec], trackData);\n }\n assert(sampleDescription);\n return fullBox(\"stsd\", 0, 0, [\n u32(1)\n ], [\n sampleDescription\n ]);\n};\nvar videoSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1),\n u16(0),\n u16(0),\n Array(12).fill(0),\n u16(trackData.info.width),\n u16(trackData.info.height),\n u32(4718592),\n u32(4718592),\n u32(0),\n u16(1),\n Array(32).fill(0),\n u16(24),\n i16(65535)\n], [\n VIDEO_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData),\n colorSpaceIsComplete(trackData.info.decoderConfig.colorSpace) ? 
colr(trackData) : null\n]);\nvar colr = (trackData) => box(\"colr\", [\n ascii(\"nclx\"),\n u16(COLOR_PRIMARIES_MAP[trackData.info.decoderConfig.colorSpace.primaries]),\n u16(TRANSFER_CHARACTERISTICS_MAP[trackData.info.decoderConfig.colorSpace.transfer]),\n u16(MATRIX_COEFFICIENTS_MAP[trackData.info.decoderConfig.colorSpace.matrix]),\n u8((trackData.info.decoderConfig.colorSpace.fullRange ? 1 : 0) << 7)\n]);\nvar avcC = (trackData) => trackData.info.decoderConfig && box(\"avcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar hvcC = (trackData) => trackData.info.decoderConfig && box(\"hvcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar vpcC = (trackData) => {\n if (!trackData.info.decoderConfig) {\n return null;\n }\n const decoderConfig = trackData.info.decoderConfig;\n const parts = decoderConfig.codec.split(\".\");\n const profile = Number(parts[1]);\n const level = Number(parts[2]);\n const bitDepth = Number(parts[3]);\n const chromaSubsampling = parts[4] ? Number(parts[4]) : 1;\n const videoFullRangeFlag = parts[8] ? Number(parts[8]) : Number(decoderConfig.colorSpace?.fullRange ?? 0);\n const thirdByte = (bitDepth << 4) + (chromaSubsampling << 1) + videoFullRangeFlag;\n const colourPrimaries = parts[5] ? Number(parts[5]) : decoderConfig.colorSpace?.primaries ? COLOR_PRIMARIES_MAP[decoderConfig.colorSpace.primaries] : 2;\n const transferCharacteristics = parts[6] ? Number(parts[6]) : decoderConfig.colorSpace?.transfer ? TRANSFER_CHARACTERISTICS_MAP[decoderConfig.colorSpace.transfer] : 2;\n const matrixCoefficients = parts[7] ? Number(parts[7]) : decoderConfig.colorSpace?.matrix ? MATRIX_COEFFICIENTS_MAP[decoderConfig.colorSpace.matrix] : 2;\n return fullBox(\"vpcC\", 1, 0, [\n u8(profile),\n u8(level),\n u8(thirdByte),\n u8(colourPrimaries),\n u8(transferCharacteristics),\n u8(matrixCoefficients),\n u16(0)\n ]);\n};\nvar av1C = (trackData) => {\n return box(\"av1C\", generateAv1CodecConfigurationFromCodecString(trackData.info.decoderConfig.codec));\n};\nvar soundSampleDescription = (compressionType, trackData) => {\n let version = 0;\n let contents;\n let sampleSizeInBits = 16;\n if (PCM_AUDIO_CODECS.includes(trackData.track.source._codec)) {\n const codec = trackData.track.source._codec;\n const { sampleSize } = parsePcmCodec(codec);\n sampleSizeInBits = 8 * sampleSize;\n if (sampleSizeInBits > 16) {\n version = 1;\n }\n }\n if (version === 0) {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(sampleSizeInBits),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0)\n ];\n } else {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(Math.min(sampleSizeInBits, 16)),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0),\n u32(1),\n u32(sampleSizeInBits / 8),\n u32(trackData.info.numberOfChannels * sampleSizeInBits / 8),\n u32(2)\n ];\n }\n return box(compressionType, contents, [\n audioCodecToConfigurationBox(trackData.track.source._codec, trackData.muxer.isQuickTime)?.(trackData) ?? 
null\n ]);\n};\nvar esds = (trackData) => {\n let objectTypeIndication;\n switch (trackData.track.source._codec) {\n case \"aac\":\n {\n objectTypeIndication = 64;\n }\n ;\n break;\n case \"mp3\":\n {\n objectTypeIndication = 107;\n }\n ;\n break;\n case \"vorbis\":\n {\n objectTypeIndication = 221;\n }\n ;\n break;\n default:\n throw new Error(`Unhandled audio codec: ${trackData.track.source._codec}`);\n }\n let bytes2 = [\n ...u8(objectTypeIndication),\n ...u8(21),\n ...u24(0),\n ...u32(0),\n ...u32(0)\n ];\n if (trackData.info.decoderConfig.description) {\n const description = toUint8Array(trackData.info.decoderConfig.description);\n bytes2 = [\n ...bytes2,\n ...u8(5),\n ...variableUnsignedInt(description.byteLength),\n ...description\n ];\n }\n bytes2 = [\n ...u16(1),\n ...u8(0),\n ...u8(4),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2,\n ...u8(6),\n ...u8(1),\n ...u8(2)\n ];\n bytes2 = [\n ...u8(3),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2\n ];\n return fullBox(\"esds\", 0, 0, bytes2);\n};\nvar wave = (trackData) => {\n return box(\"wave\", undefined, [\n frma(trackData),\n enda(trackData),\n box(\"\\x00\\x00\\x00\\x00\")\n ]);\n};\nvar frma = (trackData) => {\n return box(\"frma\", [\n ascii(audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime))\n ]);\n};\nvar enda = (trackData) => {\n const { littleEndian } = parsePcmCodec(trackData.track.source._codec);\n return box(\"enda\", [\n u16(+littleEndian)\n ]);\n};\nvar dOps = (trackData) => {\n let outputChannelCount = trackData.info.numberOfChannels;\n let preSkip = 3840;\n let inputSampleRate = trackData.info.sampleRate;\n let outputGain = 0;\n let channelMappingFamily = 0;\n let channelMappingTable = new Uint8Array(0);\n const description = trackData.info.decoderConfig?.description;\n if (description) {\n assert(description.byteLength >= 18);\n const bytes2 = toUint8Array(description);\n const header = parseOpusIdentificationHeader(bytes2);\n outputChannelCount = header.outputChannelCount;\n preSkip = header.preSkip;\n inputSampleRate = header.inputSampleRate;\n outputGain = header.outputGain;\n channelMappingFamily = header.channelMappingFamily;\n if (header.channelMappingTable) {\n channelMappingTable = header.channelMappingTable;\n }\n }\n return box(\"dOps\", [\n u8(0),\n u8(outputChannelCount),\n u16(preSkip),\n u32(inputSampleRate),\n i16(outputGain),\n u8(channelMappingFamily),\n ...channelMappingTable\n ]);\n};\nvar dfLa = (trackData) => {\n const description = trackData.info.decoderConfig?.description;\n assert(description);\n const bytes2 = toUint8Array(description);\n return fullBox(\"dfLa\", 0, 0, [\n ...bytes2.subarray(4)\n ]);\n};\nvar pcmC = (trackData) => {\n const { littleEndian, sampleSize } = parsePcmCodec(trackData.track.source._codec);\n const formatFlags = +littleEndian;\n return fullBox(\"pcmC\", 0, 0, [\n u8(formatFlags),\n u8(8 * sampleSize)\n ]);\n};\nvar subtitleSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1)\n], [\n SUBTITLE_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData)\n]);\nvar vttC = (trackData) => box(\"vttC\", [\n ...textEncoder.encode(trackData.info.config.description)\n]);\nvar stts = (trackData) => {\n return fullBox(\"stts\", 0, 0, [\n u32(trackData.timeToSampleTable.length),\n trackData.timeToSampleTable.map((x) => [\n u32(x.sampleCount),\n u32(x.sampleDelta)\n ])\n ]);\n};\nvar stss = (trackData) => {\n if (trackData.samples.every((x) => x.type === \"key\"))\n return 
null;\n const keySamples = [...trackData.samples.entries()].filter(([, sample]) => sample.type === \"key\");\n return fullBox(\"stss\", 0, 0, [\n u32(keySamples.length),\n keySamples.map(([index]) => u32(index + 1))\n ]);\n};\nvar stsc = (trackData) => {\n return fullBox(\"stsc\", 0, 0, [\n u32(trackData.compactlyCodedChunkTable.length),\n trackData.compactlyCodedChunkTable.map((x) => [\n u32(x.firstChunk),\n u32(x.samplesPerChunk),\n u32(1)\n ])\n ]);\n};\nvar stsz = (trackData) => {\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n const { sampleSize } = parsePcmCodec(trackData.track.source._codec);\n return fullBox(\"stsz\", 0, 0, [\n u32(sampleSize * trackData.info.numberOfChannels),\n u32(trackData.samples.reduce((acc, x) => acc + intoTimescale(x.duration, trackData.timescale), 0))\n ]);\n }\n return fullBox(\"stsz\", 0, 0, [\n u32(0),\n u32(trackData.samples.length),\n trackData.samples.map((x) => u32(x.size))\n ]);\n};\nvar stco = (trackData) => {\n if (trackData.finalizedChunks.length > 0 && last(trackData.finalizedChunks).offset >= 2 ** 32) {\n return fullBox(\"co64\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u64(x.offset))\n ]);\n }\n return fullBox(\"stco\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u32(x.offset))\n ]);\n};\nvar ctts = (trackData) => {\n return fullBox(\"ctts\", 1, 0, [\n u32(trackData.compositionTimeOffsetTable.length),\n trackData.compositionTimeOffsetTable.map((x) => [\n u32(x.sampleCount),\n i32(x.sampleCompositionTimeOffset)\n ])\n ]);\n};\nvar cslg = (trackData) => {\n let leastDecodeToDisplayDelta = Infinity;\n let greatestDecodeToDisplayDelta = -Infinity;\n let compositionStartTime = Infinity;\n let compositionEndTime = -Infinity;\n assert(trackData.compositionTimeOffsetTable.length > 0);\n assert(trackData.samples.length > 0);\n for (let i = 0;i < trackData.compositionTimeOffsetTable.length; i++) {\n const entry = trackData.compositionTimeOffsetTable[i];\n leastDecodeToDisplayDelta = Math.min(leastDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n greatestDecodeToDisplayDelta = Math.max(greatestDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n }\n for (let i = 0;i < trackData.samples.length; i++) {\n const sample = trackData.samples[i];\n compositionStartTime = Math.min(compositionStartTime, intoTimescale(sample.timestamp, trackData.timescale));\n compositionEndTime = Math.max(compositionEndTime, intoTimescale(sample.timestamp + sample.duration, trackData.timescale));\n }\n const compositionToDtsShift = Math.max(-leastDecodeToDisplayDelta, 0);\n if (compositionEndTime >= 2 ** 31) {\n return null;\n }\n return fullBox(\"cslg\", 0, 0, [\n i32(compositionToDtsShift),\n i32(leastDecodeToDisplayDelta),\n i32(greatestDecodeToDisplayDelta),\n i32(compositionStartTime),\n i32(compositionEndTime)\n ]);\n};\nvar mvex = (trackDatas) => {\n return box(\"mvex\", undefined, trackDatas.map(trex));\n};\nvar trex = (trackData) => {\n return fullBox(\"trex\", 0, 0, [\n u32(trackData.track.id),\n u32(1),\n u32(0),\n u32(0),\n u32(0)\n ]);\n};\nvar moof = (sequenceNumber, trackDatas) => {\n return box(\"moof\", undefined, [\n mfhd(sequenceNumber),\n ...trackDatas.map(traf)\n ]);\n};\nvar mfhd = (sequenceNumber) => {\n return fullBox(\"mfhd\", 0, 0, [\n u32(sequenceNumber)\n ]);\n};\nvar fragmentSampleFlags = (sample) => {\n let byte1 = 0;\n let byte2 = 0;\n const byte3 = 0;\n const byte4 = 0;\n const sampleIsDifferenceSample = 
sample.type === \"delta\";\n byte2 |= +sampleIsDifferenceSample;\n if (sampleIsDifferenceSample) {\n byte1 |= 1;\n } else {\n byte1 |= 2;\n }\n return byte1 << 24 | byte2 << 16 | byte3 << 8 | byte4;\n};\nvar traf = (trackData) => {\n return box(\"traf\", undefined, [\n tfhd(trackData),\n tfdt(trackData),\n trun(trackData)\n ]);\n};\nvar tfhd = (trackData) => {\n assert(trackData.currentChunk);\n let tfFlags = 0;\n tfFlags |= 8;\n tfFlags |= 16;\n tfFlags |= 32;\n tfFlags |= 131072;\n const referenceSample = trackData.currentChunk.samples[1] ?? trackData.currentChunk.samples[0];\n const referenceSampleInfo = {\n duration: referenceSample.timescaleUnitsToNextSample,\n size: referenceSample.size,\n flags: fragmentSampleFlags(referenceSample)\n };\n return fullBox(\"tfhd\", 0, tfFlags, [\n u32(trackData.track.id),\n u32(referenceSampleInfo.duration),\n u32(referenceSampleInfo.size),\n u32(referenceSampleInfo.flags)\n ]);\n};\nvar tfdt = (trackData) => {\n assert(trackData.currentChunk);\n return fullBox(\"tfdt\", 1, 0, [\n u64(intoTimescale(trackData.currentChunk.startTimestamp, trackData.timescale))\n ]);\n};\nvar trun = (trackData) => {\n assert(trackData.currentChunk);\n const allSampleDurations = trackData.currentChunk.samples.map((x) => x.timescaleUnitsToNextSample);\n const allSampleSizes = trackData.currentChunk.samples.map((x) => x.size);\n const allSampleFlags = trackData.currentChunk.samples.map(fragmentSampleFlags);\n const allSampleCompositionTimeOffsets = trackData.currentChunk.samples.map((x) => intoTimescale(x.timestamp - x.decodeTimestamp, trackData.timescale));\n const uniqueSampleDurations = new Set(allSampleDurations);\n const uniqueSampleSizes = new Set(allSampleSizes);\n const uniqueSampleFlags = new Set(allSampleFlags);\n const uniqueSampleCompositionTimeOffsets = new Set(allSampleCompositionTimeOffsets);\n const firstSampleFlagsPresent = uniqueSampleFlags.size === 2 && allSampleFlags[0] !== allSampleFlags[1];\n const sampleDurationPresent = uniqueSampleDurations.size > 1;\n const sampleSizePresent = uniqueSampleSizes.size > 1;\n const sampleFlagsPresent = !firstSampleFlagsPresent && uniqueSampleFlags.size > 1;\n const sampleCompositionTimeOffsetsPresent = uniqueSampleCompositionTimeOffsets.size > 1 || [...uniqueSampleCompositionTimeOffsets].some((x) => x !== 0);\n let flags = 0;\n flags |= 1;\n flags |= 4 * +firstSampleFlagsPresent;\n flags |= 256 * +sampleDurationPresent;\n flags |= 512 * +sampleSizePresent;\n flags |= 1024 * +sampleFlagsPresent;\n flags |= 2048 * +sampleCompositionTimeOffsetsPresent;\n return fullBox(\"trun\", 1, flags, [\n u32(trackData.currentChunk.samples.length),\n u32(trackData.currentChunk.offset - trackData.currentChunk.moofOffset || 0),\n firstSampleFlagsPresent ? u32(allSampleFlags[0]) : [],\n trackData.currentChunk.samples.map((_, i) => [\n sampleDurationPresent ? u32(allSampleDurations[i]) : [],\n sampleSizePresent ? u32(allSampleSizes[i]) : [],\n sampleFlagsPresent ? u32(allSampleFlags[i]) : [],\n sampleCompositionTimeOffsetsPresent ? 
i32(allSampleCompositionTimeOffsets[i]) : []\n ])\n ]);\n};\nvar mfra = (trackDatas) => {\n return box(\"mfra\", undefined, [\n ...trackDatas.map(tfra),\n mfro()\n ]);\n};\nvar tfra = (trackData, trackIndex) => {\n const version = 1;\n return fullBox(\"tfra\", version, 0, [\n u32(trackData.track.id),\n u32(63),\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((chunk) => [\n u64(intoTimescale(chunk.samples[0].timestamp, trackData.timescale)),\n u64(chunk.moofOffset),\n u32(trackIndex + 1),\n u32(1),\n u32(1)\n ])\n ]);\n};\nvar mfro = () => {\n return fullBox(\"mfro\", 0, 0, [\n u32(0)\n ]);\n};\nvar vtte = () => box(\"vtte\");\nvar vttc = (payload, timestamp, identifier, settings, sourceId) => box(\"vttc\", undefined, [\n sourceId !== null ? box(\"vsid\", [i32(sourceId)]) : null,\n identifier !== null ? box(\"iden\", [...textEncoder.encode(identifier)]) : null,\n timestamp !== null ? box(\"ctim\", [...textEncoder.encode(formatSubtitleTimestamp(timestamp))]) : null,\n settings !== null ? box(\"sttg\", [...textEncoder.encode(settings)]) : null,\n box(\"payl\", [...textEncoder.encode(payload)])\n]);\nvar vtta = (notes) => box(\"vtta\", [...textEncoder.encode(notes)]);\nvar udta = (muxer) => {\n const boxes = [];\n const metadataFormat = muxer.format._options.metadataFormat ?? \"auto\";\n const metadataTags = muxer.output._metadataTags;\n if (metadataFormat === \"mdir\" || metadataFormat === \"auto\" && !muxer.isQuickTime) {\n const metaBox = metaMdir(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"mdta\") {\n const metaBox = metaMdta(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"udta\" || metadataFormat === \"auto\" && muxer.isQuickTime) {\n addQuickTimeMetadataTagBoxes(boxes, muxer.output._metadataTags);\n }\n if (boxes.length === 0) {\n return null;\n }\n return box(\"udta\", undefined, boxes);\n};\nvar addQuickTimeMetadataTagBoxes = (boxes, tags) => {\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9nam\", value));\n }\n ;\n break;\n case \"description\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9des\", value));\n }\n ;\n break;\n case \"artist\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9ART\", value));\n }\n ;\n break;\n case \"album\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9alb\", value));\n }\n ;\n break;\n case \"albumArtist\":\n {\n boxes.push(metadataTagStringBoxShort(\"albr\", value));\n }\n ;\n break;\n case \"genre\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9gen\", value));\n }\n ;\n break;\n case \"date\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9day\", value.toISOString().slice(0, 10)));\n }\n ;\n break;\n case \"comment\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9cmt\", value));\n }\n ;\n break;\n case \"lyrics\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9lyr\", value));\n }\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n case \"discNumber\":\n case \"discsTotal\":\n case \"trackNumber\":\n case \"tracksTotal\":\n case \"images\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == null || key.length !== 4 || boxes.some((x) => x.type === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n boxes.push(metadataTagStringBoxShort(key, value));\n } else if (value instanceof Uint8Array) {\n boxes.push(box(key, 
Array.from(value)));\n }\n }\n }\n};\nvar metadataTagStringBoxShort = (name, value) => {\n const encoded = textEncoder.encode(value);\n return box(name, [\n u16(encoded.length),\n u16(getLanguageCodeInt(\"und\")),\n Array.from(encoded)\n ]);\n};\nvar DATA_BOX_MIME_TYPE_MAP = {\n \"image/jpeg\": 13,\n \"image/png\": 14,\n \"image/bmp\": 27\n};\nvar generateMetadataPairs = (tags, isMdta) => {\n const pairs = [];\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n pairs.push({ key: isMdta ? \"title\" : \"\u00A9nam\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"description\":\n {\n pairs.push({ key: isMdta ? \"description\" : \"\u00A9des\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"artist\":\n {\n pairs.push({ key: isMdta ? \"artist\" : \"\u00A9ART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"album\":\n {\n pairs.push({ key: isMdta ? \"album\" : \"\u00A9alb\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"albumArtist\":\n {\n pairs.push({ key: isMdta ? \"album_artist\" : \"aART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"comment\":\n {\n pairs.push({ key: isMdta ? \"comment\" : \"\u00A9cmt\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"genre\":\n {\n pairs.push({ key: isMdta ? \"genre\" : \"\u00A9gen\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"lyrics\":\n {\n pairs.push({ key: isMdta ? \"lyrics\" : \"\u00A9lyr\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"date\":\n {\n pairs.push({\n key: isMdta ? \"date\" : \"\u00A9day\",\n value: dataStringBoxLong(value.toISOString().slice(0, 10))\n });\n }\n ;\n break;\n case \"images\":\n {\n for (const image of value) {\n if (image.kind !== \"coverFront\") {\n continue;\n }\n pairs.push({ key: \"covr\", value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[image.mimeType] ?? 0),\n u32(0),\n Array.from(image.data)\n ]) });\n }\n }\n ;\n break;\n case \"trackNumber\":\n {\n if (isMdta) {\n const string = tags.tracksTotal !== undefined ? `${value}/${tags.tracksTotal}` : value.toString();\n pairs.push({ key: \"track\", value: dataStringBoxLong(string) });\n } else {\n pairs.push({ key: \"trkn\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.tracksTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"discNumber\":\n {\n if (!isMdta) {\n pairs.push({ key: \"disc\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.discsTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"tracksTotal\":\n case \"discsTotal\":\n {}\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == null || !isMdta && key.length !== 4 || pairs.some((x) => x.key === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n pairs.push({ key, value: dataStringBoxLong(value) });\n } else if (value instanceof Uint8Array) {\n pairs.push({ key, value: box(\"data\", [\n u32(0),\n u32(0),\n Array.from(value)\n ]) });\n } else if (value instanceof RichImageData) {\n pairs.push({ key, value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[value.mimeType] ?? 
0),\n u32(0),\n Array.from(value.data)\n ]) });\n }\n }\n }\n return pairs;\n};\nvar metaMdir = (tags) => {\n const pairs = generateMetadataPairs(tags, false);\n if (pairs.length === 0) {\n return null;\n }\n return fullBox(\"meta\", 0, 0, undefined, [\n hdlr(false, \"mdir\", \"\", \"appl\"),\n box(\"ilst\", undefined, pairs.map((pair) => box(pair.key, undefined, [pair.value])))\n ]);\n};\nvar metaMdta = (tags) => {\n const pairs = generateMetadataPairs(tags, true);\n if (pairs.length === 0) {\n return null;\n }\n return box(\"meta\", undefined, [\n hdlr(false, \"mdta\", \"\"),\n fullBox(\"keys\", 0, 0, [\n u32(pairs.length)\n ], pairs.map((pair) => box(\"mdta\", [\n ...textEncoder.encode(pair.key)\n ]))),\n box(\"ilst\", undefined, pairs.map((pair, i) => {\n const boxName = String.fromCharCode(...u32(i + 1));\n return box(boxName, undefined, [pair.value]);\n }))\n ]);\n};\nvar dataStringBoxLong = (value) => {\n return box(\"data\", [\n u32(1),\n u32(0),\n ...textEncoder.encode(value)\n ]);\n};\nvar videoCodecToBoxName = (codec, fullCodecString) => {\n switch (codec) {\n case \"avc\":\n return fullCodecString.startsWith(\"avc3\") ? \"avc3\" : \"avc1\";\n case \"hevc\":\n return \"hvc1\";\n case \"vp8\":\n return \"vp08\";\n case \"vp9\":\n return \"vp09\";\n case \"av1\":\n return \"av01\";\n }\n};\nvar VIDEO_CODEC_TO_CONFIGURATION_BOX = {\n avc: avcC,\n hevc: hvcC,\n vp8: vpcC,\n vp9: vpcC,\n av1: av1C\n};\nvar audioCodecToBoxName = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return \"mp4a\";\n case \"mp3\":\n return \"mp4a\";\n case \"opus\":\n return \"Opus\";\n case \"vorbis\":\n return \"mp4a\";\n case \"flac\":\n return \"fLaC\";\n case \"ulaw\":\n return \"ulaw\";\n case \"alaw\":\n return \"alaw\";\n case \"pcm-u8\":\n return \"raw \";\n case \"pcm-s8\":\n return \"sowt\";\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s16\":\n return \"sowt\";\n case \"pcm-s16be\":\n return \"twos\";\n case \"pcm-s24\":\n return \"in24\";\n case \"pcm-s24be\":\n return \"in24\";\n case \"pcm-s32\":\n return \"in32\";\n case \"pcm-s32be\":\n return \"in32\";\n case \"pcm-f32\":\n return \"fl32\";\n case \"pcm-f32be\":\n return \"fl32\";\n case \"pcm-f64\":\n return \"fl64\";\n case \"pcm-f64be\":\n return \"fl64\";\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return \"ipcm\";\n case \"pcm-s16be\":\n return \"ipcm\";\n case \"pcm-s24\":\n return \"ipcm\";\n case \"pcm-s24be\":\n return \"ipcm\";\n case \"pcm-s32\":\n return \"ipcm\";\n case \"pcm-s32be\":\n return \"ipcm\";\n case \"pcm-f32\":\n return \"fpcm\";\n case \"pcm-f32be\":\n return \"fpcm\";\n case \"pcm-f64\":\n return \"fpcm\";\n case \"pcm-f64be\":\n return \"fpcm\";\n }\n }\n};\nvar audioCodecToConfigurationBox = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return esds;\n case \"mp3\":\n return esds;\n case \"opus\":\n return dOps;\n case \"vorbis\":\n return esds;\n case \"flac\":\n return dfLa;\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s24\":\n return wave;\n case \"pcm-s24be\":\n return wave;\n case \"pcm-s32\":\n return wave;\n case \"pcm-s32be\":\n return wave;\n case \"pcm-f32\":\n return wave;\n case \"pcm-f32be\":\n return wave;\n case \"pcm-f64\":\n return wave;\n case \"pcm-f64be\":\n return wave;\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return pcmC;\n case \"pcm-s16be\":\n return pcmC;\n case \"pcm-s24\":\n return pcmC;\n case \"pcm-s24be\":\n return pcmC;\n case \"pcm-s32\":\n return pcmC;\n case \"pcm-s32be\":\n return pcmC;\n case 
\"pcm-f32\":\n return pcmC;\n case \"pcm-f32be\":\n return pcmC;\n case \"pcm-f64\":\n return pcmC;\n case \"pcm-f64be\":\n return pcmC;\n }\n }\n return null;\n};\nvar SUBTITLE_CODEC_TO_BOX_NAME = {\n webvtt: \"wvtt\"\n};\nvar SUBTITLE_CODEC_TO_CONFIGURATION_BOX = {\n webvtt: vttC\n};\nvar getLanguageCodeInt = (code) => {\n assert(code.length === 3);\n let language = 0;\n for (let i = 0;i < 3; i++) {\n language <<= 5;\n language += code.charCodeAt(i) - 96;\n }\n return language;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/writer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Writer {\n constructor() {\n this.ensureMonotonicity = false;\n this.trackedWrites = null;\n this.trackedStart = -1;\n this.trackedEnd = -1;\n }\n start() {}\n maybeTrackWrites(data) {\n if (!this.trackedWrites) {\n return;\n }\n let pos = this.getPos();\n if (pos < this.trackedStart) {\n if (pos + data.byteLength <= this.trackedStart) {\n return;\n }\n data = data.subarray(this.trackedStart - pos);\n pos = 0;\n }\n const neededSize = pos + data.byteLength - this.trackedStart;\n let newLength = this.trackedWrites.byteLength;\n while (newLength < neededSize) {\n newLength *= 2;\n }\n if (newLength !== this.trackedWrites.byteLength) {\n const copy = new Uint8Array(newLength);\n copy.set(this.trackedWrites, 0);\n this.trackedWrites = copy;\n }\n this.trackedWrites.set(data, pos - this.trackedStart);\n this.trackedEnd = Math.max(this.trackedEnd, pos + data.byteLength);\n }\n startTrackingWrites() {\n this.trackedWrites = new Uint8Array(2 ** 10);\n this.trackedStart = this.getPos();\n this.trackedEnd = this.trackedStart;\n }\n stopTrackingWrites() {\n if (!this.trackedWrites) {\n throw new Error(\"Internal error: Can't get tracked writes since nothing was tracked.\");\n }\n const slice = this.trackedWrites.subarray(0, this.trackedEnd - this.trackedStart);\n const result = {\n data: slice,\n start: this.trackedStart,\n end: this.trackedEnd\n };\n this.trackedWrites = null;\n return result;\n }\n}\nvar ARRAY_BUFFER_INITIAL_SIZE = 2 ** 16;\nvar ARRAY_BUFFER_MAX_SIZE = 2 ** 32;\n\nclass BufferTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.maxPos = 0;\n this.target = target;\n this.supportsResize = \"resize\" in new ArrayBuffer(0);\n if (this.supportsResize) {\n try {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE, { maxByteLength: ARRAY_BUFFER_MAX_SIZE });\n } catch {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n this.supportsResize = false;\n }\n } else {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n }\n this.bytes = new Uint8Array(this.buffer);\n }\n ensureSize(size) {\n let newLength = this.buffer.byteLength;\n while (newLength < size)\n newLength *= 2;\n if (newLength === this.buffer.byteLength)\n return;\n if (newLength > ARRAY_BUFFER_MAX_SIZE) {\n throw new Error(`ArrayBuffer exceeded maximum size of ${ARRAY_BUFFER_MAX_SIZE} bytes. 
Please consider using another` + ` target.`);\n }\n if (this.supportsResize) {\n this.buffer.resize(newLength);\n } else {\n const newBuffer = new ArrayBuffer(newLength);\n const newBytes = new Uint8Array(newBuffer);\n newBytes.set(this.bytes, 0);\n this.buffer = newBuffer;\n this.bytes = newBytes;\n }\n }\n write(data) {\n this.maybeTrackWrites(data);\n this.ensureSize(this.pos + data.byteLength);\n this.bytes.set(data, this.pos);\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.maxPos = Math.max(this.maxPos, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {}\n async finalize() {\n this.ensureSize(this.pos);\n this.target.buffer = this.buffer.slice(0, Math.max(this.maxPos, this.pos));\n }\n async close() {}\n getSlice(start, end) {\n return this.bytes.slice(start, end);\n }\n}\nvar DEFAULT_CHUNK_SIZE = 2 ** 24;\nvar MAX_CHUNKS_AT_ONCE = 2;\n\nclass StreamTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.sections = [];\n this.lastWriteEnd = 0;\n this.lastFlushEnd = 0;\n this.writer = null;\n this.chunks = [];\n this.target = target;\n this.chunked = target._options.chunked ?? false;\n this.chunkSize = target._options.chunkSize ?? DEFAULT_CHUNK_SIZE;\n }\n start() {\n this.writer = this.target._writable.getWriter();\n }\n write(data) {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n this.maybeTrackWrites(data);\n this.sections.push({\n data: data.slice(),\n start: this.pos\n });\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.lastWriteEnd = Math.max(this.lastWriteEnd, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n assert(this.writer);\n if (this.sections.length === 0)\n return;\n const chunks = [];\n const sorted = [...this.sections].sort((a, b) => a.start - b.start);\n chunks.push({\n start: sorted[0].start,\n size: sorted[0].data.byteLength\n });\n for (let i = 1;i < sorted.length; i++) {\n const lastChunk = chunks[chunks.length - 1];\n const section = sorted[i];\n if (section.start <= lastChunk.start + lastChunk.size) {\n lastChunk.size = Math.max(lastChunk.size, section.start + section.data.byteLength - lastChunk.start);\n } else {\n chunks.push({\n start: section.start,\n size: section.data.byteLength\n });\n }\n }\n for (const chunk of chunks) {\n chunk.data = new Uint8Array(chunk.size);\n for (const section of this.sections) {\n if (chunk.start <= section.start && section.start < chunk.start + chunk.size) {\n chunk.data.set(section.data, section.start - chunk.start);\n }\n }\n if (this.writer.desiredSize !== null && this.writer.desiredSize <= 0) {\n await this.writer.ready;\n }\n if (this.chunked) {\n this.writeDataIntoChunks(chunk.data, chunk.start);\n this.tryToFlushChunks();\n } else {\n if (this.ensureMonotonicity && chunk.start !== this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data,\n position: chunk.start\n });\n this.lastFlushEnd = chunk.start + chunk.data.byteLength;\n }\n }\n this.sections.length = 
0;\n }\n writeDataIntoChunks(data, position) {\n let chunkIndex = this.chunks.findIndex((x) => x.start <= position && position < x.start + this.chunkSize);\n if (chunkIndex === -1)\n chunkIndex = this.createChunk(position);\n const chunk = this.chunks[chunkIndex];\n const relativePosition = position - chunk.start;\n const toWrite = data.subarray(0, Math.min(this.chunkSize - relativePosition, data.byteLength));\n chunk.data.set(toWrite, relativePosition);\n const section = {\n start: relativePosition,\n end: relativePosition + toWrite.byteLength\n };\n this.insertSectionIntoChunk(chunk, section);\n if (chunk.written[0].start === 0 && chunk.written[0].end === this.chunkSize) {\n chunk.shouldFlush = true;\n }\n if (this.chunks.length > MAX_CHUNKS_AT_ONCE) {\n for (let i = 0;i < this.chunks.length - 1; i++) {\n this.chunks[i].shouldFlush = true;\n }\n this.tryToFlushChunks();\n }\n if (toWrite.byteLength < data.byteLength) {\n this.writeDataIntoChunks(data.subarray(toWrite.byteLength), position + toWrite.byteLength);\n }\n }\n insertSectionIntoChunk(chunk, section) {\n let low = 0;\n let high = chunk.written.length - 1;\n let index = -1;\n while (low <= high) {\n const mid = Math.floor(low + (high - low + 1) / 2);\n if (chunk.written[mid].start <= section.start) {\n low = mid + 1;\n index = mid;\n } else {\n high = mid - 1;\n }\n }\n chunk.written.splice(index + 1, 0, section);\n if (index === -1 || chunk.written[index].end < section.start)\n index++;\n while (index < chunk.written.length - 1 && chunk.written[index].end >= chunk.written[index + 1].start) {\n chunk.written[index].end = Math.max(chunk.written[index].end, chunk.written[index + 1].end);\n chunk.written.splice(index + 1, 1);\n }\n }\n createChunk(includesPosition) {\n const start = Math.floor(includesPosition / this.chunkSize) * this.chunkSize;\n const chunk = {\n start,\n data: new Uint8Array(this.chunkSize),\n written: [],\n shouldFlush: false\n };\n this.chunks.push(chunk);\n this.chunks.sort((a, b) => a.start - b.start);\n return this.chunks.indexOf(chunk);\n }\n tryToFlushChunks(force = false) {\n assert(this.writer);\n for (let i = 0;i < this.chunks.length; i++) {\n const chunk = this.chunks[i];\n if (!chunk.shouldFlush && !force)\n continue;\n for (const section of chunk.written) {\n const position = chunk.start + section.start;\n if (this.ensureMonotonicity && position !== this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data.subarray(section.start, section.end),\n position\n });\n this.lastFlushEnd = chunk.start + section.end;\n }\n this.chunks.splice(i--, 1);\n }\n }\n finalize() {\n if (this.chunked) {\n this.tryToFlushChunks(true);\n }\n assert(this.writer);\n return this.writer.close();\n }\n async close() {\n return this.writer?.close();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/target.js\nvar nodeAlias = (() => ({}));\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass Target {\n constructor() {\n this._output = null;\n this.onwrite = null;\n }\n}\n\nclass BufferTarget extends Target {\n constructor() {\n super(...arguments);\n this.buffer = null;\n }\n _createWriter() {\n return new BufferTargetWriter(this);\n }\n}\n\nclass StreamTarget extends Target {\n constructor(writable, options = {}) {\n super();\n if (!(writable instanceof WritableStream)) {\n throw new TypeError(\"StreamTarget requires a WritableStream instance.\");\n }\n if (options != null && typeof options !== \"object\") {\n throw new TypeError(\"StreamTarget options, when provided, must be an object.\");\n }\n if (options.chunked !== undefined && typeof options.chunked !== \"boolean\") {\n throw new TypeError(\"options.chunked, when provided, must be a boolean.\");\n }\n if (options.chunkSize !== undefined && (!Number.isInteger(options.chunkSize) || options.chunkSize < 1024)) {\n throw new TypeError(\"options.chunkSize, when provided, must be an integer and not smaller than 1024.\");\n }\n this._writable = writable;\n this._options = options;\n }\n _createWriter() {\n return new StreamTargetWriter(this);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-muxer.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar GLOBAL_TIMESCALE = 1000;\nvar TIMESTAMP_OFFSET = 2082844800;\nvar getTrackMetadata = (trackData) => {\n const metadata = {};\n const track = trackData.track;\n if (track.metadata.name !== undefined) {\n metadata.name = track.metadata.name;\n }\n return metadata;\n};\nvar intoTimescale = (timeInSeconds, timescale, round = true) => {\n const value = timeInSeconds * timescale;\n return round ? Math.round(value) : value;\n};\n\nclass IsobmffMuxer extends Muxer {\n constructor(output, format) {\n super(output);\n this.auxTarget = new BufferTarget;\n this.auxWriter = this.auxTarget._createWriter();\n this.auxBoxWriter = new IsobmffBoxWriter(this.auxWriter);\n this.mdat = null;\n this.ftypSize = null;\n this.trackDatas = [];\n this.allTracksKnown = promiseWithResolvers();\n this.creationTime = Math.floor(Date.now() / 1000) + TIMESTAMP_OFFSET;\n this.finalizedChunks = [];\n this.nextFragmentNumber = 1;\n this.maxWrittenTimestamp = -Infinity;\n this.format = format;\n this.writer = output._writer;\n this.boxWriter = new IsobmffBoxWriter(this.writer);\n this.isQuickTime = format instanceof MovOutputFormat;\n const fastStartDefault = this.writer instanceof BufferTargetWriter ? \"in-memory\" : false;\n this.fastStart = format._options.fastStart ?? fastStartDefault;\n this.isFragmented = this.fastStart === \"fragmented\";\n if (this.fastStart === \"in-memory\" || this.isFragmented) {\n this.writer.ensureMonotonicity = true;\n }\n this.minimumFragmentDuration = format._options.minimumFragmentDuration ?? 
1;\n }\n async start() {\n const release = await this.mutex.acquire();\n const holdsAvc = this.output._tracks.some((x) => x.type === \"video\" && x.source._codec === \"avc\");\n {\n if (this.format._options.onFtyp) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(ftyp({\n isQuickTime: this.isQuickTime,\n holdsAvc,\n fragmented: this.isFragmented\n }));\n if (this.format._options.onFtyp) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onFtyp(data, start);\n }\n }\n this.ftypSize = this.writer.getPos();\n if (this.fastStart === \"in-memory\") {} else if (this.fastStart === \"reserve\") {\n for (const track of this.output._tracks) {\n if (track.metadata.maximumPacketCount === undefined) {\n throw new Error(\"All tracks must specify maximumPacketCount in their metadata when using\" + \" fastStart: 'reserve'.\");\n }\n }\n } else if (this.isFragmented) {} else {\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n }\n await this.writer.flush();\n release();\n }\n allTracksAreKnown() {\n for (const track of this.output._tracks) {\n if (!track.source._closed && !this.trackDatas.some((x) => x.track === track)) {\n return false;\n }\n }\n return true;\n }\n async getMimeType() {\n await this.allTracksKnown.promise;\n const codecStrings = this.trackDatas.map((trackData) => {\n if (trackData.type === \"video\") {\n return trackData.info.decoderConfig.codec;\n } else if (trackData.type === \"audio\") {\n return trackData.info.decoderConfig.codec;\n } else {\n const map = {\n webvtt: \"wvtt\"\n };\n return map[trackData.track.source._codec];\n }\n });\n return buildIsobmffMimeType({\n isQuickTime: this.isQuickTime,\n hasVideo: this.trackDatas.some((x) => x.type === \"video\"),\n hasAudio: this.trackDatas.some((x) => x.type === \"audio\"),\n codecStrings\n });\n }\n getVideoTrackData(track, packet, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateVideoChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const decoderConfig = { ...meta.decoderConfig };\n assert(decoderConfig.codedWidth !== undefined);\n assert(decoderConfig.codedHeight !== undefined);\n let requiresAnnexBTransformation = false;\n if (track.source._codec === \"avc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractAvcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an AVCDecoderConfigurationRecord from the AVC packet. Make sure the packets are\" + \" in Annex B format (as specified in ITU-T-REC-H.264) when not providing a description, or\" + \" provide a description (must be an AVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in AVCC format.\");\n }\n decoderConfig.description = serializeAvcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n } else if (track.source._codec === \"hevc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractHevcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an HEVCDecoderConfigurationRecord from the HEVC packet. 
Make sure the packets\" + \" are in Annex B format (as specified in ITU-T-REC-H.265) when not providing a description, or\" + \" provide a description (must be an HEVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in HEVC format.\");\n }\n decoderConfig.description = serializeHevcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n }\n const timescale = computeRationalApproximation(1 / (track.metadata.frameRate ?? 57600), 1e6).denominator;\n const newTrackData = {\n muxer: this,\n track,\n type: \"video\",\n info: {\n width: decoderConfig.codedWidth,\n height: decoderConfig.codedHeight,\n decoderConfig,\n requiresAnnexBTransformation\n },\n timescale,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getAudioTrackData(track, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateAudioChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const newTrackData = {\n muxer: this,\n track,\n type: \"audio\",\n info: {\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate,\n decoderConfig: meta.decoderConfig,\n requiresPcmTransformation: !this.isFragmented && PCM_AUDIO_CODECS.includes(track.source._codec)\n },\n timescale: meta.decoderConfig.sampleRate,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getSubtitleTrackData(track, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateSubtitleMetadata(meta);\n assert(meta);\n assert(meta.config);\n const newTrackData = {\n muxer: this,\n track,\n type: \"subtitle\",\n info: {\n config: meta.config\n },\n timescale: 1000,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: [],\n lastCueEndTimestamp: 0,\n cueQueue: [],\n nextSourceId: 0,\n cueToSourceId: new WeakMap\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n async addEncodedVideoPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getVideoTrackData(track, packet, meta);\n let packetData = packet.data;\n if (trackData.info.requiresAnnexBTransformation) {\n const transformedData = transformAnnexBToLengthPrefixed(packetData);\n if (!transformedData) {\n throw new 
Error(\"Failed to transform packet data. Make sure all packets are provided in Annex B format, as\" + \" specified in ITU-T-REC-H.264 and ITU-T-REC-H.265.\");\n }\n packetData = transformedData;\n }\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async addEncodedAudioPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getAudioTrackData(track, meta);\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packet.data, timestamp, packet.duration, packet.type);\n if (trackData.info.requiresPcmTransformation) {\n await this.maybePadWithSilence(trackData, timestamp);\n }\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async maybePadWithSilence(trackData, untilTimestamp) {\n const lastSample = last(trackData.samples);\n const lastEndTimestamp = lastSample ? lastSample.timestamp + lastSample.duration : 0;\n const delta = untilTimestamp - lastEndTimestamp;\n const deltaInTimescale = intoTimescale(delta, trackData.timescale);\n if (deltaInTimescale > 0) {\n const { sampleSize, silentValue } = parsePcmCodec(trackData.info.decoderConfig.codec);\n const samplesNeeded = deltaInTimescale * trackData.info.numberOfChannels;\n const data = new Uint8Array(sampleSize * samplesNeeded).fill(silentValue);\n const paddingSample = this.createSampleForTrack(trackData, new Uint8Array(data.buffer), lastEndTimestamp, delta, \"key\");\n await this.registerSample(trackData, paddingSample);\n }\n }\n async addSubtitleCue(track, cue, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getSubtitleTrackData(track, meta);\n this.validateAndNormalizeTimestamp(trackData.track, cue.timestamp, true);\n if (track.source._codec === \"webvtt\") {\n trackData.cueQueue.push(cue);\n await this.processWebVTTCues(trackData, cue.timestamp);\n } else {}\n } finally {\n release();\n }\n }\n async processWebVTTCues(trackData, until) {\n while (trackData.cueQueue.length > 0) {\n const timestamps = new Set([]);\n for (const cue of trackData.cueQueue) {\n assert(cue.timestamp <= until);\n assert(trackData.lastCueEndTimestamp <= cue.timestamp + cue.duration);\n timestamps.add(Math.max(cue.timestamp, trackData.lastCueEndTimestamp));\n timestamps.add(cue.timestamp + cue.duration);\n }\n const sortedTimestamps = [...timestamps].sort((a, b) => a - b);\n const sampleStart = sortedTimestamps[0];\n const sampleEnd = sortedTimestamps[1] ?? 
sampleStart;\n if (until < sampleEnd) {\n break;\n }\n if (trackData.lastCueEndTimestamp < sampleStart) {\n this.auxWriter.seek(0);\n const box2 = vtte();\n this.auxBoxWriter.writeBox(box2);\n const body2 = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample2 = this.createSampleForTrack(trackData, body2, trackData.lastCueEndTimestamp, sampleStart - trackData.lastCueEndTimestamp, \"key\");\n await this.registerSample(trackData, sample2);\n trackData.lastCueEndTimestamp = sampleStart;\n }\n this.auxWriter.seek(0);\n for (let i = 0;i < trackData.cueQueue.length; i++) {\n const cue = trackData.cueQueue[i];\n if (cue.timestamp >= sampleEnd) {\n break;\n }\n inlineTimestampRegex.lastIndex = 0;\n const containsTimestamp = inlineTimestampRegex.test(cue.text);\n const endTimestamp = cue.timestamp + cue.duration;\n let sourceId = trackData.cueToSourceId.get(cue);\n if (sourceId === undefined && sampleEnd < endTimestamp) {\n sourceId = trackData.nextSourceId++;\n trackData.cueToSourceId.set(cue, sourceId);\n }\n if (cue.notes) {\n const box3 = vtta(cue.notes);\n this.auxBoxWriter.writeBox(box3);\n }\n const box2 = vttc(cue.text, containsTimestamp ? sampleStart : null, cue.identifier ?? null, cue.settings ?? null, sourceId ?? null);\n this.auxBoxWriter.writeBox(box2);\n if (endTimestamp === sampleEnd) {\n trackData.cueQueue.splice(i--, 1);\n }\n }\n const body = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample = this.createSampleForTrack(trackData, body, sampleStart, sampleEnd - sampleStart, \"key\");\n await this.registerSample(trackData, sample);\n trackData.lastCueEndTimestamp = sampleEnd;\n }\n }\n createSampleForTrack(trackData, data, timestamp, duration, type) {\n const sample = {\n timestamp,\n decodeTimestamp: timestamp,\n duration,\n data,\n size: data.byteLength,\n type,\n timescaleUnitsToNextSample: intoTimescale(duration, trackData.timescale)\n };\n return sample;\n }\n processTimestamps(trackData, nextSample) {\n if (trackData.timestampProcessingQueue.length === 0) {\n return;\n }\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n let totalDuration = 0;\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n const duration = intoTimescale(sample.duration, trackData.timescale);\n totalDuration += duration;\n }\n if (trackData.timeToSampleTable.length === 0) {\n trackData.timeToSampleTable.push({\n sampleCount: totalDuration,\n sampleDelta: 1\n });\n } else {\n const lastEntry = last(trackData.timeToSampleTable);\n lastEntry.sampleCount += totalDuration;\n }\n trackData.timestampProcessingQueue.length = 0;\n return;\n }\n const sortedTimestamps = trackData.timestampProcessingQueue.map((x) => x.timestamp).sort((a, b) => a - b);\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n sample.decodeTimestamp = sortedTimestamps[i];\n if (!this.isFragmented && trackData.lastTimescaleUnits === null) {\n sample.decodeTimestamp = 0;\n }\n const sampleCompositionTimeOffset = intoTimescale(sample.timestamp - sample.decodeTimestamp, trackData.timescale);\n const durationInTimescale = intoTimescale(sample.duration, trackData.timescale);\n if (trackData.lastTimescaleUnits !== null) {\n assert(trackData.lastSample);\n const timescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n assert(delta 
>= 0);\n trackData.lastTimescaleUnits += delta;\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n if (!this.isFragmented) {\n let lastTableEntry = last(trackData.timeToSampleTable);\n assert(lastTableEntry);\n if (lastTableEntry.sampleCount === 1) {\n lastTableEntry.sampleDelta = delta;\n const entryBefore = trackData.timeToSampleTable[trackData.timeToSampleTable.length - 2];\n if (entryBefore && entryBefore.sampleDelta === delta) {\n entryBefore.sampleCount++;\n trackData.timeToSampleTable.pop();\n lastTableEntry = entryBefore;\n }\n } else if (lastTableEntry.sampleDelta !== delta) {\n lastTableEntry.sampleCount--;\n trackData.timeToSampleTable.push(lastTableEntry = {\n sampleCount: 1,\n sampleDelta: delta\n });\n }\n if (lastTableEntry.sampleDelta === durationInTimescale) {\n lastTableEntry.sampleCount++;\n } else {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n }\n const lastCompositionTimeOffsetTableEntry = last(trackData.compositionTimeOffsetTable);\n assert(lastCompositionTimeOffsetTableEntry);\n if (lastCompositionTimeOffsetTableEntry.sampleCompositionTimeOffset === sampleCompositionTimeOffset) {\n lastCompositionTimeOffsetTableEntry.sampleCount++;\n } else {\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n });\n }\n }\n } else {\n trackData.lastTimescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n if (!this.isFragmented) {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n });\n }\n }\n trackData.lastSample = sample;\n }\n trackData.timestampProcessingQueue.length = 0;\n assert(trackData.lastSample);\n assert(trackData.lastTimescaleUnits !== null);\n if (nextSample !== undefined && trackData.lastSample.timescaleUnitsToNextSample === 0) {\n assert(nextSample.type === \"key\");\n const timescaleUnits = intoTimescale(nextSample.timestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n }\n }\n async registerSample(trackData, sample) {\n if (sample.type === \"key\") {\n this.processTimestamps(trackData, sample);\n }\n trackData.timestampProcessingQueue.push(sample);\n if (this.isFragmented) {\n trackData.sampleQueue.push(sample);\n await this.interleaveSamples();\n } else if (this.fastStart === \"reserve\") {\n await this.registerSampleFastStartReserve(trackData, sample);\n } else {\n await this.addSampleToTrack(trackData, sample);\n }\n }\n async addSampleToTrack(trackData, sample) {\n if (!this.isFragmented) {\n trackData.samples.push(sample);\n if (this.fastStart === \"reserve\") {\n const maximumPacketCount = trackData.track.metadata.maximumPacketCount;\n assert(maximumPacketCount !== undefined);\n if (trackData.samples.length > maximumPacketCount) {\n throw new Error(`Track #${trackData.track.id} has already reached the maximum packet count` + ` (${maximumPacketCount}). 
Either add fewer packets or increase the maximum packet count.`);\n }\n }\n }\n let beginNewChunk = false;\n if (!trackData.currentChunk) {\n beginNewChunk = true;\n } else {\n trackData.currentChunk.startTimestamp = Math.min(trackData.currentChunk.startTimestamp, sample.timestamp);\n const currentChunkDuration = sample.timestamp - trackData.currentChunk.startTimestamp;\n if (this.isFragmented) {\n const keyFrameQueuedEverywhere = this.trackDatas.every((otherTrackData) => {\n if (trackData === otherTrackData) {\n return sample.type === \"key\";\n }\n const firstQueuedSample = otherTrackData.sampleQueue[0];\n if (firstQueuedSample) {\n return firstQueuedSample.type === \"key\";\n }\n return otherTrackData.track.source._closed;\n });\n if (currentChunkDuration >= this.minimumFragmentDuration && keyFrameQueuedEverywhere && sample.timestamp > this.maxWrittenTimestamp) {\n beginNewChunk = true;\n await this.finalizeFragment();\n }\n } else {\n beginNewChunk = currentChunkDuration >= 0.5;\n }\n }\n if (beginNewChunk) {\n if (trackData.currentChunk) {\n await this.finalizeCurrentChunk(trackData);\n }\n trackData.currentChunk = {\n startTimestamp: sample.timestamp,\n samples: [],\n offset: null,\n moofOffset: null\n };\n }\n assert(trackData.currentChunk);\n trackData.currentChunk.samples.push(sample);\n if (this.isFragmented) {\n this.maxWrittenTimestamp = Math.max(this.maxWrittenTimestamp, sample.timestamp);\n }\n }\n async finalizeCurrentChunk(trackData) {\n assert(!this.isFragmented);\n if (!trackData.currentChunk)\n return;\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n let sampleCount = trackData.currentChunk.samples.length;\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n sampleCount = trackData.currentChunk.samples.reduce((acc, sample) => acc + intoTimescale(sample.duration, trackData.timescale), 0);\n }\n if (trackData.compactlyCodedChunkTable.length === 0 || last(trackData.compactlyCodedChunkTable).samplesPerChunk !== sampleCount) {\n trackData.compactlyCodedChunkTable.push({\n firstChunk: trackData.finalizedChunks.length,\n samplesPerChunk: sampleCount\n });\n }\n if (this.fastStart === \"in-memory\") {\n trackData.currentChunk.offset = 0;\n return;\n }\n trackData.currentChunk.offset = this.writer.getPos();\n for (const sample of trackData.currentChunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n await this.writer.flush();\n }\n async interleaveSamples(isFinalCall = false) {\n assert(this.isFragmented);\n if (!isFinalCall && !this.allTracksAreKnown()) {\n return;\n }\n outer:\n while (true) {\n let trackWithMinTimestamp = null;\n let minTimestamp = Infinity;\n for (const trackData of this.trackDatas) {\n if (!isFinalCall && trackData.sampleQueue.length === 0 && !trackData.track.source._closed) {\n break outer;\n }\n if (trackData.sampleQueue.length > 0 && trackData.sampleQueue[0].timestamp < minTimestamp) {\n trackWithMinTimestamp = trackData;\n minTimestamp = trackData.sampleQueue[0].timestamp;\n }\n }\n if (!trackWithMinTimestamp) {\n break;\n }\n const sample = trackWithMinTimestamp.sampleQueue.shift();\n await this.addSampleToTrack(trackWithMinTimestamp, sample);\n }\n }\n async finalizeFragment(flushWriter = true) {\n assert(this.isFragmented);\n const fragmentNumber = this.nextFragmentNumber++;\n if (fragmentNumber === 1) {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = 
moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n const tracksInFragment = this.trackDatas.filter((x) => x.currentChunk);\n const moofBox = moof(fragmentNumber, tracksInFragment);\n const moofOffset = this.writer.getPos();\n const mdatStartPos = moofOffset + this.boxWriter.measureBox(moofBox);\n let currentPos = mdatStartPos + MIN_BOX_HEADER_SIZE;\n let fragmentStartTimestamp = Infinity;\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset = currentPos;\n trackData.currentChunk.moofOffset = moofOffset;\n for (const sample of trackData.currentChunk.samples) {\n currentPos += sample.size;\n }\n fragmentStartTimestamp = Math.min(fragmentStartTimestamp, trackData.currentChunk.startTimestamp);\n }\n const mdatSize = currentPos - mdatStartPos;\n const needsLargeMdatSize = mdatSize >= 2 ** 32;\n if (needsLargeMdatSize) {\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset += MAX_BOX_HEADER_SIZE - MIN_BOX_HEADER_SIZE;\n }\n }\n if (this.format._options.onMoof) {\n this.writer.startTrackingWrites();\n }\n const newMoofBox = moof(fragmentNumber, tracksInFragment);\n this.boxWriter.writeBox(newMoofBox);\n if (this.format._options.onMoof) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoof(data, start, fragmentStartTimestamp);\n }\n assert(this.writer.getPos() === mdatStartPos);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n const mdatBox = mdat(needsLargeMdatSize);\n mdatBox.size = mdatSize;\n this.boxWriter.writeBox(mdatBox);\n this.writer.seek(mdatStartPos + (needsLargeMdatSize ? MAX_BOX_HEADER_SIZE : MIN_BOX_HEADER_SIZE));\n for (const trackData of tracksInFragment) {\n for (const sample of trackData.currentChunk.samples) {\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n for (const trackData of tracksInFragment) {\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n trackData.currentChunk = null;\n }\n if (flushWriter) {\n await this.writer.flush();\n }\n }\n async registerSampleFastStartReserve(trackData, sample) {\n if (this.allTracksAreKnown()) {\n if (!this.mdat) {\n const moovBox = moov(this);\n const moovSize = this.boxWriter.measureBox(moovBox);\n const reservedSize = moovSize + this.computeSampleTableSizeUpperBound() + 4096;\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize + reservedSize);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n for (const trackData2 of this.trackDatas) {\n for (const sample2 of trackData2.sampleQueue) {\n await this.addSampleToTrack(trackData2, sample2);\n }\n trackData2.sampleQueue.length = 0;\n }\n }\n await this.addSampleToTrack(trackData, sample);\n } else {\n trackData.sampleQueue.push(sample);\n }\n }\n computeSampleTableSizeUpperBound() {\n assert(this.fastStart === \"reserve\");\n let upperBound = 0;\n for (const trackData of this.trackDatas) {\n const n = trackData.track.metadata.maximumPacketCount;\n assert(n !== undefined);\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound 
+= (4 + 4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += 8 * n;\n }\n return upperBound;\n }\n async onTrackClose(track) {\n const release = await this.mutex.acquire();\n if (track.type === \"subtitle\" && track.source._codec === \"webvtt\") {\n const trackData = this.trackDatas.find((x) => x.track === track);\n if (trackData) {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n if (this.isFragmented) {\n await this.interleaveSamples();\n }\n release();\n }\n async finalize() {\n const release = await this.mutex.acquire();\n this.allTracksKnown.resolve();\n for (const trackData of this.trackDatas) {\n if (trackData.type === \"subtitle\" && trackData.track.source._codec === \"webvtt\") {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.isFragmented) {\n await this.interleaveSamples(true);\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n }\n await this.finalizeFragment(false);\n } else {\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n await this.finalizeCurrentChunk(trackData);\n }\n }\n if (this.fastStart === \"in-memory\") {\n this.mdat = mdat(false);\n let mdatSize;\n for (let i = 0;i < 2; i++) {\n const movieBox2 = moov(this);\n const movieBoxSize = this.boxWriter.measureBox(movieBox2);\n mdatSize = this.boxWriter.measureBox(this.mdat);\n let currentChunkPos = this.writer.getPos() + movieBoxSize + mdatSize;\n for (const chunk of this.finalizedChunks) {\n chunk.offset = currentChunkPos;\n for (const { data } of chunk.samples) {\n assert(data);\n currentChunkPos += data.byteLength;\n mdatSize += data.byteLength;\n }\n }\n if (currentChunkPos < 2 ** 32)\n break;\n if (mdatSize >= 2 ** 32)\n this.mdat.largeSize = true;\n }\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat.size = mdatSize;\n this.boxWriter.writeBox(this.mdat);\n for (const chunk of this.finalizedChunks) {\n for (const sample of chunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n } else if (this.isFragmented) {\n const startPos = this.writer.getPos();\n const mfraBox = mfra(this.trackDatas);\n this.boxWriter.writeBox(mfraBox);\n const mfraBoxSize = this.writer.getPos() - startPos;\n this.writer.seek(this.writer.getPos() - 4);\n this.boxWriter.writeU32(mfraBoxSize);\n } else {\n assert(this.mdat);\n const mdatPos = this.boxWriter.offsets.get(this.mdat);\n assert(mdatPos !== undefined);\n const mdatSize = this.writer.getPos() - mdatPos;\n this.mdat.size = mdatSize;\n this.mdat.largeSize = mdatSize >= 2 ** 32;\n this.boxWriter.patchBox(this.mdat);\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n const movieBox = moov(this);\n if (this.fastStart === \"reserve\") {\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize);\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n 
this.boxWriter.writeBox(movieBox);\n const remainingSpace = this.boxWriter.offsets.get(this.mdat) - this.writer.getPos();\n this.boxWriter.writeBox(free(remainingSpace));\n } else {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(movieBox);\n }\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n release();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output-format.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass OutputFormat {\n getSupportedVideoCodecs() {\n return this.getSupportedCodecs().filter((codec) => VIDEO_CODECS.includes(codec));\n }\n getSupportedAudioCodecs() {\n return this.getSupportedCodecs().filter((codec) => AUDIO_CODECS.includes(codec));\n }\n getSupportedSubtitleCodecs() {\n return this.getSupportedCodecs().filter((codec) => SUBTITLE_CODECS.includes(codec));\n }\n _codecUnsupportedHint(codec) {\n return \"\";\n }\n}\n\nclass IsobmffOutputFormat extends OutputFormat {\n constructor(options = {}) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.fastStart !== undefined && ![false, \"in-memory\", \"reserve\", \"fragmented\"].includes(options.fastStart)) {\n throw new TypeError(\"options.fastStart, when provided, must be false, 'in-memory', 'reserve', or 'fragmented'.\");\n }\n if (options.minimumFragmentDuration !== undefined && (!Number.isFinite(options.minimumFragmentDuration) || options.minimumFragmentDuration < 0)) {\n throw new TypeError(\"options.minimumFragmentDuration, when provided, must be a non-negative number.\");\n }\n if (options.onFtyp !== undefined && typeof options.onFtyp !== \"function\") {\n throw new TypeError(\"options.onFtyp, when provided, must be a function.\");\n }\n if (options.onMoov !== undefined && typeof options.onMoov !== \"function\") {\n throw new TypeError(\"options.onMoov, when provided, must be a function.\");\n }\n if (options.onMdat !== undefined && typeof options.onMdat !== \"function\") {\n throw new TypeError(\"options.onMdat, when provided, must be a function.\");\n }\n if (options.onMoof !== undefined && typeof options.onMoof !== \"function\") {\n throw new TypeError(\"options.onMoof, when provided, must be a function.\");\n }\n if (options.metadataFormat !== undefined && ![\"mdir\", \"mdta\", \"udta\", \"auto\"].includes(options.metadataFormat)) {\n throw new TypeError(\"options.metadataFormat, when provided, must be either 'auto', 'mdir', 'mdta', or 'udta'.\");\n }\n super();\n this._options = options;\n }\n getSupportedTrackCounts() {\n return {\n video: { min: 0, max: Infinity },\n audio: { min: 0, max: Infinity },\n subtitle: { min: 0, max: Infinity },\n total: { min: 1, max: 2 ** 32 - 1 }\n };\n }\n get supportsVideoRotationMetadata() {\n return true;\n }\n _createMuxer(output) {\n return new IsobmffMuxer(output, this);\n }\n}\n\nclass Mp4OutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MP4\";\n }\n get fileExtension() {\n return \".mp4\";\n }\n get mimeType() {\n return \"video/mp4\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...NON_PCM_AUDIO_CODECS,\n 
\"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n ...SUBTITLE_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new MovOutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MOV will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\nclass MovOutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MOV\";\n }\n get fileExtension() {\n return \".mov\";\n }\n get mimeType() {\n return \"video/quicktime\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...AUDIO_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new Mp4OutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MP4 will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/encode.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar validateVideoEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!VIDEO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid video codec '${config.codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n if (!(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate must be a positive integer or a quality.\");\n }\n if (config.keyFrameInterval !== undefined && (!Number.isFinite(config.keyFrameInterval) || config.keyFrameInterval < 0)) {\n throw new TypeError(\"config.keyFrameInterval, when provided, must be a non-negative number.\");\n }\n if (config.sizeChangeBehavior !== undefined && ![\"deny\", \"passThrough\", \"fill\", \"contain\", \"cover\"].includes(config.sizeChangeBehavior)) {\n throw new TypeError(\"config.sizeChangeBehavior, when provided, must be 'deny', 'passThrough', 'fill', 'contain'\" + \" or 'cover'.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedChunk, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateVideoEncodingAdditionalOptions(config.codec, config);\n};\nvar validateVideoEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.alpha !== undefined && ![\"discard\", \"keep\"].includes(options.alpha)) {\n throw new TypeError(\"options.alpha, when provided, must be 'discard' or 'keep'.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.latencyMode !== undefined && ![\"quality\", \"realtime\"].includes(options.latencyMode)) {\n throw new TypeError(\"latencyMode, when provided, must be 'quality' or 'realtime'.\");\n }\n if (options.fullCodecString !== undefined && typeof 
options.fullCodecString !== \"string\") {\n throw new TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n if (options.hardwareAcceleration !== undefined && ![\"no-preference\", \"prefer-hardware\", \"prefer-software\"].includes(options.hardwareAcceleration)) {\n throw new TypeError(\"hardwareAcceleration, when provided, must be 'no-preference', 'prefer-hardware' or\" + \" 'prefer-software'.\");\n }\n if (options.scalabilityMode !== undefined && typeof options.scalabilityMode !== \"string\") {\n throw new TypeError(\"scalabilityMode, when provided, must be a string.\");\n }\n if (options.contentHint !== undefined && typeof options.contentHint !== \"string\") {\n throw new TypeError(\"contentHint, when provided, must be a string.\");\n }\n};\nvar buildVideoEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toVideoBitrate(options.codec, options.width, options.height) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildVideoCodecString(options.codec, options.width, options.height, resolvedBitrate),\n width: options.width,\n height: options.height,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n alpha: options.alpha ?? \"discard\",\n framerate: options.framerate,\n latencyMode: options.latencyMode,\n hardwareAcceleration: options.hardwareAcceleration,\n scalabilityMode: options.scalabilityMode,\n contentHint: options.contentHint,\n ...getVideoEncoderConfigExtension(options.codec)\n };\n};\nvar validateAudioEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!AUDIO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid audio codec '${config.codec}'. 
Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n if (config.bitrate === undefined && (!PCM_AUDIO_CODECS.includes(config.codec) || config.codec === \"flac\")) {\n throw new TypeError(\"config.bitrate must be provided for compressed audio codecs.\");\n }\n if (config.bitrate !== undefined && !(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate, when provided, must be a positive integer or a quality.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedChunk, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateAudioEncodingAdditionalOptions(config.codec, config);\n};\nvar validateAudioEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.fullCodecString !== undefined && typeof options.fullCodecString !== \"string\") {\n throw new TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n};\nvar buildAudioEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toAudioBitrate(options.codec) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildAudioCodecString(options.codec, options.numberOfChannels, options.sampleRate),\n numberOfChannels: options.numberOfChannels,\n sampleRate: options.sampleRate,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n ...getAudioEncoderConfigExtension(options.codec)\n };\n};\n\nclass Quality {\n constructor(factor) {\n this._factor = factor;\n }\n _toVideoBitrate(codec, width, height) {\n const pixels = width * height;\n const codecEfficiencyFactors = {\n avc: 1,\n hevc: 0.6,\n vp9: 0.6,\n av1: 0.4,\n vp8: 1.2\n };\n const referencePixels = 1920 * 1080;\n const referenceBitrate = 3000000;\n const scaleFactor = Math.pow(pixels / referencePixels, 0.95);\n const baseBitrate = referenceBitrate * scaleFactor;\n const codecAdjustedBitrate = baseBitrate * codecEfficiencyFactors[codec];\n const finalBitrate = codecAdjustedBitrate * this._factor;\n return Math.ceil(finalBitrate / 1000) * 1000;\n }\n _toAudioBitrate(codec) {\n if (PCM_AUDIO_CODECS.includes(codec) || codec === \"flac\") {\n return;\n }\n const baseRates = {\n aac: 128000,\n opus: 64000,\n mp3: 160000,\n vorbis: 64000\n };\n const baseBitrate = baseRates[codec];\n if (!baseBitrate) {\n throw new Error(`Unhandled codec: ${codec}`);\n }\n let finalBitrate = baseBitrate * this._factor;\n if (codec === \"aac\") {\n const validRates = [96000, 128000, 160000, 192000];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? 
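/* review note: rough worked examples of the Quality mapping (arithmetic only, not values
   from the source). Video: 1280x720 AVC at factor 2 gives
   scale = (921600 / 2073600) ** 0.95 ≈ 0.463, so 3_000_000 * 0.463 * 1 (avc) * 2
   ≈ 2_777_000 bps after rounding up to the nearest 1000. Audio: AAC at factor 2 gives
   128_000 * 2 = 256_000, which the reduce on this line snaps to the nearest valid AAC
   rate, 192_000 bps. */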
curr : prev);\n } else if (codec === \"opus\" || codec === \"vorbis\") {\n finalBitrate = Math.max(6000, finalBitrate);\n } else if (codec === \"mp3\") {\n const validRates = [\n 8000,\n 16000,\n 24000,\n 32000,\n 40000,\n 48000,\n 64000,\n 80000,\n 96000,\n 112000,\n 128000,\n 160000,\n 192000,\n 224000,\n 256000,\n 320000\n ];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? curr : prev);\n }\n return Math.round(finalBitrate / 1000) * 1000;\n }\n}\nvar QUALITY_HIGH = /* @__PURE__ */ new Quality(2);\n\n// ../../node_modules/mediabunny/dist/modules/src/media-source.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass MediaSource {\n constructor() {\n this._connectedTrack = null;\n this._closingPromise = null;\n this._closed = false;\n this._timestampOffset = 0;\n }\n _ensureValidAdd() {\n if (!this._connectedTrack) {\n throw new Error(\"Source is not connected to an output track.\");\n }\n if (this._connectedTrack.output.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._connectedTrack.output.state === \"finalizing\" || this._connectedTrack.output.state === \"finalized\") {\n throw new Error(\"Output has been finalized.\");\n }\n if (this._connectedTrack.output.state === \"pending\") {\n throw new Error(\"Output has not started.\");\n }\n if (this._closed) {\n throw new Error(\"Source is closed.\");\n }\n }\n async _start() {}\n async _flushAndClose(forceClose) {}\n close() {\n if (this._closingPromise) {\n return;\n }\n const connectedTrack = this._connectedTrack;\n if (!connectedTrack) {\n throw new Error(\"Cannot call close without connecting the source to an output track.\");\n }\n if (connectedTrack.output.state === \"pending\") {\n throw new Error(\"Cannot call close before output has been started.\");\n }\n this._closingPromise = (async () => {\n await this._flushAndClose(false);\n this._closed = true;\n if (connectedTrack.output.state === \"finalizing\" || connectedTrack.output.state === \"finalized\") {\n return;\n }\n connectedTrack.output._muxer.onTrackClose(connectedTrack);\n })();\n }\n async _flushOrWaitForOngoingClose(forceClose) {\n if (this._closingPromise) {\n return this._closingPromise;\n } else {\n return this._flushAndClose(forceClose);\n }\n }\n}\n\nclass VideoSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!VIDEO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid video codec '${codec}'. 
Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass VideoEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastMultipleOfKeyFrameInterval = -1;\n this.codedWidth = null;\n this.codedHeight = null;\n this.resizeCanvas = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.alphaEncoder = null;\n this.splitter = null;\n this.splitterCreationFailed = false;\n this.alphaFrameQueue = [];\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(videoSample, shouldClose, encodeOptions) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.codedWidth !== null && this.codedHeight !== null) {\n if (videoSample.codedWidth !== this.codedWidth || videoSample.codedHeight !== this.codedHeight) {\n const sizeChangeBehavior = this.encodingConfig.sizeChangeBehavior ?? \"deny\";\n if (sizeChangeBehavior === \"passThrough\") {} else if (sizeChangeBehavior === \"deny\") {\n throw new Error(`Video sample size must remain constant. Expected ${this.codedWidth}x${this.codedHeight},` + ` got ${videoSample.codedWidth}x${videoSample.codedHeight}. To allow the sample size to` + ` change over time, set \\`sizeChangeBehavior\\` to a value other than 'strict' in the` + ` encoding options.`);\n } else {\n let canvasIsNew = false;\n if (!this.resizeCanvas) {\n if (typeof document !== \"undefined\") {\n this.resizeCanvas = document.createElement(\"canvas\");\n this.resizeCanvas.width = this.codedWidth;\n this.resizeCanvas.height = this.codedHeight;\n } else {\n this.resizeCanvas = new OffscreenCanvas(this.codedWidth, this.codedHeight);\n }\n canvasIsNew = true;\n }\n const context = this.resizeCanvas.getContext(\"2d\", {\n alpha: isFirefox()\n });\n assert(context);\n if (!canvasIsNew) {\n if (isFirefox()) {\n context.fillStyle = \"black\";\n context.fillRect(0, 0, this.codedWidth, this.codedHeight);\n } else {\n context.clearRect(0, 0, this.codedWidth, this.codedHeight);\n }\n }\n videoSample.drawWithFit(context, { fit: sizeChangeBehavior });\n if (shouldClose) {\n videoSample.close();\n }\n videoSample = new VideoSample(this.resizeCanvas, {\n timestamp: videoSample.timestamp,\n duration: videoSample.duration,\n rotation: videoSample.rotation\n });\n shouldClose = true;\n }\n }\n } else {\n this.codedWidth = videoSample.codedWidth;\n this.codedHeight = videoSample.codedHeight;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(videoSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n const keyFrameInterval = this.encodingConfig.keyFrameInterval ?? 
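/* review note: keyFrameInterval is in seconds and defaults to 5 (next token). The bucketing
   below floors timestamp / interval and forces a key frame whenever the bucket index
   changes, so samples at t = 4.9s and t = 5.1s land in buckets 0 and 1 and the second is
   encoded as a key frame. Also note the size-change error message earlier in this method
   says "a value other than 'strict'", but the accepted sizeChangeBehavior values are
   'deny', 'passThrough', 'fill', 'contain' and 'cover'; 'strict' reads like a stale name
   for the default 'deny'. */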
5;\n const multipleOfKeyFrameInterval = Math.floor(videoSample.timestamp / keyFrameInterval);\n const finalEncodeOptions = {\n ...encodeOptions,\n keyFrame: encodeOptions?.keyFrame || keyFrameInterval === 0 || multipleOfKeyFrameInterval !== this.lastMultipleOfKeyFrameInterval\n };\n this.lastMultipleOfKeyFrameInterval = multipleOfKeyFrameInterval;\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = videoSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample, finalEncodeOptions)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n } else {\n assert(this.encoder);\n const videoFrame = videoSample.toVideoFrame();\n if (!this.alphaEncoder) {\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const frameDefinitelyHasNoAlpha = !!videoFrame.format && !videoFrame.format.includes(\"A\");\n if (frameDefinitelyHasNoAlpha || this.splitterCreationFailed) {\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (!this.splitter) {\n try {\n this.splitter = new ColorAlphaSplitter(width, height);\n } catch (error) {\n console.error(\"Due to an error, only color data will be encoded.\", error);\n this.splitterCreationFailed = true;\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n }\n }\n if (this.splitter) {\n const colorFrame = this.splitter.extractColor(videoFrame);\n const alphaFrame = this.splitter.extractAlpha(videoFrame);\n this.alphaFrameQueue.push(alphaFrame);\n this.encoder.encode(colorFrame, finalEncodeOptions);\n colorFrame.close();\n videoFrame.close();\n }\n }\n }\n if (shouldClose) {\n videoSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n }\n await this.muxer.mutex.currentPromise;\n } finally {\n if (shouldClose) {\n videoSample.close();\n }\n }\n }\n ensureEncoder(videoSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const encoderConfig = buildVideoEncoderConfig({\n width: videoSample.codedWidth,\n height: videoSample.codedHeight,\n ...this.encodingConfig,\n framerate: this.source._connectedTrack?.metadata.frameRate\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customVideoEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n 
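/* review note: entries in customVideoEncoders are classes carrying a static
   supports(codec, config) test plus init/encode/flush/close; the wrapper assigns codec,
   config and this onPacket callback before init(). A skeletal shape inferred from the call
   sites in this bundle (illustrative only, not an encoder that ships with it):

   class MyCustomAv1Encoder {
     static supports(codec, config) { return codec === "av1"; }
     async init() {}
     async encode(videoSample, options) {
       // produce an EncodedPacket, then hand it back to the wrapper:
       // this.onPacket(packet, meta);
     }
     async flush() {}
     async close() {}
   }
*/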
};\n await this.customEncoder.init();\n } else {\n if (typeof VideoEncoder === \"undefined\") {\n throw new Error(\"VideoEncoder is not supported by this browser.\");\n }\n encoderConfig.alpha = \"discard\";\n if (this.encodingConfig.alpha === \"keep\") {\n encoderConfig.latencyMode = \"quality\";\n }\n const hasOddDimension = encoderConfig.width % 2 === 1 || encoderConfig.height % 2 === 1;\n if (hasOddDimension && (this.encodingConfig.codec === \"avc\" || this.encodingConfig.codec === \"hevc\")) {\n throw new Error(`The dimensions ${encoderConfig.width}x${encoderConfig.height} are not supported for codec` + ` '${this.encodingConfig.codec}'; both width and height must be even numbers. Make sure to` + ` round your dimensions to the nearest even number.`);\n }\n const support = await VideoEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.width}x${encoderConfig.height}, hardware acceleration:` + ` ${encoderConfig.hardwareAcceleration ?? \"no-preference\"}) is not supported by this browser.` + ` Consider using another codec or changing your video parameters.`);\n }\n const colorChunkQueue = [];\n const nullAlphaChunkQueue = [];\n let encodedAlphaChunkCount = 0;\n let alphaEncoderQueue = 0;\n const addPacket = (colorChunk, alphaChunk, meta) => {\n const sideData = {};\n if (alphaChunk) {\n const alphaData = new Uint8Array(alphaChunk.byteLength);\n alphaChunk.copyTo(alphaData);\n sideData.alpha = alphaData;\n }\n const packet = EncodedPacket.fromEncodedChunk(colorChunk, sideData);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n this.encoder = new VideoEncoder({\n output: (chunk, meta) => {\n if (!this.alphaEncoder) {\n addPacket(chunk, null, meta);\n return;\n }\n const alphaFrame = this.alphaFrameQueue.shift();\n assert(alphaFrame !== undefined);\n if (alphaFrame) {\n this.alphaEncoder.encode(alphaFrame, {\n keyFrame: chunk.type === \"key\"\n });\n alphaEncoderQueue++;\n alphaFrame.close();\n colorChunkQueue.push({ chunk, meta });\n } else {\n if (alphaEncoderQueue === 0) {\n addPacket(chunk, null, meta);\n } else {\n nullAlphaChunkQueue.push(encodedAlphaChunkCount + alphaEncoderQueue);\n colorChunkQueue.push({ chunk, meta });\n }\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n if (this.encodingConfig.alpha === \"keep\") {\n this.alphaEncoder = new VideoEncoder({\n output: (chunk, meta) => {\n alphaEncoderQueue--;\n const colorChunk = colorChunkQueue.shift();\n assert(colorChunk !== undefined);\n addPacket(colorChunk.chunk, chunk, colorChunk.meta);\n encodedAlphaChunkCount++;\n while (nullAlphaChunkQueue.length > 0 && nullAlphaChunkQueue[0] === encodedAlphaChunkCount) {\n nullAlphaChunkQueue.shift();\n const colorChunk2 = colorChunkQueue.shift();\n assert(colorChunk2 !== undefined);\n addPacket(colorChunk2.chunk, null, colorChunk2.meta);\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.alphaEncoder.configure(encoderConfig);\n }\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n async flushAndClose(forceClose) {\n if 
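/* review note: the alpha path above runs two VideoEncoder instances in lockstep — the main
   encoder receives the color plane and a second encoder receives the alpha plane packed as
   a grayscale stream. colorChunkQueue and nullAlphaChunkQueue keep the two output streams
   paired so every color chunk is emitted with its alpha bytes attached as packet side data
   (sideData.alpha), and frames whose pixel format has no alpha push a null queue entry so
   ordering is preserved. */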
(!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n await this.alphaEncoder?.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n if (this.alphaEncoder && this.alphaEncoder.state !== \"closed\") {\n this.alphaEncoder.close();\n }\n this.alphaFrameQueue.forEach((x) => x?.close());\n this.splitter?.close();\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else {\n return this.encoder?.encodeQueueSize ?? 0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass ColorAlphaSplitter {\n constructor(initialWidth, initialHeight) {\n this.lastFrame = null;\n if (typeof OffscreenCanvas !== \"undefined\") {\n this.canvas = new OffscreenCanvas(initialWidth, initialHeight);\n } else {\n this.canvas = document.createElement(\"canvas\");\n this.canvas.width = initialWidth;\n this.canvas.height = initialHeight;\n }\n const gl = this.canvas.getContext(\"webgl2\", {\n alpha: true\n });\n if (!gl) {\n throw new Error(\"Couldn't acquire WebGL 2 context.\");\n }\n this.gl = gl;\n this.colorProgram = this.createColorProgram();\n this.alphaProgram = this.createAlphaProgram();\n this.vao = this.createVAO();\n this.sourceTexture = this.createTexture();\n this.alphaResolutionLocation = this.gl.getUniformLocation(this.alphaProgram, \"u_resolution\");\n this.gl.useProgram(this.colorProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.colorProgram, \"u_sourceTexture\"), 0);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.alphaProgram, \"u_sourceTexture\"), 0);\n }\n createVertexShader() {\n return this.createShader(this.gl.VERTEX_SHADER, `#version 300 es\n\t\t\tin vec2 a_position;\n\t\t\tin vec2 a_texCoord;\n\t\t\tout vec2 v_texCoord;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tgl_Position = vec4(a_position, 0.0, 1.0);\n\t\t\t\tv_texCoord = a_texCoord;\n\t\t\t}\n\t\t`);\n }\n createColorProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tvec4 source = texture(u_sourceTexture, v_texCoord);\n\t\t\t\tfragColor = vec4(source.rgb, 1.0);\n\t\t\t}\n\t\t`);\n const program = this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createAlphaProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tuniform vec2 u_resolution; // The width and height of the canvas\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\n\t\t\t// This function determines the value for a single byte in the YUV stream\n\t\t\tfloat getByteValue(float byteOffset) {\n\t\t\t\tfloat width = u_resolution.x;\n\t\t\t\tfloat height = 
u_resolution.y;\n\n\t\t\t\tfloat yPlaneSize = width * height;\n\n\t\t\t\tif (byteOffset < yPlaneSize) {\n\t\t\t\t\t// This byte is in the luma plane. Find the corresponding pixel coordinates to sample from\n\t\t\t\t\tfloat y = floor(byteOffset / width);\n\t\t\t\t\tfloat x = mod(byteOffset, width);\n\t\t\t\t\t\n\t\t\t\t\t// Add 0.5 to sample the center of the texel\n\t\t\t\t\tvec2 sampleCoord = (vec2(x, y) + 0.5) / u_resolution;\n\t\t\t\t\t\n\t\t\t\t\t// The luma value is the alpha from the source texture\n\t\t\t\t\treturn texture(u_sourceTexture, sampleCoord).a;\n\t\t\t\t} else {\n\t\t\t\t\t// Write a fixed value for chroma and beyond\n\t\t\t\t\treturn 128.0 / 255.0;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\t// Each fragment writes 4 bytes (R, G, B, A)\n\t\t\t\tfloat pixelIndex = floor(gl_FragCoord.y) * u_resolution.x + floor(gl_FragCoord.x);\n\t\t\t\tfloat baseByteOffset = pixelIndex * 4.0;\n\n\t\t\t\tvec4 result;\n\t\t\t\tfor (int i = 0; i < 4; i++) {\n\t\t\t\t\tfloat currentByteOffset = baseByteOffset + float(i);\n\t\t\t\t\tresult[i] = getByteValue(currentByteOffset);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfragColor = result;\n\t\t\t}\n\t\t`);\n const program = this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createShader(type, source) {\n const shader = this.gl.createShader(type);\n this.gl.shaderSource(shader, source);\n this.gl.compileShader(shader);\n if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {\n console.error(\"Shader compile error:\", this.gl.getShaderInfoLog(shader));\n }\n return shader;\n }\n createVAO() {\n const vao = this.gl.createVertexArray();\n this.gl.bindVertexArray(vao);\n const vertices = new Float32Array([\n -1,\n -1,\n 0,\n 1,\n 1,\n -1,\n 1,\n 1,\n -1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0\n ]);\n const buffer = this.gl.createBuffer();\n this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);\n this.gl.bufferData(this.gl.ARRAY_BUFFER, vertices, this.gl.STATIC_DRAW);\n const positionLocation = this.gl.getAttribLocation(this.colorProgram, \"a_position\");\n const texCoordLocation = this.gl.getAttribLocation(this.colorProgram, \"a_texCoord\");\n this.gl.enableVertexAttribArray(positionLocation);\n this.gl.vertexAttribPointer(positionLocation, 2, this.gl.FLOAT, false, 16, 0);\n this.gl.enableVertexAttribArray(texCoordLocation);\n this.gl.vertexAttribPointer(texCoordLocation, 2, this.gl.FLOAT, false, 16, 8);\n return vao;\n }\n createTexture() {\n const texture = this.gl.createTexture();\n this.gl.bindTexture(this.gl.TEXTURE_2D, texture);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.LINEAR);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.LINEAR);\n return texture;\n }\n updateTexture(sourceFrame) {\n if (this.lastFrame === sourceFrame) {\n return;\n }\n if (sourceFrame.displayWidth !== this.canvas.width || sourceFrame.displayHeight !== this.canvas.height) {\n this.canvas.width = sourceFrame.displayWidth;\n this.canvas.height = sourceFrame.displayHeight;\n }\n this.gl.activeTexture(this.gl.TEXTURE0);\n this.gl.bindTexture(this.gl.TEXTURE_2D, this.sourceTexture);\n this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGBA, this.gl.RGBA, this.gl.UNSIGNED_BYTE, sourceFrame);\n 
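/* review note: the alpha program above repurposes an RGBA render target as a byte buffer —
   each fragment emits 4 consecutive bytes of an I420 frame, luma bytes sample the source
   texture's .a, and every byte past the Y plane is the constant 128 (neutral chroma).
   extractAlpha below sizes the readback as
   yuvSize = width * height + 2 * ceil(w / 2) * ceil(h / 2) and wraps the bytes in a
   VideoFrame with format "I420", which is what the two asserts on 128 are checking. */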
this.lastFrame = sourceFrame;\n }\n extractColor(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.colorProgram);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n return new VideoFrame(this.canvas, {\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? undefined,\n alpha: \"discard\"\n });\n }\n extractAlpha(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform2f(this.alphaResolutionLocation, this.canvas.width, this.canvas.height);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n const { width, height } = this.canvas;\n const chromaSamples = Math.ceil(width / 2) * Math.ceil(height / 2);\n const yuvSize = width * height + chromaSamples * 2;\n const requiredHeight = Math.ceil(yuvSize / (width * 4));\n let yuv = new Uint8Array(4 * width * requiredHeight);\n this.gl.readPixels(0, 0, width, requiredHeight, this.gl.RGBA, this.gl.UNSIGNED_BYTE, yuv);\n yuv = yuv.subarray(0, yuvSize);\n assert(yuv[width * height] === 128);\n assert(yuv[yuv.length - 1] === 128);\n const init = {\n format: \"I420\",\n codedWidth: width,\n codedHeight: height,\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? undefined,\n transfer: [yuv.buffer]\n };\n return new VideoFrame(yuv, init);\n }\n close() {\n this.gl.getExtension(\"WEBGL_lose_context\")?.loseContext();\n this.gl = null;\n }\n}\n\nclass VideoSampleSource extends VideoSource {\n constructor(encodingConfig) {\n validateVideoEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new VideoEncoderWrapper(this, encodingConfig);\n }\n add(videoSample, encodeOptions) {\n if (!(videoSample instanceof VideoSample)) {\n throw new TypeError(\"videoSample must be a VideoSample.\");\n }\n return this._encoder.add(videoSample, false, encodeOptions);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass AudioSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!AUDIO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid audio codec '${codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass AudioEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastNumberOfChannels = null;\n this.lastSampleRate = null;\n this.isPcmEncoder = false;\n this.outputSampleSize = null;\n this.writeOutputValue = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.lastEndSampleIndex = null;\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(audioSample, shouldClose) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.lastNumberOfChannels !== null && this.lastSampleRate !== null) {\n if (audioSample.numberOfChannels !== this.lastNumberOfChannels || audioSample.sampleRate !== this.lastSampleRate) {\n throw new Error(`Audio parameters must remain constant. 
Expected ${this.lastNumberOfChannels} channels at` + ` ${this.lastSampleRate} Hz, got ${audioSample.numberOfChannels} channels at` + ` ${audioSample.sampleRate} Hz.`);\n }\n } else {\n this.lastNumberOfChannels = audioSample.numberOfChannels;\n this.lastSampleRate = audioSample.sampleRate;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(audioSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n {\n const startSampleIndex = Math.round(audioSample.timestamp * audioSample.sampleRate);\n const endSampleIndex = Math.round((audioSample.timestamp + audioSample.duration) * audioSample.sampleRate);\n if (this.lastEndSampleIndex === null) {\n this.lastEndSampleIndex = endSampleIndex;\n } else {\n const sampleDiff = startSampleIndex - this.lastEndSampleIndex;\n if (sampleDiff >= 64) {\n const fillSample = new AudioSample({\n data: new Float32Array(sampleDiff * audioSample.numberOfChannels),\n format: \"f32-planar\",\n sampleRate: audioSample.sampleRate,\n numberOfChannels: audioSample.numberOfChannels,\n numberOfFrames: sampleDiff,\n timestamp: this.lastEndSampleIndex / audioSample.sampleRate\n });\n await this.add(fillSample, true);\n }\n this.lastEndSampleIndex += audioSample.numberOfFrames;\n }\n }\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = audioSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n await this.muxer.mutex.currentPromise;\n } else if (this.isPcmEncoder) {\n await this.doPcmEncoding(audioSample, shouldClose);\n } else {\n assert(this.encoder);\n const audioData = audioSample.toAudioData();\n this.encoder.encode(audioData);\n audioData.close();\n if (shouldClose) {\n audioSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n await this.muxer.mutex.currentPromise;\n }\n } finally {\n if (shouldClose) {\n audioSample.close();\n }\n }\n }\n async doPcmEncoding(audioSample, shouldClose) {\n assert(this.outputSampleSize);\n assert(this.writeOutputValue);\n const { numberOfChannels, numberOfFrames, sampleRate, timestamp } = audioSample;\n const CHUNK_SIZE = 2048;\n const outputs = [];\n for (let frame = 0;frame < numberOfFrames; frame += CHUNK_SIZE) {\n const frameCount = Math.min(CHUNK_SIZE, audioSample.numberOfFrames - frame);\n const outputSize = frameCount * numberOfChannels * this.outputSampleSize;\n const outputBuffer = new ArrayBuffer(outputSize);\n const outputView = new DataView(outputBuffer);\n outputs.push({ frameCount, view: outputView });\n }\n const allocationSize = audioSample.allocationSize({ planeIndex: 0, format: \"f32-planar\" });\n const floats = new Float32Array(allocationSize / Float32Array.BYTES_PER_ELEMENT);\n for (let i = 0;i < numberOfChannels; i++) {\n audioSample.copyTo(floats, { planeIndex: i, format: \"f32-planar\" });\n for (let j = 0;j < outputs.length; j++) {\n const { frameCount, view: view2 } = outputs[j];\n for (let k = 0;k < frameCount; k++) {\n this.writeOutputValue(view2, (k * numberOfChannels + i) * this.outputSampleSize, floats[j * CHUNK_SIZE + k]);\n }\n }\n }\n if (shouldClose) {\n audioSample.close();\n }\n const meta = 
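/* review note: the gap handling above converts timestamps to integer sample indices and, if
   a new sample starts 64 or more samples after the previous one ended, synthesizes a silent
   f32-planar fill sample and routes it through add() first. Example: at 48 kHz a 10 ms gap
   is 480 samples, so a zeroed Float32Array of 480 * numberOfChannels floats is inserted;
   sub-64-sample jitter is simply absorbed. */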
{\n decoderConfig: {\n codec: this.encodingConfig.codec,\n numberOfChannels,\n sampleRate\n }\n };\n for (let i = 0;i < outputs.length; i++) {\n const { frameCount, view: view2 } = outputs[i];\n const outputBuffer = view2.buffer;\n const startFrame = i * CHUNK_SIZE;\n const packet = new EncodedPacket(new Uint8Array(outputBuffer), \"key\", timestamp + startFrame / sampleRate, frameCount / sampleRate);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n await this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta);\n }\n }\n ensureEncoder(audioSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const { numberOfChannels, sampleRate } = audioSample;\n const encoderConfig = buildAudioEncoderConfig({\n numberOfChannels,\n sampleRate,\n ...this.encodingConfig\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customAudioEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else if (PCM_AUDIO_CODECS.includes(this.encodingConfig.codec)) {\n this.initPcmEncoder();\n } else {\n if (typeof AudioEncoder === \"undefined\") {\n throw new Error(\"AudioEncoder is not supported by this browser.\");\n }\n const support = await AudioEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.numberOfChannels} channels, ${encoderConfig.sampleRate} Hz) is not` + ` supported by this browser. 
Consider using another codec or changing your audio parameters.`);\n }\n this.encoder = new AudioEncoder({\n output: (chunk, meta) => {\n if (this.encodingConfig.codec === \"aac\" && meta?.decoderConfig) {\n let needsDescriptionOverwrite = false;\n if (!meta.decoderConfig.description || meta.decoderConfig.description.byteLength < 2) {\n needsDescriptionOverwrite = true;\n } else {\n const audioSpecificConfig = parseAacAudioSpecificConfig(toUint8Array(meta.decoderConfig.description));\n needsDescriptionOverwrite = audioSpecificConfig.objectType === 0;\n }\n if (needsDescriptionOverwrite) {\n const objectType = Number(last(encoderConfig.codec.split(\".\")));\n meta.decoderConfig.description = buildAacAudioSpecificConfig({\n objectType,\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate\n });\n }\n }\n const packet = EncodedPacket.fromEncodedChunk(chunk);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n initPcmEncoder() {\n this.isPcmEncoder = true;\n const codec = this.encodingConfig.codec;\n const { dataType, sampleSize, littleEndian } = parsePcmCodec(codec);\n this.outputSampleSize = sampleSize;\n switch (sampleSize) {\n case 1:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint8(byteOffset, clamp((value + 1) * 127.5, 0, 255));\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n view2.setInt8(byteOffset, clamp(Math.round(value * 128), -128, 127));\n };\n } else if (dataType === \"ulaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toUlaw(int16));\n };\n } else if (dataType === \"alaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toAlaw(int16));\n };\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 2:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint16(byteOffset, clamp((value + 1) * 32767.5, 0, 65535), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt16(byteOffset, clamp(Math.round(value * 32767), -32768, 32767), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 3:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => setUint24(view2, byteOffset, clamp((value + 1) * 8388607.5, 0, 16777215), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => setInt24(view2, byteOffset, clamp(Math.round(value * 8388607), -8388608, 8388607), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 4:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint32(byteOffset, clamp((value + 1) * 2147483647.5, 0, 4294967295), littleEndian);\n } else if (dataType === \"signed\") 
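/* review note: initPcmEncoder reduces encoding to a per-sample format conversion by picking
   a writeOutputValue function. For pcm-s16, a float in [-1, 1] becomes
   setInt16(clamp(Math.round(value * 32767), -32768, 32767), littleEndian); unsigned
   variants shift the range first (e.g. (value + 1) * 32767.5 for 16-bit unsigned), and
   ulaw/alaw quantize through a 16-bit intermediate before companding. */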
{\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt32(byteOffset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), littleEndian);\n } else if (dataType === \"float\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat32(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 8:\n {\n if (dataType === \"float\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat64(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n default:\n {\n assertNever(sampleSize);\n assert(false);\n }\n ;\n }\n }\n async flushAndClose(forceClose) {\n if (!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else if (this.isPcmEncoder) {\n return 0;\n } else {\n return this.encoder?.encodeQueueSize ?? 0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass AudioSampleSource extends AudioSource {\n constructor(encodingConfig) {\n validateAudioEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new AudioEncoderWrapper(this, encodingConfig);\n }\n add(audioSample) {\n if (!(audioSample instanceof AudioSample)) {\n throw new TypeError(\"audioSample must be an AudioSample.\");\n }\n return this._encoder.add(audioSample, false);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass SubtitleSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!SUBTITLE_CODECS.includes(codec)) {\n throw new TypeError(`Invalid subtitle codec '${codec}'. Must be one of: ${SUBTITLE_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar ALL_TRACK_TYPES = [\"video\", \"audio\", \"subtitle\"];\nvar validateBaseTrackMetadata = (metadata) => {\n if (!metadata || typeof metadata !== \"object\") {\n throw new TypeError(\"metadata must be an object.\");\n }\n if (metadata.languageCode !== undefined && !isIso639Dash2LanguageCode(metadata.languageCode)) {\n throw new TypeError(\"metadata.languageCode, when provided, must be a three-letter, ISO 639-2/T language code.\");\n }\n if (metadata.name !== undefined && typeof metadata.name !== \"string\") {\n throw new TypeError(\"metadata.name, when provided, must be a string.\");\n }\n if (metadata.disposition !== undefined) {\n validateTrackDisposition(metadata.disposition);\n }\n if (metadata.maximumPacketCount !== undefined && (!Number.isInteger(metadata.maximumPacketCount) || metadata.maximumPacketCount < 0)) {\n throw new TypeError(\"metadata.maximumPacketCount, when provided, must be a non-negative integer.\");\n }\n};\n\nclass Output {\n constructor(options) {\n this.state = \"pending\";\n this._tracks = [];\n this._startPromise = null;\n this._cancelPromise = null;\n this._finalizePromise = null;\n this._mutex = new AsyncMutex;\n this._metadataTags = {};\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!(options.format instanceof OutputFormat)) {\n throw new TypeError(\"options.format must be an OutputFormat.\");\n }\n if (!(options.target instanceof Target)) {\n throw new TypeError(\"options.target must be a Target.\");\n }\n if (options.target._output) {\n throw new Error(\"Target is already used for another output.\");\n }\n options.target._output = this;\n this.format = options.format;\n this.target = options.target;\n this._writer = options.target._createWriter();\n this._muxer = options.format._createMuxer(this);\n }\n addVideoTrack(source, metadata = {}) {\n if (!(source instanceof VideoSource)) {\n throw new TypeError(\"source must be a VideoSource.\");\n }\n validateBaseTrackMetadata(metadata);\n if (metadata.rotation !== undefined && ![0, 90, 180, 270].includes(metadata.rotation)) {\n throw new TypeError(`Invalid video rotation: ${metadata.rotation}. Has to be 0, 90, 180 or 270.`);\n }\n if (!this.format.supportsVideoRotationMetadata && metadata.rotation) {\n throw new Error(`${this.format._name} does not support video rotation metadata.`);\n }\n if (metadata.frameRate !== undefined && (!Number.isFinite(metadata.frameRate) || metadata.frameRate <= 0)) {\n throw new TypeError(`Invalid video frame rate: ${metadata.frameRate}. 
Must be a positive number.`);\n }\n this._addTrack(\"video\", source, metadata);\n }\n addAudioTrack(source, metadata = {}) {\n if (!(source instanceof AudioSource)) {\n throw new TypeError(\"source must be an AudioSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"audio\", source, metadata);\n }\n addSubtitleTrack(source, metadata = {}) {\n if (!(source instanceof SubtitleSource)) {\n throw new TypeError(\"source must be a SubtitleSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"subtitle\", source, metadata);\n }\n setMetadataTags(tags) {\n validateMetadataTags(tags);\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot set metadata tags after output has been started or canceled.\");\n }\n this._metadataTags = tags;\n }\n _addTrack(type, source, metadata) {\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot add track after output has been started or canceled.\");\n }\n if (source._connectedTrack) {\n throw new Error(\"Source is already used for a track.\");\n }\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n const presentTracksOfThisType = this._tracks.reduce((count, track2) => count + (track2.type === type ? 1 : 0), 0);\n const maxCount = supportedTrackCounts[type].max;\n if (presentTracksOfThisType === maxCount) {\n throw new Error(maxCount === 0 ? `${this.format._name} does not support ${type} tracks.` : `${this.format._name} does not support more than ${maxCount} ${type} track` + `${maxCount === 1 ? \"\" : \"s\"}.`);\n }\n const maxTotalCount = supportedTrackCounts.total.max;\n if (this._tracks.length === maxTotalCount) {\n throw new Error(`${this.format._name} does not support more than ${maxTotalCount} tracks` + `${maxTotalCount === 1 ? \"\" : \"s\"} in total.`);\n }\n const track = {\n id: this._tracks.length + 1,\n output: this,\n type,\n source,\n metadata\n };\n if (track.type === \"video\") {\n const supportedVideoCodecs = this.format.getSupportedVideoCodecs();\n if (supportedVideoCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support video tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedVideoCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` video codecs are: ${supportedVideoCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"audio\") {\n const supportedAudioCodecs = this.format.getSupportedAudioCodecs();\n if (supportedAudioCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support audio tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedAudioCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. 
Supported` + ` audio codecs are: ${supportedAudioCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"subtitle\") {\n const supportedSubtitleCodecs = this.format.getSupportedSubtitleCodecs();\n if (supportedSubtitleCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support subtitle tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedSubtitleCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` subtitle codecs are: ${supportedSubtitleCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n }\n this._tracks.push(track);\n source._connectedTrack = track;\n }\n async start() {\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n for (const trackType of ALL_TRACK_TYPES) {\n const presentTracksOfThisType = this._tracks.reduce((count, track) => count + (track.type === trackType ? 1 : 0), 0);\n const minCount = supportedTrackCounts[trackType].min;\n if (presentTracksOfThisType < minCount) {\n throw new Error(minCount === supportedTrackCounts[trackType].max ? `${this.format._name} requires exactly ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.`);\n }\n }\n const totalMinCount = supportedTrackCounts.total.min;\n if (this._tracks.length < totalMinCount) {\n throw new Error(totalMinCount === supportedTrackCounts.total.max ? `${this.format._name} requires exactly ${totalMinCount} track` + `${totalMinCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${totalMinCount} track` + `${totalMinCount === 1 ? 
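/* review note: the checks above and below imply this Output lifecycle, sketched with
   assumed variable names (target, sample):

   const output = new Output({ format: new Mp4OutputFormat(), target });
   const video = new VideoSampleSource({ codec: "avc", bitrate: QUALITY_HIGH });
   output.addVideoTrack(video, { frameRate: 30 });
   await output.start();      // validates minimum track counts, locks the track list
   await video.add(sample);   // once per VideoSample
   await output.finalize();   // flushes sources, then the muxer writes its trailer
*/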
\"\" : \"s\"}.`);\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._startPromise) {\n console.warn(\"Output has already been started.\");\n return this._startPromise;\n }\n return this._startPromise = (async () => {\n this.state = \"started\";\n this._writer.start();\n const release = await this._mutex.acquire();\n await this._muxer.start();\n const promises = this._tracks.map((track) => track.source._start());\n await Promise.all(promises);\n release();\n })();\n }\n getMimeType() {\n return this._muxer.getMimeType();\n }\n async cancel() {\n if (this._cancelPromise) {\n console.warn(\"Output has already been canceled.\");\n return this._cancelPromise;\n } else if (this.state === \"finalizing\" || this.state === \"finalized\") {\n console.warn(\"Output has already been finalized.\");\n return;\n }\n return this._cancelPromise = (async () => {\n this.state = \"canceled\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(true));\n await Promise.all(promises);\n await this._writer.close();\n release();\n })();\n }\n async finalize() {\n if (this.state === \"pending\") {\n throw new Error(\"Cannot finalize before starting.\");\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Cannot finalize after canceling.\");\n }\n if (this._finalizePromise) {\n console.warn(\"Output has already been finalized.\");\n return this._finalizePromise;\n }\n return this._finalizePromise = (async () => {\n this.state = \"finalizing\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(false));\n await Promise.all(promises);\n await this._muxer.finalize();\n await this._writer.flush();\n await this._writer.finalize();\n this.state = \"finalized\";\n release();\n })();\n }\n}\n// ../../node_modules/mediabunny/dist/modules/src/index.js\n/*!\n * Copyright (c) 2025-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\n// src/core/utils/error-handler.ts\nfunction extractErrorMessage(error) {\n if (error instanceof Error) {\n return error.message;\n }\n return String(error);\n}\n\n// src/core/utils/logger.ts\nfunction isDebugEnabled() {\n const globalAny = globalThis;\n if (globalAny.__VIDTREO_DEBUG__ === true || globalAny.__VIDTREO_DEV__ === true) {\n return true;\n }\n const envNode = typeof process !== \"undefined\" && process?.env ? 
\"development\" : undefined;\n if (envNode === \"development\" || envNode === \"test\") {\n return true;\n }\n if (typeof localStorage !== \"undefined\") {\n const flag = localStorage.getItem(\"VIDTREO_DEBUG\");\n if (flag === \"true\") {\n return true;\n }\n }\n return false;\n}\nvar isDevelopment = isDebugEnabled();\nvar ANSI_COLORS = {\n reset: \"\\x1B[0m\",\n bright: \"\\x1B[1m\",\n dim: \"\\x1B[2m\",\n red: \"\\x1B[31m\",\n green: \"\\x1B[32m\",\n yellow: \"\\x1B[33m\",\n blue: \"\\x1B[34m\",\n magenta: \"\\x1B[35m\",\n cyan: \"\\x1B[36m\",\n white: \"\\x1B[37m\",\n gray: \"\\x1B[90m\"\n};\nfunction formatMessage(level, message, options) {\n if (!isDevelopment) {\n return \"\";\n }\n const prefix = options?.prefix || `[${level.toUpperCase()}]`;\n const color = options?.color || getDefaultColor(level);\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n return `${colorCode}${prefix}${resetCode} ${message}`;\n}\nfunction getDefaultColor(level) {\n switch (level) {\n case \"error\":\n return \"red\";\n case \"warn\":\n return \"yellow\";\n case \"info\":\n return \"cyan\";\n case \"debug\":\n return \"gray\";\n default:\n return \"white\";\n }\n}\nfunction log(level, message, ...args) {\n if (!isDevelopment) {\n return;\n }\n const formatted = formatMessage(level, message);\n console[level](formatted, ...args);\n}\nvar logger = {\n log: (message, ...args) => {\n log(\"log\", message, ...args);\n },\n info: (message, ...args) => {\n log(\"info\", message, ...args);\n },\n warn: (message, ...args) => {\n log(\"warn\", message, ...args);\n },\n error: (message, ...args) => {\n log(\"error\", message, ...args);\n },\n debug: (message, ...args) => {\n log(\"debug\", message, ...args);\n },\n group: (label, color = \"cyan\") => {\n if (!isDevelopment) {\n return;\n }\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n console.group(`${colorCode}${label}${resetCode}`);\n },\n groupEnd: () => {\n if (!isDevelopment) {\n return;\n }\n console.groupEnd();\n }\n};\n\n// src/core/utils/validation.ts\nfunction requireNonNull(value, message) {\n if (value === null || value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireDefined(value, message) {\n if (value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireInitialized(value, componentName) {\n if (value === null || value === undefined) {\n throw new Error(`${componentName} is not initialized`);\n }\n return value;\n}\n\n// src/core/processor/worker/recorder-worker.ts\nvar CHUNK_SIZE = 16 * 1024 * 1024;\nvar OVERLAY_BACKGROUND_OPACITY = 0.6;\nvar OVERLAY_PADDING = 16;\nvar OVERLAY_TEXT_COLOR = \"#ffffff\";\nvar OVERLAY_FONT_SIZE = 16;\nvar OVERLAY_FONT_FAMILY = \"Arial, sans-serif\";\nvar OVERLAY_MIN_WIDTH = 200;\nvar OVERLAY_MIN_HEIGHT = 50;\n\nclass RecorderWorker {\n output = null;\n videoSource = null;\n audioSource = null;\n videoProcessor = null;\n audioProcessor = null;\n isPaused = false;\n isMuted = false;\n frameRate = 30;\n lastVideoTimestamp = 0;\n lastAudioTimestamp = 0;\n baseVideoTimestamp = null;\n frameCount = 0;\n config = null;\n lastKeyFrameTimestamp = 0;\n forceNextKeyFrame = false;\n videoProcessingActive = false;\n audioProcessingActive = false;\n isStopping = false;\n isFinalized = false;\n bufferUpdateInterval = null;\n pausedDuration = 0;\n pauseStartedAt = null;\n overlayConfig = null;\n overlayCanvas = null;\n compositionCanvas = null;\n compositionCtx = null;\n hiddenIntervals = [];\n 
currentHiddenIntervalStart = null;\n recordingStartTime = 0;\n pendingVisibilityUpdates = [];\n isScreenCapture = false;\n driftOffset = 0;\n constructor() {\n self.addEventListener(\"message\", this.handleMessage);\n }\n formatFileSize(bytes2) {\n if (bytes2 === 0) {\n return \"0 Bytes\";\n }\n const units = [\"Bytes\", \"KB\", \"MB\", \"GB\"];\n const base = 1024;\n const index = Math.floor(Math.log(bytes2) / Math.log(base));\n const size = Math.round(bytes2 / base ** index * 100) / 100;\n return `${size} ${units[index]}`;\n }\n shouldIgnoreMessage() {\n return this.isStopping || this.isFinalized;\n }\n handleAsyncOperation(operation, context) {\n operation.catch((error) => {\n logger.error(`[RecorderWorker] Error in ${context}:`, error);\n this.sendError(error);\n });\n }\n handleMessage = (event) => {\n const message = event.data;\n logger.debug(\"[RecorderWorker] Received message:\", { type: message.type });\n if (message.type === \"start\") {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] start ignored (stopping/finalized)\");\n return;\n }\n this.handleAsyncOperation(this.handleStart(message.videoStream, message.audioStream, message.config, message.overlayConfig), \"handleStart\");\n return;\n }\n if (message.type === \"pause\") {\n this.handlePause();\n return;\n }\n if (message.type === \"resume\") {\n this.handleResume();\n return;\n }\n if (message.type === \"stop\") {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] stop ignored (stopping/finalized)\");\n return;\n }\n this.handleAsyncOperation(this.handleStop(), \"handleStop\");\n return;\n }\n if (message.type === \"toggleMute\") {\n this.handleToggleMute();\n return;\n }\n if (message.type === \"switchSource\") {\n this.handleAsyncOperation(this.handleSwitchSource(message.videoStream), \"handleSwitchSource\");\n return;\n }\n if (message.type === \"updateFps\") {\n this.handleUpdateFps(message.fps);\n return;\n }\n if (message.type === \"updateVisibility\") {\n this.handleUpdateVisibility(message.isHidden, message.timestamp);\n return;\n }\n if (message.type === \"updateSourceType\") {\n this.handleUpdateSourceType(message.isScreenCapture);\n return;\n }\n this.sendError(new Error(`Unknown message type: ${message.type}`));\n };\n validateConfig(config) {\n requireDefined(config, \"Transcode config is required\");\n if (config.width !== undefined && config.width <= 0) {\n throw new Error(\"Video width must be greater than zero\");\n }\n if (config.height !== undefined && config.height <= 0) {\n throw new Error(\"Video height must be greater than zero\");\n }\n if (config.fps !== undefined && config.fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (config.bitrate !== undefined && typeof config.bitrate === \"number\" && config.bitrate <= 0) {\n throw new Error(\"Bitrate must be greater than zero\");\n }\n if (config.keyFrameInterval <= 0) {\n throw new Error(\"Key frame interval must be greater than zero\");\n }\n }\n validateFormat(format) {\n if (format !== \"mp4\") {\n throw new Error(`Format ${format} is not yet supported in worker. 
Only MP4 is currently supported.`);\n }\n }\n initializeRecordingState(config) {\n this.config = config;\n this.frameRate = config.fps || 30;\n this.isPaused = false;\n this.isMuted = false;\n this.lastVideoTimestamp = 0;\n this.lastAudioTimestamp = 0;\n this.baseVideoTimestamp = null;\n this.frameCount = 0;\n this.lastKeyFrameTimestamp = 0;\n this.forceNextKeyFrame = false;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.overlayCanvas = null;\n this.hiddenIntervals = [];\n this.currentHiddenIntervalStart = null;\n this.pendingVisibilityUpdates = [];\n }\n setupOverlayConfig(overlayConfig) {\n this.overlayConfig = overlayConfig ? { enabled: overlayConfig.enabled, text: overlayConfig.text } : null;\n this.recordingStartTime = overlayConfig?.recordingStartTime !== undefined ? overlayConfig.recordingStartTime / 1000 : performance.now() / 1000;\n const logData = {\n hasOverlayConfig: !!this.overlayConfig,\n overlayEnabled: this.overlayConfig?.enabled,\n overlayText: this.overlayConfig?.text,\n recordingStartTime: this.recordingStartTime\n };\n logger.debug(\"[RecorderWorker] Overlay config initialized\", logData);\n }\n createOutput() {\n const writable = new WritableStream({\n write: (chunk) => {\n this.sendChunk(chunk.data, chunk.position);\n }\n });\n this.output = new Output({\n format: new Mp4OutputFormat({\n fastStart: \"fragmented\"\n }),\n target: new StreamTarget(writable, {\n chunked: true,\n chunkSize: CHUNK_SIZE\n })\n });\n }\n createVideoSource(config) {\n const fps = config.fps || 30;\n const keyFrameIntervalSeconds = config.keyFrameInterval / fps;\n const videoSourceOptions = {\n codec: config.codec,\n sizeChangeBehavior: \"passThrough\",\n bitrateMode: \"variable\",\n latencyMode: \"quality\",\n contentHint: \"detail\",\n hardwareAcceleration: \"prefer-hardware\",\n keyFrameInterval: keyFrameIntervalSeconds,\n bitrate: config.bitrate ?? 
QUALITY_HIGH\n };\n this.videoSource = new VideoSampleSource(videoSourceOptions);\n const output = requireNonNull(this.output, \"Output must be initialized before adding video track\");\n const trackOptions = {};\n if (fps !== undefined) {\n trackOptions.frameRate = fps;\n }\n output.addVideoTrack(this.videoSource, trackOptions);\n }\n setupAudioSource(audioStream, config) {\n if (audioStream && config.audioBitrate && config.audioCodec) {\n if (config.audioBitrate <= 0) {\n throw new Error(\"Audio bitrate must be greater than zero\");\n }\n this.audioSource = new AudioSampleSource({\n codec: config.audioCodec,\n bitrate: config.audioBitrate,\n bitrateMode: \"variable\"\n });\n const output = requireNonNull(this.output, \"Output must be initialized before adding audio track\");\n output.addAudioTrack(this.audioSource);\n this.setupAudioProcessing(audioStream);\n }\n }\n async handleStart(videoStream, audioStream, config, overlayConfig) {\n this.validateConfig(config);\n logger.debug(\"[RecorderWorker] handleStart called\", {\n hasVideoStream: !!videoStream,\n hasAudioStream: !!audioStream,\n config: {\n width: config.width,\n height: config.height,\n fps: config.fps,\n bitrate: config.bitrate\n },\n hasOverlayConfig: !!overlayConfig,\n overlayConfig\n });\n this.isStopping = false;\n this.isFinalized = false;\n if (this.output) {\n logger.debug(\"[RecorderWorker] Cleaning up existing output\");\n await this.cleanup();\n }\n this.initializeRecordingState(config);\n this.setupOverlayConfig(overlayConfig);\n const format = config.format || \"mp4\";\n this.validateFormat(format);\n this.createOutput();\n this.createVideoSource(config);\n if (videoStream) {\n this.setupVideoProcessing(videoStream);\n }\n this.setupAudioSource(audioStream, config);\n const output = requireNonNull(this.output, \"Output must be initialized before starting\");\n await output.start();\n this.startBufferUpdates();\n this.sendReady();\n this.sendStateChange(\"recording\");\n }\n startBufferUpdates() {\n if (this.bufferUpdateInterval !== null) {\n return;\n }\n this.bufferUpdateInterval = self.setInterval(() => {\n if (this.output) {\n const size = this.getBufferSize();\n const formatted = this.formatFileSize(size);\n this.sendBufferUpdate(size, formatted);\n }\n }, 1000);\n }\n stopBufferUpdates() {\n if (this.bufferUpdateInterval !== null) {\n self.clearInterval(this.bufferUpdateInterval);\n this.bufferUpdateInterval = null;\n }\n }\n getBufferSize() {\n return this.totalSize;\n }\n totalSize = 0;\n setupVideoProcessing(videoStream) {\n if (!this.videoSource) {\n return;\n }\n this.videoProcessor = videoStream.getReader();\n this.videoProcessingActive = true;\n this.processVideoFrames();\n }\n async handlePausedVideoFrame() {\n if (!this.videoProcessor) {\n return false;\n }\n const pausedResult = await this.videoProcessor.read();\n if (pausedResult.done) {\n return false;\n }\n if (pausedResult.value) {\n pausedResult.value.close();\n }\n return true;\n }\n calculateVideoFrameTimestamp(videoFrame) {\n requireDefined(this.frameRate, \"Frame rate must be set\");\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n const rawTs = typeof videoFrame.timestamp === \"number\" && videoFrame.timestamp !== null ? 
videoFrame.timestamp / 1e6 : performance.now() / 1000;\n if (this.baseVideoTimestamp === null) {\n this.baseVideoTimestamp = rawTs;\n const logData = {\n baseVideoTimestamp: this.baseVideoTimestamp,\n recordingStartTime: this.recordingStartTime,\n difference: this.baseVideoTimestamp - this.recordingStartTime,\n pendingUpdates: this.pendingVisibilityUpdates.length\n };\n logger.debug(\"[RecorderWorker] baseVideoTimestamp set\", logData);\n for (const update of this.pendingVisibilityUpdates) {\n this.processVisibilityUpdate(update.isHidden, update.timestamp);\n }\n this.pendingVisibilityUpdates = [];\n }\n requireNonNull(this.baseVideoTimestamp, \"Base video timestamp must be set\");\n if (this.frameCount === 0 && this.lastVideoTimestamp > 0) {\n const originalBase = this.baseVideoTimestamp;\n const offset = rawTs - originalBase;\n this.baseVideoTimestamp = rawTs - this.lastVideoTimestamp;\n const frameTimestamp2 = this.lastVideoTimestamp;\n logger.debug(\"[RecorderWorker] First frame after source switch\", {\n rawTs,\n originalBase,\n offset,\n adjustedBaseVideoTimestamp: this.baseVideoTimestamp,\n continuationTimestamp: this.lastVideoTimestamp,\n frameTimestamp: frameTimestamp2,\n isScreenCapture: this.isScreenCapture\n });\n return frameTimestamp2;\n }\n const normalizedTs = rawTs - this.baseVideoTimestamp - this.pausedDuration;\n const prevTs = this.lastVideoTimestamp > 0 ? this.lastVideoTimestamp : 0;\n const frameTimestamp = normalizedTs >= prevTs ? normalizedTs : prevTs + 1 / this.frameRate;\n if (frameTimestamp < 0) {\n logger.warn(\"[RecorderWorker] Negative frame timestamp detected, clamping to zero\", { frameTimestamp, normalizedTs, prevTs });\n return 0;\n }\n if (this.lastVideoTimestamp === 0) {\n this.lastVideoTimestamp = frameTimestamp;\n }\n logger.debug(\"[RecorderWorker] Frame timestamp calculation\", {\n rawTs,\n baseVideoTimestamp: this.baseVideoTimestamp,\n normalizedTs,\n prevTs,\n frameTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n isScreenCapture: this.isScreenCapture,\n frameCount: this.frameCount\n });\n return frameTimestamp;\n }\n createOverlayCanvas(text) {\n requireDefined(text, \"Overlay text is required\");\n const canvas = new OffscreenCanvas(1, 1);\n const ctx = requireNonNull(canvas.getContext(\"2d\"), \"Failed to get OffscreenCanvas context\");\n ctx.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n const textMetrics = ctx.measureText(text);\n const textWidth = textMetrics.width;\n const textHeight = OVERLAY_FONT_SIZE;\n const overlayWidth = Math.max(OVERLAY_MIN_WIDTH, textWidth + OVERLAY_PADDING * 2);\n const overlayHeight = Math.max(OVERLAY_MIN_HEIGHT, textHeight + OVERLAY_PADDING * 2);\n canvas.width = overlayWidth;\n canvas.height = overlayHeight;\n const r = 20;\n const g = 20;\n const b = 20;\n const borderRadius = 50;\n ctx.fillStyle = `rgba(${r}, ${g}, ${b}, ${OVERLAY_BACKGROUND_OPACITY})`;\n ctx.beginPath();\n ctx.roundRect(0, 0, overlayWidth, overlayHeight, borderRadius);\n ctx.fill();\n ctx.fillStyle = OVERLAY_TEXT_COLOR;\n ctx.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n ctx.textBaseline = \"middle\";\n ctx.textAlign = \"center\";\n const textX = overlayWidth / 2;\n const textY = overlayHeight / 2;\n ctx.fillText(text, textX, textY);\n return canvas;\n }\n getOverlayPosition(overlayWidth, videoWidth) {\n const padding = OVERLAY_PADDING;\n return {\n x: videoWidth - overlayWidth - padding,\n y: padding\n };\n }\n shouldApplyOverlay(timestamp) {\n if (!this.overlayConfig?.enabled) {\n return false;\n }\n if 
(this.isScreenCapture) {\n return false;\n }\n const completedIntervalMatch = this.hiddenIntervals.some((interval) => timestamp >= interval.start && timestamp <= interval.end);\n const currentIntervalMatch = this.currentHiddenIntervalStart !== null && timestamp >= this.currentHiddenIntervalStart;\n const shouldApply = completedIntervalMatch || currentIntervalMatch;\n if (this.frameCount % 90 === 0) {\n logger.debug(\"[RecorderWorker] Overlay check\", {\n timestamp,\n shouldApply,\n frameCount: this.frameCount,\n intervalsCount: this.hiddenIntervals.length\n });\n }\n return shouldApply;\n }\n handleUpdateVisibility(isHidden, timestamp) {\n if (this.baseVideoTimestamp === null) {\n this.pendingVisibilityUpdates.push({ isHidden, timestamp });\n return;\n }\n this.processVisibilityUpdate(isHidden, timestamp);\n }\n processVisibilityUpdate(isHidden, timestamp) {\n const timestampSeconds = timestamp / 1000;\n const normalizedTimestamp = timestampSeconds - this.recordingStartTime - this.pausedDuration;\n if (isHidden) {\n if (this.currentHiddenIntervalStart === null) {\n this.currentHiddenIntervalStart = Math.max(0, normalizedTimestamp);\n logger.debug(\"[RecorderWorker] Started hidden interval\", {\n start: this.currentHiddenIntervalStart\n });\n }\n } else if (this.currentHiddenIntervalStart !== null) {\n const endTimestamp = Math.max(0, normalizedTimestamp);\n if (endTimestamp > this.currentHiddenIntervalStart) {\n const interval = {\n start: this.currentHiddenIntervalStart,\n end: endTimestamp\n };\n this.hiddenIntervals.push(interval);\n logger.debug(\"[RecorderWorker] Completed hidden interval\", {\n interval,\n duration: endTimestamp - this.currentHiddenIntervalStart,\n totalIntervals: this.hiddenIntervals.length\n });\n } else {\n logger.warn(\"[RecorderWorker] Invalid interval (end <= start), discarding\");\n }\n this.currentHiddenIntervalStart = null;\n }\n }\n async processVideoFrame(videoFrame) {\n const videoSource = requireInitialized(this.videoSource, \"Video source\");\n const config = requireInitialized(this.config, \"Transcode config\");\n const frameTimestamp = this.calculateVideoFrameTimestamp(videoFrame);\n requireDefined(this.frameRate, \"Frame rate must be set\");\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n const frameDuration = 1 / this.frameRate;\n let frameToProcess = videoFrame;\n let imageBitmap = null;\n if (this.shouldApplyOverlay(frameTimestamp)) {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (width <= 0 || height <= 0) {\n logger.warn(\"[RecorderWorker] Invalid video frame dimensions, skipping overlay\", { width, height });\n } else if (this.overlayConfig) {\n if (!this.overlayCanvas) {\n this.overlayCanvas = this.createOverlayCanvas(this.overlayConfig.text);\n logger.debug(\"[RecorderWorker] Overlay canvas created\", {\n overlayWidth: this.overlayCanvas.width,\n overlayHeight: this.overlayCanvas.height\n });\n }\n if (this.overlayCanvas) {\n if (!(this.compositionCanvas && this.compositionCtx) || this.compositionCanvas.width !== width || this.compositionCanvas.height !== height) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionCtx = requireNonNull(this.compositionCanvas.getContext(\"2d\"), \"Failed to get composition canvas context\");\n }\n requireNonNull(this.compositionCtx, \"Composition context must be available\");\n this.compositionCtx.clearRect(0, 0, width, height);\n this.compositionCtx.drawImage(videoFrame, 0, 0, width, 
height);\n const position = this.getOverlayPosition(this.overlayCanvas.width, width);\n this.compositionCtx.drawImage(this.overlayCanvas, position.x, position.y);\n imageBitmap = this.compositionCanvas.transferToImageBitmap();\n const frameInit = {};\n if (typeof videoFrame.timestamp === \"number\" && videoFrame.timestamp !== null) {\n frameInit.timestamp = videoFrame.timestamp;\n }\n if (typeof videoFrame.duration === \"number\" && videoFrame.duration !== null) {\n frameInit.duration = videoFrame.duration;\n }\n frameToProcess = new VideoFrame(imageBitmap, frameInit);\n }\n }\n }\n const maxLead = 0.05;\n const maxLag = 0.1;\n const targetAudio = this.lastAudioTimestamp;\n let adjustedTimestamp = frameTimestamp + this.driftOffset;\n if (adjustedTimestamp - targetAudio > maxLead) {\n adjustedTimestamp = targetAudio + maxLead;\n } else if (targetAudio - adjustedTimestamp > maxLag) {\n adjustedTimestamp = targetAudio - maxLag;\n }\n const monotonicTimestamp = this.lastVideoTimestamp + frameDuration;\n const finalTimestamp = adjustedTimestamp >= monotonicTimestamp ? adjustedTimestamp : monotonicTimestamp;\n const keyFrameIntervalFrames = config.keyFrameInterval > 0 ? config.keyFrameInterval : 5;\n const keyFrameIntervalSeconds = keyFrameIntervalFrames / this.frameRate;\n const timeSinceLastKeyFrame = finalTimestamp - this.lastKeyFrameTimestamp;\n const isKeyFrame = this.forceNextKeyFrame || timeSinceLastKeyFrame >= keyFrameIntervalSeconds || this.frameCount % keyFrameIntervalFrames === 0;\n this.driftOffset *= 0.5;\n const sample = new VideoSample(frameToProcess, {\n timestamp: finalTimestamp,\n duration: frameDuration\n });\n const addError = await videoSource.add(sample, isKeyFrame ? { keyFrame: true } : undefined).then(() => null).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add video frame: ${errorMessage}`));\n return error;\n });\n sample.close();\n if (!addError) {\n this.frameCount += 1;\n this.lastVideoTimestamp = finalTimestamp;\n if (isKeyFrame) {\n this.lastKeyFrameTimestamp = finalTimestamp;\n this.forceNextKeyFrame = false;\n }\n if (this.frameCount % 90 === 0 && this.audioProcessingActive) {\n const avDrift = this.lastAudioTimestamp - this.lastVideoTimestamp;\n logger.debug(\"[RecorderWorker] AV drift metrics\", {\n frameCount: this.frameCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n avDrift,\n isScreenCapture: this.isScreenCapture\n });\n }\n }\n if (imageBitmap) {\n imageBitmap.close();\n imageBitmap = null;\n }\n if (frameToProcess !== videoFrame) {\n frameToProcess.close();\n }\n videoFrame.close();\n }\n async processVideoFrames() {\n if (!(this.videoProcessor && this.videoSource)) {\n return;\n }\n while (this.videoProcessingActive && !this.isStopping) {\n if (this.isPaused) {\n const shouldContinue = await this.handlePausedVideoFrame();\n if (!shouldContinue) {\n break;\n }\n continue;\n }\n const result = await this.videoProcessor.read();\n if (result.done) {\n break;\n }\n const videoFrame = result.value;\n if (!videoFrame) {\n continue;\n }\n await this.processVideoFrame(videoFrame).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n logger.error(\"[RecorderWorker] Error processing video frame\", errorMessage);\n videoFrame.close();\n });\n }\n }\n setupAudioProcessing(audioStream) {\n if (!this.audioSource) {\n logger.warn(\"[RecorderWorker] setupAudioProcessing called but audioSource is null\");\n return;\n }\n 
logger.debug(\"[RecorderWorker] setupAudioProcessing\", {\n hasAudioSource: !!this.audioSource,\n hasAudioStream: !!audioStream,\n audioProcessingActive: this.audioProcessingActive,\n lastAudioTimestamp: this.lastAudioTimestamp\n });\n this.audioProcessor = audioStream.getReader();\n this.audioProcessingActive = true;\n logger.debug(\"[RecorderWorker] Audio processing started\", {\n hasAudioProcessor: !!this.audioProcessor,\n audioProcessingActive: this.audioProcessingActive\n });\n this.processAudioData();\n }\n handlePausedAudioData(audioData) {\n audioData.close();\n }\n createAudioBuffer(audioData) {\n const numberOfFrames = audioData.numberOfFrames;\n if (numberOfFrames <= 0) {\n throw new Error(\"Number of frames must be greater than zero\");\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(\"Number of channels must be greater than zero\");\n }\n const audioBuffer = new Float32Array(numberOfFrames * numberOfChannels);\n audioData.copyTo(audioBuffer, { planeIndex: 0 });\n return audioBuffer;\n }\n createAudioSample(audioData, audioBuffer) {\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n throw new Error(\"Sample rate must be greater than zero\");\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(\"Number of channels must be greater than zero\");\n }\n const shouldWriteSilence = this.isMuted;\n return new AudioSample({\n data: shouldWriteSilence ? new Float32Array(audioBuffer.length) : audioBuffer,\n format: \"f32-planar\",\n numberOfChannels,\n sampleRate,\n timestamp: this.lastAudioTimestamp\n });\n }\n async processAudioSample(audioData, audioSample) {\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n throw new Error(\"Sample rate must be greater than zero\");\n }\n const numberOfFrames = audioData.numberOfFrames;\n const duration = numberOfFrames / sampleRate;\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n logger.debug(\"[RecorderWorker] Audio sample processed\", {\n lastAudioTimestamp: this.lastAudioTimestamp,\n duration,\n newLastAudioTimestamp: this.lastAudioTimestamp + duration,\n sampleRate: audioData.sampleRate,\n numberOfFrames: audioData.numberOfFrames\n });\n this.lastAudioTimestamp += duration;\n audioSample.close();\n audioData.close();\n }\n async processAudioData() {\n if (!(this.audioProcessor && this.audioSource)) {\n logger.warn(\"[RecorderWorker] processAudioData called but processor or source is null\", {\n hasAudioProcessor: !!this.audioProcessor,\n hasAudioSource: !!this.audioSource\n });\n return;\n }\n logger.debug(\"[RecorderWorker] processAudioData loop started\", {\n hasAudioProcessor: !!this.audioProcessor,\n hasAudioSource: !!this.audioSource,\n audioProcessingActive: this.audioProcessingActive,\n isPaused: this.isPaused,\n isMuted: this.isMuted,\n lastAudioTimestamp: this.lastAudioTimestamp\n });\n let audioSampleCount = 0;\n while (this.audioProcessingActive) {\n const result = await this.audioProcessor.read();\n if (result.done) {\n logger.debug(\"[RecorderWorker] Audio processor stream ended\", {\n audioSampleCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n audioProcessingActive: this.audioProcessingActive\n });\n this.audioProcessingActive = false;\n break;\n }\n const 
audioData = result.value;\n if (!audioData) {\n logger.warn(\"[RecorderWorker] Received null audioData from processor\");\n continue;\n }\n audioSampleCount += 1;\n if (audioSampleCount % 100 === 0) {\n logger.debug(\"[RecorderWorker] Processing audio sample\", {\n sampleCount: audioSampleCount,\n numberOfFrames: audioData.numberOfFrames,\n sampleRate: audioData.sampleRate,\n numberOfChannels: audioData.numberOfChannels,\n lastAudioTimestamp: this.lastAudioTimestamp,\n isPaused: this.isPaused,\n isMuted: this.isMuted\n });\n }\n if (this.isPaused) {\n this.handlePausedAudioData(audioData);\n continue;\n }\n const audioBuffer = this.createAudioBuffer(audioData);\n const audioSample = this.createAudioSample(audioData, audioBuffer);\n await this.processAudioSample(audioData, audioSample);\n }\n logger.debug(\"[RecorderWorker] processAudioData loop ended\", {\n audioSampleCount,\n lastAudioTimestamp: this.lastAudioTimestamp,\n audioProcessingActive: this.audioProcessingActive\n });\n }\n handlePause() {\n if (this.isPaused) {\n return;\n }\n this.pauseStartedAt = performance.now() / 1000;\n this.isPaused = true;\n this.sendStateChange(\"paused\");\n }\n handleResume() {\n if (!this.isPaused) {\n return;\n }\n const now = performance.now() / 1000;\n if (this.pauseStartedAt !== null) {\n this.pausedDuration += now - this.pauseStartedAt;\n }\n this.pauseStartedAt = null;\n this.isPaused = false;\n this.sendStateChange(\"recording\");\n }\n async handleStop() {\n if (this.isStopping || this.isFinalized) {\n logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return;\n }\n this.isStopping = true;\n this.isFinalized = true;\n this.videoProcessingActive = false;\n this.audioProcessingActive = false;\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n if (this.output) {\n await this.output.finalize().catch((error) => {\n logger.warn(\"[RecorderWorker] finalize failed (ignored, already finalized?)\", error);\n });\n }\n await this.cleanup();\n this.sendStateChange(\"stopped\");\n this.isStopping = false;\n }\n handleToggleMute() {\n this.isMuted = !this.isMuted;\n }\n handleUpdateFps(fps) {\n if (fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n logger.debug(\"[RecorderWorker] Updating FPS\", {\n fps,\n previousFps: this.frameRate\n });\n this.frameRate = fps;\n if (this.config) {\n this.config.fps = fps;\n }\n }\n handleUpdateSourceType(isScreenCapture) {\n logger.debug(\"[RecorderWorker] Updating source type\", {\n isScreenCapture,\n previousIsScreenCapture: this.isScreenCapture\n });\n this.isScreenCapture = isScreenCapture;\n }\n async handleSwitchSource(videoStream) {\n requireDefined(videoStream, \"Video stream is required\");\n requireDefined(this.frameRate, \"Frame rate must be set\");\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (this.videoProcessor) {\n this.videoProcessingActive = false;\n await this.videoProcessor.cancel();\n let drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n while (!drainResult.done) {\n drainResult.value?.close();\n drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n }\n this.videoProcessor = null;\n }\n requireNonNull(this.baseVideoTimestamp, \"Base video timestamp must be set for source switch\");\n const minFrameDuration = 1 / this.frameRate;\n const 
rawDrift = this.lastAudioTimestamp - this.lastVideoTimestamp;\n const maxDriftCorrection = 0.1;\n this.driftOffset = Math.max(-maxDriftCorrection, Math.min(maxDriftCorrection, rawDrift));\n const continuationTimestamp = Math.max(this.lastAudioTimestamp, this.lastVideoTimestamp) + minFrameDuration;\n const previousVideoTimestamp = this.lastVideoTimestamp;\n this.lastVideoTimestamp = continuationTimestamp;\n this.frameCount = 0;\n this.forceNextKeyFrame = true;\n logger.debug(\"[RecorderWorker] handleSwitchSource - preserving baseVideoTimestamp\", {\n continuationTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n frameRate: this.frameRate,\n isScreenCapture: this.isScreenCapture,\n baseVideoTimestamp: this.baseVideoTimestamp,\n recordingStartTime: this.recordingStartTime,\n lastAudioTimestamp: this.lastAudioTimestamp,\n previousVideoTimestamp,\n minFrameDuration,\n rawDrift,\n driftOffset: this.driftOffset\n });\n this.setupVideoProcessing(videoStream);\n }\n async cleanup() {\n this.stopBufferUpdates();\n this.videoProcessingActive = false;\n this.audioProcessingActive = false;\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n if (this.videoSource) {\n if (!this.isFinalized) {\n this.videoSource.close();\n }\n this.videoSource = null;\n }\n if (this.audioSource) {\n if (!this.isFinalized) {\n this.audioSource.close();\n }\n this.audioSource = null;\n }\n if (this.output) {\n if (!this.isFinalized) {\n await this.output.cancel().catch((error) => {\n logger.warn(\"[RecorderWorker] cancel failed (ignored, possibly finalized)\", error);\n });\n this.isFinalized = true;\n }\n this.output = null;\n }\n this.lastVideoTimestamp = 0;\n this.lastAudioTimestamp = 0;\n this.baseVideoTimestamp = null;\n this.frameCount = 0;\n this.lastKeyFrameTimestamp = 0;\n this.forceNextKeyFrame = false;\n this.totalSize = 0;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.overlayCanvas = null;\n this.overlayConfig = null;\n this.hiddenIntervals = [];\n this.currentHiddenIntervalStart = null;\n this.recordingStartTime = 0;\n this.pendingVisibilityUpdates = [];\n this.isScreenCapture = false;\n }\n sendReady() {\n const response = { type: \"ready\" };\n self.postMessage(response);\n }\n sendError(error) {\n const errorMessage = extractErrorMessage(error);\n const response = {\n type: \"error\",\n error: errorMessage\n };\n self.postMessage(response);\n }\n sendChunk(data, position) {\n this.totalSize = Math.max(this.totalSize, position + data.length);\n const response = {\n type: \"chunk\",\n data,\n position\n };\n const buffer = data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength);\n self.postMessage(response, [buffer]);\n }\n sendBufferUpdate(size, formatted) {\n const response = {\n type: \"bufferUpdate\",\n size,\n formatted\n };\n self.postMessage(response);\n }\n sendStateChange(state) {\n const response = {\n type: \"stateChange\",\n state\n };\n self.postMessage(response);\n }\n}\nnew RecorderWorker;\n";
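
In the inlined worker above, processVideoFrame keeps the video clock tied to the audio clock: each frame timestamp is clamped to at most 50 ms ahead of, or 100 ms behind, the last audio timestamp; the one-shot driftOffset set in handleSwitchSource decays by half per frame; and the result is forced monotonic. A self-contained sketch of that clamp, using the same constants as the bundle (illustrative only, not an export of the package):

    // Illustrative only: mirrors the clamp inside the bundled RecorderWorker.
    // All timestamps are in seconds; maxLead/maxLag match the bundle (0.05 / 0.1).
    type DriftState = { driftOffset: number };

    function alignVideoToAudio(
      frameTimestamp: number,
      lastAudioTimestamp: number,
      lastVideoTimestamp: number,
      frameDuration: number,
      state: DriftState
    ): number {
      const maxLead = 0.05; // video may lead audio by at most 50 ms
      const maxLag = 0.1; // ...and lag it by at most 100 ms
      let adjusted = frameTimestamp + state.driftOffset;
      if (adjusted - lastAudioTimestamp > maxLead) {
        adjusted = lastAudioTimestamp + maxLead;
      } else if (lastAudioTimestamp - adjusted > maxLag) {
        adjusted = lastAudioTimestamp - maxLag;
      }
      state.driftOffset *= 0.5; // halve the one-shot correction from handleSwitchSource
      // Never step backwards: enforce monotonically increasing timestamps.
      return Math.max(adjusted, lastVideoTimestamp + frameDuration);
    }
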
 
  export {};
 
+ import type { Quality, VideoCodec } from "mediabunny";
  export type OutputFormat = "mp4" | "mkv" | "mov" | "webm";
  export type AudioCodec = "aac" | "opus";
  export type TranscodeConfig = {
  format: OutputFormat;
- fps: number;
- width: number;
- height: number;
- bitrate: number;
+ fps?: number;
+ width?: number;
+ height?: number;
+ bitrate?: number | Quality;
+ codec?: VideoCodec;
  audioCodec?: AudioCodec;
- preset: "medium";
- packetCount: number;
  audioBitrate?: number;
  };
  export type TranscodeInput = Blob | File | string;
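
As of 0.9.3, format is the only required field on TranscodeConfig: the preset and packetCount knobs are removed, bitrate accepts either a number or a mediabunny Quality preset, and codec can pin a specific VideoCodec. A minimal sketch (the root import path is an assumption):

    import { QUALITY_HIGH } from "mediabunny"; // Quality preset, also the worker's bitrate fallback
    import type { TranscodeConfig } from "@vidtreo/recorder"; // import path assumed

    // Every omitted field falls back to a library default.
    const config: TranscodeConfig = {
      format: "mp4",
      bitrate: QUALITY_HIGH, // number | Quality as of 0.9.3
      codec: "avc", // optional mediabunny VideoCodec, e.g. H.264
    };
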
@@ -498,6 +547,9 @@ export type TranscodeResult = {
  blob: Blob;
  };
 
+ import type { Quality, VideoCodec } from "mediabunny";
+ export declare function detectBestCodec(width: number | undefined, height: number | undefined, bitrate: number | Quality | undefined): Promise<VideoCodec>;
+
  export declare const FORMAT_DEFAULT_CODECS: Record<OutputFormat, AudioCodec>;
  export declare function getDefaultAudioCodecForFormat(format: OutputFormat): AudioCodec;
  export declare function getAudioCodecForFormat(format: OutputFormat, overrideCodec?: AudioCodec): AudioCodec;
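
detectBestCodec is new in this release: it resolves the best-supported mediabunny VideoCodec for the intended resolution and bitrate. A hedged call-site sketch (the root re-export path is assumed; undefined arguments mirror the now-optional config fields):

    import type { VideoCodec } from "mediabunny";
    import { detectBestCodec } from "@vidtreo/recorder"; // import path assumed

    async function pickCodec(): Promise<VideoCodec> {
      // All three arguments may be undefined, per the declaration above.
      const codec = await detectBestCodec(1920, 1080, undefined);
      console.log(`encoding with ${codec}`); // e.g. "avc" where hardware H.264 encode exists
      return codec;
    }
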
@@ -884,25 +936,25 @@ export type BackendConfigResponse = {
  presetEncoding: BackendPreset;
  max_width: number;
  max_height: number;
+ outputFormat?: "mp4" | "webm" | "mkv" | "mov";
  };
- export declare function mapPresetToConfig(preset: BackendPreset, maxWidth: number, maxHeight: number, format?: TranscodeConfig["format"]): TranscodeConfig;
+ export declare function mapPresetToConfig(preset: BackendPreset, maxWidth: number, maxHeight: number, outputFormat?: TranscodeConfig["format"]): TranscodeConfig;
 
  import type { TranscodeConfig } from "../transcode/transcode-types";
  export declare const DEFAULT_BACKEND_URL = "https://api.vidtreo.com";
  export declare const DEFAULT_TRANSCODE_CONFIG: Readonly<TranscodeConfig>;
  export declare function getDefaultConfigForFormat(format: TranscodeConfig["format"]): TranscodeConfig;
 
- export type { AudioCodec, OutputFormat } from "../processor/types";
+ import type { Quality, VideoCodec } from "mediabunny";
  import type { AudioCodec, OutputFormat } from "../processor/types";
  export type TranscodeConfig = {
  format: OutputFormat;
- fps: number;
- width: number;
- height: number;
- bitrate: number;
+ fps?: number;
+ width?: number;
+ height?: number;
+ bitrate?: number | Quality;
+ codec?: VideoCodec;
  audioCodec?: AudioCodec;
- preset: "medium";
- packetCount: number;
  audioBitrate?: number;
  tabVisibilityIntervals?: Array<{
  start: number;
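
The backend response can now name the container via an optional outputFormat, and mapPresetToConfig's last parameter was renamed from format to outputFormat to match. A sketch of wiring one into the other (root exports assumed; the mp4 fallback mirrors the library default):

    import { mapPresetToConfig, type BackendConfigResponse } from "@vidtreo/recorder"; // imports assumed

    declare const response: BackendConfigResponse; // e.g. parsed from the config endpoint

    const transcodeConfig = mapPresetToConfig(
      response.presetEncoding,
      response.max_width,
      response.max_height,
      response.outputFormat ?? "mp4" // optional on the wire; fall back to mp4
    );
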
@@ -917,6 +969,7 @@ export type TranscodeResult = {
  };
 
  export declare function transcodeVideo(input: TranscodeInput, config?: Partial<TranscodeConfig>, onProgress?: (progress: number) => void): Promise<TranscodeResult>;
+ export declare function transcodeVideoForNativeCamera(file: File, config?: Partial<TranscodeConfig>, onProgress?: (progress: number) => void): Promise<TranscodeResult>;
 
  import type { AudioLevelCallbacks } from "../audio/types";
  import type { DeviceCallbacks } from "../device/types";
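
transcodeVideoForNativeCamera is the File-based sibling of transcodeVideo, presumably serving clips captured by the device's own camera app. A usage sketch (root export assumed; the declaration only promises a numeric progress value, so the 0-to-1 scale shown is an assumption):

    import { transcodeVideoForNativeCamera } from "@vidtreo/recorder"; // import path assumed

    // e.g. a file picked via <input type="file" accept="video/*" capture>
    async function onNativeCapture(file: File): Promise<Blob> {
      const result = await transcodeVideoForNativeCamera(
        file,
        { format: "mp4" }, // all other TranscodeConfig fields are optional in 0.9.3
        (progress) => console.log(`transcoding: ${Math.round(progress * 100)}%`) // scale assumed
      );
      return result.blob; // TranscodeResult.blob, per the declaration above
    }
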
@@ -947,6 +1000,7 @@ export type RecorderConfig = {
  enableDeviceChange?: boolean;
  enableTabVisibilityOverlay?: boolean;
  tabVisibilityOverlayText?: string;
+ nativeCamera?: boolean;
  };
  export type RecorderCallbacks = {
  recording?: Partial<RecordingCallbacks>;
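
nativeCamera is the lone new RecorderConfig flag, and presumably routes capture through the device's native camera flow instead of in-page recording. A minimal sketch (assumes the remaining RecorderConfig fields are optional, which this hunk suggests but does not prove, and leaves wiring the config into VidtreoRecorder to the host app):

    import type { RecorderConfig } from "@vidtreo/recorder"; // import path assumed

    // Fields not shown here are assumed optional.
    const config: RecorderConfig = {
      enableTabVisibilityOverlay: true,
      tabVisibilityOverlayText: "Recorded while tab was hidden",
      nativeCamera: true, // new in 0.9.3: opt into the native camera capture path
    };
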