@vidtreo/recorder 1.2.1 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -45,6 +45,20 @@ export {};
45
45
 
46
46
  export {};
47
47
 
48
+ export {};
49
+
50
+ export {};
51
+
52
+ export {};
53
+
54
+ export {};
55
+
56
+ export {};
57
+
58
+ export {};
59
+
60
+ export {};
61
+
48
62
  export type VidtreoRecorderConfig = {
49
63
  apiKey: string;
50
64
  apiUrl?: string;
@@ -402,8 +416,10 @@ export declare class ConfigManager {
402
416
  initialize(apiKey: string | null, backendUrl: string | null): Promise<void>;
403
417
  fetchConfig(): Promise<void>;
404
418
  getConfig(): Promise<TranscodeConfig>;
419
+ getConfigForRecording(): TranscodeConfig;
405
420
  isConfigReady(): boolean;
406
421
  clearCache(): void;
422
+ private fetchConfigInBackground;
407
423
  }
408
424
 
409
425
  import type { TranscodeConfig } from "../transcode/transcode-types";
@@ -429,6 +445,7 @@ export declare class ConfigService {
429
445
  private fetchConfigFromBackend;
430
446
  }
431
447
 
448
+ import type { AudioCodec, OutputFormat } from "../processor/types";
432
449
  import type { TranscodeConfig } from "../transcode/transcode-types";
433
450
  export type BackendPreset = "sd" | "hd" | "fhd" | "4k";
434
451
  export declare const DEFAULT_AUDIO_BITRATE = 128000;
@@ -441,6 +458,24 @@ export type BackendConfigResponse = {
441
458
  position: string;
442
459
  };
443
460
  };
461
+ export type FormatCompatibilityPolicy = {
462
+ preferredVideoCodec: NonNullable<TranscodeConfig["codec"]>;
463
+ preferredAudioCodec: AudioCodec;
464
+ audioBitrate: number;
465
+ videoCodecFallbackOrder: NonNullable<TranscodeConfig["codec"]>[];
466
+ audioCodecFallbackOrder: AudioCodec[];
467
+ };
468
+ export type FormatCompatibilityContext = {
469
+ isLinuxPlatform: boolean;
470
+ };
471
+ export declare const FORMAT_COMPATIBILITY_POLICY: Record<OutputFormat, FormatCompatibilityPolicy>;
472
+ export declare const PRESET_SIZE_LIMIT_MB_PER_MINUTE: Record<BackendPreset, number>;
473
+ export declare function getFormatCompatibilityPolicy(format: OutputFormat, formatCompatibilityContext?: FormatCompatibilityContext): FormatCompatibilityPolicy;
474
+ export declare function calculateTotalBitrateFromMbPerMinute(sizeMbPerMinute: number): number;
475
+ export declare function calculateVideoBitrate(totalBitrate: number, audioBitrate: number): number;
476
+ export declare function getPresetTotalBitrate(preset: BackendPreset): number;
477
+ export declare function getPresetAudioBitrateForFormat(format: OutputFormat): number;
478
+ export declare function getPresetVideoBitrateForFormat(preset: BackendPreset, format: OutputFormat): number;
444
479
  export declare const PRESET_VIDEO_BITRATE_MAP: Record<BackendPreset, number>;
445
480
  export declare const RESOLUTION_MAP: Record<BackendPreset, {
446
481
  width: number;
@@ -463,6 +498,7 @@ export type MapPresetOptions = {
463
498
  outputFormat?: TranscodeConfig["format"];
464
499
  watermark?: BackendConfigResponse["watermark"];
465
500
  isMobile?: boolean;
501
+ isLinuxPlatform?: boolean;
466
502
  };
467
503
  export declare function mapPresetToConfig(options: MapPresetOptions): Promise<TranscodeConfig>;
468
504
 
@@ -563,6 +599,10 @@ export type VisibilityInterval = {
563
599
  start: number;
564
600
  end: number;
565
601
  };
602
+ type GetCurrentTimestampDependency = () => number;
603
+ type TabVisibilityTrackerDependencies = {
604
+ getCurrentTimestamp: GetCurrentTimestampDependency;
605
+ };
566
606
  export declare class TabVisibilityTracker {
567
607
  private recordingStartTime;
568
608
  private totalPausedTime;
@@ -573,7 +613,8 @@ export declare class TabVisibilityTracker {
573
613
  private readonly visibilityChangeHandler;
574
614
  private readonly blurHandler;
575
615
  private readonly focusHandler;
576
- constructor();
616
+ private readonly getCurrentTimestamp;
617
+ constructor(dependencies?: Partial<TabVisibilityTrackerDependencies>);
577
618
  start(recordingStartTime: number): void;
578
619
  pause(): void;
579
620
  resume(): void;
@@ -588,6 +629,18 @@ export declare class TabVisibilityTracker {
588
629
  private endCurrentIntervalIfActive;
589
630
  private normalizeTimestamp;
590
631
  }
632
+ export {};
633
+
634
+ type NavigatorUserAgentData = {
635
+ platform?: string;
636
+ };
637
+ type NavigatorProvider = {
638
+ platform?: string;
639
+ userAgent?: string;
640
+ userAgentData?: NavigatorUserAgentData;
641
+ };
642
+ export declare function isLinuxPlatform(navigatorProvider?: NavigatorProvider): boolean;
643
+ export {};
591
644
 
592
645
  export {};
593
646
 
@@ -804,8 +857,10 @@ export declare class RecorderController {
804
857
  private uploadQueueManager;
805
858
  private isInitialized;
806
859
  private isDemo;
860
+ private isDestroyed;
807
861
  private enableTabVisibilityOverlay;
808
862
  private tabVisibilityOverlayText;
863
+ private recordingWarmupTimeoutId;
809
864
  constructor(callbacks?: RecorderCallbacks);
810
865
  initialize(config: RecorderConfig): Promise<void>;
811
866
  startStream(): Promise<void>;
@@ -851,6 +906,8 @@ export declare class RecorderController {
851
906
  private applyRecordingConfig;
852
907
  private initializeStorage;
853
908
  private validateRecorderSupport;
909
+ private scheduleRecordingWarmup;
910
+ private ignorePromiseRejection;
854
911
  }
855
912
 
856
913
  import type { CameraStreamManager } from "../stream/stream";
@@ -888,6 +945,7 @@ export declare class RecordingManager {
888
945
  updateSourceType(isScreenCapture: boolean): void;
889
946
  setOriginalCameraStream(stream: MediaStream | null): void;
890
947
  getOriginalCameraStream(): MediaStream | null;
948
+ prewarmStreamProcessor(): void;
891
949
  startRecording(): Promise<void>;
892
950
  private startCountdown;
893
951
  private doStartRecording;
@@ -898,6 +956,7 @@ export declare class RecordingManager {
898
956
  cleanup(): void;
899
957
  private resetRecordingState;
900
958
  private resetPauseState;
959
+ private getOrCreateStreamProcessor;
901
960
  private updatePausedDuration;
902
961
  private startRecordingTimer;
903
962
  private startMaxTimeTimer;
@@ -1147,12 +1206,19 @@ export declare class StreamProcessor {
1147
1206
  }
1148
1207
  export {};
1149
1208
 
1209
+ import { type SupportCheckOptions, type SupportReport } from "../processor/support-check";
1150
1210
  import type { TranscodeConfig } from "../transcode/transcode-types";
1151
1211
  import { type VisibilityInterval } from "../utils/tab-visibility-tracker";
1152
1212
  export type StopRecordingResult = {
1153
1213
  blob: Blob;
1154
1214
  tabVisibilityIntervals: VisibilityInterval[];
1155
1215
  };
1216
+ type CheckRecorderSupportDependency = (options: SupportCheckOptions) => Promise<SupportReport>;
1217
+ type GetCurrentTimestampDependency = () => number;
1218
+ type StreamRecordingStateDependencies = {
1219
+ checkRecorderSupport: CheckRecorderSupportDependency;
1220
+ getCurrentTimestamp: GetCurrentTimestampDependency;
1221
+ };
1156
1222
  export declare class StreamRecordingState {
1157
1223
  private recordingStartTime;
1158
1224
  private recordingTimer;
@@ -1165,7 +1231,8 @@ export declare class StreamRecordingState {
1165
1231
  private blurHandler;
1166
1232
  private focusHandler;
1167
1233
  private readonly streamManager;
1168
- constructor(streamManager: StreamManager);
1234
+ private readonly dependencies;
1235
+ constructor(streamManager: StreamManager, dependencies?: Partial<StreamRecordingStateDependencies>);
1169
1236
  isRecording(): boolean;
1170
1237
  getStreamProcessor(): StreamProcessor | null;
1171
1238
  getAudioStreamForAnalysis(): MediaStream | null;
@@ -1185,10 +1252,12 @@ export declare class StreamRecordingState {
1185
1252
  private clearBufferSizeInterval;
1186
1253
  private resetRecordingState;
1187
1254
  private resetPauseState;
1255
+ private resolveTabVisibilityOverlayText;
1188
1256
  private setupVisibilityUpdates;
1189
1257
  private cleanupVisibilityUpdates;
1190
1258
  destroy(): void;
1191
1259
  }
1260
+ export {};
1192
1261
 
1193
1262
  export declare const DEFAULT_CAMERA_CONSTRAINTS: Readonly<CameraConstraints>;
1194
1263
  export declare const DEFAULT_STREAM_CONFIG: Readonly<StreamConfig>;
@@ -1418,6 +1487,7 @@ type VideoCodecCheckOptions = {
1418
1487
  width?: number;
1419
1488
  height?: number;
1420
1489
  bitrate?: number | Quality;
1490
+ hardwareAcceleration?: HardwareAccelerationPreference;
1421
1491
  };
1422
1492
  type AudioCodecCheckOptions = {
1423
1493
  bitrate?: number | Quality;
@@ -1428,7 +1498,9 @@ type MediabunnyModule = {
1428
1498
  };
1429
1499
  type MediabunnyLoaderDependencies = {
1430
1500
  loadMediabunny?: () => Promise<MediabunnyModule>;
1501
+ hardwareAcceleration?: HardwareAccelerationPreference;
1431
1502
  };
1503
+ type HardwareAccelerationPreference = "no-preference" | "prefer-hardware" | "prefer-software";
1432
1504
  export declare function detectBestCodec(width: number | undefined, height: number | undefined, bitrate: number | Quality | undefined, dependencies?: MediabunnyLoaderDependencies): Promise<VideoCodec>;
1433
1505
  export declare function detectBestWebmCodec(width: number | undefined, height: number | undefined, bitrate: number | Quality | undefined, dependencies?: MediabunnyLoaderDependencies): Promise<VideoCodec>;
1434
1506
  export declare function detectBestAudioCodec(bitrate?: number | Quality, dependencies?: MediabunnyLoaderDependencies): Promise<AudioCodec>;
@@ -1439,6 +1511,7 @@ type WorkerProcessorDependencies = {
1439
1511
  createWorker?: (workerUrl: string) => Worker;
1440
1512
  canUseMainThreadVideoProcessor?: () => boolean;
1441
1513
  createVideoStreamFromTrack?: (videoTrack: MediaStreamVideoTrack) => ReadableStream<VideoFrame> | null;
1514
+ isLinuxPlatform?: () => boolean;
1442
1515
  };
1443
1516
  type OverlayConfig = {
1444
1517
  enabled: boolean;
@@ -1464,6 +1537,7 @@ export declare class WorkerProcessor {
1464
1537
  private readonly workerProbeManager;
1465
1538
  private readonly canUseMainThreadVideoProcessorFn;
1466
1539
  private readonly createVideoStreamFromTrackFn;
1540
+ private readonly isLinuxPlatformFn;
1467
1541
  constructor(dependencies?: WorkerProcessorDependencies);
1468
1542
  private getWorkerProbeResult;
1469
1543
  startProcessing(stream: MediaStream, config: TranscodeConfig, overlayConfig?: OverlayConfig): Promise<void>;
@@ -1471,8 +1545,13 @@ export declare class WorkerProcessor {
1471
1545
  private ensureProcessingInactive;
1472
1546
  private resetProcessingState;
1473
1547
  private resolveRecordingFormat;
1548
+ private resolveAudioBitrate;
1474
1549
  private resolveAudioCodec;
1475
1550
  private resolveVideoCodec;
1551
+ private resolveAudioCodecWithCache;
1552
+ private resolveVideoCodecWithCache;
1553
+ private buildAudioCodecCacheKey;
1554
+ private buildVideoCodecCacheKey;
1476
1555
  private buildWorkerTranscodeConfig;
1477
1556
  private prepareAudioPipeline;
1478
1557
  private buildOverlayConfigToSend;
@@ -1759,7 +1838,7 @@ export declare function createStartMessage(parameters: StartMessageParameters):
1759
1838
  export declare function createSwitchSourceMessage(videoTrack: MediaStreamVideoTrack | null, videoStream: ReadableStream<VideoFrame> | null): WorkerMessage;
1760
1839
  export declare function collectTransferables(videoStream: ReadableStream<VideoFrame> | null, audioStream: ReadableStream<AudioData> | null, videoTrack?: MediaStreamVideoTrack | null): Transferable[];
1761
1840
 
1762
- export declare const workerCode = "// ../../node_modules/mediabunny/dist/modules/src/misc.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nfunction assert(x) {\n if (!x) {\n throw new Error(\"Assertion failed.\");\n }\n}\nvar last = (arr) => {\n return arr && arr[arr.length - 1];\n};\nvar isU32 = (value) => {\n return value >= 0 && value < 2 ** 32;\n};\n\nclass Bitstream {\n constructor(bytes) {\n this.bytes = bytes;\n this.pos = 0;\n }\n seekToByte(byteOffset) {\n this.pos = 8 * byteOffset;\n }\n readBit() {\n const byteIndex = Math.floor(this.pos / 8);\n const byte = this.bytes[byteIndex] ?? 0;\n const bitIndex = 7 - (this.pos & 7);\n const bit = (byte & 1 << bitIndex) >> bitIndex;\n this.pos++;\n return bit;\n }\n readBits(n) {\n if (n === 1) {\n return this.readBit();\n }\n let result = 0;\n for (let i = 0;i < n; i++) {\n result <<= 1;\n result |= this.readBit();\n }\n return result;\n }\n writeBits(n, value) {\n const end = this.pos + n;\n for (let i = this.pos;i < end; i++) {\n const byteIndex = Math.floor(i / 8);\n let byte = this.bytes[byteIndex];\n const bitIndex = 7 - (i & 7);\n byte &= ~(1 << bitIndex);\n byte |= (value & 1 << end - i - 1) >> end - i - 1 << bitIndex;\n this.bytes[byteIndex] = byte;\n }\n this.pos = end;\n }\n readAlignedByte() {\n if (this.pos % 8 !== 0) {\n throw new Error(\"Bitstream is not byte-aligned.\");\n }\n const byteIndex = this.pos / 8;\n const byte = this.bytes[byteIndex] ?? 
0;\n this.pos += 8;\n return byte;\n }\n skipBits(n) {\n this.pos += n;\n }\n getBitsLeft() {\n return this.bytes.length * 8 - this.pos;\n }\n clone() {\n const clone = new Bitstream(this.bytes);\n clone.pos = this.pos;\n return clone;\n }\n}\nvar readExpGolomb = (bitstream) => {\n let leadingZeroBits = 0;\n while (bitstream.readBits(1) === 0 && leadingZeroBits < 32) {\n leadingZeroBits++;\n }\n if (leadingZeroBits >= 32) {\n throw new Error(\"Invalid exponential-Golomb code.\");\n }\n const result = (1 << leadingZeroBits) - 1 + bitstream.readBits(leadingZeroBits);\n return result;\n};\nvar readSignedExpGolomb = (bitstream) => {\n const codeNum = readExpGolomb(bitstream);\n return (codeNum & 1) === 0 ? -(codeNum >> 1) : codeNum + 1 >> 1;\n};\nvar toUint8Array = (source) => {\n if (source.constructor === Uint8Array) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new Uint8Array(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new Uint8Array(source);\n }\n};\nvar toDataView = (source) => {\n if (source.constructor === DataView) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new DataView(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new DataView(source);\n }\n};\nvar textEncoder = /* @__PURE__ */ new TextEncoder;\nvar COLOR_PRIMARIES_MAP = {\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n bt2020: 9,\n smpte432: 12\n};\nvar TRANSFER_CHARACTERISTICS_MAP = {\n bt709: 1,\n smpte170m: 6,\n linear: 8,\n \"iec61966-2-1\": 13,\n pq: 16,\n hlg: 18\n};\nvar MATRIX_COEFFICIENTS_MAP = {\n rgb: 0,\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n \"bt2020-ncl\": 9\n};\nvar colorSpaceIsComplete = (colorSpace) => {\n return !!colorSpace && !!colorSpace.primaries && !!colorSpace.transfer && !!colorSpace.matrix && colorSpace.fullRange !== undefined;\n};\nvar isAllowSharedBufferSource = (x) => {\n return x instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && x instanceof 
SharedArrayBuffer || ArrayBuffer.isView(x);\n};\n\nclass AsyncMutex {\n constructor() {\n this.currentPromise = Promise.resolve();\n this.pending = 0;\n }\n async acquire() {\n let resolver;\n const nextPromise = new Promise((resolve) => {\n let resolved = false;\n resolver = () => {\n if (resolved) {\n return;\n }\n resolve();\n this.pending--;\n resolved = true;\n };\n });\n const currentPromiseAlias = this.currentPromise;\n this.currentPromise = nextPromise;\n this.pending++;\n await currentPromiseAlias;\n return resolver;\n }\n}\nvar promiseWithResolvers = () => {\n let resolve;\n let reject;\n const promise = new Promise((res, rej) => {\n resolve = res;\n reject = rej;\n });\n return { promise, resolve, reject };\n};\nvar assertNever = (x) => {\n throw new Error(`Unexpected value: ${x}`);\n};\nvar setUint24 = (view, byteOffset, value, littleEndian) => {\n value = value >>> 0;\n value = value & 16777215;\n if (littleEndian) {\n view.setUint8(byteOffset, value & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value >>> 16 & 255);\n } else {\n view.setUint8(byteOffset, value >>> 16 & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value & 255);\n }\n};\nvar setInt24 = (view, byteOffset, value, littleEndian) => {\n value = clamp(value, -8388608, 8388607);\n if (value < 0) {\n value = value + 16777216 & 16777215;\n }\n setUint24(view, byteOffset, value, littleEndian);\n};\nvar clamp = (value, min, max) => {\n return Math.max(min, Math.min(max, value));\n};\nvar UNDETERMINED_LANGUAGE = \"und\";\nvar ISO_639_2_REGEX = /^[a-z]{3}$/;\nvar isIso639Dash2LanguageCode = (x) => {\n return ISO_639_2_REGEX.test(x);\n};\nvar SECOND_TO_MICROSECOND_FACTOR = 1e6 * (1 + Number.EPSILON);\nvar computeRationalApproximation = (x, maxDenominator) => {\n const sign = x < 0 ? 
-1 : 1;\n x = Math.abs(x);\n let prevNumerator = 0, prevDenominator = 1;\n let currNumerator = 1, currDenominator = 0;\n let remainder = x;\n while (true) {\n const integer = Math.floor(remainder);\n const nextNumerator = integer * currNumerator + prevNumerator;\n const nextDenominator = integer * currDenominator + prevDenominator;\n if (nextDenominator > maxDenominator) {\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n }\n prevNumerator = currNumerator;\n prevDenominator = currDenominator;\n currNumerator = nextNumerator;\n currDenominator = nextDenominator;\n remainder = 1 / (remainder - integer);\n if (!isFinite(remainder)) {\n break;\n }\n }\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n};\n\nclass CallSerializer {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n call(fn) {\n return this.currentPromise = this.currentPromise.then(fn);\n }\n}\nvar isWebKitCache = null;\nvar isWebKit = () => {\n if (isWebKitCache !== null) {\n return isWebKitCache;\n }\n return isWebKitCache = !!(typeof navigator !== \"undefined\" && (navigator.vendor?.match(/apple/i) || /AppleWebKit/.test(navigator.userAgent) && !/Chrome/.test(navigator.userAgent) || /\\b(iPad|iPhone|iPod)\\b/.test(navigator.userAgent)));\n};\nvar isFirefoxCache = null;\nvar isFirefox = () => {\n if (isFirefoxCache !== null) {\n return isFirefoxCache;\n }\n return isFirefoxCache = typeof navigator !== \"undefined\" && navigator.userAgent?.includes(\"Firefox\");\n};\nvar keyValueIterator = function* (object) {\n for (const key in object) {\n const value = object[key];\n if (value === undefined) {\n continue;\n }\n yield { key, value };\n }\n};\nvar polyfillSymbolDispose = () => {\n Symbol.dispose ??= Symbol(\"Symbol.dispose\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/metadata.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla 
Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass RichImageData {\n constructor(data, mimeType) {\n this.data = data;\n this.mimeType = mimeType;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType must be a string.\");\n }\n }\n}\n\nclass AttachedFile {\n constructor(data, mimeType, name, description) {\n this.data = data;\n this.mimeType = mimeType;\n this.name = name;\n this.description = description;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (mimeType !== undefined && typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType, when provided, must be a string.\");\n }\n if (name !== undefined && typeof name !== \"string\") {\n throw new TypeError(\"name, when provided, must be a string.\");\n }\n if (description !== undefined && typeof description !== \"string\") {\n throw new TypeError(\"description, when provided, must be a string.\");\n }\n }\n}\nvar validateMetadataTags = (tags) => {\n if (!tags || typeof tags !== \"object\") {\n throw new TypeError(\"tags must be an object.\");\n }\n if (tags.title !== undefined && typeof tags.title !== \"string\") {\n throw new TypeError(\"tags.title, when provided, must be a string.\");\n }\n if (tags.description !== undefined && typeof tags.description !== \"string\") {\n throw new TypeError(\"tags.description, when provided, must be a string.\");\n }\n if (tags.artist !== undefined && typeof tags.artist !== \"string\") {\n throw new TypeError(\"tags.artist, when provided, must be a string.\");\n }\n if (tags.album !== undefined && typeof tags.album !== \"string\") {\n throw new TypeError(\"tags.album, when provided, must be a string.\");\n }\n if (tags.albumArtist !== undefined && typeof tags.albumArtist !== \"string\") {\n throw 
new TypeError(\"tags.albumArtist, when provided, must be a string.\");\n }\n if (tags.trackNumber !== undefined && (!Number.isInteger(tags.trackNumber) || tags.trackNumber <= 0)) {\n throw new TypeError(\"tags.trackNumber, when provided, must be a positive integer.\");\n }\n if (tags.tracksTotal !== undefined && (!Number.isInteger(tags.tracksTotal) || tags.tracksTotal <= 0)) {\n throw new TypeError(\"tags.tracksTotal, when provided, must be a positive integer.\");\n }\n if (tags.discNumber !== undefined && (!Number.isInteger(tags.discNumber) || tags.discNumber <= 0)) {\n throw new TypeError(\"tags.discNumber, when provided, must be a positive integer.\");\n }\n if (tags.discsTotal !== undefined && (!Number.isInteger(tags.discsTotal) || tags.discsTotal <= 0)) {\n throw new TypeError(\"tags.discsTotal, when provided, must be a positive integer.\");\n }\n if (tags.genre !== undefined && typeof tags.genre !== \"string\") {\n throw new TypeError(\"tags.genre, when provided, must be a string.\");\n }\n if (tags.date !== undefined && (!(tags.date instanceof Date) || Number.isNaN(tags.date.getTime()))) {\n throw new TypeError(\"tags.date, when provided, must be a valid Date.\");\n }\n if (tags.lyrics !== undefined && typeof tags.lyrics !== \"string\") {\n throw new TypeError(\"tags.lyrics, when provided, must be a string.\");\n }\n if (tags.images !== undefined) {\n if (!Array.isArray(tags.images)) {\n throw new TypeError(\"tags.images, when provided, must be an array.\");\n }\n for (const image of tags.images) {\n if (!image || typeof image !== \"object\") {\n throw new TypeError(\"Each image in tags.images must be an object.\");\n }\n if (!(image.data instanceof Uint8Array)) {\n throw new TypeError(\"Each image.data must be a Uint8Array.\");\n }\n if (typeof image.mimeType !== \"string\") {\n throw new TypeError(\"Each image.mimeType must be a string.\");\n }\n if (![\"coverFront\", \"coverBack\", \"unknown\"].includes(image.kind)) {\n throw new TypeError(\"Each 
image.kind must be 'coverFront', 'coverBack', or 'unknown'.\");\n }\n }\n }\n if (tags.comment !== undefined && typeof tags.comment !== \"string\") {\n throw new TypeError(\"tags.comment, when provided, must be a string.\");\n }\n if (tags.raw !== undefined) {\n if (!tags.raw || typeof tags.raw !== \"object\") {\n throw new TypeError(\"tags.raw, when provided, must be an object.\");\n }\n for (const value of Object.values(tags.raw)) {\n if (value !== null && typeof value !== \"string\" && !(value instanceof Uint8Array) && !(value instanceof RichImageData) && !(value instanceof AttachedFile)) {\n throw new TypeError(\"Each value in tags.raw must be a string, Uint8Array, RichImageData, AttachedFile, or null.\");\n }\n }\n }\n};\nvar validateTrackDisposition = (disposition) => {\n if (!disposition || typeof disposition !== \"object\") {\n throw new TypeError(\"disposition must be an object.\");\n }\n if (disposition.default !== undefined && typeof disposition.default !== \"boolean\") {\n throw new TypeError(\"disposition.default must be a boolean.\");\n }\n if (disposition.forced !== undefined && typeof disposition.forced !== \"boolean\") {\n throw new TypeError(\"disposition.forced must be a boolean.\");\n }\n if (disposition.original !== undefined && typeof disposition.original !== \"boolean\") {\n throw new TypeError(\"disposition.original must be a boolean.\");\n }\n if (disposition.commentary !== undefined && typeof disposition.commentary !== \"boolean\") {\n throw new TypeError(\"disposition.commentary must be a boolean.\");\n }\n if (disposition.hearingImpaired !== undefined && typeof disposition.hearingImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.hearingImpaired must be a boolean.\");\n }\n if (disposition.visuallyImpaired !== undefined && typeof disposition.visuallyImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.visuallyImpaired must be a boolean.\");\n }\n};\n\n// 
../../node_modules/mediabunny/dist/modules/src/codec.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar VIDEO_CODECS = [\n \"avc\",\n \"hevc\",\n \"vp9\",\n \"av1\",\n \"vp8\"\n];\nvar PCM_AUDIO_CODECS = [\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n \"pcm-u8\",\n \"pcm-s8\",\n \"ulaw\",\n \"alaw\"\n];\nvar NON_PCM_AUDIO_CODECS = [\n \"aac\",\n \"opus\",\n \"mp3\",\n \"vorbis\",\n \"flac\"\n];\nvar AUDIO_CODECS = [\n ...NON_PCM_AUDIO_CODECS,\n ...PCM_AUDIO_CODECS\n];\nvar SUBTITLE_CODECS = [\n \"webvtt\"\n];\nvar AVC_LEVEL_TABLE = [\n { maxMacroblocks: 99, maxBitrate: 64000, maxDpbMbs: 396, level: 10 },\n { maxMacroblocks: 396, maxBitrate: 192000, maxDpbMbs: 900, level: 11 },\n { maxMacroblocks: 396, maxBitrate: 384000, maxDpbMbs: 2376, level: 12 },\n { maxMacroblocks: 396, maxBitrate: 768000, maxDpbMbs: 2376, level: 13 },\n { maxMacroblocks: 396, maxBitrate: 2000000, maxDpbMbs: 2376, level: 20 },\n { maxMacroblocks: 792, maxBitrate: 4000000, maxDpbMbs: 4752, level: 21 },\n { maxMacroblocks: 1620, maxBitrate: 4000000, maxDpbMbs: 8100, level: 22 },\n { maxMacroblocks: 1620, maxBitrate: 1e7, maxDpbMbs: 8100, level: 30 },\n { maxMacroblocks: 3600, maxBitrate: 14000000, maxDpbMbs: 18000, level: 31 },\n { maxMacroblocks: 5120, maxBitrate: 20000000, maxDpbMbs: 20480, level: 32 },\n { maxMacroblocks: 8192, maxBitrate: 20000000, maxDpbMbs: 32768, level: 40 },\n { maxMacroblocks: 8192, maxBitrate: 50000000, maxDpbMbs: 32768, level: 41 },\n { maxMacroblocks: 8704, maxBitrate: 50000000, maxDpbMbs: 34816, level: 42 },\n { maxMacroblocks: 22080, maxBitrate: 135000000, maxDpbMbs: 110400, level: 50 },\n { maxMacroblocks: 36864, 
maxBitrate: 240000000, maxDpbMbs: 184320, level: 51 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, maxDpbMbs: 184320, level: 52 },\n { maxMacroblocks: 139264, maxBitrate: 240000000, maxDpbMbs: 696320, level: 60 },\n { maxMacroblocks: 139264, maxBitrate: 480000000, maxDpbMbs: 696320, level: 61 },\n { maxMacroblocks: 139264, maxBitrate: 800000000, maxDpbMbs: 696320, level: 62 }\n];\nvar HEVC_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 128000, tier: \"L\", level: 30 },\n { maxPictureSize: 122880, maxBitrate: 1500000, tier: \"L\", level: 60 },\n { maxPictureSize: 245760, maxBitrate: 3000000, tier: \"L\", level: 63 },\n { maxPictureSize: 552960, maxBitrate: 6000000, tier: \"L\", level: 90 },\n { maxPictureSize: 983040, maxBitrate: 1e7, tier: \"L\", level: 93 },\n { maxPictureSize: 2228224, maxBitrate: 12000000, tier: \"L\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, tier: \"H\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 20000000, tier: \"L\", level: 123 },\n { maxPictureSize: 2228224, maxBitrate: 50000000, tier: \"H\", level: 123 },\n { maxPictureSize: 8912896, maxBitrate: 25000000, tier: \"L\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"L\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"L\", level: 156 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 156 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"L\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 120000000, tier: \"L\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"L\", level: 186 },\n { maxPictureSize: 35651584, 
maxBitrate: 800000000, tier: \"H\", level: 186 }\n];\nvar VP9_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 200000, level: 10 },\n { maxPictureSize: 73728, maxBitrate: 800000, level: 11 },\n { maxPictureSize: 122880, maxBitrate: 1800000, level: 20 },\n { maxPictureSize: 245760, maxBitrate: 3600000, level: 21 },\n { maxPictureSize: 552960, maxBitrate: 7200000, level: 30 },\n { maxPictureSize: 983040, maxBitrate: 12000000, level: 31 },\n { maxPictureSize: 2228224, maxBitrate: 18000000, level: 40 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, level: 41 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, level: 50 },\n { maxPictureSize: 8912896, maxBitrate: 120000000, level: 51 },\n { maxPictureSize: 8912896, maxBitrate: 180000000, level: 52 },\n { maxPictureSize: 35651584, maxBitrate: 180000000, level: 60 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, level: 61 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, level: 62 }\n];\nvar AV1_LEVEL_TABLE = [\n { maxPictureSize: 147456, maxBitrate: 1500000, tier: \"M\", level: 0 },\n { maxPictureSize: 278784, maxBitrate: 3000000, tier: \"M\", level: 1 },\n { maxPictureSize: 665856, maxBitrate: 6000000, tier: \"M\", level: 4 },\n { maxPictureSize: 1065024, maxBitrate: 1e7, tier: \"M\", level: 5 },\n { maxPictureSize: 2359296, maxBitrate: 12000000, tier: \"M\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 30000000, tier: \"H\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 20000000, tier: \"M\", level: 9 },\n { maxPictureSize: 2359296, maxBitrate: 50000000, tier: \"H\", level: 9 },\n { maxPictureSize: 8912896, maxBitrate: 30000000, tier: \"M\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"M\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"M\", level: 14 },\n { maxPictureSize: 
8912896, maxBitrate: 240000000, tier: \"H\", level: 14 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 1e8, tier: \"M\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 19 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 19 }\n];\nvar buildVideoCodecString = (codec, width, height, bitrate) => {\n if (codec === \"avc\") {\n const profileIndication = 100;\n const totalMacroblocks = Math.ceil(width / 16) * Math.ceil(height / 16);\n const levelInfo = AVC_LEVEL_TABLE.find((level) => totalMacroblocks <= level.maxMacroblocks && bitrate <= level.maxBitrate) ?? last(AVC_LEVEL_TABLE);\n const levelIndication = levelInfo ? levelInfo.level : 0;\n const hexProfileIndication = profileIndication.toString(16).padStart(2, \"0\");\n const hexProfileCompatibility = \"00\";\n const hexLevelIndication = levelIndication.toString(16).padStart(2, \"0\");\n return `avc1.${hexProfileIndication}${hexProfileCompatibility}${hexLevelIndication}`;\n } else if (codec === \"hevc\") {\n const profilePrefix = \"\";\n const profileIdc = 1;\n const compatibilityFlags = \"6\";\n const pictureSize = width * height;\n const levelInfo = HEVC_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? 
// Derives the first four bytes of an AV1CodecConfigurationRecord (av1C box)
// from an "av01.*" codec string. Optional trailing fields default to
// 4:2:0 chroma subsampling, co-located sample position, no monochrome.
var generateAv1CodecConfigurationFromCodecString = (codecString) => {
  const parts = codecString.split(".");
  // Byte 0: marker (always 1) in the top bit, version (1) below it.
  const firstByte = (1 << 7) + 1;
  const profile = Number(parts[1]);
  const levelAndTier = parts[2];
  const level = Number(levelAndTier.slice(0, -1));
  const secondByte = (profile << 5) + level;
  const tier = levelAndTier.slice(-1) === "H" ? 1 : 0;
  const bitDepth = Number(parts[3]);
  const highBitDepth = bitDepth === 8 ? 0 : 1;
  const twelveBit = 0;
  const monochrome = parts[4] ? Number(parts[4]) : 0;
  const chromaSubsamplingX = parts[5] ? Number(parts[5][0]) : 1;
  const chromaSubsamplingY = parts[5] ? Number(parts[5][1]) : 1;
  const chromaSamplePosition = parts[5] ? Number(parts[5][2]) : 0;
  const thirdByte = (tier << 7) + (highBitDepth << 6) + (twelveBit << 5) + (monochrome << 4) + (chromaSubsamplingX << 3) + (chromaSubsamplingY << 2) + chromaSamplePosition;
  const fourthByte = 0; // initial_presentation_delay_present = 0
  return [firstByte, secondByte, thirdByte, fourthByte];
};
// Maps an audio codec id to its RFC 6381 codec string. For AAC the MPEG-4
// audio object type depends on channel count and sample rate (HE-AAC v2 /
// HE-AAC v1 / AAC-LC). Throws TypeError for an unknown codec.
var buildAudioCodecString = (codec, numberOfChannels, sampleRate) => {
  switch (codec) {
    case "aac":
      if (numberOfChannels >= 2 && sampleRate <= 24000) {
        return "mp4a.40.29"; // HE-AAC v2 (AAC LC + SBR + PS)
      }
      if (sampleRate <= 24000) {
        return "mp4a.40.5"; // HE-AAC v1 (AAC LC + SBR)
      }
      return "mp4a.40.2"; // AAC-LC
    case "mp3":
      return "mp3";
    case "opus":
      return "opus";
    case "vorbis":
      return "vorbis";
    case "flac":
      return "flac";
  }
  if (PCM_AUDIO_CODECS.includes(codec)) {
    return codec; // PCM variants use the codec id itself as the string
  }
  throw new TypeError(`Unhandled codec '${codec}'.`);
};
// Sampling frequencies addressable by the 4-bit AAC frequency index.
var aacFrequencyTable = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];
// Channel count for each AAC channelConfiguration value (index 0 is invalid).
var aacChannelMap = [-1, 1, 2, 3, 4, 5, 6, 8];
// Parses an AudioSpecificConfig (ISO 14496-3) into its header fields.
// sampleRate / numberOfChannels are null when the value is not directly
// representable by the table-based encodings.
var parseAacAudioSpecificConfig = (bytes) => {
  if (!bytes || bytes.byteLength < 2) {
    throw new TypeError("AAC description must be at least 2 bytes long.");
  }
  const bitstream = new Bitstream(bytes);
  let objectType = bitstream.readBits(5);
  if (objectType === 31) {
    // Escape value: actual object type is 32 + the next 6 bits.
    objectType = 32 + bitstream.readBits(6);
  }
  const frequencyIndex = bitstream.readBits(4);
  let sampleRate = null;
  if (frequencyIndex === 15) {
    // Escape value: explicit 24-bit sampling frequency follows.
    sampleRate = bitstream.readBits(24);
  } else if (frequencyIndex < aacFrequencyTable.length) {
    sampleRate = aacFrequencyTable[frequencyIndex];
  }
  const channelConfiguration = bitstream.readBits(4);
  let numberOfChannels = null;
  if (channelConfiguration >= 1 && channelConfiguration <= 7) {
    numberOfChannels = aacChannelMap[channelConfiguration];
  }
  return {
    objectType,
    frequencyIndex,
    sampleRate,
    channelConfiguration,
    numberOfChannels
  };
};
// Serializes AAC header fields back into an AudioSpecificConfig byte array
// (inverse of parseAacAudioSpecificConfig). Throws TypeError when the channel
// count has no AAC channelConfiguration encoding.
var buildAacAudioSpecificConfig = (config) => {
  let frequencyIndex = aacFrequencyTable.indexOf(config.sampleRate);
  let customSampleRate = null;
  if (frequencyIndex === -1) {
    frequencyIndex = 15; // escape value: rate written explicitly as 24 bits
    customSampleRate = config.sampleRate;
  }
  const channelConfiguration = aacChannelMap.indexOf(config.numberOfChannels);
  if (channelConfiguration === -1) {
    throw new TypeError(`Unsupported number of channels: ${config.numberOfChannels}`);
  }
  // Total bit budget: objectType (5) + frequencyIndex (4) + channelConfig (4),
  // plus the escape extensions actually used.
  let bitCount = 5 + 4 + 4;
  if (config.objectType >= 32) {
    bitCount += 6;
  }
  if (frequencyIndex === 15) {
    bitCount += 24;
  }
  const byteCount = Math.ceil(bitCount / 8);
  const bytes = new Uint8Array(byteCount);
  const bitstream = new Bitstream(bytes);
  if (config.objectType < 32) {
    bitstream.writeBits(5, config.objectType);
  } else {
    bitstream.writeBits(5, 31); // escape marker
    bitstream.writeBits(6, config.objectType - 32);
  }
  bitstream.writeBits(4, frequencyIndex);
  if (frequencyIndex === 15) {
    bitstream.writeBits(24, customSampleRate);
  }
  bitstream.writeBits(4, channelConfiguration);
  return bytes;
};
// FIX: was /^pcm-([usf])(\d+)+(be)?$/ — the nested quantifier (\d+)+ is
// redundant (it accepts exactly the same strings with the same captures as
// (\d+)) and is the classic catastrophic-backtracking / ReDoS shape.
var PCM_CODEC_REGEX = /^pcm-([usf])(\d+)(be)?$/;
// Decodes a PCM codec id ("ulaw", "alaw", "pcm-s16be", ...) into its sample
// layout: data type, sample size in bytes, endianness, and the byte value that
// represents silence. Asserts the codec is a known PCM codec.
var parsePcmCodec = (codec) => {
  assert(PCM_AUDIO_CODECS.includes(codec));
  if (codec === "ulaw") {
    return { dataType: "ulaw", sampleSize: 1, littleEndian: true, silentValue: 255 };
  } else if (codec === "alaw") {
    return { dataType: "alaw", sampleSize: 1, littleEndian: true, silentValue: 213 };
  }
  const match = PCM_CODEC_REGEX.exec(codec);
  assert(match);
  let dataType;
  if (match[1] === "u") {
    dataType = "unsigned";
  } else if (match[1] === "s") {
    dataType = "signed";
  } else {
    dataType = "float";
  }
  const sampleSize = Number(match[2]) / 8; // bits -> bytes
  const littleEndian = match[3] !== "be";
  // Unsigned 8-bit PCM is biased: silence sits at mid-scale (128), not 0.
  const silentValue = codec === "pcm-u8" ? 2 ** 7 : 0;
  return { dataType, sampleSize, littleEndian, silentValue };
};
// Maps a full codec string back to the internal codec id (video first, then
// audio, then subtitles); returns null when unrecognized.
var inferCodecFromCodecString = (codecString) => {
  if (codecString.startsWith("avc1") || codecString.startsWith("avc3")) {
    return "avc";
  } else if (codecString.startsWith("hev1") || codecString.startsWith("hvc1")) {
    return "hevc";
  } else if (codecString === "vp8") {
    return "vp8";
  } else if (codecString.startsWith("vp09")) {
    return "vp9";
  } else if (codecString.startsWith("av01")) {
    return "av1";
  }
  if (codecString.startsWith("mp4a.40") || codecString === "mp4a.67") {
    return "aac";
  } else if (codecString === "mp3" || codecString === "mp4a.69" || codecString === "mp4a.6B" || codecString === "mp4a.6b") {
    return "mp3";
  } else if (codecString === "opus") {
    return "opus";
  } else if (codecString === "vorbis") {
    return "vorbis";
  } else if (codecString === "flac") {
    return "flac";
  } else if (codecString === "ulaw") {
    return "ulaw";
  } else if (codecString === "alaw") {
    return "alaw";
  } else if (PCM_CODEC_REGEX.test(codecString)) {
    return codecString; // PCM codec ids double as codec strings
  }
  if (codecString === "webvtt") {
    return "webvtt";
  }
  return null;
};
// Extra VideoEncoder config needed per codec: AVC/HEVC must be asked for
// length-prefixed ("avc"/"hevc") bitstream format rather than Annex B.
var getVideoEncoderConfigExtension = (codec) => {
  if (codec === "avc") {
    return {
      avc: {
        format: "avc"
      }
    };
  } else if (codec === "hevc") {
    return {
      hevc: {
        format: "hevc"
      }
    };
  }
  return {};
};
// Extra AudioEncoder config needed per codec (raw "aac"/"opus" packet format).
var getAudioEncoderConfigExtension = (codec) => {
  if (codec === "aac") {
    return {
      aac: {
        format: "aac"
      }
    };
  } else if (codec === "opus") {
    return {
      opus: {
        format: "opus"
      }
    };
  }
  return {};
};
var VALID_VIDEO_CODEC_STRING_PREFIXES = ["avc1", "avc3", "hev1", "hvc1", "vp8", "vp09", "av01"];
// Shape checks for full codec strings, per RFC 6381 / ISO 14496-15 / the
// WebM and AV1-ISOBMFF codecs-parameter specs.
var AVC_CODEC_STRING_REGEX = /^(avc1|avc3)\.[0-9a-fA-F]{6}$/;
var HEVC_CODEC_STRING_REGEX = /^(hev1|hvc1)\.(?:[ABC]?\d+)\.[0-9a-fA-F]{1,8}\.[LH]\d+(?:\.[0-9a-fA-F]{1,2}){0,6}$/;
var VP9_CODEC_STRING_REGEX = /^vp09(?:\.\d{2}){3}(?:(?:\.\d{2}){5})?$/;
var AV1_CODEC_STRING_REGEX = /^av01\.\d\.\d{2}[MH]\.\d{2}(?:\.\d\.\d{3}\.\d{2}\.\d{2}\.\d{2}\.\d)?$/;
// Validates EncodedVideoChunkMetadata before it is muxed: the decoder config
// must exist, carry a recognized codec string in the correct per-codec format,
// and have well-formed dimensions / description / colorSpace. Throws TypeError
// on the first violation, otherwise returns undefined.
var validateVideoChunkMetadata = (metadata) => {
  if (!metadata) {
    throw new TypeError("Video chunk metadata must be provided.");
  }
  if (typeof metadata !== "object") {
    throw new TypeError("Video chunk metadata must be an object.");
  }
  if (!metadata.decoderConfig) {
    throw new TypeError("Video chunk metadata must include a decoder configuration.");
  }
  if (typeof metadata.decoderConfig !== "object") {
    throw new TypeError("Video chunk metadata decoder configuration must be an object.");
  }
  const config = metadata.decoderConfig;
  if (typeof config.codec !== "string") {
    throw new TypeError("Video chunk metadata decoder configuration must specify a codec string.");
  }
  if (!VALID_VIDEO_CODEC_STRING_PREFIXES.some((prefix) => config.codec.startsWith(prefix))) {
    throw new TypeError("Video chunk metadata decoder configuration codec string must be a valid video codec string as specified in the WebCodecs Codec Registry.");
  }
  if (!Number.isInteger(config.codedWidth) || config.codedWidth <= 0) {
    throw new TypeError("Video chunk metadata decoder configuration must specify a valid codedWidth (positive integer).");
  }
  if (!Number.isInteger(config.codedHeight) || config.codedHeight <= 0) {
    throw new TypeError("Video chunk metadata decoder configuration must specify a valid codedHeight (positive integer).");
  }
  if (config.description !== undefined) {
    if (!isAllowSharedBufferSource(config.description)) {
      throw new TypeError("Video chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an ArrayBuffer view.");
    }
  }
  if (config.colorSpace !== undefined) {
    const { colorSpace } = config;
    if (typeof colorSpace !== "object") {
      throw new TypeError("Video chunk metadata decoder configuration colorSpace, when provided, must be an object.");
    }
    const primariesValues = Object.keys(COLOR_PRIMARIES_MAP);
    if (colorSpace.primaries != null && !primariesValues.includes(colorSpace.primaries)) {
      throw new TypeError(`Video chunk metadata decoder configuration colorSpace primaries, when defined, must be one of ${primariesValues.join(", ")}.`);
    }
    const transferValues = Object.keys(TRANSFER_CHARACTERISTICS_MAP);
    if (colorSpace.transfer != null && !transferValues.includes(colorSpace.transfer)) {
      throw new TypeError(`Video chunk metadata decoder configuration colorSpace transfer, when defined, must be one of ${transferValues.join(", ")}.`);
    }
    const matrixValues = Object.keys(MATRIX_COEFFICIENTS_MAP);
    if (colorSpace.matrix != null && !matrixValues.includes(colorSpace.matrix)) {
      throw new TypeError(`Video chunk metadata decoder configuration colorSpace matrix, when defined, must be one of ${matrixValues.join(", ")}.`);
    }
    if (colorSpace.fullRange != null && typeof colorSpace.fullRange !== "boolean") {
      throw new TypeError("Video chunk metadata decoder configuration colorSpace fullRange, when defined, must be a boolean.");
    }
  }
  // Per-codec codec-string shape checks.
  if (config.codec.startsWith("avc1") || config.codec.startsWith("avc3")) {
    if (!AVC_CODEC_STRING_REGEX.test(config.codec)) {
      throw new TypeError("Video chunk metadata decoder configuration codec string for AVC must be a valid AVC codec string as specified in Section 3.4 of RFC 6381.");
    }
  } else if (config.codec.startsWith("hev1") || config.codec.startsWith("hvc1")) {
    if (!HEVC_CODEC_STRING_REGEX.test(config.codec)) {
      throw new TypeError("Video chunk metadata decoder configuration codec string for HEVC must be a valid HEVC codec string as specified in Section E.3 of ISO 14496-15.");
    }
  } else if (config.codec.startsWith("vp8")) {
    if (config.codec !== "vp8") {
      throw new TypeError('Video chunk metadata decoder configuration codec string for VP8 must be "vp8".');
    }
  } else if (config.codec.startsWith("vp09")) {
    if (!VP9_CODEC_STRING_REGEX.test(config.codec)) {
      throw new TypeError('Video chunk metadata decoder configuration codec string for VP9 must be a valid VP9 codec string as specified in Section "Codecs Parameter String" of https://www.webmproject.org/vp9/mp4/.');
    }
  } else if (config.codec.startsWith("av01")) {
    if (!AV1_CODEC_STRING_REGEX.test(config.codec)) {
      throw new TypeError('Video chunk metadata decoder configuration codec string for AV1 must be a valid AV1 codec string as specified in Section "Codecs Parameter String" of https://aomediacodec.github.io/av1-isobmff/.');
    }
  }
};
var VALID_AUDIO_CODEC_STRING_PREFIXES = ["mp4a", "mp3", "opus", "vorbis", "flac", "ulaw", "alaw", "pcm"];
// Audio counterpart of validateVideoChunkMetadata: checks codec string,
// sampleRate, numberOfChannels, and the per-codec description requirements
// (Opus id header, Vorbis setup, FLAC streaminfo). Throws TypeError on the
// first violation.
var validateAudioChunkMetadata = (metadata) => {
  if (!metadata) {
    throw new TypeError("Audio chunk metadata must be provided.");
  }
  if (typeof metadata !== "object") {
    throw new TypeError("Audio chunk metadata must be an object.");
  }
  if (!metadata.decoderConfig) {
    throw new TypeError("Audio chunk metadata must include a decoder configuration.");
  }
  if (typeof metadata.decoderConfig !== "object") {
    throw new TypeError("Audio chunk metadata decoder configuration must be an object.");
  }
  const config = metadata.decoderConfig;
  if (typeof config.codec !== "string") {
    throw new TypeError("Audio chunk metadata decoder configuration must specify a codec string.");
  }
  if (!VALID_AUDIO_CODEC_STRING_PREFIXES.some((prefix) => config.codec.startsWith(prefix))) {
    throw new TypeError("Audio chunk metadata decoder configuration codec string must be a valid audio codec string as specified in the WebCodecs Codec Registry.");
  }
  if (!Number.isInteger(config.sampleRate) || config.sampleRate <= 0) {
    throw new TypeError("Audio chunk metadata decoder configuration must specify a valid sampleRate (positive integer).");
  }
  if (!Number.isInteger(config.numberOfChannels) || config.numberOfChannels <= 0) {
    throw new TypeError("Audio chunk metadata decoder configuration must specify a valid numberOfChannels (positive integer).");
  }
  if (config.description !== undefined) {
    if (!isAllowSharedBufferSource(config.description)) {
      throw new TypeError("Audio chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an ArrayBuffer view.");
    }
  }
  // "mp4a.*" is AAC unless it is one of the MP3-in-MP4 object types.
  if (config.codec.startsWith("mp4a") && config.codec !== "mp4a.69" && config.codec !== "mp4a.6B" && config.codec !== "mp4a.6b") {
    const validStrings = ["mp4a.40.2", "mp4a.40.02", "mp4a.40.5", "mp4a.40.05", "mp4a.40.29", "mp4a.67"];
    if (!validStrings.includes(config.codec)) {
      throw new TypeError("Audio chunk metadata decoder configuration codec string for AAC must be a valid AAC codec string as specified in https://www.w3.org/TR/webcodecs-aac-codec-registration/.");
    }
  } else if (config.codec.startsWith("mp3") || config.codec.startsWith("mp4a")) {
    if (config.codec !== "mp3" && config.codec !== "mp4a.69" && config.codec !== "mp4a.6B" && config.codec !== "mp4a.6b") {
      throw new TypeError('Audio chunk metadata decoder configuration codec string for MP3 must be "mp3", "mp4a.69" or "mp4a.6B".');
    }
  } else if (config.codec.startsWith("opus")) {
    if (config.codec !== "opus") {
      throw new TypeError('Audio chunk metadata decoder configuration codec string for Opus must be "opus".');
    }
    if (config.description && config.description.byteLength < 18) {
      throw new TypeError("Audio chunk metadata decoder configuration description, when specified, is expected to be an Identification Header as specified in Section 5.1 of RFC 7845.");
    }
  } else if (config.codec.startsWith("vorbis")) {
    if (config.codec !== "vorbis") {
      throw new TypeError('Audio chunk metadata decoder configuration codec string for Vorbis must be "vorbis".');
    }
    if (!config.description) {
      throw new TypeError("Audio chunk metadata decoder configuration for Vorbis must include a description, which is expected to adhere to the format described in https://www.w3.org/TR/webcodecs-vorbis-codec-registration/.");
    }
  } else if (config.codec.startsWith("flac")) {
    if (config.codec !== "flac") {
      throw new TypeError('Audio chunk metadata decoder configuration codec string for FLAC must be "flac".');
    }
    // "fLaC" signature + metadata block header + STREAMINFO block.
    const minDescriptionSize = 4 + 4 + 34;
    if (!config.description || config.description.byteLength < minDescriptionSize) {
      throw new TypeError("Audio chunk metadata decoder configuration for FLAC must include a description, which is expected to adhere to the format described in https://www.w3.org/TR/webcodecs-flac-codec-registration/.");
    }
  } else if (config.codec.startsWith("pcm") || config.codec.startsWith("ulaw") || config.codec.startsWith("alaw")) {
    if (!PCM_AUDIO_CODECS.includes(config.codec)) {
      throw new TypeError("Audio chunk metadata decoder configuration codec string for PCM must be one of the supported PCM" + ` codecs (${PCM_AUDIO_CODECS.join(", ")}).`);
    }
  }
};
// Validates subtitle track metadata: requires a config object with a string
// description. Throws TypeError on the first violation.
var validateSubtitleMetadata = (metadata) => {
  if (!metadata) {
    throw new TypeError("Subtitle metadata must be provided.");
  }
  if (typeof metadata !== "object") {
    throw new TypeError("Subtitle metadata must be an object.");
  }
  if (!metadata.config) {
    throw new TypeError("Subtitle metadata must include a config object.");
  }
  if (typeof metadata.config !== "object") {
    throw new TypeError("Subtitle metadata config must be an object.");
  }
  if (typeof metadata.config.description !== "string") {
    throw new TypeError("Subtitle metadata config description must be a string.");
  }
};

// ../../node_modules/mediabunny/dist/modules/src/codec-data.js
/*!
 * Copyright (c) 2026-present, Vanilagy and contributors
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/.
 */
// AVC (H.264) NAL unit types (ITU-T H.264 Table 7-1); forward and reverse
// mappings, in the TypeScript numeric-enum style.
var AvcNalUnitType;
(function (E) {
  E[E["NON_IDR_SLICE"] = 1] = "NON_IDR_SLICE";
  E[E["SLICE_DPA"] = 2] = "SLICE_DPA";
  E[E["SLICE_DPB"] = 3] = "SLICE_DPB";
  E[E["SLICE_DPC"] = 4] = "SLICE_DPC";
  E[E["IDR"] = 5] = "IDR";
  E[E["SEI"] = 6] = "SEI";
  E[E["SPS"] = 7] = "SPS";
  E[E["PPS"] = 8] = "PPS";
  E[E["AUD"] = 9] = "AUD";
  E[E["SPS_EXT"] = 13] = "SPS_EXT";
})(AvcNalUnitType || (AvcNalUnitType = {}));
// HEVC (H.265) NAL unit types (ITU-T H.265 Table 7-1), subset used here.
var HevcNalUnitType;
(function (E) {
  E[E["RASL_N"] = 8] = "RASL_N";
  E[E["RASL_R"] = 9] = "RASL_R";
  E[E["BLA_W_LP"] = 16] = "BLA_W_LP";
  E[E["RSV_IRAP_VCL23"] = 23] = "RSV_IRAP_VCL23";
  E[E["VPS_NUT"] = 32] = "VPS_NUT";
  E[E["SPS_NUT"] = 33] = "SPS_NUT";
  E[E["PPS_NUT"] = 34] = "PPS_NUT";
  E[E["AUD_NUT"] = 35] = "AUD_NUT";
  E[E["PREFIX_SEI_NUT"] = 39] = "PREFIX_SEI_NUT";
  E[E["SUFFIX_SEI_NUT"] = 40] = "SUFFIX_SEI_NUT";
})(HevcNalUnitType || (HevcNalUnitType = {}));
// Walks an Annex B byte stream and yields { offset, length } for the payload
// of each NAL unit (start codes excluded). Accepts both 3-byte (00 00 01) and
// 4-byte (00 00 00 01) start codes.
var iterateNalUnitsInAnnexB = function* (packetData) {
  let i = 0;
  let nalStart = -1; // payload offset of the NAL currently being scanned
  while (i < packetData.length - 2) {
    const zeroIndex = packetData.indexOf(0, i);
    if (zeroIndex === -1 || zeroIndex >= packetData.length - 2) {
      break;
    }
    i = zeroIndex;
    let startCodeLength = 0;
    if (i + 3 < packetData.length && packetData[i + 1] === 0 && packetData[i + 2] === 0 && packetData[i + 3] === 1) {
      startCodeLength = 4;
    } else if (packetData[i + 1] === 0 && packetData[i + 2] === 1) {
      startCodeLength = 3;
    }
    if (startCodeLength === 0) {
      // A stray zero that is not part of a start code — keep scanning.
      i++;
      continue;
    }
    if (nalStart !== -1 && i > nalStart) {
      // The new start code terminates the previous NAL unit.
      yield { offset: nalStart, length: i - nalStart };
    }
    nalStart = i + startCodeLength;
    i = nalStart;
  }
  if (nalStart !== -1 && nalStart < packetData.length) {
    // Final NAL unit runs to the end of the packet.
    yield { offset: nalStart, length: packetData.length - nalStart };
  }
};
// The AVC nal_unit_type lives in the low 5 bits of the first payload byte.
var extractNalUnitTypeForAvc = (byte) => byte & 31;
// Strips emulation-prevention bytes: every 00 00 03 sequence becomes 00 00.
var removeEmulationPreventionBytes = (data) => {
  const out = [];
  const len = data.length;
  for (let i = 0; i < len; i++) {
    if (i + 2 < len && data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 3) {
      out.push(0, 0);
      i += 2; // skip past the 03 byte
    } else {
      out.push(data[i]);
    }
  }
  return new Uint8Array(out);
};
var ANNEX_B_START_CODE = new Uint8Array([0, 0, 0, 1]);
// Joins NAL units into a single length-prefixed buffer: each unit is preceded
// by its byte length written big-endian in `lengthSize` bytes (1, 2, 3 or 4).
var concatNalUnitsInLengthPrefixed = (nalUnits, lengthSize) => {
  const totalLength = nalUnits.reduce((sum, unit) => sum + lengthSize + unit.byteLength, 0);
  const result = new Uint8Array(totalLength);
  // FIX: the DataView was previously constructed inside the loop even though
  // it always wraps the same buffer — hoisted out as loop-invariant.
  const dataView = new DataView(result.buffer, result.byteOffset, result.byteLength);
  let offset = 0;
  for (const nalUnit of nalUnits) {
    switch (lengthSize) {
      case 1:
        dataView.setUint8(offset, nalUnit.byteLength);
        break;
      case 2:
        dataView.setUint16(offset, nalUnit.byteLength, false);
        break;
      case 3:
        setUint24(dataView, offset, nalUnit.byteLength, false);
        break;
      case 4:
        dataView.setUint32(offset, nalUnit.byteLength, false);
        break;
    }
    offset += lengthSize;
    result.set(nalUnit, offset);
    offset += nalUnit.byteLength;
  }
  return result;
};
// Builds an AVCDecoderConfigurationRecord (ISO 14496-15) from an Annex B
// packet by collecting its SPS/PPS/SPS-ext NAL units and parsing the first
// SPS. Returns null when no SPS or no PPS is found, or on any parse error.
var extractAvcDecoderConfigurationRecord = (packetData) => {
  try {
    const spsUnits = [];
    const ppsUnits = [];
    const spsExtUnits = [];
    for (const loc of iterateNalUnitsInAnnexB(packetData)) {
      const nalUnit = packetData.subarray(loc.offset, loc.offset + loc.length);
      const type = extractNalUnitTypeForAvc(nalUnit[0]);
      if (type === AvcNalUnitType.SPS) {
        spsUnits.push(nalUnit);
      } else if (type === AvcNalUnitType.PPS) {
        ppsUnits.push(nalUnit);
      } else if (type === AvcNalUnitType.SPS_EXT) {
        spsExtUnits.push(nalUnit);
      }
    }
    // Both parameter-set kinds are mandatory for a usable record.
    if (spsUnits.length === 0 || ppsUnits.length === 0) {
      return null;
    }
    const spsInfo = parseAvcSps(spsUnits[0]);
    assert(spsInfo !== null);
    // These profiles carry the extended chroma/bit-depth fields in the record.
    const hasExtendedData = spsInfo.profileIdc === 100 || spsInfo.profileIdc === 110 || spsInfo.profileIdc === 122 || spsInfo.profileIdc === 144;
    return {
      configurationVersion: 1,
      avcProfileIndication: spsInfo.profileIdc,
      profileCompatibility: spsInfo.constraintFlags,
      avcLevelIndication: spsInfo.levelIdc,
      lengthSizeMinusOne: 3, // 4-byte NAL length prefixes
      sequenceParameterSets: spsUnits,
      pictureParameterSets: ppsUnits,
      chromaFormat: hasExtendedData ? spsInfo.chromaFormatIdc : null,
      bitDepthLumaMinus8: hasExtendedData ? spsInfo.bitDepthLumaMinus8 : null,
      bitDepthChromaMinus8: hasExtendedData ? spsInfo.bitDepthChromaMinus8 : null,
      sequenceParameterSetExt: hasExtendedData ? spsExtUnits : null
    };
  } catch (error) {
    console.error("Error building AVC Decoder Configuration Record:", error);
    return null;
  }
};
// Serializes an AVCDecoderConfigurationRecord to its binary form
// (ISO 14496-15 Section 5.3.3.1). Reserved bits are set as the spec requires
// (0xFC / 0xE0 / 0xF8 fills). The extended block is emitted only for the
// high profiles, and asserts its fields are populated.
var serializeAvcDecoderConfigurationRecord = (record) => {
  const bytes = [];
  bytes.push(record.configurationVersion);
  bytes.push(record.avcProfileIndication);
  bytes.push(record.profileCompatibility);
  bytes.push(record.avcLevelIndication);
  bytes.push(252 | record.lengthSizeMinusOne & 3); // reserved(6) + lengthSizeMinusOne(2)
  bytes.push(224 | record.sequenceParameterSets.length & 31); // reserved(3) + numOfSPS(5)
  for (const sps of record.sequenceParameterSets) {
    const length = sps.byteLength;
    bytes.push(length >> 8);
    bytes.push(length & 255);
    for (let i = 0; i < length; i++) {
      bytes.push(sps[i]);
    }
  }
  bytes.push(record.pictureParameterSets.length);
  for (const pps of record.pictureParameterSets) {
    const length = pps.byteLength;
    bytes.push(length >> 8);
    bytes.push(length & 255);
    for (let i = 0; i < length; i++) {
      bytes.push(pps[i]);
    }
  }
  if (record.avcProfileIndication === 100 || record.avcProfileIndication === 110 || record.avcProfileIndication === 122 || record.avcProfileIndication === 144) {
    assert(record.chromaFormat !== null);
    assert(record.bitDepthLumaMinus8 !== null);
    assert(record.bitDepthChromaMinus8 !== null);
    assert(record.sequenceParameterSetExt !== null);
    bytes.push(252 | record.chromaFormat & 3);
    bytes.push(248 | record.bitDepthLumaMinus8 & 7);
    bytes.push(248 | record.bitDepthChromaMinus8 & 7);
    bytes.push(record.sequenceParameterSetExt.length);
    for (const spsExt of record.sequenceParameterSetExt) {
      const length = spsExt.byteLength;
      bytes.push(length >> 8);
      bytes.push(length & 255);
      for (let i = 0; i < length; i++) {
        bytes.push(spsExt[i]);
      }
    }
  }
  return new Uint8Array(bytes);
};
// Parses an AVC (H.264) Sequence Parameter Set (ITU-T H.264 Section 7.3.2.1.1)
// after stripping emulation-prevention bytes. Returns the profile/level,
// chroma/bit-depth, coded and cropped display dimensions, VUI color info, and
// the DPB reordering parameters; returns null when the NAL is not an SPS or
// on any parse error.
var parseAvcSps = (sps) => {
  try {
    const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));
    bitstream.skipBits(1); // forbidden_zero_bit
    bitstream.skipBits(2); // nal_ref_idc
    const nalUnitType = bitstream.readBits(5);
    if (nalUnitType !== 7) {
      return null; // not an SPS NAL
    }
    const profileIdc = bitstream.readAlignedByte();
    const constraintFlags = bitstream.readAlignedByte();
    const levelIdc = bitstream.readAlignedByte();
    readExpGolomb(bitstream); // seq_parameter_set_id
    let chromaFormatIdc = 1; // defaults when the high-profile fields are absent
    let bitDepthLumaMinus8 = 0;
    let bitDepthChromaMinus8 = 0;
    let separateColourPlaneFlag = 0;
    if (profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244 || profileIdc === 44 || profileIdc === 83 || profileIdc === 86 || profileIdc === 118 || profileIdc === 128) {
      chromaFormatIdc = readExpGolomb(bitstream);
      if (chromaFormatIdc === 3) {
        separateColourPlaneFlag = bitstream.readBits(1);
      }
      bitDepthLumaMinus8 = readExpGolomb(bitstream);
      bitDepthChromaMinus8 = readExpGolomb(bitstream);
      bitstream.skipBits(1); // qpprime_y_zero_transform_bypass_flag
      const seqScalingMatrixPresentFlag = bitstream.readBits(1);
      if (seqScalingMatrixPresentFlag) {
        // Scaling lists are skipped over, but must be consumed bit-exactly.
        for (let i = 0; i < (chromaFormatIdc !== 3 ? 8 : 12); i++) {
          const seqScalingListPresentFlag = bitstream.readBits(1);
          if (seqScalingListPresentFlag) {
            const sizeOfScalingList = i < 6 ? 16 : 64;
            let lastScale = 8;
            let nextScale = 8;
            for (let j = 0; j < sizeOfScalingList; j++) {
              if (nextScale !== 0) {
                const deltaScale = readSignedExpGolomb(bitstream);
                nextScale = (lastScale + deltaScale + 256) % 256;
              }
              lastScale = nextScale === 0 ? lastScale : nextScale;
            }
          }
        }
      }
    }
    readExpGolomb(bitstream); // log2_max_frame_num_minus4
    const picOrderCntType = readExpGolomb(bitstream);
    if (picOrderCntType === 0) {
      readExpGolomb(bitstream); // log2_max_pic_order_cnt_lsb_minus4
    } else if (picOrderCntType === 1) {
      bitstream.skipBits(1); // delta_pic_order_always_zero_flag
      readSignedExpGolomb(bitstream); // offset_for_non_ref_pic
      readSignedExpGolomb(bitstream); // offset_for_top_to_bottom_field
      const numRefFramesInPicOrderCntCycle = readExpGolomb(bitstream);
      for (let i = 0; i < numRefFramesInPicOrderCntCycle; i++) {
        readSignedExpGolomb(bitstream); // offset_for_ref_frame[i]
      }
    }
    readExpGolomb(bitstream); // max_num_ref_frames
    bitstream.skipBits(1); // gaps_in_frame_num_value_allowed_flag
    const picWidthInMbsMinus1 = readExpGolomb(bitstream);
    const picHeightInMapUnitsMinus1 = readExpGolomb(bitstream);
    const codedWidth = 16 * (picWidthInMbsMinus1 + 1);
    // NOTE(review): for interlaced streams (frame_mbs_only_flag == 0) the spec
    // multiplies frame height by (2 - frame_mbs_only_flag); this code does
    // not — presumably only progressive input is expected. Confirm upstream.
    const codedHeight = 16 * (picHeightInMapUnitsMinus1 + 1);
    let displayWidth = codedWidth;
    let displayHeight = codedHeight;
    const frameMbsOnlyFlag = bitstream.readBits(1);
    if (!frameMbsOnlyFlag) {
      bitstream.skipBits(1); // mb_adaptive_frame_field_flag
    }
    bitstream.skipBits(1); // direct_8x8_inference_flag
    const frameCroppingFlag = bitstream.readBits(1);
    if (frameCroppingFlag) {
      const frameCropLeftOffset = readExpGolomb(bitstream);
      const frameCropRightOffset = readExpGolomb(bitstream);
      const frameCropTopOffset = readExpGolomb(bitstream);
      const frameCropBottomOffset = readExpGolomb(bitstream);
      // Crop offsets are expressed in chroma sample units (Table 6-1).
      let cropUnitX;
      let cropUnitY;
      const chromaArrayType = separateColourPlaneFlag === 0 ? chromaFormatIdc : 0;
      if (chromaArrayType === 0) {
        cropUnitX = 1;
        cropUnitY = 2 - frameMbsOnlyFlag;
      } else {
        const subWidthC = chromaFormatIdc === 3 ? 1 : 2;
        const subHeightC = chromaFormatIdc === 1 ? 2 : 1;
        cropUnitX = subWidthC;
        cropUnitY = subHeightC * (2 - frameMbsOnlyFlag);
      }
      displayWidth -= cropUnitX * (frameCropLeftOffset + frameCropRightOffset);
      displayHeight -= cropUnitY * (frameCropTopOffset + frameCropBottomOffset);
    }
    // VUI defaults: unspecified color description, limited range.
    let colourPrimaries = 2;
    let transferCharacteristics = 2;
    let matrixCoefficients = 2;
    let fullRangeFlag = 0;
    let numReorderFrames = null;
    let maxDecFrameBuffering = null;
    const vuiParametersPresentFlag = bitstream.readBits(1);
    if (vuiParametersPresentFlag) {
      const aspectRatioInfoPresentFlag = bitstream.readBits(1);
      if (aspectRatioInfoPresentFlag) {
        const aspectRatioIdc = bitstream.readBits(8);
        if (aspectRatioIdc === 255) {
          bitstream.skipBits(16); // sar_width
          bitstream.skipBits(16); // sar_height
        }
      }
      const overscanInfoPresentFlag = bitstream.readBits(1);
      if (overscanInfoPresentFlag) {
        bitstream.skipBits(1); // overscan_appropriate_flag
      }
      const videoSignalTypePresentFlag = bitstream.readBits(1);
      if (videoSignalTypePresentFlag) {
        bitstream.skipBits(3); // video_format
        fullRangeFlag = bitstream.readBits(1);
        const colourDescriptionPresentFlag = bitstream.readBits(1);
        if (colourDescriptionPresentFlag) {
          colourPrimaries = bitstream.readBits(8);
          transferCharacteristics = bitstream.readBits(8);
          matrixCoefficients = bitstream.readBits(8);
        }
      }
      const chromaLocInfoPresentFlag = bitstream.readBits(1);
      if (chromaLocInfoPresentFlag) {
        readExpGolomb(bitstream); // chroma_sample_loc_type_top_field
        readExpGolomb(bitstream); // chroma_sample_loc_type_bottom_field
      }
      const timingInfoPresentFlag = bitstream.readBits(1);
      if (timingInfoPresentFlag) {
        bitstream.skipBits(32); // num_units_in_tick
        bitstream.skipBits(32); // time_scale
        bitstream.skipBits(1); // fixed_frame_rate_flag
      }
      const nalHrdParametersPresentFlag = bitstream.readBits(1);
      if (nalHrdParametersPresentFlag) {
        skipAvcHrdParameters(bitstream);
      }
      const vclHrdParametersPresentFlag = bitstream.readBits(1);
      if (vclHrdParametersPresentFlag) {
        skipAvcHrdParameters(bitstream);
      }
      if (nalHrdParametersPresentFlag || vclHrdParametersPresentFlag) {
        bitstream.skipBits(1); // low_delay_hrd_flag
      }
      bitstream.skipBits(1); // pic_struct_present_flag
      const bitstreamRestrictionFlag = bitstream.readBits(1);
      if (bitstreamRestrictionFlag) {
        bitstream.skipBits(1); // motion_vectors_over_pic_boundaries_flag
        readExpGolomb(bitstream); // max_bytes_per_pic_denom
        readExpGolomb(bitstream); // max_bits_per_mb_denom
        readExpGolomb(bitstream); // log2_max_mv_length_horizontal
        readExpGolomb(bitstream); // log2_max_mv_length_vertical
        numReorderFrames = readExpGolomb(bitstream);
        maxDecFrameBuffering = readExpGolomb(bitstream);
      }
    }
    if (numReorderFrames === null) {
      assert(maxDecFrameBuffering === null);
      // Not signaled in VUI — derive per spec Annex A: constrained high
      // profiles get 0, otherwise use the level's DPB capacity.
      const constraintSet3Flag = constraintFlags & 16;
      if ((profileIdc === 44 || profileIdc === 86 || profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244) && constraintSet3Flag) {
        numReorderFrames = 0;
        maxDecFrameBuffering = 0;
      } else {
        const picWidthInMbs = picWidthInMbsMinus1 + 1;
        const picHeightInMapUnits = picHeightInMapUnitsMinus1 + 1;
        const frameHeightInMbs = (2 - frameMbsOnlyFlag) * picHeightInMapUnits;
        const levelInfo = AVC_LEVEL_TABLE.find((x) => x.level >= levelIdc) ?? last(AVC_LEVEL_TABLE);
        const maxDpbFrames = Math.min(Math.floor(levelInfo.maxDpbMbs / (picWidthInMbs * frameHeightInMbs)), 16);
        numReorderFrames = maxDpbFrames;
        maxDecFrameBuffering = maxDpbFrames;
      }
    }
    assert(maxDecFrameBuffering !== null);
    return {
      profileIdc,
      constraintFlags,
      levelIdc,
      frameMbsOnlyFlag,
      chromaFormatIdc,
      bitDepthLumaMinus8,
      bitDepthChromaMinus8,
      codedWidth,
      codedHeight,
      displayWidth,
      displayHeight,
      colourPrimaries,
      matrixCoefficients,
      transferCharacteristics,
      fullRangeFlag,
      numReorderFrames,
      maxDecFrameBuffering
    };
  } catch (error) {
    console.error("Error parsing AVC SPS:", error);
    return null;
  }
};
// Consumes (without interpreting) an hrd_parameters() structure
// (ITU-T H.264 Section E.1.2) so the bitstream stays aligned.
var skipAvcHrdParameters = (bitstream) => {
  const cpb_cnt_minus1 = readExpGolomb(bitstream);
  bitstream.skipBits(4); // bit_rate_scale
  bitstream.skipBits(4); // cpb_size_scale
  for (let i = 0; i <= cpb_cnt_minus1; i++) {
    readExpGolomb(bitstream); // bit_rate_value_minus1[i]
    readExpGolomb(bitstream); // cpb_size_value_minus1[i]
    bitstream.skipBits(1); // cbr_flag[i]
  }
  bitstream.skipBits(5); // initial_cpb_removal_delay_length_minus1
  bitstream.skipBits(5); // cpb_removal_delay_length_minus1
  bitstream.skipBits(5); // dpb_output_delay_length_minus1
  bitstream.skipBits(5); // time_offset_length
};
// The HEVC nal_unit_type lives in bits 1-6 of the first header byte.
var extractNalUnitTypeForHevc = (byte) => (byte >> 1) & 63;
0 : spsMaxSubLayersMinus1;\n let spsMaxNumReorderPics = 0;\n for (let i = startI;i <= spsMaxSubLayersMinus1; i++) {\n readExpGolomb(bitstream);\n spsMaxNumReorderPics = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n if (bitstream.readBits(1)) {\n skipScalingListData(bitstream);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n if (bitstream.readBits(1)) {\n bitstream.skipBits(4);\n bitstream.skipBits(4);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n const numShortTermRefPicSets = readExpGolomb(bitstream);\n skipAllStRefPicSets(bitstream, numShortTermRefPicSets);\n if (bitstream.readBits(1)) {\n const numLongTermRefPicsSps = readExpGolomb(bitstream);\n for (let i = 0;i < numLongTermRefPicsSps; i++) {\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n let colourPrimaries = 2;\n let transferCharacteristics = 2;\n let matrixCoefficients = 2;\n let fullRangeFlag = 0;\n let minSpatialSegmentationIdc = 0;\n if (bitstream.readBits(1)) {\n const vui = parseHevcVui(bitstream, spsMaxSubLayersMinus1);\n colourPrimaries = vui.colourPrimaries;\n transferCharacteristics = vui.transferCharacteristics;\n matrixCoefficients = vui.matrixCoefficients;\n fullRangeFlag = vui.fullRangeFlag;\n minSpatialSegmentationIdc = vui.minSpatialSegmentationIdc;\n }\n return {\n displayWidth,\n displayHeight,\n colourPrimaries,\n transferCharacteristics,\n matrixCoefficients,\n fullRangeFlag,\n maxDecFrameBuffering: spsMaxNumReorderPics + 1,\n spsMaxSubLayersMinus1,\n spsTemporalIdNestingFlag,\n generalProfileSpace: general_profile_space,\n generalTierFlag: general_tier_flag,\n generalProfileIdc: general_profile_idc,\n generalProfileCompatibilityFlags: 
general_profile_compatibility_flags,\n generalConstraintIndicatorFlags: general_constraint_indicator_flags,\n generalLevelIdc: general_level_idc,\n chromaFormatIdc,\n bitDepthLumaMinus8,\n bitDepthChromaMinus8,\n minSpatialSegmentationIdc\n };\n } catch (error) {\n console.error(\"Error parsing HEVC SPS:\", error);\n return null;\n }\n};\nvar extractHevcDecoderConfigurationRecord = (packetData) => {\n try {\n const vpsUnits = [];\n const spsUnits = [];\n const ppsUnits = [];\n const seiUnits = [];\n for (const loc of iterateNalUnitsInAnnexB(packetData)) {\n const nalUnit = packetData.subarray(loc.offset, loc.offset + loc.length);\n const type = extractNalUnitTypeForHevc(nalUnit[0]);\n if (type === HevcNalUnitType.VPS_NUT) {\n vpsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.SPS_NUT) {\n spsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.PPS_NUT) {\n ppsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.PREFIX_SEI_NUT || type === HevcNalUnitType.SUFFIX_SEI_NUT) {\n seiUnits.push(nalUnit);\n }\n }\n if (spsUnits.length === 0 || ppsUnits.length === 0)\n return null;\n const spsInfo = parseHevcSps(spsUnits[0]);\n if (!spsInfo)\n return null;\n let parallelismType = 0;\n if (ppsUnits.length > 0) {\n const pps = ppsUnits[0];\n const ppsBitstream = new Bitstream(removeEmulationPreventionBytes(pps));\n ppsBitstream.skipBits(16);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(3);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n if (ppsBitstream.readBits(1)) {\n readExpGolomb(ppsBitstream);\n }\n readSignedExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n const 
tiles_enabled_flag = ppsBitstream.readBits(1);\n const entropy_coding_sync_enabled_flag = ppsBitstream.readBits(1);\n if (!tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 0;\n else if (tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 2;\n else if (!tiles_enabled_flag && entropy_coding_sync_enabled_flag)\n parallelismType = 3;\n else\n parallelismType = 0;\n }\n const arrays = [\n ...vpsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.VPS_NUT,\n nalUnits: vpsUnits\n }\n ] : [],\n ...spsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.SPS_NUT,\n nalUnits: spsUnits\n }\n ] : [],\n ...ppsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.PPS_NUT,\n nalUnits: ppsUnits\n }\n ] : [],\n ...seiUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: extractNalUnitTypeForHevc(seiUnits[0][0]),\n nalUnits: seiUnits\n }\n ] : []\n ];\n const record = {\n configurationVersion: 1,\n generalProfileSpace: spsInfo.generalProfileSpace,\n generalTierFlag: spsInfo.generalTierFlag,\n generalProfileIdc: spsInfo.generalProfileIdc,\n generalProfileCompatibilityFlags: spsInfo.generalProfileCompatibilityFlags,\n generalConstraintIndicatorFlags: spsInfo.generalConstraintIndicatorFlags,\n generalLevelIdc: spsInfo.generalLevelIdc,\n minSpatialSegmentationIdc: spsInfo.minSpatialSegmentationIdc,\n parallelismType,\n chromaFormatIdc: spsInfo.chromaFormatIdc,\n bitDepthLumaMinus8: spsInfo.bitDepthLumaMinus8,\n bitDepthChromaMinus8: spsInfo.bitDepthChromaMinus8,\n avgFrameRate: 0,\n constantFrameRate: 0,\n numTemporalLayers: spsInfo.spsMaxSubLayersMinus1 + 1,\n temporalIdNested: spsInfo.spsTemporalIdNestingFlag,\n lengthSizeMinusOne: 3,\n arrays\n };\n return record;\n } catch (error) {\n console.error(\"Error building HEVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar parseProfileTierLevel = (bitstream, maxNumSubLayersMinus1) => {\n const 
general_profile_space = bitstream.readBits(2);\n const general_tier_flag = bitstream.readBits(1);\n const general_profile_idc = bitstream.readBits(5);\n let general_profile_compatibility_flags = 0;\n for (let i = 0;i < 32; i++) {\n general_profile_compatibility_flags = general_profile_compatibility_flags << 1 | bitstream.readBits(1);\n }\n const general_constraint_indicator_flags = new Uint8Array(6);\n for (let i = 0;i < 6; i++) {\n general_constraint_indicator_flags[i] = bitstream.readBits(8);\n }\n const general_level_idc = bitstream.readBits(8);\n const sub_layer_profile_present_flag = [];\n const sub_layer_level_present_flag = [];\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n sub_layer_profile_present_flag.push(bitstream.readBits(1));\n sub_layer_level_present_flag.push(bitstream.readBits(1));\n }\n if (maxNumSubLayersMinus1 > 0) {\n for (let i = maxNumSubLayersMinus1;i < 8; i++) {\n bitstream.skipBits(2);\n }\n }\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n if (sub_layer_profile_present_flag[i])\n bitstream.skipBits(88);\n if (sub_layer_level_present_flag[i])\n bitstream.skipBits(8);\n }\n return {\n general_profile_space,\n general_tier_flag,\n general_profile_idc,\n general_profile_compatibility_flags,\n general_constraint_indicator_flags,\n general_level_idc\n };\n};\nvar skipScalingListData = (bitstream) => {\n for (let sizeId = 0;sizeId < 4; sizeId++) {\n for (let matrixId = 0;matrixId < (sizeId === 3 ? 
2 : 6); matrixId++) {\n const scaling_list_pred_mode_flag = bitstream.readBits(1);\n if (!scaling_list_pred_mode_flag) {\n readExpGolomb(bitstream);\n } else {\n const coefNum = Math.min(64, 1 << 4 + (sizeId << 1));\n if (sizeId > 1) {\n readSignedExpGolomb(bitstream);\n }\n for (let i = 0;i < coefNum; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n }\n }\n};\nvar skipAllStRefPicSets = (bitstream, num_short_term_ref_pic_sets) => {\n const NumDeltaPocs = [];\n for (let stRpsIdx = 0;stRpsIdx < num_short_term_ref_pic_sets; stRpsIdx++) {\n NumDeltaPocs[stRpsIdx] = skipStRefPicSet(bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs);\n }\n};\nvar skipStRefPicSet = (bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs) => {\n let NumDeltaPocsThis = 0;\n let inter_ref_pic_set_prediction_flag = 0;\n let RefRpsIdx = 0;\n if (stRpsIdx !== 0) {\n inter_ref_pic_set_prediction_flag = bitstream.readBits(1);\n }\n if (inter_ref_pic_set_prediction_flag) {\n if (stRpsIdx === num_short_term_ref_pic_sets) {\n const delta_idx_minus1 = readExpGolomb(bitstream);\n RefRpsIdx = stRpsIdx - (delta_idx_minus1 + 1);\n } else {\n RefRpsIdx = stRpsIdx - 1;\n }\n bitstream.readBits(1);\n readExpGolomb(bitstream);\n const numDelta = NumDeltaPocs[RefRpsIdx] ?? 
0;\n for (let j = 0;j <= numDelta; j++) {\n const used_by_curr_pic_flag = bitstream.readBits(1);\n if (!used_by_curr_pic_flag) {\n bitstream.readBits(1);\n }\n }\n NumDeltaPocsThis = NumDeltaPocs[RefRpsIdx];\n } else {\n const num_negative_pics = readExpGolomb(bitstream);\n const num_positive_pics = readExpGolomb(bitstream);\n for (let i = 0;i < num_negative_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n for (let i = 0;i < num_positive_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n NumDeltaPocsThis = num_negative_pics + num_positive_pics;\n }\n return NumDeltaPocsThis;\n};\nvar parseHevcVui = (bitstream, sps_max_sub_layers_minus1) => {\n let colourPrimaries = 2;\n let transferCharacteristics = 2;\n let matrixCoefficients = 2;\n let fullRangeFlag = 0;\n let minSpatialSegmentationIdc = 0;\n if (bitstream.readBits(1)) {\n const aspect_ratio_idc = bitstream.readBits(8);\n if (aspect_ratio_idc === 255) {\n bitstream.readBits(16);\n bitstream.readBits(16);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(3);\n fullRangeFlag = bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n colourPrimaries = bitstream.readBits(8);\n transferCharacteristics = bitstream.readBits(8);\n matrixCoefficients = bitstream.readBits(8);\n }\n }\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(32);\n bitstream.readBits(32);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n skipHevcHrdParameters(bitstream, true, sps_max_sub_layers_minus1);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n 
bitstream.readBits(1);\n bitstream.readBits(1);\n minSpatialSegmentationIdc = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n return {\n colourPrimaries,\n transferCharacteristics,\n matrixCoefficients,\n fullRangeFlag,\n minSpatialSegmentationIdc\n };\n};\nvar skipHevcHrdParameters = (bitstream, commonInfPresentFlag, maxNumSubLayersMinus1) => {\n let nal_hrd_parameters_present_flag = false;\n let vcl_hrd_parameters_present_flag = false;\n let sub_pic_hrd_params_present_flag = false;\n if (commonInfPresentFlag) {\n nal_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n vcl_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) {\n sub_pic_hrd_params_present_flag = bitstream.readBits(1) === 1;\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(8);\n bitstream.readBits(5);\n bitstream.readBits(1);\n bitstream.readBits(5);\n }\n bitstream.readBits(4);\n bitstream.readBits(4);\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(4);\n }\n bitstream.readBits(5);\n bitstream.readBits(5);\n bitstream.readBits(5);\n }\n }\n for (let i = 0;i <= maxNumSubLayersMinus1; i++) {\n const fixed_pic_rate_general_flag = bitstream.readBits(1) === 1;\n let fixed_pic_rate_within_cvs_flag = true;\n if (!fixed_pic_rate_general_flag) {\n fixed_pic_rate_within_cvs_flag = bitstream.readBits(1) === 1;\n }\n let low_delay_hrd_flag = false;\n if (fixed_pic_rate_within_cvs_flag) {\n readExpGolomb(bitstream);\n } else {\n low_delay_hrd_flag = bitstream.readBits(1) === 1;\n }\n let CpbCnt = 1;\n if (!low_delay_hrd_flag) {\n const cpb_cnt_minus1 = readExpGolomb(bitstream);\n CpbCnt = cpb_cnt_minus1 + 1;\n }\n if (nal_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n if (vcl_hrd_parameters_present_flag) {\n 
skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n }\n};\nvar skipSubLayerHrdParameters = (bitstream, CpbCnt, sub_pic_hrd_params_present_flag) => {\n for (let i = 0;i < CpbCnt; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (sub_pic_hrd_params_present_flag) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n }\n};\nvar serializeHevcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push((record.generalProfileSpace & 3) << 6 | (record.generalTierFlag & 1) << 5 | record.generalProfileIdc & 31);\n bytes.push(record.generalProfileCompatibilityFlags >>> 24 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 16 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 8 & 255);\n bytes.push(record.generalProfileCompatibilityFlags & 255);\n bytes.push(...record.generalConstraintIndicatorFlags);\n bytes.push(record.generalLevelIdc & 255);\n bytes.push(240 | record.minSpatialSegmentationIdc >> 8 & 15);\n bytes.push(record.minSpatialSegmentationIdc & 255);\n bytes.push(252 | record.parallelismType & 3);\n bytes.push(252 | record.chromaFormatIdc & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.avgFrameRate >> 8 & 255);\n bytes.push(record.avgFrameRate & 255);\n bytes.push((record.constantFrameRate & 3) << 6 | (record.numTemporalLayers & 7) << 3 | (record.temporalIdNested & 1) << 2 | record.lengthSizeMinusOne & 3);\n bytes.push(record.arrays.length & 255);\n for (const arr of record.arrays) {\n bytes.push((arr.arrayCompleteness & 1) << 7 | 0 << 6 | arr.nalUnitType & 63);\n bytes.push(arr.nalUnits.length >> 8 & 255);\n bytes.push(arr.nalUnits.length & 255);\n for (const nal of arr.nalUnits) {\n bytes.push(nal.length >> 8 & 255);\n bytes.push(nal.length & 255);\n for (let i = 0;i < nal.length; i++) {\n bytes.push(nal[i]);\n }\n 
}\n }\n return new Uint8Array(bytes);\n};\nvar parseOpusIdentificationHeader = (bytes) => {\n const view = toDataView(bytes);\n const outputChannelCount = view.getUint8(9);\n const preSkip = view.getUint16(10, true);\n const inputSampleRate = view.getUint32(12, true);\n const outputGain = view.getInt16(16, true);\n const channelMappingFamily = view.getUint8(18);\n let channelMappingTable = null;\n if (channelMappingFamily) {\n channelMappingTable = bytes.subarray(19, 19 + 2 + outputChannelCount);\n }\n return {\n outputChannelCount,\n preSkip,\n inputSampleRate,\n outputGain,\n channelMappingFamily,\n channelMappingTable\n };\n};\nvar FlacBlockType;\n(function(FlacBlockType2) {\n FlacBlockType2[FlacBlockType2[\"STREAMINFO\"] = 0] = \"STREAMINFO\";\n FlacBlockType2[FlacBlockType2[\"VORBIS_COMMENT\"] = 4] = \"VORBIS_COMMENT\";\n FlacBlockType2[FlacBlockType2[\"PICTURE\"] = 6] = \"PICTURE\";\n})(FlacBlockType || (FlacBlockType = {}));\n\n// ../../node_modules/mediabunny/dist/modules/src/custom-coder.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar customVideoEncoders = [];\nvar customAudioEncoders = [];\n\n// ../../node_modules/mediabunny/dist/modules/src/packet.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar PLACEHOLDER_DATA = /* @__PURE__ */ new Uint8Array(0);\n\nclass EncodedPacket {\n constructor(data, type, timestamp, duration, sequenceNumber = -1, byteLength, sideData) {\n this.data = data;\n this.type = type;\n this.timestamp = timestamp;\n this.duration = duration;\n this.sequenceNumber = sequenceNumber;\n if (data === PLACEHOLDER_DATA && byteLength === undefined) {\n throw new Error(\"Internal error: byteLength must be explicitly provided when constructing metadata-only packets.\");\n }\n if (byteLength === undefined) {\n byteLength = data.byteLength;\n }\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (type !== \"key\" && type !== \"delta\") {\n throw new TypeError('type must be either \"key\" or \"delta\".');\n }\n if (!Number.isFinite(timestamp)) {\n throw new TypeError(\"timestamp must be a number.\");\n }\n if (!Number.isFinite(duration) || duration < 0) {\n throw new TypeError(\"duration must be a non-negative number.\");\n }\n if (!Number.isFinite(sequenceNumber)) {\n throw new TypeError(\"sequenceNumber must be a number.\");\n }\n if (!Number.isInteger(byteLength) || byteLength < 0) {\n throw new TypeError(\"byteLength must be a non-negative integer.\");\n }\n if (sideData !== undefined && (typeof sideData !== \"object\" || !sideData)) {\n throw new TypeError(\"sideData, when provided, must be an object.\");\n }\n if (sideData?.alpha !== undefined && !(sideData.alpha instanceof Uint8Array)) {\n throw new TypeError(\"sideData.alpha, when provided, must be a Uint8Array.\");\n }\n if (sideData?.alphaByteLength !== undefined && (!Number.isInteger(sideData.alphaByteLength) || sideData.alphaByteLength < 0)) {\n throw new TypeError(\"sideData.alphaByteLength, when provided, must be a non-negative integer.\");\n }\n this.byteLength = byteLength;\n this.sideData = sideData ?? 
{};\n if (this.sideData.alpha && this.sideData.alphaByteLength === undefined) {\n this.sideData.alphaByteLength = this.sideData.alpha.byteLength;\n }\n }\n get isMetadataOnly() {\n return this.data === PLACEHOLDER_DATA;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n toEncodedVideoChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n alphaToEncodedVideoChunk(type = this.type) {\n if (!this.sideData.alpha) {\n throw new TypeError(\"This packet does not contain alpha side data.\");\n }\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.sideData.alpha,\n type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n toEncodedAudioChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to an audio chunk.\");\n }\n if (typeof EncodedAudioChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedAudioChunk.\");\n }\n return new EncodedAudioChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n static fromEncodedChunk(chunk, sideData) {\n if (!(chunk instanceof EncodedVideoChunk || chunk instanceof EncodedAudioChunk)) {\n throw 
new TypeError(\"chunk must be an EncodedVideoChunk or EncodedAudioChunk.\");\n }\n const data = new Uint8Array(chunk.byteLength);\n chunk.copyTo(data);\n return new EncodedPacket(data, chunk.type, chunk.timestamp / 1e6, (chunk.duration ?? 0) / 1e6, undefined, undefined, sideData);\n }\n clone(options) {\n if (options !== undefined && (typeof options !== \"object\" || options === null)) {\n throw new TypeError(\"options, when provided, must be an object.\");\n }\n if (options?.data !== undefined && !(options.data instanceof Uint8Array)) {\n throw new TypeError(\"options.data, when provided, must be a Uint8Array.\");\n }\n if (options?.type !== undefined && options.type !== \"key\" && options.type !== \"delta\") {\n throw new TypeError('options.type, when provided, must be either \"key\" or \"delta\".');\n }\n if (options?.timestamp !== undefined && !Number.isFinite(options.timestamp)) {\n throw new TypeError(\"options.timestamp, when provided, must be a number.\");\n }\n if (options?.duration !== undefined && !Number.isFinite(options.duration)) {\n throw new TypeError(\"options.duration, when provided, must be a number.\");\n }\n if (options?.sequenceNumber !== undefined && !Number.isFinite(options.sequenceNumber)) {\n throw new TypeError(\"options.sequenceNumber, when provided, must be a number.\");\n }\n if (options?.sideData !== undefined && (typeof options.sideData !== \"object\" || options.sideData === null)) {\n throw new TypeError(\"options.sideData, when provided, must be an object.\");\n }\n return new EncodedPacket(options?.data ?? this.data, options?.type ?? this.type, options?.timestamp ?? this.timestamp, options?.duration ?? this.duration, options?.sequenceNumber ?? this.sequenceNumber, this.byteLength, options?.sideData ?? 
this.sideData);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/pcm.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar toUlaw = (s16) => {\n const MULAW_MAX = 8191;\n const MULAW_BIAS = 33;\n let number = s16;\n let mask = 4096;\n let sign = 0;\n let position = 12;\n let lsb = 0;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n number += MULAW_BIAS;\n if (number > MULAW_MAX) {\n number = MULAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> position - 4 & 15;\n return ~(sign | position - 5 << 4 | lsb) & 255;\n};\nvar toAlaw = (s16) => {\n const ALAW_MAX = 4095;\n let mask = 2048;\n let sign = 0;\n let position = 11;\n let lsb = 0;\n let number = s16;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n if (number > ALAW_MAX) {\n number = ALAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> (position === 4 ? 1 : position - 4) & 15;\n return (sign | position - 4 << 4 | lsb) ^ 85;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/sample.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\npolyfillSymbolDispose();\nvar lastVideoGcErrorLog = -Infinity;\nvar lastAudioGcErrorLog = -Infinity;\nvar finalizationRegistry = null;\nif (typeof FinalizationRegistry !== \"undefined\") {\n finalizationRegistry = new FinalizationRegistry((value) => {\n const now = Date.now();\n if (value.type === \"video\") {\n if (now - lastVideoGcErrorLog >= 1000) {\n console.error(`A VideoSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your VideoSamples as soon as you're done using them.`);\n lastVideoGcErrorLog = now;\n }\n if (typeof VideoFrame !== \"undefined\" && value.data instanceof VideoFrame) {\n value.data.close();\n }\n } else {\n if (now - lastAudioGcErrorLog >= 1000) {\n console.error(`An AudioSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your AudioSamples as soon as you're done using them.`);\n lastAudioGcErrorLog = now;\n }\n if (typeof AudioData !== \"undefined\" && value.data instanceof AudioData) {\n value.data.close();\n }\n }\n });\n}\nvar VIDEO_SAMPLE_PIXEL_FORMATS = [\n \"I420\",\n \"I420P10\",\n \"I420P12\",\n \"I420A\",\n \"I420AP10\",\n \"I420AP12\",\n \"I422\",\n \"I422P10\",\n \"I422P12\",\n \"I422A\",\n \"I422AP10\",\n \"I422AP12\",\n \"I444\",\n \"I444P10\",\n \"I444P12\",\n \"I444A\",\n \"I444AP10\",\n \"I444AP12\",\n \"NV12\",\n \"RGBA\",\n \"RGBX\",\n \"BGRA\",\n \"BGRX\"\n];\nvar VIDEO_SAMPLE_PIXEL_FORMATS_SET = new Set(VIDEO_SAMPLE_PIXEL_FORMATS);\n\nclass VideoSample {\n get displayWidth() {\n return this.rotation % 180 === 0 ? this.codedWidth : this.codedHeight;\n }\n get displayHeight() {\n return this.rotation % 180 === 0 ? 
this.codedHeight : this.codedWidth;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n get hasAlpha() {\n return this.format && this.format.includes(\"A\");\n }\n constructor(data, init) {\n this._closed = false;\n if (data instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && data instanceof SharedArrayBuffer || ArrayBuffer.isView(data)) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.format === undefined || !VIDEO_SAMPLE_PIXEL_FORMATS_SET.has(init.format)) {\n throw new TypeError(\"init.format must be one of: \" + VIDEO_SAMPLE_PIXEL_FORMATS.join(\", \"));\n }\n if (!Number.isInteger(init.codedWidth) || init.codedWidth <= 0) {\n throw new TypeError(\"init.codedWidth must be a positive integer.\");\n }\n if (!Number.isInteger(init.codedHeight) || init.codedHeight <= 0) {\n throw new TypeError(\"init.codedHeight must be a positive integer.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = toUint8Array(data).slice();\n this._layout = init.layout ?? createDefaultPlaneLayout(init.format, init.codedWidth, init.codedHeight);\n this.format = init.format;\n this.codedWidth = init.codedWidth;\n this.codedHeight = init.codedHeight;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoSampleColorSpace(init.colorSpace);\n } else if (typeof VideoFrame !== \"undefined\" && data instanceof VideoFrame) {\n if (init?.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (init?.timestamp !== undefined && !Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp, when provided, must be a number.\");\n }\n if (init?.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = data;\n this._layout = null;\n this.format = data.format;\n this.codedWidth = data.displayWidth;\n this.codedHeight = data.displayHeight;\n this.rotation = init?.rotation ?? 0;\n this.timestamp = init?.timestamp ?? data.timestamp / 1e6;\n this.duration = init?.duration ?? (data.duration ?? 0) / 1e6;\n this.colorSpace = new VideoSampleColorSpace(data.colorSpace);\n } else if (typeof HTMLImageElement !== \"undefined\" && data instanceof HTMLImageElement || typeof SVGImageElement !== \"undefined\" && data instanceof SVGImageElement || typeof ImageBitmap !== \"undefined\" && data instanceof ImageBitmap || typeof HTMLVideoElement !== \"undefined\" && data instanceof HTMLVideoElement || typeof HTMLCanvasElement !== \"undefined\" && data instanceof HTMLCanvasElement || typeof OffscreenCanvas !== \"undefined\" && data instanceof OffscreenCanvas) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || 
init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n if (typeof VideoFrame !== \"undefined\") {\n return new VideoSample(new VideoFrame(data, {\n timestamp: Math.trunc(init.timestamp * SECOND_TO_MICROSECOND_FACTOR),\n duration: Math.trunc((init.duration ?? 0) * SECOND_TO_MICROSECOND_FACTOR) || undefined\n }), init);\n }\n let width = 0;\n let height = 0;\n if (\"naturalWidth\" in data) {\n width = data.naturalWidth;\n height = data.naturalHeight;\n } else if (\"videoWidth\" in data) {\n width = data.videoWidth;\n height = data.videoHeight;\n } else if (\"width\" in data) {\n width = Number(data.width);\n height = Number(data.height);\n }\n if (!width || !height) {\n throw new TypeError(\"Could not determine dimensions.\");\n }\n const canvas = new OffscreenCanvas(width, height);\n const context = canvas.getContext(\"2d\", {\n alpha: isFirefox(),\n willReadFrequently: true\n });\n assert(context);\n context.drawImage(data, 0, 0);\n this._data = canvas;\n this._layout = null;\n this.format = \"RGBX\";\n this.codedWidth = width;\n this.codedHeight = height;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoSampleColorSpace({\n matrix: \"rgb\",\n primaries: \"bt709\",\n transfer: \"iec61966-2-1\",\n fullRange: true\n });\n } else {\n throw new TypeError(\"Invalid data type: Must be a BufferSource or CanvasImageSource.\");\n }\n finalizationRegistry?.register(this, { type: \"video\", data: this._data }, this);\n }\n clone() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return new VideoSample(this._data.clone(), {\n timestamp: this.timestamp,\n duration: this.duration,\n rotation: this.rotation\n });\n } else if (this._data instanceof Uint8Array) {\n assert(this._layout);\n return new VideoSample(this._data, {\n format: this.format,\n layout: this._layout,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n } else {\n return new VideoSample(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isVideoFrame(this._data)) {\n this._data.close();\n } else {\n this._data = null;\n }\n this._closed = true;\n }\n allocationSize(options = {}) {\n validateVideoFrameCopyToOptions(options);\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n if (this.format === null) {\n throw new Error(\"Cannot get allocation size when format is null. 
Sorry!\");\n }\n assert(this._data !== null);\n if (!isVideoFrame(this._data)) {\n if (options.colorSpace || options.format && options.format !== this.format || options.layout || options.rect) {\n const videoFrame = this.toVideoFrame();\n const size = videoFrame.allocationSize(options);\n videoFrame.close();\n return size;\n }\n }\n if (isVideoFrame(this._data)) {\n return this._data.allocationSize(options);\n } else if (this._data instanceof Uint8Array) {\n return this._data.byteLength;\n } else {\n return this.codedWidth * this.codedHeight * 4;\n }\n }\n async copyTo(destination, options = {}) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n validateVideoFrameCopyToOptions(options);\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n if (this.format === null) {\n throw new Error(\"Cannot copy video sample data when format is null. Sorry!\");\n }\n assert(this._data !== null);\n if (!isVideoFrame(this._data)) {\n if (options.colorSpace || options.format && options.format !== this.format || options.layout || options.rect) {\n const videoFrame = this.toVideoFrame();\n const layout = await videoFrame.copyTo(destination, options);\n videoFrame.close();\n return layout;\n }\n }\n if (isVideoFrame(this._data)) {\n return this._data.copyTo(destination, options);\n } else if (this._data instanceof Uint8Array) {\n assert(this._layout);\n const dest = toUint8Array(destination);\n dest.set(this._data);\n return this._layout;\n } else {\n const canvas = this._data;\n const context = canvas.getContext(\"2d\");\n assert(context);\n const imageData = context.getImageData(0, 0, this.codedWidth, this.codedHeight);\n const dest = toUint8Array(destination);\n dest.set(imageData.data);\n return [{\n offset: 0,\n stride: 4 * this.codedWidth\n }];\n }\n }\n toVideoFrame() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== 
null);\n if (isVideoFrame(this._data)) {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n } else if (this._data instanceof Uint8Array) {\n return new VideoFrame(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined,\n colorSpace: this.colorSpace\n });\n } else {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n }\n }\n draw(context, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) {\n let sx = 0;\n let sy = 0;\n let sWidth = this.displayWidth;\n let sHeight = this.displayHeight;\n let dx = 0;\n let dy = 0;\n let dWidth = this.displayWidth;\n let dHeight = this.displayHeight;\n if (arg5 !== undefined) {\n sx = arg1;\n sy = arg2;\n sWidth = arg3;\n sHeight = arg4;\n dx = arg5;\n dy = arg6;\n if (arg7 !== undefined) {\n dWidth = arg7;\n dHeight = arg8;\n } else {\n dWidth = sWidth;\n dHeight = sHeight;\n }\n } else {\n dx = arg1;\n dy = arg2;\n if (arg3 !== undefined) {\n dWidth = arg3;\n dHeight = arg4;\n }\n }\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!Number.isFinite(sx)) {\n throw new TypeError(\"sx must be a number.\");\n }\n if (!Number.isFinite(sy)) {\n throw new TypeError(\"sy must be a number.\");\n }\n if (!Number.isFinite(sWidth) || sWidth < 0) {\n throw new TypeError(\"sWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(sHeight) || sHeight < 0) {\n throw new TypeError(\"sHeight must be a non-negative number.\");\n }\n if 
(!Number.isFinite(dx)) {\n throw new TypeError(\"dx must be a number.\");\n }\n if (!Number.isFinite(dy)) {\n throw new TypeError(\"dy must be a number.\");\n }\n if (!Number.isFinite(dWidth) || dWidth < 0) {\n throw new TypeError(\"dWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(dHeight) || dHeight < 0) {\n throw new TypeError(\"dHeight must be a non-negative number.\");\n }\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n ({ sx, sy, sWidth, sHeight } = this._rotateSourceRegion(sx, sy, sWidth, sHeight, this.rotation));\n const source = this.toCanvasImageSource();\n context.save();\n const centerX = dx + dWidth / 2;\n const centerY = dy + dHeight / 2;\n context.translate(centerX, centerY);\n context.rotate(this.rotation * Math.PI / 180);\n const aspectRatioChange = this.rotation % 180 === 0 ? 1 : dWidth / dHeight;\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.drawImage(source, sx, sy, sWidth, sHeight, -dWidth / 2, -dHeight / 2, dWidth, dHeight);\n context.restore();\n }\n drawWithFit(context, options) {\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (![\"fill\", \"contain\", \"cover\"].includes(options.fit)) {\n throw new TypeError(\"options.fit must be 'fill', 'contain', or 'cover'.\");\n }\n if (options.rotation !== undefined && ![0, 90, 180, 270].includes(options.rotation)) {\n throw new TypeError(\"options.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (options.crop !== undefined) {\n validateCropRectangle(options.crop, \"options.\");\n }\n const canvasWidth = 
context.canvas.width;\n const canvasHeight = context.canvas.height;\n const rotation = options.rotation ?? this.rotation;\n const [rotatedWidth, rotatedHeight] = rotation % 180 === 0 ? [this.codedWidth, this.codedHeight] : [this.codedHeight, this.codedWidth];\n if (options.crop) {\n clampCropRectangle(options.crop, rotatedWidth, rotatedHeight);\n }\n let dx;\n let dy;\n let newWidth;\n let newHeight;\n const { sx, sy, sWidth, sHeight } = this._rotateSourceRegion(options.crop?.left ?? 0, options.crop?.top ?? 0, options.crop?.width ?? rotatedWidth, options.crop?.height ?? rotatedHeight, rotation);\n if (options.fit === \"fill\") {\n dx = 0;\n dy = 0;\n newWidth = canvasWidth;\n newHeight = canvasHeight;\n } else {\n const [sampleWidth, sampleHeight] = options.crop ? [options.crop.width, options.crop.height] : [rotatedWidth, rotatedHeight];\n const scale = options.fit === \"contain\" ? Math.min(canvasWidth / sampleWidth, canvasHeight / sampleHeight) : Math.max(canvasWidth / sampleWidth, canvasHeight / sampleHeight);\n newWidth = sampleWidth * scale;\n newHeight = sampleHeight * scale;\n dx = (canvasWidth - newWidth) / 2;\n dy = (canvasHeight - newHeight) / 2;\n }\n context.save();\n const aspectRatioChange = rotation % 180 === 0 ? 
1 : newWidth / newHeight;\n context.translate(canvasWidth / 2, canvasHeight / 2);\n context.rotate(rotation * Math.PI / 180);\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.translate(-canvasWidth / 2, -canvasHeight / 2);\n context.drawImage(this.toCanvasImageSource(), sx, sy, sWidth, sHeight, dx, dy, newWidth, newHeight);\n context.restore();\n }\n _rotateSourceRegion(sx, sy, sWidth, sHeight, rotation) {\n if (rotation === 90) {\n [sx, sy, sWidth, sHeight] = [\n sy,\n this.codedHeight - sx - sWidth,\n sHeight,\n sWidth\n ];\n } else if (rotation === 180) {\n [sx, sy] = [\n this.codedWidth - sx - sWidth,\n this.codedHeight - sy - sHeight\n ];\n } else if (rotation === 270) {\n [sx, sy, sWidth, sHeight] = [\n this.codedWidth - sy - sHeight,\n sx,\n sHeight,\n sWidth\n ];\n }\n return { sx, sy, sWidth, sHeight };\n }\n toCanvasImageSource() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (this._data instanceof Uint8Array) {\n const videoFrame = this.toVideoFrame();\n queueMicrotask(() => videoFrame.close());\n return videoFrame;\n } else {\n return this._data;\n }\n }\n setRotation(newRotation) {\n if (![0, 90, 180, 270].includes(newRotation)) {\n throw new TypeError(\"newRotation must be 0, 90, 180, or 270.\");\n }\n this.rotation = newRotation;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n setDuration(newDuration) {\n if (!Number.isFinite(newDuration) || newDuration < 0) {\n throw new TypeError(\"newDuration must be a non-negative number.\");\n }\n this.duration = newDuration;\n }\n [Symbol.dispose]() {\n this.close();\n }\n}\n\nclass VideoSampleColorSpace {\n constructor(init) {\n this.primaries = init?.primaries ?? null;\n this.transfer = init?.transfer ?? null;\n this.matrix = init?.matrix ?? null;\n this.fullRange = init?.fullRange ?? 
null;\n }\n toJSON() {\n return {\n primaries: this.primaries,\n transfer: this.transfer,\n matrix: this.matrix,\n fullRange: this.fullRange\n };\n }\n}\nvar isVideoFrame = (x) => {\n return typeof VideoFrame !== \"undefined\" && x instanceof VideoFrame;\n};\nvar clampCropRectangle = (crop, outerWidth, outerHeight) => {\n crop.left = Math.min(crop.left, outerWidth);\n crop.top = Math.min(crop.top, outerHeight);\n crop.width = Math.min(crop.width, outerWidth - crop.left);\n crop.height = Math.min(crop.height, outerHeight - crop.top);\n assert(crop.width >= 0);\n assert(crop.height >= 0);\n};\nvar validateCropRectangle = (crop, prefix) => {\n if (!crop || typeof crop !== \"object\") {\n throw new TypeError(prefix + \"crop, when provided, must be an object.\");\n }\n if (!Number.isInteger(crop.left) || crop.left < 0) {\n throw new TypeError(prefix + \"crop.left must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.top) || crop.top < 0) {\n throw new TypeError(prefix + \"crop.top must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.width) || crop.width < 0) {\n throw new TypeError(prefix + \"crop.width must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.height) || crop.height < 0) {\n throw new TypeError(prefix + \"crop.height must be a non-negative integer.\");\n }\n};\nvar validateVideoFrameCopyToOptions = (options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.colorSpace !== undefined && ![\"display-p3\", \"srgb\"].includes(options.colorSpace)) {\n throw new TypeError(\"options.colorSpace, when provided, must be 'display-p3' or 'srgb'.\");\n }\n if (options.format !== undefined && typeof options.format !== \"string\") {\n throw new TypeError(\"options.format, when provided, must be a string.\");\n }\n if (options.layout !== undefined) {\n if (!Array.isArray(options.layout)) {\n throw new TypeError(\"options.layout, when provided, must be 
an array.\");\n }\n for (const plane of options.layout) {\n if (!plane || typeof plane !== \"object\") {\n throw new TypeError(\"Each entry in options.layout must be an object.\");\n }\n if (!Number.isInteger(plane.offset) || plane.offset < 0) {\n throw new TypeError(\"plane.offset must be a non-negative integer.\");\n }\n if (!Number.isInteger(plane.stride) || plane.stride < 0) {\n throw new TypeError(\"plane.stride must be a non-negative integer.\");\n }\n }\n }\n if (options.rect !== undefined) {\n if (!options.rect || typeof options.rect !== \"object\") {\n throw new TypeError(\"options.rect, when provided, must be an object.\");\n }\n if (options.rect.x !== undefined && (!Number.isInteger(options.rect.x) || options.rect.x < 0)) {\n throw new TypeError(\"options.rect.x, when provided, must be a non-negative integer.\");\n }\n if (options.rect.y !== undefined && (!Number.isInteger(options.rect.y) || options.rect.y < 0)) {\n throw new TypeError(\"options.rect.y, when provided, must be a non-negative integer.\");\n }\n if (options.rect.width !== undefined && (!Number.isInteger(options.rect.width) || options.rect.width < 0)) {\n throw new TypeError(\"options.rect.width, when provided, must be a non-negative integer.\");\n }\n if (options.rect.height !== undefined && (!Number.isInteger(options.rect.height) || options.rect.height < 0)) {\n throw new TypeError(\"options.rect.height, when provided, must be a non-negative integer.\");\n }\n }\n};\nvar createDefaultPlaneLayout = (format, codedWidth, codedHeight) => {\n const planes = getPlaneConfigs(format);\n const layouts = [];\n let currentOffset = 0;\n for (const plane of planes) {\n const planeWidth = Math.ceil(codedWidth / plane.widthDivisor);\n const planeHeight = Math.ceil(codedHeight / plane.heightDivisor);\n const stride = planeWidth * plane.sampleBytes;\n const planeSize = stride * planeHeight;\n layouts.push({\n offset: currentOffset,\n stride\n });\n currentOffset += planeSize;\n }\n return layouts;\n};\nvar 
getPlaneConfigs = (format) => {\n const yuv = (yBytes, uvBytes, subX, subY, hasAlpha) => {\n const configs = [\n { sampleBytes: yBytes, widthDivisor: 1, heightDivisor: 1 },\n { sampleBytes: uvBytes, widthDivisor: subX, heightDivisor: subY },\n { sampleBytes: uvBytes, widthDivisor: subX, heightDivisor: subY }\n ];\n if (hasAlpha) {\n configs.push({ sampleBytes: yBytes, widthDivisor: 1, heightDivisor: 1 });\n }\n return configs;\n };\n switch (format) {\n case \"I420\":\n return yuv(1, 1, 2, 2, false);\n case \"I420P10\":\n case \"I420P12\":\n return yuv(2, 2, 2, 2, false);\n case \"I420A\":\n return yuv(1, 1, 2, 2, true);\n case \"I420AP10\":\n case \"I420AP12\":\n return yuv(2, 2, 2, 2, true);\n case \"I422\":\n return yuv(1, 1, 2, 1, false);\n case \"I422P10\":\n case \"I422P12\":\n return yuv(2, 2, 2, 1, false);\n case \"I422A\":\n return yuv(1, 1, 2, 1, true);\n case \"I422AP10\":\n case \"I422AP12\":\n return yuv(2, 2, 2, 1, true);\n case \"I444\":\n return yuv(1, 1, 1, 1, false);\n case \"I444P10\":\n case \"I444P12\":\n return yuv(2, 2, 1, 1, false);\n case \"I444A\":\n return yuv(1, 1, 1, 1, true);\n case \"I444AP10\":\n case \"I444AP12\":\n return yuv(2, 2, 1, 1, true);\n case \"NV12\":\n return [\n { sampleBytes: 1, widthDivisor: 1, heightDivisor: 1 },\n { sampleBytes: 2, widthDivisor: 2, heightDivisor: 2 }\n ];\n case \"RGBA\":\n case \"RGBX\":\n case \"BGRA\":\n case \"BGRX\":\n return [\n { sampleBytes: 4, widthDivisor: 1, heightDivisor: 1 }\n ];\n default:\n assertNever(format);\n assert(false);\n }\n};\nvar AUDIO_SAMPLE_FORMATS = new Set([\"f32\", \"f32-planar\", \"s16\", \"s16-planar\", \"s32\", \"s32-planar\", \"u8\", \"u8-planar\"]);\n\nclass AudioSample {\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n constructor(init) {\n this._closed = false;\n if (isAudioData(init)) {\n if (init.format 
=== null) {\n throw new TypeError(\"AudioData with null format is not supported.\");\n }\n this._data = init;\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = init.numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp / 1e6;\n this.duration = init.numberOfFrames / init.sampleRate;\n } else {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"Invalid AudioDataInit: must be an object.\");\n }\n if (!AUDIO_SAMPLE_FORMATS.has(init.format)) {\n throw new TypeError(\"Invalid AudioDataInit: invalid format.\");\n }\n if (!Number.isFinite(init.sampleRate) || init.sampleRate <= 0) {\n throw new TypeError(\"Invalid AudioDataInit: sampleRate must be > 0.\");\n }\n if (!Number.isInteger(init.numberOfChannels) || init.numberOfChannels === 0) {\n throw new TypeError(\"Invalid AudioDataInit: numberOfChannels must be an integer > 0.\");\n }\n if (!Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n const numberOfFrames = init.data.byteLength / (getBytesPerSample(init.format) * init.numberOfChannels);\n if (!Number.isInteger(numberOfFrames)) {\n throw new TypeError(\"Invalid AudioDataInit: data size is not a multiple of frame size.\");\n }\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp;\n this.duration = numberOfFrames / init.sampleRate;\n let dataBuffer;\n if (init.data instanceof ArrayBuffer) {\n dataBuffer = new Uint8Array(init.data);\n } else if (ArrayBuffer.isView(init.data)) {\n dataBuffer = new Uint8Array(init.data.buffer, init.data.byteOffset, init.data.byteLength);\n } else {\n throw new TypeError(\"Invalid AudioDataInit: data is not a BufferSource.\");\n }\n const expectedSize = this.numberOfFrames * this.numberOfChannels * getBytesPerSample(this.format);\n if 
(dataBuffer.byteLength < expectedSize) {\n throw new TypeError(\"Invalid AudioDataInit: insufficient data size.\");\n }\n this._data = dataBuffer;\n }\n finalizationRegistry?.register(this, { type: \"audio\", data: this._data }, this);\n }\n allocationSize(options) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const destFormat = options.format ?? this.format;\n const frameOffset = options.frameOffset ?? 0;\n if (frameOffset >= this.numberOfFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = options.frameCount !== undefined ? options.frameCount : this.numberOfFrames - frameOffset;\n if (copyFrameCount > this.numberOfFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const bytesPerSample = getBytesPerSample(destFormat);\n const isPlanar = formatIsPlanar(destFormat);\n if (isPlanar && options.planeIndex >= this.numberOfChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!isPlanar && options.planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const elementCount = isPlanar ? 
copyFrameCount : copyFrameCount * this.numberOfChannels;\n return elementCount * bytesPerSample;\n }\n copyTo(destination, options) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const { planeIndex, format, frameCount: optFrameCount, frameOffset: optFrameOffset } = options;\n const srcFormat = this.format;\n const destFormat = format ?? this.format;\n if (!destFormat)\n throw new Error(\"Destination format not determined\");\n const numFrames = this.numberOfFrames;\n const numChannels = this.numberOfChannels;\n const frameOffset = optFrameOffset ?? 0;\n if (frameOffset >= numFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = optFrameCount !== undefined ? 
optFrameCount : numFrames - frameOffset;\n if (copyFrameCount > numFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const destBytesPerSample = getBytesPerSample(destFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar && planeIndex >= numChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!destIsPlanar && planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const destElementCount = destIsPlanar ? copyFrameCount : copyFrameCount * numChannels;\n const requiredSize = destElementCount * destBytesPerSample;\n if (destination.byteLength < requiredSize) {\n throw new RangeError(\"Destination buffer is too small\");\n }\n const destView = toDataView(destination);\n const writeFn = getWriteFunction(destFormat);\n if (isAudioData(this._data)) {\n if (isWebKit() && numChannels > 2 && destFormat !== srcFormat) {\n doAudioDataCopyToWebKitWorkaround(this._data, destView, srcFormat, destFormat, numChannels, planeIndex, frameOffset, copyFrameCount);\n } else {\n this._data.copyTo(destination, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: destFormat\n });\n }\n } else {\n const uint8Data = this._data;\n const srcView = toDataView(uint8Data);\n const readFn = getReadFunction(srcFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n for (let i = 0;i < copyFrameCount; i++) {\n if (destIsPlanar) {\n const destOffset = i * destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (planeIndex * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + planeIndex) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n } else {\n for (let ch = 0;ch < numChannels; ch++) {\n const destIndex = i * numChannels + ch;\n const destOffset = destIndex * 
destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (ch * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + ch) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n }\n }\n }\n }\n }\n clone() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n const sample = new AudioSample(this._data.clone());\n sample.setTimestamp(this.timestamp);\n return sample;\n } else {\n return new AudioSample({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.timestamp,\n data: this._data\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isAudioData(this._data)) {\n this._data.close();\n } else {\n this._data = new Uint8Array(0);\n }\n this._closed = true;\n }\n toAudioData() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n if (this._data.timestamp === this.microsecondTimestamp) {\n return this._data.clone();\n } else {\n if (formatIsPlanar(this.format)) {\n const size = this.allocationSize({ planeIndex: 0, format: this.format });\n const data = new ArrayBuffer(size * this.numberOfChannels);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(new Uint8Array(data, i * size, size), { planeIndex: i, format: this.format });\n }\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n } else {\n const data = new ArrayBuffer(this.allocationSize({ planeIndex: 0, format: this.format }));\n this.copyTo(data, { planeIndex: 0, format: this.format });\n return new AudioData({\n format: this.format,\n 
sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n }\n }\n } else {\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data: this._data.buffer instanceof ArrayBuffer ? this._data.buffer : this._data.slice()\n });\n }\n }\n toAudioBuffer() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const audioBuffer = new AudioBuffer({\n numberOfChannels: this.numberOfChannels,\n length: this.numberOfFrames,\n sampleRate: this.sampleRate\n });\n const dataBytes = new Float32Array(this.allocationSize({ planeIndex: 0, format: \"f32-planar\" }) / 4);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(dataBytes, { planeIndex: i, format: \"f32-planar\" });\n audioBuffer.copyToChannel(dataBytes, i);\n }\n return audioBuffer;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n [Symbol.dispose]() {\n this.close();\n }\n static *_fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n 
audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n yield new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n }\n static fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n const result = [];\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n const audioSample = new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n result.push(audioSample);\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n return result;\n }\n}\nvar getBytesPerSample = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return 1;\n case \"s16\":\n case \"s16-planar\":\n return 2;\n case \"s32\":\n case \"s32-planar\":\n return 4;\n case \"f32\":\n case \"f32-planar\":\n return 4;\n default:\n throw new Error(\"Unknown AudioSampleFormat\");\n }\n};\nvar 
formatIsPlanar = (format) => {\n switch (format) {\n case \"u8-planar\":\n case \"s16-planar\":\n case \"s32-planar\":\n case \"f32-planar\":\n return true;\n default:\n return false;\n }\n};\nvar getReadFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset) => (view.getUint8(offset) - 128) / 128;\n case \"s16\":\n case \"s16-planar\":\n return (view, offset) => view.getInt16(offset, true) / 32768;\n case \"s32\":\n case \"s32-planar\":\n return (view, offset) => view.getInt32(offset, true) / 2147483648;\n case \"f32\":\n case \"f32-planar\":\n return (view, offset) => view.getFloat32(offset, true);\n }\n};\nvar getWriteFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset, value) => view.setUint8(offset, clamp((value + 1) * 127.5, 0, 255));\n case \"s16\":\n case \"s16-planar\":\n return (view, offset, value) => view.setInt16(offset, clamp(Math.round(value * 32767), -32768, 32767), true);\n case \"s32\":\n case \"s32-planar\":\n return (view, offset, value) => view.setInt32(offset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), true);\n case \"f32\":\n case \"f32-planar\":\n return (view, offset, value) => view.setFloat32(offset, value, true);\n }\n};\nvar isAudioData = (x) => {\n return typeof AudioData !== \"undefined\" && x instanceof AudioData;\n};\nvar doAudioDataCopyToWebKitWorkaround = (audioData, destView, srcFormat, destFormat, numChannels, planeIndex, frameOffset, copyFrameCount) => {\n const readFn = getReadFunction(srcFormat);\n const writeFn = getWriteFunction(destFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const destBytesPerSample = getBytesPerSample(destFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar) {\n if (srcIsPlanar) {\n const data = new ArrayBuffer(copyFrameCount * srcBytesPerSample);\n const dataView = toDataView(data);\n 
audioData.copyTo(data, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = i * srcBytesPerSample;\n const destOffset = i * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n } else {\n const data = new ArrayBuffer(copyFrameCount * numChannels * srcBytesPerSample);\n const dataView = toDataView(data);\n audioData.copyTo(data, {\n planeIndex: 0,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = (i * numChannels + planeIndex) * srcBytesPerSample;\n const destOffset = i * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n if (srcIsPlanar) {\n const planeSize = copyFrameCount * srcBytesPerSample;\n const data = new ArrayBuffer(planeSize);\n const dataView = toDataView(data);\n for (let ch = 0;ch < numChannels; ch++) {\n audioData.copyTo(data, {\n planeIndex: ch,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = i * srcBytesPerSample;\n const destOffset = (i * numChannels + ch) * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n const data = new ArrayBuffer(copyFrameCount * numChannels * srcBytesPerSample);\n const dataView = toDataView(data);\n audioData.copyTo(data, {\n planeIndex: 0,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n for (let ch = 0;ch < numChannels; ch++) {\n const idx = i * numChannels + ch;\n const srcOffset = idx * srcBytesPerSample;\n const destOffset = idx * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n }\n }\n};\n\n// 
../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-misc.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar buildIsobmffMimeType = (info) => {\n const base = info.hasVideo ? \"video/\" : info.hasAudio ? \"audio/\" : \"application/\";\n let string = base + (info.isQuickTime ? \"quicktime\" : \"mp4\");\n if (info.codecStrings.length > 0) {\n const uniqueCodecMimeTypes = [...new Set(info.codecStrings)];\n string += `; codecs=\"${uniqueCodecMimeTypes.join(\", \")}\"`;\n }\n return string;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_BOX_HEADER_SIZE = 8;\nvar MAX_BOX_HEADER_SIZE = 16;\n\n// ../../node_modules/mediabunny/dist/modules/src/adts/adts-reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_ADTS_FRAME_HEADER_SIZE = 7;\nvar MAX_ADTS_FRAME_HEADER_SIZE = 9;\nvar readAdtsFrameHeader = (slice) => {\n const startPos = slice.filePos;\n const bytes = readBytes(slice, 9);\n const bitstream = new Bitstream(bytes);\n const syncword = bitstream.readBits(12);\n if (syncword !== 4095) {\n return null;\n }\n bitstream.skipBits(1);\n const layer = bitstream.readBits(2);\n if (layer !== 0) {\n return null;\n }\n const protectionAbsence = bitstream.readBits(1);\n const objectType = bitstream.readBits(2) + 1;\n const samplingFrequencyIndex = bitstream.readBits(4);\n if (samplingFrequencyIndex === 15) {\n return null;\n }\n bitstream.skipBits(1);\n const channelConfiguration = bitstream.readBits(3);\n if (channelConfiguration === 0) {\n throw new Error(\"ADTS frames with channel configuration 0 are not supported.\");\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n const frameLength = bitstream.readBits(13);\n bitstream.skipBits(11);\n const numberOfAacFrames = bitstream.readBits(2) + 1;\n if (numberOfAacFrames !== 1) {\n throw new Error(\"ADTS frames with more than one AAC frame are not supported.\");\n }\n let crcCheck = null;\n if (protectionAbsence === 1) {\n slice.filePos -= 2;\n } else {\n crcCheck = bitstream.readBits(16);\n }\n return {\n objectType,\n samplingFrequencyIndex,\n channelConfiguration,\n frameLength,\n numberOfAacFrames,\n crcCheck,\n startPos\n };\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass FileSlice {\n constructor(bytes, view, offset, start, end) {\n this.bytes = bytes;\n this.view = view;\n this.offset = offset;\n this.start = start;\n this.end = end;\n this.bufferPos = start - offset;\n }\n static tempFromBytes(bytes) {\n return new FileSlice(bytes, toDataView(bytes), 0, 0, bytes.length);\n }\n get length() {\n return this.end - this.start;\n }\n get filePos() {\n return this.offset + this.bufferPos;\n }\n set filePos(value) {\n this.bufferPos = value - this.offset;\n }\n get remainingLength() {\n return Math.max(this.end - this.filePos, 0);\n }\n skip(byteCount) {\n this.bufferPos += byteCount;\n }\n slice(filePos, length = this.end - filePos) {\n if (filePos < this.start || filePos + length > this.end) {\n throw new RangeError(\"Slicing outside of original slice.\");\n }\n return new FileSlice(this.bytes, this.view, this.offset, filePos, filePos + length);\n }\n}\nvar checkIsInRange = (slice, bytesToRead) => {\n if (slice.filePos < slice.start || slice.filePos + bytesToRead > slice.end) {\n throw new RangeError(`Tried reading [${slice.filePos}, ${slice.filePos + bytesToRead}), but slice is` + ` [${slice.start}, ${slice.end}). This is likely an internal error, please report it alongside the file` + ` that caused it.`);\n }\n};\nvar readBytes = (slice, length) => {\n checkIsInRange(slice, length);\n const bytes = slice.bytes.subarray(slice.bufferPos, slice.bufferPos + length);\n slice.bufferPos += length;\n return bytes;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/muxer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Muxer {\n constructor(output) {\n this.mutex = new AsyncMutex;\n this.firstMediaStreamTimestamp = null;\n this.trackTimestampInfo = new WeakMap;\n this.output = output;\n }\n onTrackClose(track) {}\n validateAndNormalizeTimestamp(track, timestampInSeconds, isKeyPacket) {\n timestampInSeconds += track.source._timestampOffset;\n let timestampInfo = this.trackTimestampInfo.get(track);\n if (!timestampInfo) {\n if (!isKeyPacket) {\n throw new Error(\"First packet must be a key packet.\");\n }\n timestampInfo = {\n maxTimestamp: timestampInSeconds,\n maxTimestampBeforeLastKeyPacket: timestampInSeconds\n };\n this.trackTimestampInfo.set(track, timestampInfo);\n }\n if (timestampInSeconds < 0) {\n throw new Error(`Timestamps must be non-negative (got ${timestampInSeconds}s).`);\n }\n if (isKeyPacket) {\n timestampInfo.maxTimestampBeforeLastKeyPacket = timestampInfo.maxTimestamp;\n }\n if (timestampInSeconds < timestampInfo.maxTimestampBeforeLastKeyPacket) {\n throw new Error(`Timestamps cannot be smaller than the largest timestamp of the previous GOP (a GOP begins with a key` + ` packet and ends right before the next key packet). Got ${timestampInSeconds}s, but largest` + ` timestamp is ${timestampInfo.maxTimestampBeforeLastKeyPacket}s.`);\n }\n timestampInfo.maxTimestamp = Math.max(timestampInfo.maxTimestamp, timestampInSeconds);\n return timestampInSeconds;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/subtitles.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar inlineTimestampRegex = /<(?:(\\d{2}):)?(\\d{2}):(\\d{2}).(\\d{3})>/g;\nvar formatSubtitleTimestamp = (timestamp) => {\n const hours = Math.floor(timestamp / (60 * 60 * 1000));\n const minutes = Math.floor(timestamp % (60 * 60 * 1000) / (60 * 1000));\n const seconds = Math.floor(timestamp % (60 * 1000) / 1000);\n const milliseconds = timestamp % 1000;\n return hours.toString().padStart(2, \"0\") + \":\" + minutes.toString().padStart(2, \"0\") + \":\" + seconds.toString().padStart(2, \"0\") + \".\" + milliseconds.toString().padStart(3, \"0\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-boxes.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass IsobmffBoxWriter {\n constructor(writer) {\n this.writer = writer;\n this.helper = new Uint8Array(8);\n this.helperView = new DataView(this.helper.buffer);\n this.offsets = new WeakMap;\n }\n writeU32(value) {\n this.helperView.setUint32(0, value, false);\n this.writer.write(this.helper.subarray(0, 4));\n }\n writeU64(value) {\n this.helperView.setUint32(0, Math.floor(value / 2 ** 32), false);\n this.helperView.setUint32(4, value, false);\n this.writer.write(this.helper.subarray(0, 8));\n }\n writeAscii(text) {\n for (let i = 0;i < text.length; i++) {\n this.helperView.setUint8(i % 8, text.charCodeAt(i));\n if (i % 8 === 7)\n this.writer.write(this.helper);\n }\n if (text.length % 8 !== 0) {\n this.writer.write(this.helper.subarray(0, text.length % 8));\n }\n }\n writeBox(box) {\n this.offsets.set(box, this.writer.getPos());\n if (box.contents && !box.children) {\n this.writeBoxHeader(box, box.size ?? 
box.contents.byteLength + 8);\n this.writer.write(box.contents);\n } else {\n const startPos = this.writer.getPos();\n this.writeBoxHeader(box, 0);\n if (box.contents)\n this.writer.write(box.contents);\n if (box.children) {\n for (const child of box.children)\n if (child)\n this.writeBox(child);\n }\n const endPos = this.writer.getPos();\n const size = box.size ?? endPos - startPos;\n this.writer.seek(startPos);\n this.writeBoxHeader(box, size);\n this.writer.seek(endPos);\n }\n }\n writeBoxHeader(box, size) {\n this.writeU32(box.largeSize ? 1 : size);\n this.writeAscii(box.type);\n if (box.largeSize)\n this.writeU64(size);\n }\n measureBoxHeader(box) {\n return 8 + (box.largeSize ? 8 : 0);\n }\n patchBox(box) {\n const boxOffset = this.offsets.get(box);\n assert(boxOffset !== undefined);\n const endPos = this.writer.getPos();\n this.writer.seek(boxOffset);\n this.writeBox(box);\n this.writer.seek(endPos);\n }\n measureBox(box) {\n if (box.contents && !box.children) {\n const headerSize = this.measureBoxHeader(box);\n return headerSize + box.contents.byteLength;\n } else {\n let result = this.measureBoxHeader(box);\n if (box.contents)\n result += box.contents.byteLength;\n if (box.children) {\n for (const child of box.children)\n if (child)\n result += this.measureBox(child);\n }\n return result;\n }\n }\n}\nvar bytes = /* @__PURE__ */ new Uint8Array(8);\nvar view = /* @__PURE__ */ new DataView(bytes.buffer);\nvar u8 = (value) => {\n return [(value % 256 + 256) % 256];\n};\nvar u16 = (value) => {\n view.setUint16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar i16 = (value) => {\n view.setInt16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar u24 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[1], bytes[2], bytes[3]];\n};\nvar u32 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar i32 = (value) => {\n view.setInt32(0, value, false);\n return [bytes[0], bytes[1], 
bytes[2], bytes[3]];\n};\nvar u64 = (value) => {\n view.setUint32(0, Math.floor(value / 2 ** 32), false);\n view.setUint32(4, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7]];\n};\nvar fixed_8_8 = (value) => {\n view.setInt16(0, 2 ** 8 * value, false);\n return [bytes[0], bytes[1]];\n};\nvar fixed_16_16 = (value) => {\n view.setInt32(0, 2 ** 16 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar fixed_2_30 = (value) => {\n view.setInt32(0, 2 ** 30 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar variableUnsignedInt = (value, byteLength) => {\n const bytes2 = [];\n let remaining = value;\n do {\n let byte = remaining & 127;\n remaining >>= 7;\n if (bytes2.length > 0) {\n byte |= 128;\n }\n bytes2.push(byte);\n if (byteLength !== undefined) {\n byteLength--;\n }\n } while (remaining > 0 || byteLength);\n return bytes2.reverse();\n};\nvar ascii = (text, nullTerminated = false) => {\n const bytes2 = Array(text.length).fill(null).map((_, i) => text.charCodeAt(i));\n if (nullTerminated)\n bytes2.push(0);\n return bytes2;\n};\nvar lastPresentedSample = (samples) => {\n let result = null;\n for (const sample of samples) {\n if (!result || sample.timestamp > result.timestamp) {\n result = sample;\n }\n }\n return result;\n};\nvar rotationMatrix = (rotationInDegrees) => {\n const theta = rotationInDegrees * (Math.PI / 180);\n const cosTheta = Math.round(Math.cos(theta));\n const sinTheta = Math.round(Math.sin(theta));\n return [\n cosTheta,\n sinTheta,\n 0,\n -sinTheta,\n cosTheta,\n 0,\n 0,\n 0,\n 1\n ];\n};\nvar IDENTITY_MATRIX = /* @__PURE__ */ rotationMatrix(0);\nvar matrixToBytes = (matrix) => {\n return [\n fixed_16_16(matrix[0]),\n fixed_16_16(matrix[1]),\n fixed_2_30(matrix[2]),\n fixed_16_16(matrix[3]),\n fixed_16_16(matrix[4]),\n fixed_2_30(matrix[5]),\n fixed_16_16(matrix[6]),\n fixed_16_16(matrix[7]),\n fixed_2_30(matrix[8])\n ];\n};\nvar box = (type, 
contents, children) => ({\n type,\n contents: contents && new Uint8Array(contents.flat(10)),\n children\n});\nvar fullBox = (type, version, flags, contents, children) => box(type, [u8(version), u24(flags), contents ?? []], children);\nvar ftyp = (details) => {\n const minorVersion = 512;\n if (details.isQuickTime) {\n return box(\"ftyp\", [\n ascii(\"qt \"),\n u32(minorVersion),\n ascii(\"qt \")\n ]);\n }\n if (details.fragmented) {\n return box(\"ftyp\", [\n ascii(\"iso5\"),\n u32(minorVersion),\n ascii(\"iso5\"),\n ascii(\"iso6\"),\n ascii(\"mp41\")\n ]);\n }\n return box(\"ftyp\", [\n ascii(\"isom\"),\n u32(minorVersion),\n ascii(\"isom\"),\n details.holdsAvc ? ascii(\"avc1\") : [],\n ascii(\"mp41\")\n ]);\n};\nvar mdat = (reserveLargeSize) => ({ type: \"mdat\", largeSize: reserveLargeSize });\nvar free = (size) => ({ type: \"free\", size });\nvar moov = (muxer) => box(\"moov\", undefined, [\n mvhd(muxer.creationTime, muxer.trackDatas),\n ...muxer.trackDatas.map((x) => trak(x, muxer.creationTime)),\n muxer.isFragmented ? mvex(muxer.trackDatas) : null,\n udta(muxer)\n]);\nvar mvhd = (creationTime, trackDatas) => {\n const duration = intoTimescale(Math.max(0, ...trackDatas.filter((x) => x.samples.length > 0).map((x) => {\n const lastSample = lastPresentedSample(x.samples);\n return lastSample.timestamp + lastSample.duration;\n })), GLOBAL_TIMESCALE);\n const nextTrackId = Math.max(0, ...trackDatas.map((x) => x.track.id)) + 1;\n const needsU64 = !isU32(creationTime) || !isU32(duration);\n const u32OrU64 = needsU64 ? 
u64 : u32;\n return fullBox(\"mvhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(GLOBAL_TIMESCALE),\n u32OrU64(duration),\n fixed_16_16(1),\n fixed_8_8(1),\n Array(10).fill(0),\n matrixToBytes(IDENTITY_MATRIX),\n Array(24).fill(0),\n u32(nextTrackId)\n ]);\n};\nvar trak = (trackData, creationTime) => {\n const trackMetadata = getTrackMetadata(trackData);\n return box(\"trak\", undefined, [\n tkhd(trackData, creationTime),\n mdia(trackData, creationTime),\n trackMetadata.name !== undefined ? box(\"udta\", undefined, [\n box(\"name\", [\n ...textEncoder.encode(trackMetadata.name)\n ])\n ]) : null\n ]);\n};\nvar tkhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const durationInGlobalTimescale = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, GLOBAL_TIMESCALE);\n const needsU64 = !isU32(creationTime) || !isU32(durationInGlobalTimescale);\n const u32OrU64 = needsU64 ? u64 : u32;\n let matrix;\n if (trackData.type === \"video\") {\n const rotation = trackData.track.metadata.rotation;\n matrix = rotationMatrix(rotation ?? 0);\n } else {\n matrix = IDENTITY_MATRIX;\n }\n let flags = 2;\n if (trackData.track.metadata.disposition?.default !== false) {\n flags |= 1;\n }\n return fullBox(\"tkhd\", +needsU64, flags, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.track.id),\n u32(0),\n u32OrU64(durationInGlobalTimescale),\n Array(8).fill(0),\n u16(0),\n u16(trackData.track.id),\n fixed_8_8(trackData.type === \"audio\" ? 1 : 0),\n u16(0),\n matrixToBytes(matrix),\n fixed_16_16(trackData.type === \"video\" ? trackData.info.width : 0),\n fixed_16_16(trackData.type === \"video\" ? 
trackData.info.height : 0)\n ]);\n};\nvar mdia = (trackData, creationTime) => box(\"mdia\", undefined, [\n mdhd(trackData, creationTime),\n hdlr(true, TRACK_TYPE_TO_COMPONENT_SUBTYPE[trackData.type], TRACK_TYPE_TO_HANDLER_NAME[trackData.type]),\n minf(trackData)\n]);\nvar mdhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const localDuration = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, trackData.timescale);\n const needsU64 = !isU32(creationTime) || !isU32(localDuration);\n const u32OrU64 = needsU64 ? u64 : u32;\n return fullBox(\"mdhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.timescale),\n u32OrU64(localDuration),\n u16(getLanguageCodeInt(trackData.track.metadata.languageCode ?? UNDETERMINED_LANGUAGE)),\n u16(0)\n ]);\n};\nvar TRACK_TYPE_TO_COMPONENT_SUBTYPE = {\n video: \"vide\",\n audio: \"soun\",\n subtitle: \"text\"\n};\nvar TRACK_TYPE_TO_HANDLER_NAME = {\n video: \"MediabunnyVideoHandler\",\n audio: \"MediabunnySoundHandler\",\n subtitle: \"MediabunnyTextHandler\"\n};\nvar hdlr = (hasComponentType, handlerType, name, manufacturer = \"\\x00\\x00\\x00\\x00\") => fullBox(\"hdlr\", 0, 0, [\n hasComponentType ? 
ascii(\"mhlr\") : u32(0),\n ascii(handlerType),\n ascii(manufacturer),\n u32(0),\n u32(0),\n ascii(name, true)\n]);\nvar minf = (trackData) => box(\"minf\", undefined, [\n TRACK_TYPE_TO_HEADER_BOX[trackData.type](),\n dinf(),\n stbl(trackData)\n]);\nvar vmhd = () => fullBox(\"vmhd\", 0, 1, [\n u16(0),\n u16(0),\n u16(0),\n u16(0)\n]);\nvar smhd = () => fullBox(\"smhd\", 0, 0, [\n u16(0),\n u16(0)\n]);\nvar nmhd = () => fullBox(\"nmhd\", 0, 0);\nvar TRACK_TYPE_TO_HEADER_BOX = {\n video: vmhd,\n audio: smhd,\n subtitle: nmhd\n};\nvar dinf = () => box(\"dinf\", undefined, [\n dref()\n]);\nvar dref = () => fullBox(\"dref\", 0, 0, [\n u32(1)\n], [\n url()\n]);\nvar url = () => fullBox(\"url \", 0, 1);\nvar stbl = (trackData) => {\n const needsCtts = trackData.compositionTimeOffsetTable.length > 1 || trackData.compositionTimeOffsetTable.some((x) => x.sampleCompositionTimeOffset !== 0);\n return box(\"stbl\", undefined, [\n stsd(trackData),\n stts(trackData),\n needsCtts ? ctts(trackData) : null,\n needsCtts ? 
cslg(trackData) : null,\n stsc(trackData),\n stsz(trackData),\n stco(trackData),\n stss(trackData)\n ]);\n};\nvar stsd = (trackData) => {\n let sampleDescription;\n if (trackData.type === \"video\") {\n sampleDescription = videoSampleDescription(videoCodecToBoxName(trackData.track.source._codec, trackData.info.decoderConfig.codec), trackData);\n } else if (trackData.type === \"audio\") {\n const boxName = audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime);\n assert(boxName);\n sampleDescription = soundSampleDescription(boxName, trackData);\n } else if (trackData.type === \"subtitle\") {\n sampleDescription = subtitleSampleDescription(SUBTITLE_CODEC_TO_BOX_NAME[trackData.track.source._codec], trackData);\n }\n assert(sampleDescription);\n return fullBox(\"stsd\", 0, 0, [\n u32(1)\n ], [\n sampleDescription\n ]);\n};\nvar videoSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1),\n u16(0),\n u16(0),\n Array(12).fill(0),\n u16(trackData.info.width),\n u16(trackData.info.height),\n u32(4718592),\n u32(4718592),\n u32(0),\n u16(1),\n Array(32).fill(0),\n u16(24),\n i16(65535)\n], [\n VIDEO_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData),\n colorSpaceIsComplete(trackData.info.decoderConfig.colorSpace) ? colr(trackData) : null\n]);\nvar colr = (trackData) => box(\"colr\", [\n ascii(\"nclx\"),\n u16(COLOR_PRIMARIES_MAP[trackData.info.decoderConfig.colorSpace.primaries]),\n u16(TRANSFER_CHARACTERISTICS_MAP[trackData.info.decoderConfig.colorSpace.transfer]),\n u16(MATRIX_COEFFICIENTS_MAP[trackData.info.decoderConfig.colorSpace.matrix]),\n u8((trackData.info.decoderConfig.colorSpace.fullRange ? 
1 : 0) << 7)\n]);\nvar avcC = (trackData) => trackData.info.decoderConfig && box(\"avcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar hvcC = (trackData) => trackData.info.decoderConfig && box(\"hvcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar vpcC = (trackData) => {\n if (!trackData.info.decoderConfig) {\n return null;\n }\n const decoderConfig = trackData.info.decoderConfig;\n const parts = decoderConfig.codec.split(\".\");\n const profile = Number(parts[1]);\n const level = Number(parts[2]);\n const bitDepth = Number(parts[3]);\n const chromaSubsampling = parts[4] ? Number(parts[4]) : 1;\n const videoFullRangeFlag = parts[8] ? Number(parts[8]) : Number(decoderConfig.colorSpace?.fullRange ?? 0);\n const thirdByte = (bitDepth << 4) + (chromaSubsampling << 1) + videoFullRangeFlag;\n const colourPrimaries = parts[5] ? Number(parts[5]) : decoderConfig.colorSpace?.primaries ? COLOR_PRIMARIES_MAP[decoderConfig.colorSpace.primaries] : 2;\n const transferCharacteristics = parts[6] ? Number(parts[6]) : decoderConfig.colorSpace?.transfer ? TRANSFER_CHARACTERISTICS_MAP[decoderConfig.colorSpace.transfer] : 2;\n const matrixCoefficients = parts[7] ? Number(parts[7]) : decoderConfig.colorSpace?.matrix ? 
MATRIX_COEFFICIENTS_MAP[decoderConfig.colorSpace.matrix] : 2;\n return fullBox(\"vpcC\", 1, 0, [\n u8(profile),\n u8(level),\n u8(thirdByte),\n u8(colourPrimaries),\n u8(transferCharacteristics),\n u8(matrixCoefficients),\n u16(0)\n ]);\n};\nvar av1C = (trackData) => {\n return box(\"av1C\", generateAv1CodecConfigurationFromCodecString(trackData.info.decoderConfig.codec));\n};\nvar soundSampleDescription = (compressionType, trackData) => {\n let version = 0;\n let contents;\n let sampleSizeInBits = 16;\n if (PCM_AUDIO_CODECS.includes(trackData.track.source._codec)) {\n const codec = trackData.track.source._codec;\n const { sampleSize } = parsePcmCodec(codec);\n sampleSizeInBits = 8 * sampleSize;\n if (sampleSizeInBits > 16) {\n version = 1;\n }\n }\n if (version === 0) {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(sampleSizeInBits),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0)\n ];\n } else {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(Math.min(sampleSizeInBits, 16)),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0),\n u32(1),\n u32(sampleSizeInBits / 8),\n u32(trackData.info.numberOfChannels * sampleSizeInBits / 8),\n u32(2)\n ];\n }\n return box(compressionType, contents, [\n audioCodecToConfigurationBox(trackData.track.source._codec, trackData.muxer.isQuickTime)?.(trackData) ?? 
null\n ]);\n};\nvar esds = (trackData) => {\n let objectTypeIndication;\n switch (trackData.track.source._codec) {\n case \"aac\":\n {\n objectTypeIndication = 64;\n }\n ;\n break;\n case \"mp3\":\n {\n objectTypeIndication = 107;\n }\n ;\n break;\n case \"vorbis\":\n {\n objectTypeIndication = 221;\n }\n ;\n break;\n default:\n throw new Error(`Unhandled audio codec: ${trackData.track.source._codec}`);\n }\n let bytes2 = [\n ...u8(objectTypeIndication),\n ...u8(21),\n ...u24(0),\n ...u32(0),\n ...u32(0)\n ];\n if (trackData.info.decoderConfig.description) {\n const description = toUint8Array(trackData.info.decoderConfig.description);\n bytes2 = [\n ...bytes2,\n ...u8(5),\n ...variableUnsignedInt(description.byteLength),\n ...description\n ];\n }\n bytes2 = [\n ...u16(1),\n ...u8(0),\n ...u8(4),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2,\n ...u8(6),\n ...u8(1),\n ...u8(2)\n ];\n bytes2 = [\n ...u8(3),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2\n ];\n return fullBox(\"esds\", 0, 0, bytes2);\n};\nvar wave = (trackData) => {\n return box(\"wave\", undefined, [\n frma(trackData),\n enda(trackData),\n box(\"\\x00\\x00\\x00\\x00\")\n ]);\n};\nvar frma = (trackData) => {\n return box(\"frma\", [\n ascii(audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime))\n ]);\n};\nvar enda = (trackData) => {\n const { littleEndian } = parsePcmCodec(trackData.track.source._codec);\n return box(\"enda\", [\n u16(+littleEndian)\n ]);\n};\nvar dOps = (trackData) => {\n let outputChannelCount = trackData.info.numberOfChannels;\n let preSkip = 3840;\n let inputSampleRate = trackData.info.sampleRate;\n let outputGain = 0;\n let channelMappingFamily = 0;\n let channelMappingTable = new Uint8Array(0);\n const description = trackData.info.decoderConfig?.description;\n if (description) {\n assert(description.byteLength >= 18);\n const bytes2 = toUint8Array(description);\n const header = parseOpusIdentificationHeader(bytes2);\n outputChannelCount = 
header.outputChannelCount;\n preSkip = header.preSkip;\n inputSampleRate = header.inputSampleRate;\n outputGain = header.outputGain;\n channelMappingFamily = header.channelMappingFamily;\n if (header.channelMappingTable) {\n channelMappingTable = header.channelMappingTable;\n }\n }\n return box(\"dOps\", [\n u8(0),\n u8(outputChannelCount),\n u16(preSkip),\n u32(inputSampleRate),\n i16(outputGain),\n u8(channelMappingFamily),\n ...channelMappingTable\n ]);\n};\nvar dfLa = (trackData) => {\n const description = trackData.info.decoderConfig?.description;\n assert(description);\n const bytes2 = toUint8Array(description);\n return fullBox(\"dfLa\", 0, 0, [\n ...bytes2.subarray(4)\n ]);\n};\nvar pcmC = (trackData) => {\n const { littleEndian, sampleSize } = parsePcmCodec(trackData.track.source._codec);\n const formatFlags = +littleEndian;\n return fullBox(\"pcmC\", 0, 0, [\n u8(formatFlags),\n u8(8 * sampleSize)\n ]);\n};\nvar subtitleSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1)\n], [\n SUBTITLE_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData)\n]);\nvar vttC = (trackData) => box(\"vttC\", [\n ...textEncoder.encode(trackData.info.config.description)\n]);\nvar stts = (trackData) => {\n return fullBox(\"stts\", 0, 0, [\n u32(trackData.timeToSampleTable.length),\n trackData.timeToSampleTable.map((x) => [\n u32(x.sampleCount),\n u32(x.sampleDelta)\n ])\n ]);\n};\nvar stss = (trackData) => {\n if (trackData.samples.every((x) => x.type === \"key\"))\n return null;\n const keySamples = [...trackData.samples.entries()].filter(([, sample]) => sample.type === \"key\");\n return fullBox(\"stss\", 0, 0, [\n u32(keySamples.length),\n keySamples.map(([index]) => u32(index + 1))\n ]);\n};\nvar stsc = (trackData) => {\n return fullBox(\"stsc\", 0, 0, [\n u32(trackData.compactlyCodedChunkTable.length),\n trackData.compactlyCodedChunkTable.map((x) => [\n u32(x.firstChunk),\n u32(x.samplesPerChunk),\n u32(1)\n 
])\n ]);\n};\nvar stsz = (trackData) => {\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n const { sampleSize } = parsePcmCodec(trackData.track.source._codec);\n return fullBox(\"stsz\", 0, 0, [\n u32(sampleSize * trackData.info.numberOfChannels),\n u32(trackData.samples.reduce((acc, x) => acc + intoTimescale(x.duration, trackData.timescale), 0))\n ]);\n }\n return fullBox(\"stsz\", 0, 0, [\n u32(0),\n u32(trackData.samples.length),\n trackData.samples.map((x) => u32(x.size))\n ]);\n};\nvar stco = (trackData) => {\n if (trackData.finalizedChunks.length > 0 && last(trackData.finalizedChunks).offset >= 2 ** 32) {\n return fullBox(\"co64\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u64(x.offset))\n ]);\n }\n return fullBox(\"stco\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u32(x.offset))\n ]);\n};\nvar ctts = (trackData) => {\n return fullBox(\"ctts\", 1, 0, [\n u32(trackData.compositionTimeOffsetTable.length),\n trackData.compositionTimeOffsetTable.map((x) => [\n u32(x.sampleCount),\n i32(x.sampleCompositionTimeOffset)\n ])\n ]);\n};\nvar cslg = (trackData) => {\n let leastDecodeToDisplayDelta = Infinity;\n let greatestDecodeToDisplayDelta = -Infinity;\n let compositionStartTime = Infinity;\n let compositionEndTime = -Infinity;\n assert(trackData.compositionTimeOffsetTable.length > 0);\n assert(trackData.samples.length > 0);\n for (let i = 0;i < trackData.compositionTimeOffsetTable.length; i++) {\n const entry = trackData.compositionTimeOffsetTable[i];\n leastDecodeToDisplayDelta = Math.min(leastDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n greatestDecodeToDisplayDelta = Math.max(greatestDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n }\n for (let i = 0;i < trackData.samples.length; i++) {\n const sample = trackData.samples[i];\n compositionStartTime = Math.min(compositionStartTime, intoTimescale(sample.timestamp, 
trackData.timescale));\n compositionEndTime = Math.max(compositionEndTime, intoTimescale(sample.timestamp + sample.duration, trackData.timescale));\n }\n const compositionToDtsShift = Math.max(-leastDecodeToDisplayDelta, 0);\n if (compositionEndTime >= 2 ** 31) {\n return null;\n }\n return fullBox(\"cslg\", 0, 0, [\n i32(compositionToDtsShift),\n i32(leastDecodeToDisplayDelta),\n i32(greatestDecodeToDisplayDelta),\n i32(compositionStartTime),\n i32(compositionEndTime)\n ]);\n};\nvar mvex = (trackDatas) => {\n return box(\"mvex\", undefined, trackDatas.map(trex));\n};\nvar trex = (trackData) => {\n return fullBox(\"trex\", 0, 0, [\n u32(trackData.track.id),\n u32(1),\n u32(0),\n u32(0),\n u32(0)\n ]);\n};\nvar moof = (sequenceNumber, trackDatas) => {\n return box(\"moof\", undefined, [\n mfhd(sequenceNumber),\n ...trackDatas.map(traf)\n ]);\n};\nvar mfhd = (sequenceNumber) => {\n return fullBox(\"mfhd\", 0, 0, [\n u32(sequenceNumber)\n ]);\n};\nvar fragmentSampleFlags = (sample) => {\n let byte1 = 0;\n let byte2 = 0;\n const byte3 = 0;\n const byte4 = 0;\n const sampleIsDifferenceSample = sample.type === \"delta\";\n byte2 |= +sampleIsDifferenceSample;\n if (sampleIsDifferenceSample) {\n byte1 |= 1;\n } else {\n byte1 |= 2;\n }\n return byte1 << 24 | byte2 << 16 | byte3 << 8 | byte4;\n};\nvar traf = (trackData) => {\n return box(\"traf\", undefined, [\n tfhd(trackData),\n tfdt(trackData),\n trun(trackData)\n ]);\n};\nvar tfhd = (trackData) => {\n assert(trackData.currentChunk);\n let tfFlags = 0;\n tfFlags |= 8;\n tfFlags |= 16;\n tfFlags |= 32;\n tfFlags |= 131072;\n const referenceSample = trackData.currentChunk.samples[1] ?? 
trackData.currentChunk.samples[0];\n const referenceSampleInfo = {\n duration: referenceSample.timescaleUnitsToNextSample,\n size: referenceSample.size,\n flags: fragmentSampleFlags(referenceSample)\n };\n return fullBox(\"tfhd\", 0, tfFlags, [\n u32(trackData.track.id),\n u32(referenceSampleInfo.duration),\n u32(referenceSampleInfo.size),\n u32(referenceSampleInfo.flags)\n ]);\n};\nvar tfdt = (trackData) => {\n assert(trackData.currentChunk);\n return fullBox(\"tfdt\", 1, 0, [\n u64(intoTimescale(trackData.currentChunk.startTimestamp, trackData.timescale))\n ]);\n};\nvar trun = (trackData) => {\n assert(trackData.currentChunk);\n const allSampleDurations = trackData.currentChunk.samples.map((x) => x.timescaleUnitsToNextSample);\n const allSampleSizes = trackData.currentChunk.samples.map((x) => x.size);\n const allSampleFlags = trackData.currentChunk.samples.map(fragmentSampleFlags);\n const allSampleCompositionTimeOffsets = trackData.currentChunk.samples.map((x) => intoTimescale(x.timestamp - x.decodeTimestamp, trackData.timescale));\n const uniqueSampleDurations = new Set(allSampleDurations);\n const uniqueSampleSizes = new Set(allSampleSizes);\n const uniqueSampleFlags = new Set(allSampleFlags);\n const uniqueSampleCompositionTimeOffsets = new Set(allSampleCompositionTimeOffsets);\n const firstSampleFlagsPresent = uniqueSampleFlags.size === 2 && allSampleFlags[0] !== allSampleFlags[1];\n const sampleDurationPresent = uniqueSampleDurations.size > 1;\n const sampleSizePresent = uniqueSampleSizes.size > 1;\n const sampleFlagsPresent = !firstSampleFlagsPresent && uniqueSampleFlags.size > 1;\n const sampleCompositionTimeOffsetsPresent = uniqueSampleCompositionTimeOffsets.size > 1 || [...uniqueSampleCompositionTimeOffsets].some((x) => x !== 0);\n let flags = 0;\n flags |= 1;\n flags |= 4 * +firstSampleFlagsPresent;\n flags |= 256 * +sampleDurationPresent;\n flags |= 512 * +sampleSizePresent;\n flags |= 1024 * +sampleFlagsPresent;\n flags |= 2048 * 
+sampleCompositionTimeOffsetsPresent;\n return fullBox(\"trun\", 1, flags, [\n u32(trackData.currentChunk.samples.length),\n u32(trackData.currentChunk.offset - trackData.currentChunk.moofOffset || 0),\n firstSampleFlagsPresent ? u32(allSampleFlags[0]) : [],\n trackData.currentChunk.samples.map((_, i) => [\n sampleDurationPresent ? u32(allSampleDurations[i]) : [],\n sampleSizePresent ? u32(allSampleSizes[i]) : [],\n sampleFlagsPresent ? u32(allSampleFlags[i]) : [],\n sampleCompositionTimeOffsetsPresent ? i32(allSampleCompositionTimeOffsets[i]) : []\n ])\n ]);\n};\nvar mfra = (trackDatas) => {\n return box(\"mfra\", undefined, [\n ...trackDatas.map(tfra),\n mfro()\n ]);\n};\nvar tfra = (trackData, trackIndex) => {\n const version = 1;\n return fullBox(\"tfra\", version, 0, [\n u32(trackData.track.id),\n u32(63),\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((chunk) => [\n u64(intoTimescale(chunk.samples[0].timestamp, trackData.timescale)),\n u64(chunk.moofOffset),\n u32(trackIndex + 1),\n u32(1),\n u32(1)\n ])\n ]);\n};\nvar mfro = () => {\n return fullBox(\"mfro\", 0, 0, [\n u32(0)\n ]);\n};\nvar vtte = () => box(\"vtte\");\nvar vttc = (payload, timestamp, identifier, settings, sourceId) => box(\"vttc\", undefined, [\n sourceId !== null ? box(\"vsid\", [i32(sourceId)]) : null,\n identifier !== null ? box(\"iden\", [...textEncoder.encode(identifier)]) : null,\n timestamp !== null ? box(\"ctim\", [...textEncoder.encode(formatSubtitleTimestamp(timestamp))]) : null,\n settings !== null ? box(\"sttg\", [...textEncoder.encode(settings)]) : null,\n box(\"payl\", [...textEncoder.encode(payload)])\n]);\nvar vtta = (notes) => box(\"vtta\", [...textEncoder.encode(notes)]);\nvar udta = (muxer) => {\n const boxes = [];\n const metadataFormat = muxer.format._options.metadataFormat ?? 
\"auto\";\n const metadataTags = muxer.output._metadataTags;\n if (metadataFormat === \"mdir\" || metadataFormat === \"auto\" && !muxer.isQuickTime) {\n const metaBox = metaMdir(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"mdta\") {\n const metaBox = metaMdta(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"udta\" || metadataFormat === \"auto\" && muxer.isQuickTime) {\n addQuickTimeMetadataTagBoxes(boxes, muxer.output._metadataTags);\n }\n if (boxes.length === 0) {\n return null;\n }\n return box(\"udta\", undefined, boxes);\n};\nvar addQuickTimeMetadataTagBoxes = (boxes, tags) => {\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9nam\", value));\n }\n ;\n break;\n case \"description\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9des\", value));\n }\n ;\n break;\n case \"artist\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9ART\", value));\n }\n ;\n break;\n case \"album\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9alb\", value));\n }\n ;\n break;\n case \"albumArtist\":\n {\n boxes.push(metadataTagStringBoxShort(\"albr\", value));\n }\n ;\n break;\n case \"genre\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9gen\", value));\n }\n ;\n break;\n case \"date\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9day\", value.toISOString().slice(0, 10)));\n }\n ;\n break;\n case \"comment\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9cmt\", value));\n }\n ;\n break;\n case \"lyrics\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9lyr\", value));\n }\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n case \"discNumber\":\n case \"discsTotal\":\n case \"trackNumber\":\n case \"tracksTotal\":\n case \"images\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == 
null || key.length !== 4 || boxes.some((x) => x.type === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n boxes.push(metadataTagStringBoxShort(key, value));\n } else if (value instanceof Uint8Array) {\n boxes.push(box(key, Array.from(value)));\n }\n }\n }\n};\nvar metadataTagStringBoxShort = (name, value) => {\n const encoded = textEncoder.encode(value);\n return box(name, [\n u16(encoded.length),\n u16(getLanguageCodeInt(\"und\")),\n Array.from(encoded)\n ]);\n};\nvar DATA_BOX_MIME_TYPE_MAP = {\n \"image/jpeg\": 13,\n \"image/png\": 14,\n \"image/bmp\": 27\n};\nvar generateMetadataPairs = (tags, isMdta) => {\n const pairs = [];\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n pairs.push({ key: isMdta ? \"title\" : \"\u00A9nam\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"description\":\n {\n pairs.push({ key: isMdta ? \"description\" : \"\u00A9des\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"artist\":\n {\n pairs.push({ key: isMdta ? \"artist\" : \"\u00A9ART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"album\":\n {\n pairs.push({ key: isMdta ? \"album\" : \"\u00A9alb\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"albumArtist\":\n {\n pairs.push({ key: isMdta ? \"album_artist\" : \"aART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"comment\":\n {\n pairs.push({ key: isMdta ? \"comment\" : \"\u00A9cmt\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"genre\":\n {\n pairs.push({ key: isMdta ? \"genre\" : \"\u00A9gen\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"lyrics\":\n {\n pairs.push({ key: isMdta ? \"lyrics\" : \"\u00A9lyr\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"date\":\n {\n pairs.push({\n key: isMdta ? 
\"date\" : \"\u00A9day\",\n value: dataStringBoxLong(value.toISOString().slice(0, 10))\n });\n }\n ;\n break;\n case \"images\":\n {\n for (const image of value) {\n if (image.kind !== \"coverFront\") {\n continue;\n }\n pairs.push({ key: \"covr\", value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[image.mimeType] ?? 0),\n u32(0),\n Array.from(image.data)\n ]) });\n }\n }\n ;\n break;\n case \"trackNumber\":\n {\n if (isMdta) {\n const string = tags.tracksTotal !== undefined ? `${value}/${tags.tracksTotal}` : value.toString();\n pairs.push({ key: \"track\", value: dataStringBoxLong(string) });\n } else {\n pairs.push({ key: \"trkn\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.tracksTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"discNumber\":\n {\n if (!isMdta) {\n pairs.push({ key: \"disc\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.discsTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"tracksTotal\":\n case \"discsTotal\":\n {}\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == null || !isMdta && key.length !== 4 || pairs.some((x) => x.key === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n pairs.push({ key, value: dataStringBoxLong(value) });\n } else if (value instanceof Uint8Array) {\n pairs.push({ key, value: box(\"data\", [\n u32(0),\n u32(0),\n Array.from(value)\n ]) });\n } else if (value instanceof RichImageData) {\n pairs.push({ key, value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[value.mimeType] ?? 
0),\n u32(0),\n Array.from(value.data)\n ]) });\n }\n }\n }\n return pairs;\n};\nvar metaMdir = (tags) => {\n const pairs = generateMetadataPairs(tags, false);\n if (pairs.length === 0) {\n return null;\n }\n return fullBox(\"meta\", 0, 0, undefined, [\n hdlr(false, \"mdir\", \"\", \"appl\"),\n box(\"ilst\", undefined, pairs.map((pair) => box(pair.key, undefined, [pair.value])))\n ]);\n};\nvar metaMdta = (tags) => {\n const pairs = generateMetadataPairs(tags, true);\n if (pairs.length === 0) {\n return null;\n }\n return box(\"meta\", undefined, [\n hdlr(false, \"mdta\", \"\"),\n fullBox(\"keys\", 0, 0, [\n u32(pairs.length)\n ], pairs.map((pair) => box(\"mdta\", [\n ...textEncoder.encode(pair.key)\n ]))),\n box(\"ilst\", undefined, pairs.map((pair, i) => {\n const boxName = String.fromCharCode(...u32(i + 1));\n return box(boxName, undefined, [pair.value]);\n }))\n ]);\n};\nvar dataStringBoxLong = (value) => {\n return box(\"data\", [\n u32(1),\n u32(0),\n ...textEncoder.encode(value)\n ]);\n};\nvar videoCodecToBoxName = (codec, fullCodecString) => {\n switch (codec) {\n case \"avc\":\n return fullCodecString.startsWith(\"avc3\") ? 
\"avc3\" : \"avc1\";\n case \"hevc\":\n return \"hvc1\";\n case \"vp8\":\n return \"vp08\";\n case \"vp9\":\n return \"vp09\";\n case \"av1\":\n return \"av01\";\n }\n};\nvar VIDEO_CODEC_TO_CONFIGURATION_BOX = {\n avc: avcC,\n hevc: hvcC,\n vp8: vpcC,\n vp9: vpcC,\n av1: av1C\n};\nvar audioCodecToBoxName = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return \"mp4a\";\n case \"mp3\":\n return \"mp4a\";\n case \"opus\":\n return \"Opus\";\n case \"vorbis\":\n return \"mp4a\";\n case \"flac\":\n return \"fLaC\";\n case \"ulaw\":\n return \"ulaw\";\n case \"alaw\":\n return \"alaw\";\n case \"pcm-u8\":\n return \"raw \";\n case \"pcm-s8\":\n return \"sowt\";\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s16\":\n return \"sowt\";\n case \"pcm-s16be\":\n return \"twos\";\n case \"pcm-s24\":\n return \"in24\";\n case \"pcm-s24be\":\n return \"in24\";\n case \"pcm-s32\":\n return \"in32\";\n case \"pcm-s32be\":\n return \"in32\";\n case \"pcm-f32\":\n return \"fl32\";\n case \"pcm-f32be\":\n return \"fl32\";\n case \"pcm-f64\":\n return \"fl64\";\n case \"pcm-f64be\":\n return \"fl64\";\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return \"ipcm\";\n case \"pcm-s16be\":\n return \"ipcm\";\n case \"pcm-s24\":\n return \"ipcm\";\n case \"pcm-s24be\":\n return \"ipcm\";\n case \"pcm-s32\":\n return \"ipcm\";\n case \"pcm-s32be\":\n return \"ipcm\";\n case \"pcm-f32\":\n return \"fpcm\";\n case \"pcm-f32be\":\n return \"fpcm\";\n case \"pcm-f64\":\n return \"fpcm\";\n case \"pcm-f64be\":\n return \"fpcm\";\n }\n }\n};\nvar audioCodecToConfigurationBox = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return esds;\n case \"mp3\":\n return esds;\n case \"opus\":\n return dOps;\n case \"vorbis\":\n return esds;\n case \"flac\":\n return dfLa;\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s24\":\n return wave;\n case \"pcm-s24be\":\n return wave;\n case \"pcm-s32\":\n return wave;\n case \"pcm-s32be\":\n return 
wave;\n case \"pcm-f32\":\n return wave;\n case \"pcm-f32be\":\n return wave;\n case \"pcm-f64\":\n return wave;\n case \"pcm-f64be\":\n return wave;\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return pcmC;\n case \"pcm-s16be\":\n return pcmC;\n case \"pcm-s24\":\n return pcmC;\n case \"pcm-s24be\":\n return pcmC;\n case \"pcm-s32\":\n return pcmC;\n case \"pcm-s32be\":\n return pcmC;\n case \"pcm-f32\":\n return pcmC;\n case \"pcm-f32be\":\n return pcmC;\n case \"pcm-f64\":\n return pcmC;\n case \"pcm-f64be\":\n return pcmC;\n }\n }\n return null;\n};\nvar SUBTITLE_CODEC_TO_BOX_NAME = {\n webvtt: \"wvtt\"\n};\nvar SUBTITLE_CODEC_TO_CONFIGURATION_BOX = {\n webvtt: vttC\n};\nvar getLanguageCodeInt = (code) => {\n assert(code.length === 3);\n let language = 0;\n for (let i = 0;i < 3; i++) {\n language <<= 5;\n language += code.charCodeAt(i) - 96;\n }\n return language;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/writer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Writer {\n constructor() {\n this.ensureMonotonicity = false;\n this.trackedWrites = null;\n this.trackedStart = -1;\n this.trackedEnd = -1;\n }\n start() {}\n maybeTrackWrites(data) {\n if (!this.trackedWrites) {\n return;\n }\n let pos = this.getPos();\n if (pos < this.trackedStart) {\n if (pos + data.byteLength <= this.trackedStart) {\n return;\n }\n data = data.subarray(this.trackedStart - pos);\n pos = 0;\n }\n const neededSize = pos + data.byteLength - this.trackedStart;\n let newLength = this.trackedWrites.byteLength;\n while (newLength < neededSize) {\n newLength *= 2;\n }\n if (newLength !== this.trackedWrites.byteLength) {\n const copy = new Uint8Array(newLength);\n copy.set(this.trackedWrites, 0);\n this.trackedWrites = copy;\n }\n this.trackedWrites.set(data, pos - this.trackedStart);\n this.trackedEnd = Math.max(this.trackedEnd, pos + data.byteLength);\n }\n startTrackingWrites() {\n this.trackedWrites = new Uint8Array(2 ** 10);\n this.trackedStart = this.getPos();\n this.trackedEnd = this.trackedStart;\n }\n stopTrackingWrites() {\n if (!this.trackedWrites) {\n throw new Error(\"Internal error: Can't get tracked writes since nothing was tracked.\");\n }\n const slice = this.trackedWrites.subarray(0, this.trackedEnd - this.trackedStart);\n const result = {\n data: slice,\n start: this.trackedStart,\n end: this.trackedEnd\n };\n this.trackedWrites = null;\n return result;\n }\n}\nvar ARRAY_BUFFER_INITIAL_SIZE = 2 ** 16;\nvar ARRAY_BUFFER_MAX_SIZE = 2 ** 32;\n\nclass BufferTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.maxPos = 0;\n this.target = target;\n this.supportsResize = \"resize\" in new ArrayBuffer(0);\n if (this.supportsResize) {\n try {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE, { maxByteLength: ARRAY_BUFFER_MAX_SIZE });\n } catch {\n this.buffer = new 
ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n this.supportsResize = false;\n }\n } else {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n }\n this.bytes = new Uint8Array(this.buffer);\n }\n ensureSize(size) {\n let newLength = this.buffer.byteLength;\n while (newLength < size)\n newLength *= 2;\n if (newLength === this.buffer.byteLength)\n return;\n if (newLength > ARRAY_BUFFER_MAX_SIZE) {\n throw new Error(`ArrayBuffer exceeded maximum size of ${ARRAY_BUFFER_MAX_SIZE} bytes. Please consider using another` + ` target.`);\n }\n if (this.supportsResize) {\n this.buffer.resize(newLength);\n } else {\n const newBuffer = new ArrayBuffer(newLength);\n const newBytes = new Uint8Array(newBuffer);\n newBytes.set(this.bytes, 0);\n this.buffer = newBuffer;\n this.bytes = newBytes;\n }\n }\n write(data) {\n this.maybeTrackWrites(data);\n this.ensureSize(this.pos + data.byteLength);\n this.bytes.set(data, this.pos);\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.maxPos = Math.max(this.maxPos, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {}\n async finalize() {\n this.ensureSize(this.pos);\n this.target.buffer = this.buffer.slice(0, Math.max(this.maxPos, this.pos));\n }\n async close() {}\n getSlice(start, end) {\n return this.bytes.slice(start, end);\n }\n}\nvar DEFAULT_CHUNK_SIZE = 2 ** 24;\nvar MAX_CHUNKS_AT_ONCE = 2;\n\nclass StreamTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.sections = [];\n this.lastWriteEnd = 0;\n this.lastFlushEnd = 0;\n this.writer = null;\n this.chunks = [];\n this.target = target;\n this.chunked = target._options.chunked ?? false;\n this.chunkSize = target._options.chunkSize ?? 
DEFAULT_CHUNK_SIZE;\n }\n start() {\n this.writer = this.target._writable.getWriter();\n }\n write(data) {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n this.maybeTrackWrites(data);\n this.sections.push({\n data: data.slice(),\n start: this.pos\n });\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.lastWriteEnd = Math.max(this.lastWriteEnd, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n assert(this.writer);\n if (this.sections.length === 0)\n return;\n const chunks = [];\n const sorted = [...this.sections].sort((a, b) => a.start - b.start);\n chunks.push({\n start: sorted[0].start,\n size: sorted[0].data.byteLength\n });\n for (let i = 1;i < sorted.length; i++) {\n const lastChunk = chunks[chunks.length - 1];\n const section = sorted[i];\n if (section.start <= lastChunk.start + lastChunk.size) {\n lastChunk.size = Math.max(lastChunk.size, section.start + section.data.byteLength - lastChunk.start);\n } else {\n chunks.push({\n start: section.start,\n size: section.data.byteLength\n });\n }\n }\n for (const chunk of chunks) {\n chunk.data = new Uint8Array(chunk.size);\n for (const section of this.sections) {\n if (chunk.start <= section.start && section.start < chunk.start + chunk.size) {\n chunk.data.set(section.data, section.start - chunk.start);\n }\n }\n if (this.writer.desiredSize !== null && this.writer.desiredSize <= 0) {\n await this.writer.ready;\n }\n if (this.chunked) {\n this.writeDataIntoChunks(chunk.data, chunk.start);\n this.tryToFlushChunks();\n } else {\n if (this.ensureMonotonicity && chunk.start !== 
this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data,\n position: chunk.start\n });\n this.lastFlushEnd = chunk.start + chunk.data.byteLength;\n }\n }\n this.sections.length = 0;\n }\n writeDataIntoChunks(data, position) {\n let chunkIndex = this.chunks.findIndex((x) => x.start <= position && position < x.start + this.chunkSize);\n if (chunkIndex === -1)\n chunkIndex = this.createChunk(position);\n const chunk = this.chunks[chunkIndex];\n const relativePosition = position - chunk.start;\n const toWrite = data.subarray(0, Math.min(this.chunkSize - relativePosition, data.byteLength));\n chunk.data.set(toWrite, relativePosition);\n const section = {\n start: relativePosition,\n end: relativePosition + toWrite.byteLength\n };\n this.insertSectionIntoChunk(chunk, section);\n if (chunk.written[0].start === 0 && chunk.written[0].end === this.chunkSize) {\n chunk.shouldFlush = true;\n }\n if (this.chunks.length > MAX_CHUNKS_AT_ONCE) {\n for (let i = 0;i < this.chunks.length - 1; i++) {\n this.chunks[i].shouldFlush = true;\n }\n this.tryToFlushChunks();\n }\n if (toWrite.byteLength < data.byteLength) {\n this.writeDataIntoChunks(data.subarray(toWrite.byteLength), position + toWrite.byteLength);\n }\n }\n insertSectionIntoChunk(chunk, section) {\n let low = 0;\n let high = chunk.written.length - 1;\n let index = -1;\n while (low <= high) {\n const mid = Math.floor(low + (high - low + 1) / 2);\n if (chunk.written[mid].start <= section.start) {\n low = mid + 1;\n index = mid;\n } else {\n high = mid - 1;\n }\n }\n chunk.written.splice(index + 1, 0, section);\n if (index === -1 || chunk.written[index].end < section.start)\n index++;\n while (index < chunk.written.length - 1 && chunk.written[index].end >= chunk.written[index + 1].start) {\n chunk.written[index].end = Math.max(chunk.written[index].end, chunk.written[index + 1].end);\n chunk.written.splice(index + 1, 1);\n }\n }\n 
createChunk(includesPosition) {\n const start = Math.floor(includesPosition / this.chunkSize) * this.chunkSize;\n const chunk = {\n start,\n data: new Uint8Array(this.chunkSize),\n written: [],\n shouldFlush: false\n };\n this.chunks.push(chunk);\n this.chunks.sort((a, b) => a.start - b.start);\n return this.chunks.indexOf(chunk);\n }\n tryToFlushChunks(force = false) {\n assert(this.writer);\n for (let i = 0;i < this.chunks.length; i++) {\n const chunk = this.chunks[i];\n if (!chunk.shouldFlush && !force)\n continue;\n for (const section of chunk.written) {\n const position = chunk.start + section.start;\n if (this.ensureMonotonicity && position !== this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data.subarray(section.start, section.end),\n position\n });\n this.lastFlushEnd = chunk.start + section.end;\n }\n this.chunks.splice(i--, 1);\n }\n }\n finalize() {\n if (this.chunked) {\n this.tryToFlushChunks(true);\n }\n assert(this.writer);\n return this.writer.close();\n }\n async close() {\n return this.writer?.close();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/target.js\nvar nodeAlias = (() => ({}));\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass Target {\n constructor() {\n this._output = null;\n this.onwrite = null;\n }\n}\n\nclass BufferTarget extends Target {\n constructor() {\n super(...arguments);\n this.buffer = null;\n }\n _createWriter() {\n return new BufferTargetWriter(this);\n }\n}\n\nclass StreamTarget extends Target {\n constructor(writable, options = {}) {\n super();\n if (!(writable instanceof WritableStream)) {\n throw new TypeError(\"StreamTarget requires a WritableStream instance.\");\n }\n if (options != null && typeof options !== \"object\") {\n throw new TypeError(\"StreamTarget options, when provided, must be an object.\");\n }\n if (options.chunked !== undefined && typeof options.chunked !== \"boolean\") {\n throw new TypeError(\"options.chunked, when provided, must be a boolean.\");\n }\n if (options.chunkSize !== undefined && (!Number.isInteger(options.chunkSize) || options.chunkSize < 1024)) {\n throw new TypeError(\"options.chunkSize, when provided, must be an integer and not smaller than 1024.\");\n }\n this._writable = writable;\n this._options = options;\n }\n _createWriter() {\n return new StreamTargetWriter(this);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-muxer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar GLOBAL_TIMESCALE = 1000;\nvar TIMESTAMP_OFFSET = 2082844800;\nvar getTrackMetadata = (trackData) => {\n const metadata = {};\n const track = trackData.track;\n if (track.metadata.name !== undefined) {\n metadata.name = track.metadata.name;\n }\n return metadata;\n};\nvar intoTimescale = (timeInSeconds, timescale, round = true) => {\n const value = timeInSeconds * timescale;\n return round ? Math.round(value) : value;\n};\n\nclass IsobmffMuxer extends Muxer {\n constructor(output, format) {\n super(output);\n this.auxTarget = new BufferTarget;\n this.auxWriter = this.auxTarget._createWriter();\n this.auxBoxWriter = new IsobmffBoxWriter(this.auxWriter);\n this.mdat = null;\n this.ftypSize = null;\n this.trackDatas = [];\n this.allTracksKnown = promiseWithResolvers();\n this.creationTime = Math.floor(Date.now() / 1000) + TIMESTAMP_OFFSET;\n this.finalizedChunks = [];\n this.nextFragmentNumber = 1;\n this.maxWrittenTimestamp = -Infinity;\n this.format = format;\n this.writer = output._writer;\n this.boxWriter = new IsobmffBoxWriter(this.writer);\n this.isQuickTime = format instanceof MovOutputFormat;\n const fastStartDefault = this.writer instanceof BufferTargetWriter ? \"in-memory\" : false;\n this.fastStart = format._options.fastStart ?? fastStartDefault;\n this.isFragmented = this.fastStart === \"fragmented\";\n if (this.fastStart === \"in-memory\" || this.isFragmented) {\n this.writer.ensureMonotonicity = true;\n }\n this.minimumFragmentDuration = format._options.minimumFragmentDuration ?? 
1;\n }\n async start() {\n const release = await this.mutex.acquire();\n const holdsAvc = this.output._tracks.some((x) => x.type === \"video\" && x.source._codec === \"avc\");\n {\n if (this.format._options.onFtyp) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(ftyp({\n isQuickTime: this.isQuickTime,\n holdsAvc,\n fragmented: this.isFragmented\n }));\n if (this.format._options.onFtyp) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onFtyp(data, start);\n }\n }\n this.ftypSize = this.writer.getPos();\n if (this.fastStart === \"in-memory\") {} else if (this.fastStart === \"reserve\") {\n for (const track of this.output._tracks) {\n if (track.metadata.maximumPacketCount === undefined) {\n throw new Error(\"All tracks must specify maximumPacketCount in their metadata when using\" + \" fastStart: 'reserve'.\");\n }\n }\n } else if (this.isFragmented) {} else {\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n }\n await this.writer.flush();\n release();\n }\n allTracksAreKnown() {\n for (const track of this.output._tracks) {\n if (!track.source._closed && !this.trackDatas.some((x) => x.track === track)) {\n return false;\n }\n }\n return true;\n }\n async getMimeType() {\n await this.allTracksKnown.promise;\n const codecStrings = this.trackDatas.map((trackData) => {\n if (trackData.type === \"video\") {\n return trackData.info.decoderConfig.codec;\n } else if (trackData.type === \"audio\") {\n return trackData.info.decoderConfig.codec;\n } else {\n const map = {\n webvtt: \"wvtt\"\n };\n return map[trackData.track.source._codec];\n }\n });\n return buildIsobmffMimeType({\n isQuickTime: this.isQuickTime,\n hasVideo: this.trackDatas.some((x) => x.type === \"video\"),\n hasAudio: this.trackDatas.some((x) => x.type === \"audio\"),\n codecStrings\n });\n }\n getVideoTrackData(track, packet, meta) {\n const existingTrackData = 
this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateVideoChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const decoderConfig = { ...meta.decoderConfig };\n assert(decoderConfig.codedWidth !== undefined);\n assert(decoderConfig.codedHeight !== undefined);\n let requiresAnnexBTransformation = false;\n if (track.source._codec === \"avc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractAvcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an AVCDecoderConfigurationRecord from the AVC packet. Make sure the packets are\" + \" in Annex B format (as specified in ITU-T-REC-H.264) when not providing a description, or\" + \" provide a description (must be an AVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in AVCC format.\");\n }\n decoderConfig.description = serializeAvcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n } else if (track.source._codec === \"hevc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractHevcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an HEVCDecoderConfigurationRecord from the HEVC packet. Make sure the packets\" + \" are in Annex B format (as specified in ITU-T-REC-H.265) when not providing a description, or\" + \" provide a description (must be an HEVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in HEVC format.\");\n }\n decoderConfig.description = serializeHevcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n }\n const timescale = computeRationalApproximation(1 / (track.metadata.frameRate ?? 
57600), 1e6).denominator;\n const newTrackData = {\n muxer: this,\n track,\n type: \"video\",\n info: {\n width: decoderConfig.codedWidth,\n height: decoderConfig.codedHeight,\n decoderConfig,\n requiresAnnexBTransformation\n },\n timescale,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getAudioTrackData(track, packet, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateAudioChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const decoderConfig = { ...meta.decoderConfig };\n let requiresAdtsStripping = false;\n if (track.source._codec === \"aac\" && !decoderConfig.description) {\n const adtsFrame = readAdtsFrameHeader(FileSlice.tempFromBytes(packet.data));\n if (!adtsFrame) {\n throw new Error(\"Couldn't parse ADTS header from the AAC packet. 
Make sure the packets are in ADTS format\" + \" (as specified in ISO 13818-7) when not providing a description, or provide a description\" + \" (must be an AudioSpecificConfig as specified in ISO 14496-3) and ensure the packets\" + \" are raw AAC data.\");\n }\n const sampleRate = aacFrequencyTable[adtsFrame.samplingFrequencyIndex];\n const numberOfChannels = aacChannelMap[adtsFrame.channelConfiguration];\n if (sampleRate === undefined || numberOfChannels === undefined) {\n throw new Error(\"Invalid ADTS frame header.\");\n }\n decoderConfig.description = buildAacAudioSpecificConfig({\n objectType: adtsFrame.objectType,\n sampleRate,\n numberOfChannels\n });\n requiresAdtsStripping = true;\n }\n const newTrackData = {\n muxer: this,\n track,\n type: \"audio\",\n info: {\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate,\n decoderConfig,\n requiresPcmTransformation: !this.isFragmented && PCM_AUDIO_CODECS.includes(track.source._codec),\n requiresAdtsStripping\n },\n timescale: meta.decoderConfig.sampleRate,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getSubtitleTrackData(track, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateSubtitleMetadata(meta);\n assert(meta);\n assert(meta.config);\n const newTrackData = {\n muxer: this,\n track,\n type: \"subtitle\",\n info: {\n config: meta.config\n },\n timescale: 1000,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n 
compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: [],\n lastCueEndTimestamp: 0,\n cueQueue: [],\n nextSourceId: 0,\n cueToSourceId: new WeakMap\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n async addEncodedVideoPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getVideoTrackData(track, packet, meta);\n let packetData = packet.data;\n if (trackData.info.requiresAnnexBTransformation) {\n const nalUnits = [...iterateNalUnitsInAnnexB(packetData)].map((loc) => packetData.subarray(loc.offset, loc.offset + loc.length));\n if (nalUnits.length === 0) {\n throw new Error(\"Failed to transform packet data. Make sure all packets are provided in Annex B format, as\" + \" specified in ITU-T-REC-H.264 and ITU-T-REC-H.265.\");\n }\n packetData = concatNalUnitsInLengthPrefixed(nalUnits, 4);\n }\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async addEncodedAudioPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getAudioTrackData(track, packet, meta);\n let packetData = packet.data;\n if (trackData.info.requiresAdtsStripping) {\n const adtsFrame = readAdtsFrameHeader(FileSlice.tempFromBytes(packetData));\n if (!adtsFrame) {\n throw new Error(\"Expected ADTS frame, didn't get one.\");\n }\n const headerLength = adtsFrame.crcCheck === null ? 
MIN_ADTS_FRAME_HEADER_SIZE : MAX_ADTS_FRAME_HEADER_SIZE;\n packetData = packetData.subarray(headerLength);\n }\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);\n if (trackData.info.requiresPcmTransformation) {\n await this.maybePadWithSilence(trackData, timestamp);\n }\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async maybePadWithSilence(trackData, untilTimestamp) {\n const lastSample = last(trackData.samples);\n const lastEndTimestamp = lastSample ? lastSample.timestamp + lastSample.duration : 0;\n const delta = untilTimestamp - lastEndTimestamp;\n const deltaInTimescale = intoTimescale(delta, trackData.timescale);\n if (deltaInTimescale > 0) {\n const { sampleSize, silentValue } = parsePcmCodec(trackData.info.decoderConfig.codec);\n const samplesNeeded = deltaInTimescale * trackData.info.numberOfChannels;\n const data = new Uint8Array(sampleSize * samplesNeeded).fill(silentValue);\n const paddingSample = this.createSampleForTrack(trackData, new Uint8Array(data.buffer), lastEndTimestamp, delta, \"key\");\n await this.registerSample(trackData, paddingSample);\n }\n }\n async addSubtitleCue(track, cue, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getSubtitleTrackData(track, meta);\n this.validateAndNormalizeTimestamp(trackData.track, cue.timestamp, true);\n if (track.source._codec === \"webvtt\") {\n trackData.cueQueue.push(cue);\n await this.processWebVTTCues(trackData, cue.timestamp);\n } else {}\n } finally {\n release();\n }\n }\n async processWebVTTCues(trackData, until) {\n while (trackData.cueQueue.length > 0) {\n const timestamps = new Set([]);\n for (const cue of trackData.cueQueue) {\n assert(cue.timestamp <= until);\n assert(trackData.lastCueEndTimestamp <= cue.timestamp + 
cue.duration);\n timestamps.add(Math.max(cue.timestamp, trackData.lastCueEndTimestamp));\n timestamps.add(cue.timestamp + cue.duration);\n }\n const sortedTimestamps = [...timestamps].sort((a, b) => a - b);\n const sampleStart = sortedTimestamps[0];\n const sampleEnd = sortedTimestamps[1] ?? sampleStart;\n if (until < sampleEnd) {\n break;\n }\n if (trackData.lastCueEndTimestamp < sampleStart) {\n this.auxWriter.seek(0);\n const box2 = vtte();\n this.auxBoxWriter.writeBox(box2);\n const body2 = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample2 = this.createSampleForTrack(trackData, body2, trackData.lastCueEndTimestamp, sampleStart - trackData.lastCueEndTimestamp, \"key\");\n await this.registerSample(trackData, sample2);\n trackData.lastCueEndTimestamp = sampleStart;\n }\n this.auxWriter.seek(0);\n for (let i = 0;i < trackData.cueQueue.length; i++) {\n const cue = trackData.cueQueue[i];\n if (cue.timestamp >= sampleEnd) {\n break;\n }\n inlineTimestampRegex.lastIndex = 0;\n const containsTimestamp = inlineTimestampRegex.test(cue.text);\n const endTimestamp = cue.timestamp + cue.duration;\n let sourceId = trackData.cueToSourceId.get(cue);\n if (sourceId === undefined && sampleEnd < endTimestamp) {\n sourceId = trackData.nextSourceId++;\n trackData.cueToSourceId.set(cue, sourceId);\n }\n if (cue.notes) {\n const box3 = vtta(cue.notes);\n this.auxBoxWriter.writeBox(box3);\n }\n const box2 = vttc(cue.text, containsTimestamp ? sampleStart : null, cue.identifier ?? null, cue.settings ?? null, sourceId ?? 
null);\n this.auxBoxWriter.writeBox(box2);\n if (endTimestamp === sampleEnd) {\n trackData.cueQueue.splice(i--, 1);\n }\n }\n const body = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample = this.createSampleForTrack(trackData, body, sampleStart, sampleEnd - sampleStart, \"key\");\n await this.registerSample(trackData, sample);\n trackData.lastCueEndTimestamp = sampleEnd;\n }\n }\n createSampleForTrack(trackData, data, timestamp, duration, type) {\n const sample = {\n timestamp,\n decodeTimestamp: timestamp,\n duration,\n data,\n size: data.byteLength,\n type,\n timescaleUnitsToNextSample: intoTimescale(duration, trackData.timescale)\n };\n return sample;\n }\n processTimestamps(trackData, nextSample) {\n if (trackData.timestampProcessingQueue.length === 0) {\n return;\n }\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n let totalDuration = 0;\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n const duration = intoTimescale(sample.duration, trackData.timescale);\n totalDuration += duration;\n }\n if (trackData.timeToSampleTable.length === 0) {\n trackData.timeToSampleTable.push({\n sampleCount: totalDuration,\n sampleDelta: 1\n });\n } else {\n const lastEntry = last(trackData.timeToSampleTable);\n lastEntry.sampleCount += totalDuration;\n }\n trackData.timestampProcessingQueue.length = 0;\n return;\n }\n const sortedTimestamps = trackData.timestampProcessingQueue.map((x) => x.timestamp).sort((a, b) => a - b);\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n sample.decodeTimestamp = sortedTimestamps[i];\n if (!this.isFragmented && trackData.lastTimescaleUnits === null) {\n sample.decodeTimestamp = 0;\n }\n const sampleCompositionTimeOffset = intoTimescale(sample.timestamp - sample.decodeTimestamp, trackData.timescale);\n const durationInTimescale = 
intoTimescale(sample.duration, trackData.timescale);\n if (trackData.lastTimescaleUnits !== null) {\n assert(trackData.lastSample);\n const timescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n assert(delta >= 0);\n trackData.lastTimescaleUnits += delta;\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n if (!this.isFragmented) {\n let lastTableEntry = last(trackData.timeToSampleTable);\n assert(lastTableEntry);\n if (lastTableEntry.sampleCount === 1) {\n lastTableEntry.sampleDelta = delta;\n const entryBefore = trackData.timeToSampleTable[trackData.timeToSampleTable.length - 2];\n if (entryBefore && entryBefore.sampleDelta === delta) {\n entryBefore.sampleCount++;\n trackData.timeToSampleTable.pop();\n lastTableEntry = entryBefore;\n }\n } else if (lastTableEntry.sampleDelta !== delta) {\n lastTableEntry.sampleCount--;\n trackData.timeToSampleTable.push(lastTableEntry = {\n sampleCount: 1,\n sampleDelta: delta\n });\n }\n if (lastTableEntry.sampleDelta === durationInTimescale) {\n lastTableEntry.sampleCount++;\n } else {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n }\n const lastCompositionTimeOffsetTableEntry = last(trackData.compositionTimeOffsetTable);\n assert(lastCompositionTimeOffsetTableEntry);\n if (lastCompositionTimeOffsetTableEntry.sampleCompositionTimeOffset === sampleCompositionTimeOffset) {\n lastCompositionTimeOffsetTableEntry.sampleCount++;\n } else {\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n });\n }\n }\n } else {\n trackData.lastTimescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n if (!this.isFragmented) {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n 
});\n }\n }\n trackData.lastSample = sample;\n }\n trackData.timestampProcessingQueue.length = 0;\n assert(trackData.lastSample);\n assert(trackData.lastTimescaleUnits !== null);\n if (nextSample !== undefined && trackData.lastSample.timescaleUnitsToNextSample === 0) {\n assert(nextSample.type === \"key\");\n const timescaleUnits = intoTimescale(nextSample.timestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n }\n }\n async registerSample(trackData, sample) {\n if (sample.type === \"key\") {\n this.processTimestamps(trackData, sample);\n }\n trackData.timestampProcessingQueue.push(sample);\n if (this.isFragmented) {\n trackData.sampleQueue.push(sample);\n await this.interleaveSamples();\n } else if (this.fastStart === \"reserve\") {\n await this.registerSampleFastStartReserve(trackData, sample);\n } else {\n await this.addSampleToTrack(trackData, sample);\n }\n }\n async addSampleToTrack(trackData, sample) {\n if (!this.isFragmented) {\n trackData.samples.push(sample);\n if (this.fastStart === \"reserve\") {\n const maximumPacketCount = trackData.track.metadata.maximumPacketCount;\n assert(maximumPacketCount !== undefined);\n if (trackData.samples.length > maximumPacketCount) {\n throw new Error(`Track #${trackData.track.id} has already reached the maximum packet count` + ` (${maximumPacketCount}). 
Either add less packets or increase the maximum packet count.`);\n }\n }\n }\n let beginNewChunk = false;\n if (!trackData.currentChunk) {\n beginNewChunk = true;\n } else {\n trackData.currentChunk.startTimestamp = Math.min(trackData.currentChunk.startTimestamp, sample.timestamp);\n const currentChunkDuration = sample.timestamp - trackData.currentChunk.startTimestamp;\n if (this.isFragmented) {\n const keyFrameQueuedEverywhere = this.trackDatas.every((otherTrackData) => {\n if (trackData === otherTrackData) {\n return sample.type === \"key\";\n }\n const firstQueuedSample = otherTrackData.sampleQueue[0];\n if (firstQueuedSample) {\n return firstQueuedSample.type === \"key\";\n }\n return otherTrackData.track.source._closed;\n });\n if (currentChunkDuration >= this.minimumFragmentDuration && keyFrameQueuedEverywhere && sample.timestamp > this.maxWrittenTimestamp) {\n beginNewChunk = true;\n await this.finalizeFragment();\n }\n } else {\n beginNewChunk = currentChunkDuration >= 0.5;\n }\n }\n if (beginNewChunk) {\n if (trackData.currentChunk) {\n await this.finalizeCurrentChunk(trackData);\n }\n trackData.currentChunk = {\n startTimestamp: sample.timestamp,\n samples: [],\n offset: null,\n moofOffset: null\n };\n }\n assert(trackData.currentChunk);\n trackData.currentChunk.samples.push(sample);\n if (this.isFragmented) {\n this.maxWrittenTimestamp = Math.max(this.maxWrittenTimestamp, sample.timestamp);\n }\n }\n async finalizeCurrentChunk(trackData) {\n assert(!this.isFragmented);\n if (!trackData.currentChunk)\n return;\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n let sampleCount = trackData.currentChunk.samples.length;\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n sampleCount = trackData.currentChunk.samples.reduce((acc, sample) => acc + intoTimescale(sample.duration, trackData.timescale), 0);\n }\n if (trackData.compactlyCodedChunkTable.length === 0 || 
last(trackData.compactlyCodedChunkTable).samplesPerChunk !== sampleCount) {\n trackData.compactlyCodedChunkTable.push({\n firstChunk: trackData.finalizedChunks.length,\n samplesPerChunk: sampleCount\n });\n }\n if (this.fastStart === \"in-memory\") {\n trackData.currentChunk.offset = 0;\n return;\n }\n trackData.currentChunk.offset = this.writer.getPos();\n for (const sample of trackData.currentChunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n await this.writer.flush();\n }\n async interleaveSamples(isFinalCall = false) {\n assert(this.isFragmented);\n if (!isFinalCall && !this.allTracksAreKnown()) {\n return;\n }\n outer:\n while (true) {\n let trackWithMinTimestamp = null;\n let minTimestamp = Infinity;\n for (const trackData of this.trackDatas) {\n if (!isFinalCall && trackData.sampleQueue.length === 0 && !trackData.track.source._closed) {\n break outer;\n }\n if (trackData.sampleQueue.length > 0 && trackData.sampleQueue[0].timestamp < minTimestamp) {\n trackWithMinTimestamp = trackData;\n minTimestamp = trackData.sampleQueue[0].timestamp;\n }\n }\n if (!trackWithMinTimestamp) {\n break;\n }\n const sample = trackWithMinTimestamp.sampleQueue.shift();\n await this.addSampleToTrack(trackWithMinTimestamp, sample);\n }\n }\n async finalizeFragment(flushWriter = true) {\n assert(this.isFragmented);\n const fragmentNumber = this.nextFragmentNumber++;\n if (fragmentNumber === 1) {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n const tracksInFragment = this.trackDatas.filter((x) => x.currentChunk);\n const moofBox = moof(fragmentNumber, tracksInFragment);\n const moofOffset = this.writer.getPos();\n const mdatStartPos = moofOffset + this.boxWriter.measureBox(moofBox);\n let 
currentPos = mdatStartPos + MIN_BOX_HEADER_SIZE;\n let fragmentStartTimestamp = Infinity;\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset = currentPos;\n trackData.currentChunk.moofOffset = moofOffset;\n for (const sample of trackData.currentChunk.samples) {\n currentPos += sample.size;\n }\n fragmentStartTimestamp = Math.min(fragmentStartTimestamp, trackData.currentChunk.startTimestamp);\n }\n const mdatSize = currentPos - mdatStartPos;\n const needsLargeMdatSize = mdatSize >= 2 ** 32;\n if (needsLargeMdatSize) {\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset += MAX_BOX_HEADER_SIZE - MIN_BOX_HEADER_SIZE;\n }\n }\n if (this.format._options.onMoof) {\n this.writer.startTrackingWrites();\n }\n const newMoofBox = moof(fragmentNumber, tracksInFragment);\n this.boxWriter.writeBox(newMoofBox);\n if (this.format._options.onMoof) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoof(data, start, fragmentStartTimestamp);\n }\n assert(this.writer.getPos() === mdatStartPos);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n const mdatBox = mdat(needsLargeMdatSize);\n mdatBox.size = mdatSize;\n this.boxWriter.writeBox(mdatBox);\n this.writer.seek(mdatStartPos + (needsLargeMdatSize ? 
MAX_BOX_HEADER_SIZE : MIN_BOX_HEADER_SIZE));\n for (const trackData of tracksInFragment) {\n for (const sample of trackData.currentChunk.samples) {\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n for (const trackData of tracksInFragment) {\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n trackData.currentChunk = null;\n }\n if (flushWriter) {\n await this.writer.flush();\n }\n }\n async registerSampleFastStartReserve(trackData, sample) {\n if (this.allTracksAreKnown()) {\n if (!this.mdat) {\n const moovBox = moov(this);\n const moovSize = this.boxWriter.measureBox(moovBox);\n const reservedSize = moovSize + this.computeSampleTableSizeUpperBound() + 4096;\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize + reservedSize);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n for (const trackData2 of this.trackDatas) {\n for (const sample2 of trackData2.sampleQueue) {\n await this.addSampleToTrack(trackData2, sample2);\n }\n trackData2.sampleQueue.length = 0;\n }\n }\n await this.addSampleToTrack(trackData, sample);\n } else {\n trackData.sampleQueue.push(sample);\n }\n }\n computeSampleTableSizeUpperBound() {\n assert(this.fastStart === \"reserve\");\n let upperBound = 0;\n for (const trackData of this.trackDatas) {\n const n = trackData.track.metadata.maximumPacketCount;\n assert(n !== undefined);\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += (4 + 4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += 8 * n;\n }\n return upperBound;\n }\n async onTrackClose(track) {\n const release = await this.mutex.acquire();\n if (track.type === 
\"subtitle\" && track.source._codec === \"webvtt\") {\n const trackData = this.trackDatas.find((x) => x.track === track);\n if (trackData) {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n if (this.isFragmented) {\n await this.interleaveSamples();\n }\n release();\n }\n async finalize() {\n const release = await this.mutex.acquire();\n this.allTracksKnown.resolve();\n for (const trackData of this.trackDatas) {\n if (trackData.type === \"subtitle\" && trackData.track.source._codec === \"webvtt\") {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.isFragmented) {\n await this.interleaveSamples(true);\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n }\n await this.finalizeFragment(false);\n } else {\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n await this.finalizeCurrentChunk(trackData);\n }\n }\n if (this.fastStart === \"in-memory\") {\n this.mdat = mdat(false);\n let mdatSize;\n for (let i = 0;i < 2; i++) {\n const movieBox2 = moov(this);\n const movieBoxSize = this.boxWriter.measureBox(movieBox2);\n mdatSize = this.boxWriter.measureBox(this.mdat);\n let currentChunkPos = this.writer.getPos() + movieBoxSize + mdatSize;\n for (const chunk of this.finalizedChunks) {\n chunk.offset = currentChunkPos;\n for (const { data } of chunk.samples) {\n assert(data);\n currentChunkPos += data.byteLength;\n mdatSize += data.byteLength;\n }\n }\n if (currentChunkPos < 2 ** 32)\n break;\n if (mdatSize >= 2 ** 32)\n this.mdat.largeSize = true;\n }\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n if (this.format._options.onMdat) {\n 
this.writer.startTrackingWrites();\n }\n this.mdat.size = mdatSize;\n this.boxWriter.writeBox(this.mdat);\n for (const chunk of this.finalizedChunks) {\n for (const sample of chunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n } else if (this.isFragmented) {\n const startPos = this.writer.getPos();\n const mfraBox = mfra(this.trackDatas);\n this.boxWriter.writeBox(mfraBox);\n const mfraBoxSize = this.writer.getPos() - startPos;\n this.writer.seek(this.writer.getPos() - 4);\n this.boxWriter.writeU32(mfraBoxSize);\n } else {\n assert(this.mdat);\n const mdatPos = this.boxWriter.offsets.get(this.mdat);\n assert(mdatPos !== undefined);\n const mdatSize = this.writer.getPos() - mdatPos;\n this.mdat.size = mdatSize;\n this.mdat.largeSize = mdatSize >= 2 ** 32;\n this.boxWriter.patchBox(this.mdat);\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n const movieBox = moov(this);\n if (this.fastStart === \"reserve\") {\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize);\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(movieBox);\n const remainingSpace = this.boxWriter.offsets.get(this.mdat) - this.writer.getPos();\n this.boxWriter.writeBox(free(remainingSpace));\n } else {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(movieBox);\n }\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n release();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output-format.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form 
is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass OutputFormat {\n getSupportedVideoCodecs() {\n return this.getSupportedCodecs().filter((codec) => VIDEO_CODECS.includes(codec));\n }\n getSupportedAudioCodecs() {\n return this.getSupportedCodecs().filter((codec) => AUDIO_CODECS.includes(codec));\n }\n getSupportedSubtitleCodecs() {\n return this.getSupportedCodecs().filter((codec) => SUBTITLE_CODECS.includes(codec));\n }\n _codecUnsupportedHint(codec) {\n return \"\";\n }\n}\n\nclass IsobmffOutputFormat extends OutputFormat {\n constructor(options = {}) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.fastStart !== undefined && ![false, \"in-memory\", \"reserve\", \"fragmented\"].includes(options.fastStart)) {\n throw new TypeError(\"options.fastStart, when provided, must be false, 'in-memory', 'reserve', or 'fragmented'.\");\n }\n if (options.minimumFragmentDuration !== undefined && (!Number.isFinite(options.minimumFragmentDuration) || options.minimumFragmentDuration < 0)) {\n throw new TypeError(\"options.minimumFragmentDuration, when provided, must be a non-negative number.\");\n }\n if (options.onFtyp !== undefined && typeof options.onFtyp !== \"function\") {\n throw new TypeError(\"options.onFtyp, when provided, must be a function.\");\n }\n if (options.onMoov !== undefined && typeof options.onMoov !== \"function\") {\n throw new TypeError(\"options.onMoov, when provided, must be a function.\");\n }\n if (options.onMdat !== undefined && typeof options.onMdat !== \"function\") {\n throw new TypeError(\"options.onMdat, when provided, must be a function.\");\n }\n if (options.onMoof !== undefined && typeof options.onMoof !== \"function\") {\n throw new TypeError(\"options.onMoof, when provided, must be a function.\");\n }\n if 
(options.metadataFormat !== undefined && ![\"mdir\", \"mdta\", \"udta\", \"auto\"].includes(options.metadataFormat)) {\n throw new TypeError(\"options.metadataFormat, when provided, must be either 'auto', 'mdir', 'mdta', or 'udta'.\");\n }\n super();\n this._options = options;\n }\n getSupportedTrackCounts() {\n const max = 2 ** 32 - 1;\n return {\n video: { min: 0, max },\n audio: { min: 0, max },\n subtitle: { min: 0, max },\n total: { min: 1, max }\n };\n }\n get supportsVideoRotationMetadata() {\n return true;\n }\n _createMuxer(output) {\n return new IsobmffMuxer(output, this);\n }\n}\n\nclass Mp4OutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MP4\";\n }\n get fileExtension() {\n return \".mp4\";\n }\n get mimeType() {\n return \"video/mp4\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...NON_PCM_AUDIO_CODECS,\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n ...SUBTITLE_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new MovOutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MOV will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\nclass MovOutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MOV\";\n }\n get fileExtension() {\n return \".mov\";\n }\n get mimeType() {\n return \"video/quicktime\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...AUDIO_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new Mp4OutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MP4 will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/encode.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the 
Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar validateVideoEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!VIDEO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid video codec '${config.codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n if (!(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate must be a positive integer or a quality.\");\n }\n if (config.keyFrameInterval !== undefined && (!Number.isFinite(config.keyFrameInterval) || config.keyFrameInterval < 0)) {\n throw new TypeError(\"config.keyFrameInterval, when provided, must be a non-negative number.\");\n }\n if (config.sizeChangeBehavior !== undefined && ![\"deny\", \"passThrough\", \"fill\", \"contain\", \"cover\"].includes(config.sizeChangeBehavior)) {\n throw new TypeError(\"config.sizeChangeBehavior, when provided, must be 'deny', 'passThrough', 'fill', 'contain'\" + \" or 'cover'.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedChunk, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateVideoEncodingAdditionalOptions(config.codec, config);\n};\nvar validateVideoEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.alpha !== undefined && ![\"discard\", \"keep\"].includes(options.alpha)) {\n throw new TypeError(\"options.alpha, when provided, must be 'discard' or 
'keep'.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.latencyMode !== undefined && ![\"quality\", \"realtime\"].includes(options.latencyMode)) {\n throw new TypeError(\"latencyMode, when provided, must be 'quality' or 'realtime'.\");\n }\n if (options.fullCodecString !== undefined && typeof options.fullCodecString !== \"string\") {\n throw new TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n if (options.hardwareAcceleration !== undefined && ![\"no-preference\", \"prefer-hardware\", \"prefer-software\"].includes(options.hardwareAcceleration)) {\n throw new TypeError(\"hardwareAcceleration, when provided, must be 'no-preference', 'prefer-hardware' or\" + \" 'prefer-software'.\");\n }\n if (options.scalabilityMode !== undefined && typeof options.scalabilityMode !== \"string\") {\n throw new TypeError(\"scalabilityMode, when provided, must be a string.\");\n }\n if (options.contentHint !== undefined && typeof options.contentHint !== \"string\") {\n throw new TypeError(\"contentHint, when provided, must be a string.\");\n }\n};\nvar buildVideoEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toVideoBitrate(options.codec, options.width, options.height) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildVideoCodecString(options.codec, options.width, options.height, resolvedBitrate),\n width: options.width,\n height: options.height,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n alpha: options.alpha ?? 
\"discard\",\n framerate: options.framerate,\n latencyMode: options.latencyMode,\n hardwareAcceleration: options.hardwareAcceleration,\n scalabilityMode: options.scalabilityMode,\n contentHint: options.contentHint,\n ...getVideoEncoderConfigExtension(options.codec)\n };\n};\nvar validateAudioEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!AUDIO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid audio codec '${config.codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n if (config.bitrate === undefined && (!PCM_AUDIO_CODECS.includes(config.codec) || config.codec === \"flac\")) {\n throw new TypeError(\"config.bitrate must be provided for compressed audio codecs.\");\n }\n if (config.bitrate !== undefined && !(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate, when provided, must be a positive integer or a quality.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedChunk, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateAudioEncodingAdditionalOptions(config.codec, config);\n};\nvar validateAudioEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.fullCodecString !== undefined && typeof options.fullCodecString !== \"string\") {\n throw new 
TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n};\nvar buildAudioEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toAudioBitrate(options.codec) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildAudioCodecString(options.codec, options.numberOfChannels, options.sampleRate),\n numberOfChannels: options.numberOfChannels,\n sampleRate: options.sampleRate,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n ...getAudioEncoderConfigExtension(options.codec)\n };\n};\n\nclass Quality {\n constructor(factor) {\n this._factor = factor;\n }\n _toVideoBitrate(codec, width, height) {\n const pixels = width * height;\n const codecEfficiencyFactors = {\n avc: 1,\n hevc: 0.6,\n vp9: 0.6,\n av1: 0.4,\n vp8: 1.2\n };\n const referencePixels = 1920 * 1080;\n const referenceBitrate = 3000000;\n const scaleFactor = Math.pow(pixels / referencePixels, 0.95);\n const baseBitrate = referenceBitrate * scaleFactor;\n const codecAdjustedBitrate = baseBitrate * codecEfficiencyFactors[codec];\n const finalBitrate = codecAdjustedBitrate * this._factor;\n return Math.ceil(finalBitrate / 1000) * 1000;\n }\n _toAudioBitrate(codec) {\n if (PCM_AUDIO_CODECS.includes(codec) || codec === \"flac\") {\n return;\n }\n const baseRates = {\n aac: 128000,\n opus: 64000,\n mp3: 160000,\n vorbis: 64000\n };\n const baseBitrate = baseRates[codec];\n if (!baseBitrate) {\n throw new Error(`Unhandled codec: ${codec}`);\n }\n let finalBitrate = baseBitrate * this._factor;\n if (codec === \"aac\") {\n const validRates = [96000, 128000, 160000, 192000];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? 
curr : prev);\n } else if (codec === \"opus\" || codec === \"vorbis\") {\n finalBitrate = Math.max(6000, finalBitrate);\n } else if (codec === \"mp3\") {\n const validRates = [\n 8000,\n 16000,\n 24000,\n 32000,\n 40000,\n 48000,\n 64000,\n 80000,\n 96000,\n 112000,\n 128000,\n 160000,\n 192000,\n 224000,\n 256000,\n 320000\n ];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? curr : prev);\n }\n return Math.round(finalBitrate / 1000) * 1000;\n }\n}\nvar QUALITY_LOW = /* @__PURE__ */ new Quality(0.6);\nvar QUALITY_MEDIUM = /* @__PURE__ */ new Quality(1);\nvar QUALITY_HIGH = /* @__PURE__ */ new Quality(2);\nvar QUALITY_VERY_HIGH = /* @__PURE__ */ new Quality(4);\n\n// ../../node_modules/mediabunny/dist/modules/src/media-source.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass MediaSource {\n constructor() {\n this._connectedTrack = null;\n this._closingPromise = null;\n this._closed = false;\n this._timestampOffset = 0;\n }\n _ensureValidAdd() {\n if (!this._connectedTrack) {\n throw new Error(\"Source is not connected to an output track.\");\n }\n if (this._connectedTrack.output.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._connectedTrack.output.state === \"finalizing\" || this._connectedTrack.output.state === \"finalized\") {\n throw new Error(\"Output has been finalized.\");\n }\n if (this._connectedTrack.output.state === \"pending\") {\n throw new Error(\"Output has not started.\");\n }\n if (this._closed) {\n throw new Error(\"Source is closed.\");\n }\n }\n async _start() {}\n async _flushAndClose(forceClose) {}\n close() {\n if (this._closingPromise) {\n return;\n }\n const connectedTrack = this._connectedTrack;\n 
if (!connectedTrack) {\n throw new Error(\"Cannot call close without connecting the source to an output track.\");\n }\n if (connectedTrack.output.state === \"pending\") {\n throw new Error(\"Cannot call close before output has been started.\");\n }\n this._closingPromise = (async () => {\n await this._flushAndClose(false);\n this._closed = true;\n if (connectedTrack.output.state === \"finalizing\" || connectedTrack.output.state === \"finalized\") {\n return;\n }\n connectedTrack.output._muxer.onTrackClose(connectedTrack);\n })();\n }\n async _flushOrWaitForOngoingClose(forceClose) {\n return this._closingPromise ??= (async () => {\n await this._flushAndClose(forceClose);\n this._closed = true;\n })();\n }\n}\n\nclass VideoSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!VIDEO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid video codec '${codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass VideoEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastMultipleOfKeyFrameInterval = -1;\n this.codedWidth = null;\n this.codedHeight = null;\n this.resizeCanvas = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.alphaEncoder = null;\n this.splitter = null;\n this.splitterCreationFailed = false;\n this.alphaFrameQueue = [];\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(videoSample, shouldClose, encodeOptions) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.codedWidth !== null && this.codedHeight !== null) {\n if (videoSample.codedWidth !== this.codedWidth || videoSample.codedHeight !== this.codedHeight) {\n const sizeChangeBehavior = 
this.encodingConfig.sizeChangeBehavior ?? \"deny\";\n if (sizeChangeBehavior === \"passThrough\") {} else if (sizeChangeBehavior === \"deny\") {\n throw new Error(`Video sample size must remain constant. Expected ${this.codedWidth}x${this.codedHeight},` + ` got ${videoSample.codedWidth}x${videoSample.codedHeight}. To allow the sample size to` + ` change over time, set \\`sizeChangeBehavior\\` to a value other than 'strict' in the` + ` encoding options.`);\n } else {\n let canvasIsNew = false;\n if (!this.resizeCanvas) {\n if (typeof document !== \"undefined\") {\n this.resizeCanvas = document.createElement(\"canvas\");\n this.resizeCanvas.width = this.codedWidth;\n this.resizeCanvas.height = this.codedHeight;\n } else {\n this.resizeCanvas = new OffscreenCanvas(this.codedWidth, this.codedHeight);\n }\n canvasIsNew = true;\n }\n const context = this.resizeCanvas.getContext(\"2d\", {\n alpha: isFirefox()\n });\n assert(context);\n if (!canvasIsNew) {\n if (isFirefox()) {\n context.fillStyle = \"black\";\n context.fillRect(0, 0, this.codedWidth, this.codedHeight);\n } else {\n context.clearRect(0, 0, this.codedWidth, this.codedHeight);\n }\n }\n videoSample.drawWithFit(context, { fit: sizeChangeBehavior });\n if (shouldClose) {\n videoSample.close();\n }\n videoSample = new VideoSample(this.resizeCanvas, {\n timestamp: videoSample.timestamp,\n duration: videoSample.duration,\n rotation: videoSample.rotation\n });\n shouldClose = true;\n }\n }\n } else {\n this.codedWidth = videoSample.codedWidth;\n this.codedHeight = videoSample.codedHeight;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(videoSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n const keyFrameInterval = this.encodingConfig.keyFrameInterval ?? 
5;\n const multipleOfKeyFrameInterval = Math.floor(videoSample.timestamp / keyFrameInterval);\n const finalEncodeOptions = {\n ...encodeOptions,\n keyFrame: encodeOptions?.keyFrame || keyFrameInterval === 0 || multipleOfKeyFrameInterval !== this.lastMultipleOfKeyFrameInterval\n };\n this.lastMultipleOfKeyFrameInterval = multipleOfKeyFrameInterval;\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = videoSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample, finalEncodeOptions)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n } else {\n assert(this.encoder);\n const videoFrame = videoSample.toVideoFrame();\n if (!this.alphaEncoder) {\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const frameDefinitelyHasNoAlpha = !!videoFrame.format && !videoFrame.format.includes(\"A\");\n if (frameDefinitelyHasNoAlpha || this.splitterCreationFailed) {\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (!this.splitter) {\n try {\n this.splitter = new ColorAlphaSplitter(width, height);\n } catch (error) {\n console.error(\"Due to an error, only color data will be encoded.\", error);\n this.splitterCreationFailed = true;\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n }\n }\n if (this.splitter) {\n const colorFrame = this.splitter.extractColor(videoFrame);\n const alphaFrame = this.splitter.extractAlpha(videoFrame);\n this.alphaFrameQueue.push(alphaFrame);\n this.encoder.encode(colorFrame, finalEncodeOptions);\n colorFrame.close();\n videoFrame.close();\n }\n }\n }\n if (shouldClose) 
{\n videoSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n }\n await this.muxer.mutex.currentPromise;\n } finally {\n if (shouldClose) {\n videoSample.close();\n }\n }\n }\n ensureEncoder(videoSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const encoderConfig = buildVideoEncoderConfig({\n width: videoSample.codedWidth,\n height: videoSample.codedHeight,\n ...this.encodingConfig,\n framerate: this.source._connectedTrack?.metadata.frameRate\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customVideoEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else {\n if (typeof VideoEncoder === \"undefined\") {\n throw new Error(\"VideoEncoder is not supported by this browser.\");\n }\n encoderConfig.alpha = \"discard\";\n if (this.encodingConfig.alpha === \"keep\") {\n encoderConfig.latencyMode = \"quality\";\n }\n const hasOddDimension = encoderConfig.width % 2 === 1 || encoderConfig.height % 2 === 1;\n if (hasOddDimension && 
(this.encodingConfig.codec === \"avc\" || this.encodingConfig.codec === \"hevc\")) {\n throw new Error(`The dimensions ${encoderConfig.width}x${encoderConfig.height} are not supported for codec` + ` '${this.encodingConfig.codec}'; both width and height must be even numbers. Make sure to` + ` round your dimensions to the nearest even number.`);\n }\n const support = await VideoEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.width}x${encoderConfig.height}, hardware acceleration:` + ` ${encoderConfig.hardwareAcceleration ?? \"no-preference\"}) is not supported by this browser.` + ` Consider using another codec or changing your video parameters.`);\n }\n const colorChunkQueue = [];\n const nullAlphaChunkQueue = [];\n let encodedAlphaChunkCount = 0;\n let alphaEncoderQueue = 0;\n const addPacket = (colorChunk, alphaChunk, meta) => {\n const sideData = {};\n if (alphaChunk) {\n const alphaData = new Uint8Array(alphaChunk.byteLength);\n alphaChunk.copyTo(alphaData);\n sideData.alpha = alphaData;\n }\n const packet = EncodedPacket.fromEncodedChunk(colorChunk, sideData);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n this.encoder = new VideoEncoder({\n output: (chunk, meta) => {\n if (!this.alphaEncoder) {\n addPacket(chunk, null, meta);\n return;\n }\n const alphaFrame = this.alphaFrameQueue.shift();\n assert(alphaFrame !== undefined);\n if (alphaFrame) {\n this.alphaEncoder.encode(alphaFrame, {\n keyFrame: chunk.type === \"key\"\n });\n alphaEncoderQueue++;\n alphaFrame.close();\n colorChunkQueue.push({ chunk, meta });\n } else {\n if (alphaEncoderQueue === 0) {\n addPacket(chunk, null, meta);\n } else {\n 
nullAlphaChunkQueue.push(encodedAlphaChunkCount + alphaEncoderQueue);\n colorChunkQueue.push({ chunk, meta });\n }\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n if (this.encodingConfig.alpha === \"keep\") {\n this.alphaEncoder = new VideoEncoder({\n output: (chunk, meta) => {\n alphaEncoderQueue--;\n const colorChunk = colorChunkQueue.shift();\n assert(colorChunk !== undefined);\n addPacket(colorChunk.chunk, chunk, colorChunk.meta);\n encodedAlphaChunkCount++;\n while (nullAlphaChunkQueue.length > 0 && nullAlphaChunkQueue[0] === encodedAlphaChunkCount) {\n nullAlphaChunkQueue.shift();\n const colorChunk2 = colorChunkQueue.shift();\n assert(colorChunk2 !== undefined);\n addPacket(colorChunk2.chunk, null, colorChunk2.meta);\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.alphaEncoder.configure(encoderConfig);\n }\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n async flushAndClose(forceClose) {\n if (!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n await this.alphaEncoder?.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n if (this.alphaEncoder && this.alphaEncoder.state !== \"closed\") {\n this.alphaEncoder.close();\n }\n this.alphaFrameQueue.forEach((x) => x?.close());\n this.splitter?.close();\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else {\n return this.encoder?.encodeQueueSize ?? 
0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass ColorAlphaSplitter {\n constructor(initialWidth, initialHeight) {\n this.lastFrame = null;\n if (typeof OffscreenCanvas !== \"undefined\") {\n this.canvas = new OffscreenCanvas(initialWidth, initialHeight);\n } else {\n this.canvas = document.createElement(\"canvas\");\n this.canvas.width = initialWidth;\n this.canvas.height = initialHeight;\n }\n const gl = this.canvas.getContext(\"webgl2\", {\n alpha: true\n });\n if (!gl) {\n throw new Error(\"Couldn't acquire WebGL 2 context.\");\n }\n this.gl = gl;\n this.colorProgram = this.createColorProgram();\n this.alphaProgram = this.createAlphaProgram();\n this.vao = this.createVAO();\n this.sourceTexture = this.createTexture();\n this.alphaResolutionLocation = this.gl.getUniformLocation(this.alphaProgram, \"u_resolution\");\n this.gl.useProgram(this.colorProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.colorProgram, \"u_sourceTexture\"), 0);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.alphaProgram, \"u_sourceTexture\"), 0);\n }\n createVertexShader() {\n return this.createShader(this.gl.VERTEX_SHADER, `#version 300 es\n\t\t\tin vec2 a_position;\n\t\t\tin vec2 a_texCoord;\n\t\t\tout vec2 v_texCoord;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tgl_Position = vec4(a_position, 0.0, 1.0);\n\t\t\t\tv_texCoord = a_texCoord;\n\t\t\t}\n\t\t`);\n }\n createColorProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tvec4 source = texture(u_sourceTexture, v_texCoord);\n\t\t\t\tfragColor = vec4(source.rgb, 1.0);\n\t\t\t}\n\t\t`);\n const program = 
this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createAlphaProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tuniform vec2 u_resolution; // The width and height of the canvas\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\n\t\t\t// This function determines the value for a single byte in the YUV stream\n\t\t\tfloat getByteValue(float byteOffset) {\n\t\t\t\tfloat width = u_resolution.x;\n\t\t\t\tfloat height = u_resolution.y;\n\n\t\t\t\tfloat yPlaneSize = width * height;\n\n\t\t\t\tif (byteOffset < yPlaneSize) {\n\t\t\t\t\t// This byte is in the luma plane. Find the corresponding pixel coordinates to sample from\n\t\t\t\t\tfloat y = floor(byteOffset / width);\n\t\t\t\t\tfloat x = mod(byteOffset, width);\n\t\t\t\t\t\n\t\t\t\t\t// Add 0.5 to sample the center of the texel\n\t\t\t\t\tvec2 sampleCoord = (vec2(x, y) + 0.5) / u_resolution;\n\t\t\t\t\t\n\t\t\t\t\t// The luma value is the alpha from the source texture\n\t\t\t\t\treturn texture(u_sourceTexture, sampleCoord).a;\n\t\t\t\t} else {\n\t\t\t\t\t// Write a fixed value for chroma and beyond\n\t\t\t\t\treturn 128.0 / 255.0;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\t// Each fragment writes 4 bytes (R, G, B, A)\n\t\t\t\tfloat pixelIndex = floor(gl_FragCoord.y) * u_resolution.x + floor(gl_FragCoord.x);\n\t\t\t\tfloat baseByteOffset = pixelIndex * 4.0;\n\n\t\t\t\tvec4 result;\n\t\t\t\tfor (int i = 0; i < 4; i++) {\n\t\t\t\t\tfloat currentByteOffset = baseByteOffset + float(i);\n\t\t\t\t\tresult[i] = getByteValue(currentByteOffset);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfragColor = result;\n\t\t\t}\n\t\t`);\n const program = this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n 
this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createShader(type, source) {\n const shader = this.gl.createShader(type);\n this.gl.shaderSource(shader, source);\n this.gl.compileShader(shader);\n if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {\n console.error(\"Shader compile error:\", this.gl.getShaderInfoLog(shader));\n }\n return shader;\n }\n createVAO() {\n const vao = this.gl.createVertexArray();\n this.gl.bindVertexArray(vao);\n const vertices = new Float32Array([\n -1,\n -1,\n 0,\n 1,\n 1,\n -1,\n 1,\n 1,\n -1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0\n ]);\n const buffer = this.gl.createBuffer();\n this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);\n this.gl.bufferData(this.gl.ARRAY_BUFFER, vertices, this.gl.STATIC_DRAW);\n const positionLocation = this.gl.getAttribLocation(this.colorProgram, \"a_position\");\n const texCoordLocation = this.gl.getAttribLocation(this.colorProgram, \"a_texCoord\");\n this.gl.enableVertexAttribArray(positionLocation);\n this.gl.vertexAttribPointer(positionLocation, 2, this.gl.FLOAT, false, 16, 0);\n this.gl.enableVertexAttribArray(texCoordLocation);\n this.gl.vertexAttribPointer(texCoordLocation, 2, this.gl.FLOAT, false, 16, 8);\n return vao;\n }\n createTexture() {\n const texture = this.gl.createTexture();\n this.gl.bindTexture(this.gl.TEXTURE_2D, texture);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.LINEAR);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.LINEAR);\n return texture;\n }\n updateTexture(sourceFrame) {\n if (this.lastFrame === sourceFrame) {\n return;\n }\n if (sourceFrame.displayWidth !== this.canvas.width || sourceFrame.displayHeight !== this.canvas.height) {\n this.canvas.width = 
sourceFrame.displayWidth;\n this.canvas.height = sourceFrame.displayHeight;\n }\n this.gl.activeTexture(this.gl.TEXTURE0);\n this.gl.bindTexture(this.gl.TEXTURE_2D, this.sourceTexture);\n this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGBA, this.gl.RGBA, this.gl.UNSIGNED_BYTE, sourceFrame);\n this.lastFrame = sourceFrame;\n }\n extractColor(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.colorProgram);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n return new VideoFrame(this.canvas, {\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? undefined,\n alpha: \"discard\"\n });\n }\n extractAlpha(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform2f(this.alphaResolutionLocation, this.canvas.width, this.canvas.height);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n const { width, height } = this.canvas;\n const chromaSamples = Math.ceil(width / 2) * Math.ceil(height / 2);\n const yuvSize = width * height + chromaSamples * 2;\n const requiredHeight = Math.ceil(yuvSize / (width * 4));\n let yuv = new Uint8Array(4 * width * requiredHeight);\n this.gl.readPixels(0, 0, width, requiredHeight, this.gl.RGBA, this.gl.UNSIGNED_BYTE, yuv);\n yuv = yuv.subarray(0, yuvSize);\n assert(yuv[width * height] === 128);\n assert(yuv[yuv.length - 1] === 128);\n const init = {\n format: \"I420\",\n codedWidth: width,\n codedHeight: height,\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? 
undefined,\n transfer: [yuv.buffer]\n };\n return new VideoFrame(yuv, init);\n }\n close() {\n this.gl.getExtension(\"WEBGL_lose_context\")?.loseContext();\n this.gl = null;\n }\n}\n\nclass VideoSampleSource extends VideoSource {\n constructor(encodingConfig) {\n validateVideoEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new VideoEncoderWrapper(this, encodingConfig);\n }\n add(videoSample, encodeOptions) {\n if (!(videoSample instanceof VideoSample)) {\n throw new TypeError(\"videoSample must be a VideoSample.\");\n }\n return this._encoder.add(videoSample, false, encodeOptions);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass AudioSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!AUDIO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid audio codec '${codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass AudioEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastNumberOfChannels = null;\n this.lastSampleRate = null;\n this.isPcmEncoder = false;\n this.outputSampleSize = null;\n this.writeOutputValue = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.lastEndSampleIndex = null;\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(audioSample, shouldClose) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.lastNumberOfChannels !== null && this.lastSampleRate !== null) {\n if (audioSample.numberOfChannels !== this.lastNumberOfChannels || audioSample.sampleRate !== this.lastSampleRate) {\n throw new Error(`Audio parameters must remain constant. 
Expected ${this.lastNumberOfChannels} channels at` + ` ${this.lastSampleRate} Hz, got ${audioSample.numberOfChannels} channels at` + ` ${audioSample.sampleRate} Hz.`);\n }\n } else {\n this.lastNumberOfChannels = audioSample.numberOfChannels;\n this.lastSampleRate = audioSample.sampleRate;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(audioSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n {\n const startSampleIndex = Math.round(audioSample.timestamp * audioSample.sampleRate);\n const endSampleIndex = Math.round((audioSample.timestamp + audioSample.duration) * audioSample.sampleRate);\n if (this.lastEndSampleIndex === null) {\n this.lastEndSampleIndex = endSampleIndex;\n } else {\n const sampleDiff = startSampleIndex - this.lastEndSampleIndex;\n if (sampleDiff >= 64) {\n const fillSample = new AudioSample({\n data: new Float32Array(sampleDiff * audioSample.numberOfChannels),\n format: \"f32-planar\",\n sampleRate: audioSample.sampleRate,\n numberOfChannels: audioSample.numberOfChannels,\n numberOfFrames: sampleDiff,\n timestamp: this.lastEndSampleIndex / audioSample.sampleRate\n });\n await this.add(fillSample, true);\n }\n this.lastEndSampleIndex += audioSample.numberOfFrames;\n }\n }\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = audioSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n await this.muxer.mutex.currentPromise;\n } else if (this.isPcmEncoder) {\n await this.doPcmEncoding(audioSample, shouldClose);\n } else {\n assert(this.encoder);\n const audioData = audioSample.toAudioData();\n this.encoder.encode(audioData);\n audioData.close();\n if 
(shouldClose) {\n audioSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n await this.muxer.mutex.currentPromise;\n }\n } finally {\n if (shouldClose) {\n audioSample.close();\n }\n }\n }\n async doPcmEncoding(audioSample, shouldClose) {\n assert(this.outputSampleSize);\n assert(this.writeOutputValue);\n const { numberOfChannels, numberOfFrames, sampleRate, timestamp } = audioSample;\n const CHUNK_SIZE = 2048;\n const outputs = [];\n for (let frame = 0;frame < numberOfFrames; frame += CHUNK_SIZE) {\n const frameCount = Math.min(CHUNK_SIZE, audioSample.numberOfFrames - frame);\n const outputSize = frameCount * numberOfChannels * this.outputSampleSize;\n const outputBuffer = new ArrayBuffer(outputSize);\n const outputView = new DataView(outputBuffer);\n outputs.push({ frameCount, view: outputView });\n }\n const allocationSize = audioSample.allocationSize({ planeIndex: 0, format: \"f32-planar\" });\n const floats = new Float32Array(allocationSize / Float32Array.BYTES_PER_ELEMENT);\n for (let i = 0;i < numberOfChannels; i++) {\n audioSample.copyTo(floats, { planeIndex: i, format: \"f32-planar\" });\n for (let j = 0;j < outputs.length; j++) {\n const { frameCount, view: view2 } = outputs[j];\n for (let k = 0;k < frameCount; k++) {\n this.writeOutputValue(view2, (k * numberOfChannels + i) * this.outputSampleSize, floats[j * CHUNK_SIZE + k]);\n }\n }\n }\n if (shouldClose) {\n audioSample.close();\n }\n const meta = {\n decoderConfig: {\n codec: this.encodingConfig.codec,\n numberOfChannels,\n sampleRate\n }\n };\n for (let i = 0;i < outputs.length; i++) {\n const { frameCount, view: view2 } = outputs[i];\n const outputBuffer = view2.buffer;\n const startFrame = i * CHUNK_SIZE;\n const packet = new EncodedPacket(new Uint8Array(outputBuffer), \"key\", timestamp + startFrame / sampleRate, frameCount / sampleRate);\n 
this.encodingConfig.onEncodedPacket?.(packet, meta);\n await this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta);\n }\n }\n ensureEncoder(audioSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const { numberOfChannels, sampleRate } = audioSample;\n const encoderConfig = buildAudioEncoderConfig({\n numberOfChannels,\n sampleRate,\n ...this.encodingConfig\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customAudioEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else if (PCM_AUDIO_CODECS.includes(this.encodingConfig.codec)) {\n this.initPcmEncoder();\n } else {\n if (typeof AudioEncoder === \"undefined\") {\n throw new Error(\"AudioEncoder is not supported by this browser.\");\n }\n const support = await AudioEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.numberOfChannels} channels, ${encoderConfig.sampleRate} Hz) is not` + ` supported by this browser. 
Consider using another codec or changing your audio parameters.`);\n }\n this.encoder = new AudioEncoder({\n output: (chunk, meta) => {\n if (this.encodingConfig.codec === \"aac\" && meta?.decoderConfig) {\n let needsDescriptionOverwrite = false;\n if (!meta.decoderConfig.description || meta.decoderConfig.description.byteLength < 2) {\n needsDescriptionOverwrite = true;\n } else {\n const audioSpecificConfig = parseAacAudioSpecificConfig(toUint8Array(meta.decoderConfig.description));\n needsDescriptionOverwrite = audioSpecificConfig.objectType === 0;\n }\n if (needsDescriptionOverwrite) {\n const objectType = Number(last(encoderConfig.codec.split(\".\")));\n meta.decoderConfig.description = buildAacAudioSpecificConfig({\n objectType,\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate\n });\n }\n }\n const packet = EncodedPacket.fromEncodedChunk(chunk);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n initPcmEncoder() {\n this.isPcmEncoder = true;\n const codec = this.encodingConfig.codec;\n const { dataType, sampleSize, littleEndian } = parsePcmCodec(codec);\n this.outputSampleSize = sampleSize;\n switch (sampleSize) {\n case 1:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint8(byteOffset, clamp((value + 1) * 127.5, 0, 255));\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n view2.setInt8(byteOffset, clamp(Math.round(value * 128), -128, 127));\n };\n } else if 
(dataType === \"ulaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toUlaw(int16));\n };\n } else if (dataType === \"alaw\") {\n this.writeOutputValue = (view2, byteOffset, value) => {\n const int16 = clamp(Math.floor(value * 32767), -32768, 32767);\n view2.setUint8(byteOffset, toAlaw(int16));\n };\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 2:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint16(byteOffset, clamp((value + 1) * 32767.5, 0, 65535), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt16(byteOffset, clamp(Math.round(value * 32767), -32768, 32767), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 3:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => setUint24(view2, byteOffset, clamp((value + 1) * 8388607.5, 0, 16777215), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => setInt24(view2, byteOffset, clamp(Math.round(value * 8388607), -8388608, 8388607), littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 4:\n {\n if (dataType === \"unsigned\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setUint32(byteOffset, clamp((value + 1) * 2147483647.5, 0, 4294967295), littleEndian);\n } else if (dataType === \"signed\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setInt32(byteOffset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), littleEndian);\n } else if (dataType === \"float\") {\n this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat32(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n case 8:\n {\n if (dataType === \"float\") {\n this.writeOutputValue = (view2, 
byteOffset, value) => view2.setFloat64(byteOffset, value, littleEndian);\n } else {\n assert(false);\n }\n }\n ;\n break;\n default:\n {\n assertNever(sampleSize);\n assert(false);\n }\n ;\n }\n }\n async flushAndClose(forceClose) {\n if (!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else if (this.isPcmEncoder) {\n return 0;\n } else {\n return this.encoder?.encodeQueueSize ?? 0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass AudioSampleSource extends AudioSource {\n constructor(encodingConfig) {\n validateAudioEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new AudioEncoderWrapper(this, encodingConfig);\n }\n add(audioSample) {\n if (!(audioSample instanceof AudioSample)) {\n throw new TypeError(\"audioSample must be an AudioSample.\");\n }\n return this._encoder.add(audioSample, false);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass SubtitleSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!SUBTITLE_CODECS.includes(codec)) {\n throw new TypeError(`Invalid subtitle codec '${codec}'. 
Must be one of: ${SUBTITLE_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar ALL_TRACK_TYPES = [\"video\", \"audio\", \"subtitle\"];\nvar validateBaseTrackMetadata = (metadata) => {\n if (!metadata || typeof metadata !== \"object\") {\n throw new TypeError(\"metadata must be an object.\");\n }\n if (metadata.languageCode !== undefined && !isIso639Dash2LanguageCode(metadata.languageCode)) {\n throw new TypeError(\"metadata.languageCode, when provided, must be a three-letter, ISO 639-2/T language code.\");\n }\n if (metadata.name !== undefined && typeof metadata.name !== \"string\") {\n throw new TypeError(\"metadata.name, when provided, must be a string.\");\n }\n if (metadata.disposition !== undefined) {\n validateTrackDisposition(metadata.disposition);\n }\n if (metadata.maximumPacketCount !== undefined && (!Number.isInteger(metadata.maximumPacketCount) || metadata.maximumPacketCount < 0)) {\n throw new TypeError(\"metadata.maximumPacketCount, when provided, must be a non-negative integer.\");\n }\n};\n\nclass Output {\n constructor(options) {\n this.state = \"pending\";\n this._tracks = [];\n this._startPromise = null;\n this._cancelPromise = null;\n this._finalizePromise = null;\n this._mutex = new AsyncMutex;\n this._metadataTags = {};\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!(options.format instanceof OutputFormat)) {\n throw new TypeError(\"options.format must be an OutputFormat.\");\n }\n if (!(options.target instanceof Target)) {\n throw new TypeError(\"options.target must be a Target.\");\n }\n if (options.target._output) {\n throw 
new Error(\"Target is already used for another output.\");\n }\n options.target._output = this;\n this.format = options.format;\n this.target = options.target;\n this._writer = options.target._createWriter();\n this._muxer = options.format._createMuxer(this);\n }\n addVideoTrack(source, metadata = {}) {\n if (!(source instanceof VideoSource)) {\n throw new TypeError(\"source must be a VideoSource.\");\n }\n validateBaseTrackMetadata(metadata);\n if (metadata.rotation !== undefined && ![0, 90, 180, 270].includes(metadata.rotation)) {\n throw new TypeError(`Invalid video rotation: ${metadata.rotation}. Has to be 0, 90, 180 or 270.`);\n }\n if (!this.format.supportsVideoRotationMetadata && metadata.rotation) {\n throw new Error(`${this.format._name} does not support video rotation metadata.`);\n }\n if (metadata.frameRate !== undefined && (!Number.isFinite(metadata.frameRate) || metadata.frameRate <= 0)) {\n throw new TypeError(`Invalid video frame rate: ${metadata.frameRate}. Must be a positive number.`);\n }\n this._addTrack(\"video\", source, metadata);\n }\n addAudioTrack(source, metadata = {}) {\n if (!(source instanceof AudioSource)) {\n throw new TypeError(\"source must be an AudioSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"audio\", source, metadata);\n }\n addSubtitleTrack(source, metadata = {}) {\n if (!(source instanceof SubtitleSource)) {\n throw new TypeError(\"source must be a SubtitleSource.\");\n }\n validateBaseTrackMetadata(metadata);\n this._addTrack(\"subtitle\", source, metadata);\n }\n setMetadataTags(tags) {\n validateMetadataTags(tags);\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot set metadata tags after output has been started or canceled.\");\n }\n this._metadataTags = tags;\n }\n _addTrack(type, source, metadata) {\n if (this.state !== \"pending\") {\n throw new Error(\"Cannot add track after output has been started or canceled.\");\n }\n if (source._connectedTrack) {\n throw new Error(\"Source 
is already used for a track.\");\n }\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n const presentTracksOfThisType = this._tracks.reduce((count, track2) => count + (track2.type === type ? 1 : 0), 0);\n const maxCount = supportedTrackCounts[type].max;\n if (presentTracksOfThisType === maxCount) {\n throw new Error(maxCount === 0 ? `${this.format._name} does not support ${type} tracks.` : `${this.format._name} does not support more than ${maxCount} ${type} track` + `${maxCount === 1 ? \"\" : \"s\"}.`);\n }\n const maxTotalCount = supportedTrackCounts.total.max;\n if (this._tracks.length === maxTotalCount) {\n throw new Error(`${this.format._name} does not support more than ${maxTotalCount} tracks` + `${maxTotalCount === 1 ? \"\" : \"s\"} in total.`);\n }\n const track = {\n id: this._tracks.length + 1,\n output: this,\n type,\n source,\n metadata\n };\n if (track.type === \"video\") {\n const supportedVideoCodecs = this.format.getSupportedVideoCodecs();\n if (supportedVideoCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support video tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedVideoCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` video codecs are: ${supportedVideoCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"audio\") {\n const supportedAudioCodecs = this.format.getSupportedAudioCodecs();\n if (supportedAudioCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support audio tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedAudioCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. 
Supported` + ` audio codecs are: ${supportedAudioCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n } else if (track.type === \"subtitle\") {\n const supportedSubtitleCodecs = this.format.getSupportedSubtitleCodecs();\n if (supportedSubtitleCodecs.length === 0) {\n throw new Error(`${this.format._name} does not support subtitle tracks.` + this.format._codecUnsupportedHint(track.source._codec));\n } else if (!supportedSubtitleCodecs.includes(track.source._codec)) {\n throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` subtitle codecs are: ${supportedSubtitleCodecs.map((codec) => `'${codec}'`).join(\", \")}.` + this.format._codecUnsupportedHint(track.source._codec));\n }\n }\n this._tracks.push(track);\n source._connectedTrack = track;\n }\n async start() {\n const supportedTrackCounts = this.format.getSupportedTrackCounts();\n for (const trackType of ALL_TRACK_TYPES) {\n const presentTracksOfThisType = this._tracks.reduce((count, track) => count + (track.type === trackType ? 1 : 0), 0);\n const minCount = supportedTrackCounts[trackType].min;\n if (presentTracksOfThisType < minCount) {\n throw new Error(minCount === supportedTrackCounts[trackType].max ? `${this.format._name} requires exactly ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${minCount} ${trackType}` + ` track${minCount === 1 ? \"\" : \"s\"}.`);\n }\n }\n const totalMinCount = supportedTrackCounts.total.min;\n if (this._tracks.length < totalMinCount) {\n throw new Error(totalMinCount === supportedTrackCounts.total.max ? `${this.format._name} requires exactly ${totalMinCount} track` + `${totalMinCount === 1 ? \"\" : \"s\"}.` : `${this.format._name} requires at least ${totalMinCount} track` + `${totalMinCount === 1 ? 
\"\" : \"s\"}.`);\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._startPromise) {\n console.warn(\"Output has already been started.\");\n return this._startPromise;\n }\n return this._startPromise = (async () => {\n this.state = \"started\";\n this._writer.start();\n const release = await this._mutex.acquire();\n await this._muxer.start();\n const promises = this._tracks.map((track) => track.source._start());\n await Promise.all(promises);\n release();\n })();\n }\n getMimeType() {\n return this._muxer.getMimeType();\n }\n async cancel() {\n if (this._cancelPromise) {\n console.warn(\"Output has already been canceled.\");\n return this._cancelPromise;\n } else if (this.state === \"finalizing\" || this.state === \"finalized\") {\n console.warn(\"Output has already been finalized.\");\n return;\n }\n return this._cancelPromise = (async () => {\n this.state = \"canceled\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(true));\n await Promise.all(promises);\n await this._writer.close();\n release();\n })();\n }\n async finalize() {\n if (this.state === \"pending\") {\n throw new Error(\"Cannot finalize before starting.\");\n }\n if (this.state === \"canceled\") {\n throw new Error(\"Cannot finalize after canceling.\");\n }\n if (this._finalizePromise) {\n console.warn(\"Output has already been finalized.\");\n return this._finalizePromise;\n }\n return this._finalizePromise = (async () => {\n this.state = \"finalizing\";\n const release = await this._mutex.acquire();\n const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(false));\n await Promise.all(promises);\n await this._muxer.finalize();\n await this._writer.flush();\n await this._writer.finalize();\n this.state = \"finalized\";\n release();\n })();\n }\n}\n// ../../node_modules/mediabunny/dist/modules/src/index.js\n/*!\n * Copyright (c) 2026-present, 
Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MEDIABUNNY_LOADED_SYMBOL = Symbol.for(\"mediabunny loaded\");\nif (globalThis[MEDIABUNNY_LOADED_SYMBOL]) {\n console.error(`[WARNING]\nMediabunny was loaded twice.` + \" This will likely cause Mediabunny not to work correctly.\" + \" Check if multiple dependencies are importing different versions of Mediabunny,\" + \" or if something is being bundled incorrectly.\");\n}\nglobalThis[MEDIABUNNY_LOADED_SYMBOL] = true;\n\n// src/core/utils/error-handler.ts\nfunction extractErrorMessage(error) {\n if (error instanceof Error) {\n return error.message;\n }\n return String(error);\n}\n\n// src/core/utils/logger.ts\nfunction isDebugEnabled() {\n const globalAny = globalThis;\n if (globalAny.__VIDTREO_DEBUG__ === true || globalAny.__VIDTREO_DEV__ === true) {\n return true;\n }\n const envNode = typeof process !== \"undefined\" && process?.env ? 
\"development\" : undefined;\n if (envNode === \"development\" || envNode === \"test\") {\n return true;\n }\n if (typeof localStorage !== \"undefined\") {\n const flag = localStorage.getItem(\"VIDTREO_DEBUG\");\n if (flag === \"true\") {\n return true;\n }\n }\n return false;\n}\nvar isDevelopment = isDebugEnabled();\nvar ANSI_COLORS = {\n reset: \"\\x1B[0m\",\n bright: \"\\x1B[1m\",\n dim: \"\\x1B[2m\",\n red: \"\\x1B[31m\",\n green: \"\\x1B[32m\",\n yellow: \"\\x1B[33m\",\n blue: \"\\x1B[34m\",\n magenta: \"\\x1B[35m\",\n cyan: \"\\x1B[36m\",\n white: \"\\x1B[37m\",\n gray: \"\\x1B[90m\"\n};\nfunction formatMessage(level, message, options) {\n if (!isDevelopment) {\n return \"\";\n }\n const prefix = options?.prefix || `[${level.toUpperCase()}]`;\n const color = options?.color || getDefaultColor(level);\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n return `${colorCode}${prefix}${resetCode} ${message}`;\n}\nfunction getDefaultColor(level) {\n switch (level) {\n case \"error\":\n return \"red\";\n case \"warn\":\n return \"yellow\";\n case \"info\":\n return \"cyan\";\n case \"debug\":\n return \"gray\";\n default:\n return \"white\";\n }\n}\nfunction log(level, message, ...args) {\n if (!isDevelopment) {\n return;\n }\n const formatted = formatMessage(level, message);\n console[level](formatted, ...args);\n}\nvar logger = {\n log: (message, ...args) => {\n log(\"log\", message, ...args);\n },\n info: (message, ...args) => {\n log(\"info\", message, ...args);\n },\n warn: (message, ...args) => {\n log(\"warn\", message, ...args);\n },\n error: (message, ...args) => {\n log(\"error\", message, ...args);\n },\n debug: (message, ...args) => {\n log(\"debug\", message, ...args);\n },\n group: (label, color = \"cyan\") => {\n if (!isDevelopment) {\n return;\n }\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n console.group(`${colorCode}${label}${resetCode}`);\n },\n groupEnd: () => {\n if 
(!isDevelopment) {\n return;\n }\n console.groupEnd();\n }\n};\n\n// src/core/utils/validation.ts\nfunction requireNonNull(value, message) {\n if (value === null || value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireDefined(value, message) {\n if (value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireInitialized(value, componentName) {\n if (value === null || value === undefined) {\n throw new Error(`${componentName} is not initialized`);\n }\n return value;\n}\n\n// src/core/processor/worker/audio-state.ts\nvar MILLISECONDS_PER_SECOND = 1000;\n\nclass AudioState {\n getNowMilliseconds;\n isPaused = false;\n isMuted = false;\n pausedDuration = 0;\n pauseStartedAt = null;\n lastAudioTimestamp = 0;\n isProcessingActive = false;\n constructor(dependencies) {\n this.getNowMilliseconds = dependencies.getNowMilliseconds;\n }\n reset() {\n this.isPaused = false;\n this.isMuted = false;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.lastAudioTimestamp = 0;\n this.isProcessingActive = false;\n }\n setProcessingActive(isActive) {\n this.isProcessingActive = isActive;\n }\n isActive() {\n return this.isProcessingActive;\n }\n toggleMuted() {\n this.isMuted = !this.isMuted;\n return this.isMuted;\n }\n setMuted(isMuted) {\n this.isMuted = isMuted;\n }\n getIsMuted() {\n return this.isMuted;\n }\n getIsPaused() {\n return this.isPaused;\n }\n getPausedDuration() {\n return this.pausedDuration;\n }\n pause() {\n if (this.isPaused) {\n return false;\n }\n this.pauseStartedAt = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND;\n this.isPaused = true;\n return true;\n }\n resume() {\n if (!this.isPaused) {\n return false;\n }\n const now = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND;\n if (this.pauseStartedAt !== null) {\n this.pausedDuration += now - this.pauseStartedAt;\n }\n this.pauseStartedAt = null;\n this.isPaused = false;\n return true;\n }\n getAudioTimestamp(timestamp) 
{\n if (timestamp >= this.lastAudioTimestamp) {\n return timestamp;\n }\n return this.lastAudioTimestamp;\n }\n updateLastAudioTimestamp(timestamp, duration) {\n this.lastAudioTimestamp = timestamp + duration;\n }\n getLastAudioTimestamp() {\n return this.lastAudioTimestamp;\n }\n}\n\n// src/core/processor/worker/buffer-tracker.ts\nvar BUFFER_UPDATE_INTERVAL_MILLISECONDS = 1000;\nvar BYTES_PER_KILOBYTE = 1024;\nvar FILE_SIZE_PRECISION_FACTOR = 100;\nvar FILE_SIZE_UNITS = [\"Bytes\", \"KB\", \"MB\", \"GB\"];\n\nclass BufferTracker {\n intervalId = null;\n dependencies;\n constructor(dependencies) {\n this.dependencies = dependencies;\n }\n start() {\n if (this.intervalId !== null) {\n return;\n }\n this.intervalId = this.dependencies.setInterval(() => {\n const size = this.dependencies.getBufferSize();\n const formatted = formatFileSize(size);\n this.dependencies.onBufferUpdate(size, formatted);\n }, BUFFER_UPDATE_INTERVAL_MILLISECONDS);\n }\n stop() {\n if (this.intervalId === null) {\n return;\n }\n this.dependencies.clearInterval(this.intervalId);\n this.intervalId = null;\n }\n}\nfunction formatFileSize(bytes2) {\n if (bytes2 === 0) {\n return `0 ${FILE_SIZE_UNITS[0]}`;\n }\n const base = BYTES_PER_KILOBYTE;\n const index = Math.floor(Math.log(bytes2) / Math.log(base));\n const size = Math.round(bytes2 / base ** index * FILE_SIZE_PRECISION_FACTOR) / FILE_SIZE_PRECISION_FACTOR;\n return `${size} ${FILE_SIZE_UNITS[index]}`;\n}\n\n// src/core/processor/worker/rotation-utils.ts\nvar ROTATION_DEGREES_0 = 0;\nvar ROTATION_DEGREES_90 = 90;\nvar ROTATION_DEGREES_180 = 180;\nvar ROTATION_DEGREES_270 = 270;\nvar ROTATION_DEGREES_360 = 360;\nfunction calculateFrameRotationDegrees(input) {\n if (!input.isMobileDevice) {\n return ROTATION_DEGREES_0;\n }\n const targetWidth = input.targetWidth;\n const targetHeight = input.targetHeight;\n if (typeof targetWidth !== \"number\") {\n return ROTATION_DEGREES_0;\n }\n if (typeof targetHeight !== \"number\") {\n return 
ROTATION_DEGREES_0;\n }\n const isTargetPortrait = targetHeight > targetWidth;\n const isFramePortrait = input.frameHeight > input.frameWidth;\n if (isTargetPortrait === isFramePortrait) {\n return ROTATION_DEGREES_0;\n }\n const settingsRotation = resolveRotationHint(input.settingsRotation);\n if (settingsRotation !== null) {\n return settingsRotation;\n }\n const orientationRotation = resolveRotationHint(input.orientationAngle);\n if (orientationRotation !== null) {\n return orientationRotation;\n }\n const windowRotation = resolveRotationHint(input.windowOrientation);\n if (windowRotation !== null) {\n return windowRotation;\n }\n return getFallbackRotationDegrees();\n}\nfunction resolveRotationHint(rotationHint) {\n const normalizedRotation = normalizeRotationDegrees(rotationHint);\n if (normalizedRotation === ROTATION_DEGREES_90) {\n return ROTATION_DEGREES_90;\n }\n if (normalizedRotation === ROTATION_DEGREES_270) {\n return ROTATION_DEGREES_270;\n }\n return null;\n}\nfunction normalizeRotationDegrees(rotationDegrees) {\n if (typeof rotationDegrees !== \"number\") {\n return null;\n }\n const normalizedValue = (rotationDegrees % ROTATION_DEGREES_360 + ROTATION_DEGREES_360) % ROTATION_DEGREES_360;\n const remainder = normalizedValue % ROTATION_DEGREES_90;\n if (remainder !== 0) {\n return null;\n }\n if (normalizedValue === ROTATION_DEGREES_0) {\n return ROTATION_DEGREES_0;\n }\n if (normalizedValue === ROTATION_DEGREES_90) {\n return ROTATION_DEGREES_90;\n }\n if (normalizedValue === ROTATION_DEGREES_180) {\n return ROTATION_DEGREES_180;\n }\n if (normalizedValue === ROTATION_DEGREES_270) {\n return ROTATION_DEGREES_270;\n }\n return null;\n}\nfunction getFallbackRotationDegrees() {\n return ROTATION_DEGREES_90;\n}\n\n// src/core/processor/worker/watermark-utils.ts\nfunction calculateWatermarkTargetSize(videoWidth, imageWidth, imageHeight) {\n const targetWidth = Math.round(videoWidth * 0.07);\n const scaleFactor = targetWidth / imageWidth;\n const 
targetHeight = Math.round(imageHeight * scaleFactor);\n return { width: targetWidth, height: targetHeight };\n}\nfunction getWatermarkPosition(options) {\n const { watermarkWidth, watermarkHeight, videoWidth, videoHeight, position } = options;\n const padding = 20;\n switch (position) {\n case \"top-left\":\n return { x: padding, y: padding };\n case \"top-right\":\n return { x: videoWidth - watermarkWidth - padding, y: padding };\n case \"bottom-left\":\n return { x: padding, y: videoHeight - watermarkHeight - padding };\n case \"bottom-right\":\n return {\n x: videoWidth - watermarkWidth - padding,\n y: videoHeight - watermarkHeight - padding\n };\n case \"center\":\n return {\n x: (videoWidth - watermarkWidth) / 2,\n y: (videoHeight - watermarkHeight) / 2\n };\n default:\n return {\n x: videoWidth - watermarkWidth - padding,\n y: videoHeight - watermarkHeight - padding\n };\n }\n}\n\n// src/core/processor/worker/frame-compositor.ts\nvar DOUBLE_VALUE = 2;\nvar DEFAULT_WATERMARK_OPACITY = 1;\nvar DEFAULT_WATERMARK_BASE_WIDTH = 1280;\nvar OVERLAY_BACKGROUND_OPACITY = 0.6;\nvar OVERLAY_PADDING = 16;\nvar OVERLAY_TEXT_COLOR = \"#ffffff\";\nvar OVERLAY_FONT_SIZE = 16;\nvar OVERLAY_FONT_FAMILY = \"Arial, sans-serif\";\nvar OVERLAY_MIN_WIDTH = 200;\nvar OVERLAY_MIN_HEIGHT = 50;\nvar OVERLAY_COLOR_CHANNEL_VALUE = 20;\nvar OVERLAY_BORDER_RADIUS = 50;\nvar COMPOSITION_CONTEXT_ERROR_MESSAGE = \"Failed to get composition canvas context\";\nvar RECORDER_WORKER_LOG_PREFIX = \"[RecorderWorker]\";\nvar ROTATION_RADIANS_90 = Math.PI * ROTATION_DEGREES_90 / ROTATION_DEGREES_180;\nvar ROTATION_RADIANS_270 = Math.PI * ROTATION_DEGREES_270 / ROTATION_DEGREES_180;\n\nclass FrameCompositor {\n overlayCanvas = null;\n compositionCanvas = null;\n compositionContext = null;\n watermarkCanvas = null;\n frameRotationDegrees = null;\n videoSettings = null;\n viewportMetadata = null;\n isMobileDevice = false;\n logger;\n fetchResource;\n createImageBitmap;\n sendDebugLog;\n 
constructor(dependencies) {\n this.logger = dependencies.logger;\n this.fetchResource = dependencies.fetchResource;\n this.createImageBitmap = dependencies.createImageBitmap;\n this.sendDebugLog = dependencies.sendDebugLog;\n }\n reset() {\n this.overlayCanvas = null;\n this.compositionCanvas = null;\n this.compositionContext = null;\n this.watermarkCanvas = null;\n this.frameRotationDegrees = null;\n this.videoSettings = null;\n this.viewportMetadata = null;\n this.isMobileDevice = false;\n }\n setVideoSettings(settings) {\n this.videoSettings = settings;\n this.frameRotationDegrees = null;\n }\n setViewportMetadata(metadata) {\n this.viewportMetadata = metadata;\n this.frameRotationDegrees = null;\n }\n setIsMobileDevice(isMobileDevice) {\n this.isMobileDevice = isMobileDevice;\n this.frameRotationDegrees = null;\n }\n async prepareWatermark(config) {\n const watermarkConfig = config.watermark;\n if (!watermarkConfig) {\n return;\n }\n if (this.watermarkCanvas) {\n return;\n }\n const url2 = watermarkConfig.url;\n let opacity = DEFAULT_WATERMARK_OPACITY;\n if (typeof watermarkConfig.opacity === \"number\") {\n opacity = watermarkConfig.opacity;\n }\n const response = await this.fetchResource(url2, { mode: \"cors\" }).catch((error) => {\n this.logWatermarkError(url2, error);\n return null;\n });\n if (!response) {\n return;\n }\n if (!response.ok) {\n const httpError = new Error(`HTTP error! status: ${response.status}`);\n this.logWatermarkError(url2, httpError);\n return;\n }\n const blob = await response.blob().catch((error) => {\n this.logWatermarkError(url2, error);\n return null;\n });\n if (!blob) {\n return;\n }\n let isVectorImageFormat = false;\n if (url2.toLowerCase().endsWith(\".svg\")) {\n isVectorImageFormat = true;\n }\n if (blob.type === \"image/svg+xml\") {\n isVectorImageFormat = true;\n }\n if (isVectorImageFormat) {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX} Loading SVG watermark. 
Note: Some environments may not support SVG in createImageBitmap inside workers. If the watermark doesn't appear, consider using a PNG or a Data URL.`);\n }\n const imageBitmap = await this.createImageBitmap(blob).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n const bitmapError = new Error(`Failed to create ImageBitmap from blob (${blob.type}). Errors can happen with SVGs in workers or invalid formats: ${errorMessage}`);\n this.logWatermarkError(url2, bitmapError);\n return null;\n });\n if (!imageBitmap) {\n return;\n }\n let videoWidth = DEFAULT_WATERMARK_BASE_WIDTH;\n if (typeof config.width === \"number\") {\n videoWidth = config.width;\n }\n const { width: targetWidth, height: targetHeight } = calculateWatermarkTargetSize(videoWidth, imageBitmap.width, imageBitmap.height);\n const scaleFactor = targetWidth / imageBitmap.width;\n const canvas = new OffscreenCanvas(targetWidth, targetHeight);\n const context = requireNonNull(canvas.getContext(\"2d\", { willReadFrequently: false }), \"Failed to get watermark canvas context\");\n context.globalAlpha = opacity;\n context.drawImage(imageBitmap, 0, 0, targetWidth, targetHeight);\n context.globalAlpha = 1;\n imageBitmap.close();\n this.watermarkCanvas = canvas;\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Watermark prepared with pre-applied opacity`, {\n width: canvas.width,\n height: canvas.height,\n opacity,\n scaleFactor\n });\n }\n composeFrame(parameters) {\n const compositionPlan = this.getCompositionPlan(parameters);\n if (!compositionPlan.needsComposition) {\n return { frameToProcess: parameters.videoFrame, imageBitmap: null };\n }\n const dimensions = this.getValidFrameDimensions(parameters.videoFrame, compositionPlan.rotationDegrees);\n if (!dimensions) {\n return { frameToProcess: parameters.videoFrame, imageBitmap: null };\n }\n const width = dimensions.width;\n const height = dimensions.height;\n const context = this.ensureCompositionCanvas(width, height);\n 
context.clearRect(0, 0, width, height);\n this.drawVideoFrame({\n context,\n videoFrame: parameters.videoFrame,\n rotationDegrees: compositionPlan.rotationDegrees,\n width,\n height\n });\n this.applyOverlayIfNeeded(context, width, compositionPlan.shouldApplyOverlay, parameters.overlayConfig);\n this.applyWatermarkIfNeeded({\n context,\n videoWidth: width,\n videoHeight: height,\n needsWatermark: compositionPlan.needsWatermark,\n config: parameters.config\n });\n return this.buildCompositionResult(parameters.videoFrame);\n }\n // Decides what work composeFrame must do for this frame: rotation, overlay, watermark, and whether any canvas composition is needed at all.\n getCompositionPlan(parameters) {\n const rotationDegrees = this.getFrameRotationDegrees(parameters.videoFrame, parameters.config);\n const shouldRotateFrame = rotationDegrees !== ROTATION_DEGREES_0;\n let needsWatermark = false;\n if (parameters.config.watermark && this.watermarkCanvas) {\n needsWatermark = true;\n }\n let needsComposition = false;\n if (parameters.shouldApplyOverlay) {\n needsComposition = true;\n }\n if (needsWatermark) {\n needsComposition = true;\n }\n if (shouldRotateFrame) {\n needsComposition = true;\n }\n return {\n rotationDegrees,\n shouldApplyOverlay: parameters.shouldApplyOverlay,\n needsWatermark,\n needsComposition\n };\n }\n // Returns rotation-adjusted frame dimensions, or null (after a warning) when either dimension is non-positive.\n getValidFrameDimensions(videoFrame, rotationDegrees) {\n const dimensions = this.getFrameDimensions(videoFrame, rotationDegrees);\n const width = dimensions.width;\n const height = dimensions.height;\n let hasInvalidDimensions = false;\n if (width <= 0) {\n hasInvalidDimensions = true;\n }\n if (height <= 0) {\n hasInvalidDimensions = true;\n }\n if (hasInvalidDimensions) {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX} Invalid video frame dimensions, skipping composition`, { width, height });\n return null;\n }\n return { width, height };\n }\n // Lazily creates the overlay canvas from the configured text, then draws it at getOverlayPosition (top-right of the frame).\n applyOverlayIfNeeded(context, videoWidth, shouldApplyOverlay, overlayConfig) {\n if (!(shouldApplyOverlay && overlayConfig)) {\n return;\n }\n if (!this.overlayCanvas) {\n this.overlayCanvas = this.createOverlayCanvas(overlayConfig.text);\n 
this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Overlay canvas created`, {\n overlayWidth: this.overlayCanvas.width,\n overlayHeight: this.overlayCanvas.height\n });\n }\n if (!this.overlayCanvas) {\n return;\n }\n const overlayPosition = this.getOverlayPosition(this.overlayCanvas.width, videoWidth);\n context.drawImage(this.overlayCanvas, overlayPosition.horizontal, overlayPosition.vertical);\n }\n // Draws the preloaded watermark canvas at the corner selected by config.watermark.position.\n applyWatermarkIfNeeded(options) {\n const { context, videoWidth, videoHeight, needsWatermark, config } = options;\n const watermarkCanvas = this.watermarkCanvas;\n if (!(needsWatermark && watermarkCanvas && config.watermark)) {\n return;\n }\n const watermarkPosition = getWatermarkPosition({\n watermarkWidth: watermarkCanvas.width,\n watermarkHeight: watermarkCanvas.height,\n videoWidth,\n videoHeight,\n position: config.watermark.position\n });\n context.drawImage(watermarkCanvas, watermarkPosition.x, watermarkPosition.y);\n }\n // Snapshots the composition canvas into a new VideoFrame, carrying over the source frame's timestamp/duration when present.\n buildCompositionResult(videoFrame) {\n const compositionCanvas = requireNonNull(this.compositionCanvas, \"Composition canvas must exist after ensureCompositionCanvas\");\n const imageBitmap = compositionCanvas.transferToImageBitmap();\n let frameInitialization = {};\n if (typeof videoFrame.timestamp === \"number\") {\n frameInitialization = {\n ...frameInitialization,\n timestamp: videoFrame.timestamp\n };\n }\n if (typeof videoFrame.duration === \"number\") {\n frameInitialization = {\n ...frameInitialization,\n duration: videoFrame.duration\n };\n }\n const frameToProcess = new VideoFrame(imageBitmap, frameInitialization);\n return { frameToProcess, imageBitmap };\n }\n // Renders the overlay text into a rounded, semi-transparent badge canvas (measured first on a 1x1 canvas, then resized).\n createOverlayCanvas(text) {\n requireDefined(text, \"Overlay text is required\");\n const canvas = new OffscreenCanvas(1, 1);\n const context = requireNonNull(canvas.getContext(\"2d\"), \"Failed to get OffscreenCanvas context\");\n context.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n const textMetrics = context.measureText(text);\n const textWidth = 
textMetrics.width;\n const textHeight = OVERLAY_FONT_SIZE;\n const overlayWidth = Math.max(OVERLAY_MIN_WIDTH, textWidth + OVERLAY_PADDING * DOUBLE_VALUE);\n const overlayHeight = Math.max(OVERLAY_MIN_HEIGHT, textHeight + OVERLAY_PADDING * DOUBLE_VALUE);\n canvas.width = overlayWidth;\n canvas.height = overlayHeight;\n const redValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const greenValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const blueValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const borderRadius = OVERLAY_BORDER_RADIUS;\n context.fillStyle = `rgba(${redValue}, ${greenValue}, ${blueValue}, ${OVERLAY_BACKGROUND_OPACITY})`;\n context.beginPath();\n context.roundRect(0, 0, overlayWidth, overlayHeight, borderRadius);\n context.fill();\n context.fillStyle = OVERLAY_TEXT_COLOR;\n context.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n context.textBaseline = \"middle\";\n context.textAlign = \"center\";\n const textHorizontalPosition = overlayWidth / DOUBLE_VALUE;\n const textVerticalPosition = overlayHeight / DOUBLE_VALUE;\n context.fillText(text, textHorizontalPosition, textVerticalPosition);\n return canvas;\n }\n // Anchors the overlay to the top-right corner with OVERLAY_PADDING margins.\n getOverlayPosition(overlayWidth, videoWidth) {\n return {\n horizontal: videoWidth - overlayWidth - OVERLAY_PADDING,\n vertical: OVERLAY_PADDING\n };\n }\n // Creates (or recreates on size change) the shared composition canvas and returns its cached 2d context.\n ensureCompositionCanvas(width, height) {\n if (!this.compositionCanvas) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Composition canvas created`, {\n width,\n height\n });\n return this.compositionContext;\n }\n if (!this.compositionContext) {\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n return this.compositionContext;\n }\n const widthChanged = this.compositionCanvas.width !== 
width;\n const heightChanged = this.compositionCanvas.height !== height;\n let shouldResize = false;\n if (widthChanged) {\n shouldResize = true;\n }\n if (heightChanged) {\n shouldResize = true;\n }\n if (shouldResize) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Composition canvas resized`, {\n width,\n height\n });\n return this.compositionContext;\n }\n return this.compositionContext;\n }\n // Computes the frame rotation once per recording, caches it, and logs the inputs that drove the decision.\n getFrameRotationDegrees(videoFrame, config) {\n if (this.frameRotationDegrees !== null) {\n return this.frameRotationDegrees;\n }\n const rotation = this.determineFrameRotationDegrees(videoFrame, config);\n this.frameRotationDegrees = rotation;\n const rotationLog = JSON.stringify({\n rotationDegrees: rotation,\n configWidth: config.width,\n configHeight: config.height,\n settingsWidth: this.videoSettings?.width,\n settingsHeight: this.videoSettings?.height,\n facingMode: this.videoSettings?.facingMode,\n frameDisplayWidth: videoFrame.displayWidth,\n frameDisplayHeight: videoFrame.displayHeight\n });\n this.sendDebugLog(`${RECORDER_WORKER_LOG_PREFIX} Rotation decision`, rotationLog);\n return rotation;\n }\n // Gathers device/orientation hints (when available) and delegates to calculateFrameRotationDegrees.\n determineFrameRotationDegrees(videoFrame, config) {\n const configWidth = config.width;\n const configHeight = config.height;\n let facingMode;\n let settingsRotation;\n if (this.videoSettings) {\n facingMode = this.videoSettings.facingMode;\n settingsRotation = this.videoSettings.rotation;\n }\n let orientationAngle;\n let windowOrientation;\n if (this.viewportMetadata) {\n orientationAngle = this.viewportMetadata.orientationAngle;\n windowOrientation = this.viewportMetadata.windowOrientation;\n }\n return calculateFrameRotationDegrees({\n isMobileDevice: this.isMobileDevice,\n targetWidth: configWidth,\n targetHeight: configHeight,\n frameWidth: 
videoFrame.displayWidth,\n frameHeight: videoFrame.displayHeight,\n facingMode,\n settingsRotation,\n orientationAngle,\n windowOrientation\n });\n }\n // Width/height are swapped for 90/270 degree rotations.\n getFrameDimensions(videoFrame, rotationDegrees) {\n let width = videoFrame.displayWidth;\n let height = videoFrame.displayHeight;\n let shouldSwapDimensions = false;\n if (rotationDegrees === ROTATION_DEGREES_90) {\n shouldSwapDimensions = true;\n }\n if (rotationDegrees === ROTATION_DEGREES_270) {\n shouldSwapDimensions = true;\n }\n if (shouldSwapDimensions) {\n width = videoFrame.displayHeight;\n height = videoFrame.displayWidth;\n }\n return { width, height };\n }\n // Draws the source frame with the requested rotation; the transform is reset to identity before and after each draw.\n drawVideoFrame(parameters) {\n const { context, videoFrame, rotationDegrees, width, height } = parameters;\n const sourceWidth = videoFrame.displayWidth;\n const sourceHeight = videoFrame.displayHeight;\n context.setTransform(1, 0, 0, 1, 0, 0);\n if (rotationDegrees === ROTATION_DEGREES_90) {\n context.translate(width, 0);\n context.rotate(ROTATION_RADIANS_90);\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n context.setTransform(1, 0, 0, 1, 0, 0);\n return;\n }\n if (rotationDegrees === ROTATION_DEGREES_270) {\n context.translate(0, height);\n context.rotate(ROTATION_RADIANS_270);\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n context.setTransform(1, 0, 0, 1, 0, 0);\n return;\n }\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n }\n // Logs watermark load failures; the message includes a CORS hint for images hosted on other origins.\n logWatermarkError(url2, error) {\n const errorMessage = extractErrorMessage(error);\n this.logger.error(`${RECORDER_WORKER_LOG_PREFIX} Failed to load watermark. This is often caused by CORS if the image is on another domain. 
Try using a Data URL (base64) or ensure the server has Access-Control-Allow-Origin: *.`, {\n url: url2,\n error: errorMessage\n });\n }\n}\n\n// src/core/processor/worker/stop-finalization.ts\nvar STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS = 500;\nvar STOP_PENDING_WRITES_POLL_INTERVAL_MILLISECONDS = 10;\nvar ERROR_STOP_PENDING_WRITES_TIMEOUT = \"stop.pending-writes-timeout\";\nfunction createDefaultNowMilliseconds() {\n return () => performance.now();\n}\nfunction createDefaultWaitMilliseconds() {\n return (milliseconds) => new Promise((resolve) => {\n globalThis.setTimeout(resolve, milliseconds);\n });\n}\n// Polls (every 10 ms) until no output writes are in flight; throws \"stop.pending-writes-timeout\" once the timeout (default 500 ms) elapses. Clock and sleep are injectable for tests.\nasync function waitForPendingWritesToDrain(dependencies) {\n let getNowMilliseconds = dependencies.getNowMilliseconds;\n if (!getNowMilliseconds) {\n getNowMilliseconds = createDefaultNowMilliseconds();\n }\n let waitMilliseconds = dependencies.waitMilliseconds;\n if (!waitMilliseconds) {\n waitMilliseconds = createDefaultWaitMilliseconds();\n }\n let timeoutMilliseconds = dependencies.timeoutMilliseconds;\n if (timeoutMilliseconds === undefined) {\n timeoutMilliseconds = STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS;\n }\n const startedAtMilliseconds = getNowMilliseconds();\n let pendingWriteCount = dependencies.getPendingWriteCount();\n while (pendingWriteCount > 0) {\n const elapsedMilliseconds = getNowMilliseconds() - startedAtMilliseconds;\n if (elapsedMilliseconds >= timeoutMilliseconds) {\n throw new Error(ERROR_STOP_PENDING_WRITES_TIMEOUT);\n }\n await waitMilliseconds(STOP_PENDING_WRITES_POLL_INTERVAL_MILLISECONDS);\n pendingWriteCount = dependencies.getPendingWriteCount();\n }\n}\n\n// src/core/processor/worker/stop-transition.ts\n// Runs finalize then complete; on failure attempts recovery and rethrows the original error; the stopping flag is always cleared.\nasync function runStopTransition(dependencies) {\n await dependencies.finalizeStopSequence().then(() => dependencies.completeStop()).catch((error) => {\n return dependencies.recoverStopFailure().then(() => {\n throw error;\n });\n }).finally(() => {\n dependencies.clearStoppingFlag();\n });\n}\n\n// 
src/core/processor/worker/timestamp-manager.ts\nvar DEFAULT_FRAME_RATE = 30;\nvar DEFAULT_KEY_FRAME_INTERVAL_SECONDS = 5;\nvar MILLISECONDS_PER_SECOND2 = 1000;\nvar MICROSECONDS_PER_SECOND = 1e6;\nvar MAX_LEAD_SECONDS = 0.05;\nvar MAX_LAG_SECONDS = 0.1;\nvar MAX_DRIFT_CORRECTION_SECONDS = MAX_LAG_SECONDS;\nvar DRIFT_OFFSET_DECAY_FACTOR = 0.5;\nvar DRIFT_LOG_FRAME_INTERVAL = 90;\nvar RECORDER_WORKER_LOG_PREFIX2 = \"[RecorderWorker]\";\n\n// Maintains monotonic video timestamps (seconds), audio/video drift correction and key-frame cadence for the worker pipeline.\nclass TimestampManager {\n frameRate = DEFAULT_FRAME_RATE;\n lastVideoTimestamp = 0;\n baseVideoTimestamp = null;\n frameCount = 0;\n lastKeyFrameTimestamp = 0;\n forceNextKeyFrame = false;\n driftOffset = 0;\n logger;\n getNowMilliseconds;\n constructor(dependencies) {\n this.logger = dependencies.logger;\n this.getNowMilliseconds = dependencies.getNowMilliseconds;\n }\n // Re-arms all timing state for a new recording; missing/non-positive frameRate falls back to DEFAULT_FRAME_RATE (30).\n reset(frameRate) {\n let resolvedFrameRate = DEFAULT_FRAME_RATE;\n if (typeof frameRate === \"number\" && frameRate > 0) {\n resolvedFrameRate = frameRate;\n }\n this.frameRate = resolvedFrameRate;\n this.lastVideoTimestamp = 0;\n this.baseVideoTimestamp = null;\n this.frameCount = 0;\n this.lastKeyFrameTimestamp = 0;\n this.forceNextKeyFrame = false;\n this.driftOffset = 0;\n }\n setFrameRate(frameRate) {\n this.frameRate = frameRate;\n }\n getFrameRate() {\n return this.frameRate;\n }\n getFrameCount() {\n return this.frameCount;\n }\n getLastVideoTimestamp() {\n return this.lastVideoTimestamp;\n }\n getBaseVideoTimestamp() {\n return this.baseVideoTimestamp;\n }\n // Converts a frame's timestamp (microseconds when present, otherwise wall clock) into seconds relative to the recording base, subtracting paused time. Sets the base on the first frame and flushes buffered visibility updates.\n calculateVideoFrameTimestamp(parameters) {\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n let rawTimestamp = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND2;\n const hasTimestamp = typeof parameters.videoFrame.timestamp === \"number\" && parameters.videoFrame.timestamp !== null;\n if (hasTimestamp) {\n rawTimestamp = parameters.videoFrame.timestamp / MICROSECONDS_PER_SECOND;\n }\n if (this.baseVideoTimestamp === null) {\n this.baseVideoTimestamp = 
rawTimestamp;\n const logData = {\n baseVideoTimestamp: this.baseVideoTimestamp,\n recordingStartTime: parameters.recordingStartTime,\n difference: this.baseVideoTimestamp - parameters.recordingStartTime,\n pendingUpdates: parameters.pendingVisibilityUpdatesCount\n };\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} baseVideoTimestamp set`, logData);\n parameters.processPendingVisibilityUpdates();\n }\n if (this.baseVideoTimestamp === null) {\n throw new Error(\"Base video timestamp must be set\");\n }\n // frameCount of 0 with a nonzero lastVideoTimestamp means a source switch just happened: rebase so the new stream continues from the previous timestamp instead of jumping.\n if (this.frameCount === 0 && this.lastVideoTimestamp > 0) {\n const originalBase = this.baseVideoTimestamp;\n const offset = rawTimestamp - originalBase;\n this.baseVideoTimestamp = rawTimestamp - this.lastVideoTimestamp;\n const frameTimestamp2 = this.lastVideoTimestamp;\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} First frame after source switch`, {\n rawTimestamp,\n originalBase,\n offset,\n adjustedBaseVideoTimestamp: this.baseVideoTimestamp,\n continuationTimestamp: this.lastVideoTimestamp,\n frameTimestamp: frameTimestamp2,\n isScreenCapture: parameters.isScreenCapture\n });\n return frameTimestamp2;\n }\n const normalizedTimestamp = rawTimestamp - this.baseVideoTimestamp - parameters.pausedDuration;\n let previousTimestamp = 0;\n if (this.lastVideoTimestamp > 0) {\n previousTimestamp = this.lastVideoTimestamp;\n }\n let frameTimestamp = normalizedTimestamp;\n // Enforce monotonicity: never go backwards; step forward by one frame duration instead.\n if (normalizedTimestamp < previousTimestamp) {\n frameTimestamp = previousTimestamp + 1 / this.frameRate;\n }\n if (frameTimestamp < 0) {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX2} Negative frame timestamp detected, clamping to zero`, { frameTimestamp, normalizedTimestamp, previousTimestamp });\n return 0;\n }\n if (this.lastVideoTimestamp === 0) {\n this.lastVideoTimestamp = frameTimestamp;\n }\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} Frame timestamp calculation`, {\n rawTimestamp,\n baseVideoTimestamp: this.baseVideoTimestamp,\n normalizedTimestamp,\n previousTimestamp,\n 
frameTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n isScreenCapture: parameters.isScreenCapture,\n frameCount: this.frameCount\n });\n return frameTimestamp;\n }\n // Clamps the timestamp within [-MAX_LAG_SECONDS, +MAX_LEAD_SECONDS] of the audio clock, enforces monotonicity, and decides key-frame placement (forced, elapsed-time, or frame-count cadence). Drift offset decays each frame.\n prepareFrameTiming(parameters) {\n const frameDuration = 1 / this.frameRate;\n let adjustedTimestamp = parameters.frameTimestamp + this.driftOffset;\n if (adjustedTimestamp - parameters.lastAudioTimestamp > MAX_LEAD_SECONDS) {\n adjustedTimestamp = parameters.lastAudioTimestamp + MAX_LEAD_SECONDS;\n }\n if (parameters.lastAudioTimestamp - adjustedTimestamp > MAX_LAG_SECONDS) {\n adjustedTimestamp = parameters.lastAudioTimestamp - MAX_LAG_SECONDS;\n }\n const monotonicTimestamp = this.lastVideoTimestamp + frameDuration;\n let finalTimestamp = adjustedTimestamp;\n if (finalTimestamp < monotonicTimestamp) {\n finalTimestamp = monotonicTimestamp;\n }\n let keyFrameIntervalSeconds = parameters.keyFrameIntervalSeconds;\n if (!(keyFrameIntervalSeconds > 0)) {\n keyFrameIntervalSeconds = DEFAULT_KEY_FRAME_INTERVAL_SECONDS;\n }\n let keyFrameIntervalFrames = Math.round(keyFrameIntervalSeconds * this.frameRate);\n if (keyFrameIntervalFrames < 1) {\n keyFrameIntervalFrames = 1;\n }\n const timeSinceLastKeyFrame = finalTimestamp - this.lastKeyFrameTimestamp;\n let isKeyFrame = false;\n if (this.forceNextKeyFrame) {\n isKeyFrame = true;\n }\n if (timeSinceLastKeyFrame >= keyFrameIntervalSeconds) {\n isKeyFrame = true;\n }\n if (this.frameCount % keyFrameIntervalFrames === 0) {\n isKeyFrame = true;\n }\n this.driftOffset *= DRIFT_OFFSET_DECAY_FACTOR;\n return {\n finalTimestamp,\n frameDuration,\n isKeyFrame\n };\n }\n // Records the emitted frame; reports audio/video drift every DRIFT_LOG_FRAME_INTERVAL frames while audio is active.\n commitFrame(parameters) {\n this.frameCount += 1;\n this.lastVideoTimestamp = parameters.finalTimestamp;\n if (parameters.isKeyFrame) {\n this.lastKeyFrameTimestamp = parameters.finalTimestamp;\n this.forceNextKeyFrame = false;\n }\n let shouldLogDrift = false;\n let audioVideoDrift = 0;\n if (this.frameCount % DRIFT_LOG_FRAME_INTERVAL === 0 && parameters.audioProcessingActive) {\n 
audioVideoDrift = parameters.lastAudioTimestamp - this.lastVideoTimestamp;\n shouldLogDrift = true;\n }\n return {\n shouldLogDrift,\n audioVideoDrift,\n frameCount: this.frameCount,\n lastVideoTimestamp: this.lastVideoTimestamp\n };\n }\n // Rebases timing after a camera/screen source switch: clamps drift into the correction window, continues one frame past max(audio, video), resets frameCount and forces the next key frame.\n handleSourceSwitch(lastAudioTimestamp) {\n if (this.baseVideoTimestamp === null) {\n throw new Error(\"Base video timestamp must be set for source switch\");\n }\n const minFrameDuration = 1 / this.frameRate;\n const rawDrift = lastAudioTimestamp - this.lastVideoTimestamp;\n this.driftOffset = clampValue(rawDrift, -MAX_DRIFT_CORRECTION_SECONDS, MAX_DRIFT_CORRECTION_SECONDS);\n const continuationTimestamp = Math.max(lastAudioTimestamp, this.lastVideoTimestamp) + minFrameDuration;\n const previousVideoTimestamp = this.lastVideoTimestamp;\n this.lastVideoTimestamp = continuationTimestamp;\n this.frameCount = 0;\n this.forceNextKeyFrame = true;\n return {\n continuationTimestamp,\n previousVideoTimestamp,\n minFrameDuration,\n rawDrift,\n driftOffset: this.driftOffset\n };\n }\n}\n// Clamps value into [min, max].\nfunction clampValue(value, min, max) {\n return Math.max(min, Math.min(max, value));\n}\n\n// src/core/processor/worker/types.ts\nvar WORKER_MESSAGE_TYPE_PROBE = \"probe\";\nvar WORKER_MESSAGE_TYPE_AUDIO_CHUNK = \"audioChunk\";\nvar WORKER_RESPONSE_TYPE_PROBE_RESULT = \"probeResult\";\nvar WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR = \"f32-planar\";\n\n// src/core/processor/worker/visibility-tracker.ts\nvar MILLISECONDS_PER_SECOND3 = 1000;\nvar OVERLAY_LOG_FRAME_INTERVAL = 90;\nvar RECORDER_WORKER_LOG_PREFIX3 = \"[RecorderWorker]\";\n\n// Tracks intervals during which the page was hidden so the worker can apply an overlay to frames captured while hidden (not for screen capture).\nclass VisibilityTracker {\n hiddenIntervals = [];\n currentHiddenIntervalStart = null;\n pendingVisibilityUpdates = [];\n recordingStartTime = 0;\n isScreenCapture = false;\n logger;\n constructor(dependencies) {\n this.logger = dependencies.logger;\n }\n reset(recordingStartTime, isScreenCapture) {\n this.hiddenIntervals = [];\n this.currentHiddenIntervalStart = null;\n this.pendingVisibilityUpdates = [];\n 
this.recordingStartTime = recordingStartTime;\n this.isScreenCapture = isScreenCapture;\n }\n setRecordingStartTime(recordingStartTime) {\n this.recordingStartTime = recordingStartTime;\n }\n setIsScreenCapture(isScreenCapture) {\n this.isScreenCapture = isScreenCapture;\n }\n getPendingUpdatesCount() {\n return this.pendingVisibilityUpdates.length;\n }\n // True when the frame timestamp falls inside any completed hidden interval or the currently open one; always false for screen capture or when the overlay is disabled. Logs every OVERLAY_LOG_FRAME_INTERVAL frames.\n shouldApplyOverlay(parameters) {\n if (!parameters.overlayEnabled) {\n return false;\n }\n if (this.isScreenCapture) {\n return false;\n }\n const completedIntervalMatch = this.hiddenIntervals.some((interval) => parameters.timestamp >= interval.start && parameters.timestamp <= interval.end);\n const currentIntervalMatch = this.currentHiddenIntervalStart !== null && parameters.timestamp >= this.currentHiddenIntervalStart;\n let shouldApply = false;\n if (completedIntervalMatch) {\n shouldApply = true;\n }\n if (currentIntervalMatch) {\n shouldApply = true;\n }\n if (parameters.frameCount % OVERLAY_LOG_FRAME_INTERVAL === 0) {\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Overlay check`, {\n timestamp: parameters.timestamp,\n shouldApply,\n frameCount: parameters.frameCount,\n intervalsCount: this.hiddenIntervals.length\n });\n }\n return shouldApply;\n }\n // Buffers updates until the base video timestamp exists; otherwise applies immediately.\n handleUpdateVisibility(isHidden, timestamp, hasBaseVideoTimestamp, pausedDuration) {\n if (!hasBaseVideoTimestamp) {\n this.pendingVisibilityUpdates = [\n ...this.pendingVisibilityUpdates,\n { isHidden, timestamp }\n ];\n return;\n }\n this.processVisibilityUpdate(isHidden, timestamp, pausedDuration);\n }\n // Replays any updates buffered before timing was established, then clears the buffer.\n flushPendingUpdates(pausedDuration) {\n if (this.pendingVisibilityUpdates.length === 0) {\n return;\n }\n for (const update of this.pendingVisibilityUpdates) {\n this.processVisibilityUpdate(update.isHidden, update.timestamp, pausedDuration);\n }\n this.pendingVisibilityUpdates = [];\n }\n // Converts the wall-clock ms timestamp into recording-relative seconds (minus paused time) and opens/closes hidden intervals accordingly; zero-length or inverted intervals are discarded with a warning.\n processVisibilityUpdate(isHidden, timestamp, pausedDuration) {\n const timestampSeconds = timestamp / MILLISECONDS_PER_SECOND3;\n const normalizedTimestamp = 
timestampSeconds - this.recordingStartTime - pausedDuration;\n if (isHidden && this.currentHiddenIntervalStart === null) {\n this.currentHiddenIntervalStart = Math.max(0, normalizedTimestamp);\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Started hidden interval`, {\n start: this.currentHiddenIntervalStart\n });\n }\n if (isHidden) {\n return;\n }\n if (this.currentHiddenIntervalStart === null) {\n return;\n }\n const endTimestamp = Math.max(0, normalizedTimestamp);\n if (endTimestamp > this.currentHiddenIntervalStart) {\n const interval = {\n start: this.currentHiddenIntervalStart,\n end: endTimestamp\n };\n this.hiddenIntervals = [...this.hiddenIntervals, interval];\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Completed hidden interval`, {\n interval,\n duration: endTimestamp - this.currentHiddenIntervalStart,\n totalIntervals: this.hiddenIntervals.length\n });\n } else {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX3} Invalid interval (end <= start), discarding`);\n }\n this.currentHiddenIntervalStart = null;\n }\n}\n\n// src/core/processor/worker/recorder-worker.ts\n// Streaming chunk size for the MP4 StreamTarget (16 MiB).\nvar CHUNK_SIZE = 16 * 1024 * 1024;\nvar DEFAULT_OUTPUT_FORMAT = \"mp4\";\nvar MILLISECONDS_PER_SECOND4 = 1000;\nvar ERROR_AUDIO_BITRATE_INVALID = \"Audio bitrate must be greater than zero\";\nvar ERROR_AUDIO_SAMPLE_RATE_INVALID = \"Audio sample rate must be greater than zero\";\nvar ERROR_AUDIO_CHANNELS_INVALID = \"Audio channels must be greater than zero\";\nvar ERROR_AUDIO_FRAMES_INVALID = \"Audio frames must be greater than zero\";\nvar STEREO_CHANNEL_COUNT = 2;\nvar AUDIO_SAMPLE_AVERAGE_SCALE = 0.5;\nvar STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS2 = 500;\nvar MP4_FAST_START_DISABLED = false;\n\n// Worker-side recording pipeline: wires video/audio sources into a streamed MP4 Output and reacts to main-thread messages.\nclass RecorderWorker {\n output = null;\n videoSource = null;\n audioSource = null;\n videoProcessor = null;\n audioProcessor = null;\n config = null;\n videoProcessingActive = false;\n isStopping = false;\n isFinalized = false;\n bufferTracker;\n audioState;\n timestampManager;\n 
frameCompositor;\n overlayConfig = null;\n visibilityTracker;\n recordingStartTime = 0;\n isScreenCapture = false;\n totalSize = 0;\n expectedAudioChannels = null;\n expectedAudioSampleRate = null;\n pendingWriteCount = 0;\n // Wires buffer tracking, audio state, visibility tracking, timestamp management and the frame compositor, then registers the worker message listener.\n constructor() {\n this.bufferTracker = new BufferTracker({\n getBufferSize: () => this.totalSize,\n onBufferUpdate: (size, formatted) => {\n const response = {\n type: \"bufferUpdate\",\n size,\n formatted\n };\n self.postMessage(response);\n },\n setInterval: (handler, timeout) => self.setInterval(handler, timeout),\n clearInterval: (intervalId) => self.clearInterval(intervalId)\n });\n this.audioState = new AudioState({\n getNowMilliseconds: () => performance.now()\n });\n this.visibilityTracker = new VisibilityTracker({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data)\n }\n });\n this.timestampManager = new TimestampManager({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data)\n },\n getNowMilliseconds: () => performance.now()\n });\n this.frameCompositor = new FrameCompositor({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data),\n error: (message, data) => logger.error(message, data)\n },\n fetchResource: (input, init) => fetch(input, init),\n createImageBitmap: (image) => createImageBitmap(image),\n sendDebugLog: (_message, _payload) => {\n return;\n }\n });\n self.addEventListener(\"message\", this.handleMessage);\n }\n // Messages are dropped while stopping or after finalization.\n shouldIgnoreMessage() {\n if (this.isStopping) {\n return true;\n }\n if (this.isFinalized) {\n return true;\n }\n return false;\n }\n // Fire-and-forget wrapper: logs and reports any rejection back to the main thread.\n handleAsyncOperation(operation, context) {\n operation.catch((error) => {\n logger.error(`[RecorderWorker] Error in ${context}:`, error);\n this.sendError(error);\n });\n }\n // Reports WebCodecs/stream capability flags to the main thread.\n handleProbe() {\n const response = {\n type: WORKER_RESPONSE_TYPE_PROBE_RESULT,\n 
hasMediaStreamTrackProcessor: typeof MediaStreamTrackProcessor !== \"undefined\",\n hasVideoFrame: typeof VideoFrame !== \"undefined\",\n hasAudioData: typeof AudioData !== \"undefined\",\n hasOffscreenCanvas: typeof OffscreenCanvas !== \"undefined\",\n hasCreateImageBitmap: typeof createImageBitmap !== \"undefined\",\n hasReadableStream: typeof ReadableStream !== \"undefined\"\n };\n self.postMessage(response);\n }\n // Dispatch point for all main-thread -> worker messages.\n handleMessage = (event) => {\n const message = event.data;\n logger.debug(\"[RecorderWorker] Received message:\", { type: message.type });\n switch (message.type) {\n case WORKER_MESSAGE_TYPE_PROBE:\n this.handleProbe();\n return;\n case \"start\":\n this.handleStartMessage(message);\n return;\n case \"pause\":\n this.handlePause();\n return;\n case \"resume\":\n this.handleResume();\n return;\n case \"stop\":\n this.handleStopMessage();\n return;\n case \"toggleMute\":\n this.handleToggleMute();\n return;\n case WORKER_MESSAGE_TYPE_AUDIO_CHUNK:\n this.handleAudioChunk(message);\n return;\n case \"switchSource\":\n this.handleSwitchSourceMessage(message);\n return;\n case \"updateFps\":\n this.handleUpdateFps(message.fps);\n return;\n case \"updateVisibility\":\n this.visibilityTracker.handleUpdateVisibility(message.isHidden, message.timestamp, this.timestampManager.getBaseVideoTimestamp() !== null, this.audioState.getPausedDuration());\n return;\n case \"updateSourceType\":\n this.handleUpdateSourceType(message.isScreenCapture);\n return;\n default:\n this.sendError(new Error(`Unknown message type: ${message.type}`));\n }\n };\n // Normalizes optional track/stream fields and launches the async start sequence.\n handleStartMessage(message) {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] start ignored (stopping/finalized)\");\n return;\n }\n let videoTrack = null;\n if (message.videoTrack) {\n videoTrack = message.videoTrack;\n }\n let videoStream = null;\n if (message.videoStream) {\n videoStream = message.videoStream;\n }\n this.handleAsyncOperation(this.handleStart(message, videoTrack, videoStream), 
\"handleStart\");\n }\n handleStopMessage() {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] stop ignored (stopping/finalized)\");\n return;\n }\n this.handleAsyncOperation(this.handleStop(), \"handleStop\");\n }\n handleSwitchSourceMessage(message) {\n let videoTrack = null;\n if (message.videoTrack) {\n videoTrack = message.videoTrack;\n }\n let videoStream = null;\n if (message.videoStream) {\n videoStream = message.videoStream;\n }\n this.handleAsyncOperation(this.handleSwitchSource(videoTrack, videoStream), \"handleSwitchSource\");\n }\n // Rejects non-positive dimensions, fps, bitrate or key-frame interval.\n validateConfig(config) {\n requireDefined(config, \"Transcode config is required\");\n if (config.width !== undefined && config.width <= 0) {\n throw new Error(\"Video width must be greater than zero\");\n }\n if (config.height !== undefined && config.height <= 0) {\n throw new Error(\"Video height must be greater than zero\");\n }\n if (config.fps !== undefined && config.fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (config.bitrate !== undefined && typeof config.bitrate === \"number\" && config.bitrate <= 0) {\n throw new Error(\"Bitrate must be greater than zero\");\n }\n if (config.keyFrameInterval <= 0) {\n throw new Error(\"Key frame interval must be greater than zero\");\n }\n }\n // The worker currently muxes MP4 only.\n validateFormat(format) {\n if (format !== \"mp4\") {\n throw new Error(`Format ${format} is not yet supported in worker. 
Only MP4 is currently supported.`);\n }\n }\n // Resets all per-recording state before a new start.\n initializeRecordingState(config) {\n this.config = config;\n this.timestampManager.reset(config.fps);\n this.audioState.reset();\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.pendingWriteCount = 0;\n this.videoProcessingActive = false;\n this.frameCompositor.reset();\n this.recordingStartTime = 0;\n this.visibilityTracker.reset(this.recordingStartTime, this.isScreenCapture);\n }\n // Stores overlay settings and derives the recording start time in seconds (from the message when provided, otherwise performance.now()).\n setupOverlayConfig(overlayConfig) {\n let nextOverlayConfig = null;\n if (overlayConfig) {\n nextOverlayConfig = {\n enabled: overlayConfig.enabled,\n text: overlayConfig.text\n };\n }\n this.overlayConfig = nextOverlayConfig;\n let recordingStartTimeSeconds = performance.now() / MILLISECONDS_PER_SECOND4;\n if (overlayConfig && overlayConfig.recordingStartTime !== undefined) {\n recordingStartTimeSeconds = overlayConfig.recordingStartTime / MILLISECONDS_PER_SECOND4;\n }\n this.recordingStartTime = recordingStartTimeSeconds;\n this.visibilityTracker.setRecordingStartTime(this.recordingStartTime);\n const logData = {\n hasOverlayConfig: !!this.overlayConfig,\n overlayEnabled: this.overlayConfig?.enabled,\n overlayText: this.overlayConfig?.text,\n recordingStartTime: this.recordingStartTime\n };\n logger.debug(\"[RecorderWorker] Overlay config initialized\", logData);\n }\n // Builds the MP4 Output streamed in CHUNK_SIZE chunks through a WritableStream.\n createOutput() {\n const writable = new WritableStream({\n write: (chunk) => this.handleOutputChunkWrite(chunk)\n });\n this.output = new Output({\n format: new Mp4OutputFormat({\n fastStart: MP4_FAST_START_DISABLED\n }),\n target: new StreamTarget(writable, {\n chunked: true,\n chunkSize: CHUNK_SIZE\n })\n });\n }\n decrementPendingWriteCount() {\n this.pendingWriteCount -= 1;\n if (this.pendingWriteCount < 0) {\n this.pendingWriteCount = 0;\n }\n }\n // Tracks in-flight chunk writes so stop can wait for them to drain.\n handleOutputChunkWrite(chunk) {\n this.pendingWriteCount += 1;\n const writeOperation = Promise.resolve().then(() => {\n this.sendChunk(chunk.data, chunk.position);\n });\n return 
writeOperation.then(() => {\n this.decrementPendingWriteCount();\n }, (error) => {\n this.decrementPendingWriteCount();\n throw error;\n });\n }\n // Builds the VideoSampleSource from config and registers the video track on the output.\n createVideoSource(config) {\n const fps = this.timestampManager.getFrameRate();\n const keyFrameIntervalSeconds = config.keyFrameInterval;\n const videoSourceOptions = {\n codec: config.codec,\n width: config.width,\n height: config.height,\n sizeChangeBehavior: \"contain\",\n alpha: \"discard\",\n bitrateMode: \"variable\",\n latencyMode: \"quality\",\n contentHint: \"detail\",\n hardwareAcceleration: \"no-preference\",\n keyFrameInterval: keyFrameIntervalSeconds,\n bitrate: this.deserializeBitrate(config.bitrate)\n };\n this.videoSource = new VideoSampleSource(videoSourceOptions);\n const output = requireNonNull(this.output, \"Output must be initialized before adding video track\");\n const trackOptions = {};\n if (fps !== undefined) {\n trackOptions.frameRate = fps;\n }\n output.addVideoTrack(this.videoSource, trackOptions);\n }\n // Audio path fed by audioChunk messages: validates config, records the expected channel/sample-rate layout, and registers the audio track. Silently skips when audio config is absent.\n setupAudioSource(audioConfig, config) {\n if (!audioConfig) {\n return;\n }\n if (!config.audioBitrate) {\n return;\n }\n if (!config.audioCodec) {\n return;\n }\n if (config.audioBitrate <= 0) {\n throw new Error(ERROR_AUDIO_BITRATE_INVALID);\n }\n if (audioConfig.sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (audioConfig.numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n this.expectedAudioChannels = audioConfig.numberOfChannels;\n this.expectedAudioSampleRate = audioConfig.sampleRate;\n this.audioSource = new AudioSampleSource({\n codec: config.audioCodec,\n bitrate: config.audioBitrate,\n bitrateMode: \"variable\"\n });\n const output = requireNonNull(this.output, \"Output must be initialized before adding audio track\");\n output.addAudioTrack(this.audioSource);\n this.audioState.setProcessingActive(true);\n }\n // Audio path fed by a transferred ReadableStream: registers the track and starts the pull loop.\n setupAudioStream(audioStream, config) {\n if (!config.audioBitrate) {\n return;\n }\n if (!config.audioCodec) {\n 
return;\n }\n if (config.audioBitrate <= 0) {\n throw new Error(ERROR_AUDIO_BITRATE_INVALID);\n }\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.audioSource = new AudioSampleSource({\n codec: config.audioCodec,\n bitrate: config.audioBitrate,\n bitrateMode: \"variable\"\n });\n const output = requireNonNull(this.output, \"Output must be initialized before adding audio track\");\n output.addAudioTrack(this.audioSource);\n this.audioProcessor = audioStream.getReader();\n this.audioState.setProcessingActive(true);\n this.processAudioData();\n }\n // Full start sequence: validate config, clean up any previous output, reset state, attach video/audio sources, start muxing and report readiness.\n async handleStart(message, videoTrack, videoStream) {\n const audioConfig = message.audioConfig;\n let audioStream = null;\n if (message.audioStream) {\n audioStream = message.audioStream;\n }\n const config = message.config;\n const overlayConfig = message.overlayConfig;\n this.validateConfig(config);\n logger.debug(\"[RecorderWorker] handleStart called\", {\n hasVideoTrack: !!videoTrack,\n hasVideoStream: !!videoStream,\n hasAudioStream: !!audioStream,\n hasAudioConfig: !!audioConfig,\n config: {\n width: config.width,\n height: config.height,\n fps: config.fps,\n bitrate: config.bitrate\n },\n hasOverlayConfig: !!overlayConfig,\n overlayConfig\n });\n this.isStopping = false;\n this.isFinalized = false;\n if (this.output) {\n logger.debug(\"[RecorderWorker] Cleaning up existing output\");\n await this.cleanup();\n }\n this.initializeRecordingState(config);\n if (message.videoSettings) {\n this.frameCompositor.setVideoSettings(message.videoSettings);\n } else {\n this.frameCompositor.setVideoSettings(null);\n }\n if (message.viewportMetadata) {\n this.frameCompositor.setViewportMetadata(message.viewportMetadata);\n } else {\n this.frameCompositor.setViewportMetadata(null);\n }\n this.frameCompositor.setIsMobileDevice(message.isMobileDevice === true);\n this.setupOverlayConfig(overlayConfig);\n let format = config.format;\n if (!format) {\n format = DEFAULT_OUTPUT_FORMAT;\n }\n 
this.validateFormat(format);\n this.createOutput();\n this.createVideoSource(config);\n // videoStream takes precedence over videoTrack when both are supplied.\n if (videoStream) {\n this.setupVideoProcessingFromStream(videoStream);\n }\n if (!videoStream && videoTrack) {\n this.setupVideoProcessing(videoTrack);\n }\n if (audioStream) {\n this.setupAudioStream(audioStream, config);\n } else {\n this.setupAudioSource(audioConfig, config);\n }\n const output = requireNonNull(this.output, \"Output must be initialized before starting\");\n if (this.config?.watermark) {\n this.frameCompositor.prepareWatermark(this.config);\n }\n await output.start();\n this.bufferTracker.start();\n this.sendReady();\n this.sendStateChange(\"recording\");\n }\n // Reads frames from a MediaStreamTrack via MediaStreamTrackProcessor and starts the pull loop.\n setupVideoProcessing(videoTrack) {\n if (!this.videoSource) {\n return;\n }\n if (typeof MediaStreamTrackProcessor === \"undefined\") {\n throw new Error(\"MediaStreamTrackProcessor is not available in worker\");\n }\n const processor = new MediaStreamTrackProcessor({ track: videoTrack });\n this.videoProcessor = processor.readable.getReader();\n this.videoProcessingActive = true;\n this.processVideoFrames();\n }\n // Reads frames from an already-transferred ReadableStream of VideoFrames.\n setupVideoProcessingFromStream(videoStream) {\n if (!this.videoSource) {\n return;\n }\n this.videoProcessor = videoStream.getReader();\n this.videoProcessingActive = true;\n this.processVideoFrames();\n }\n // While paused, frames are still read and immediately closed to keep the stream flowing; returns false when the stream ends.\n async handlePausedVideoFrame() {\n if (!this.videoProcessor) {\n return false;\n }\n const pausedResult = await this.videoProcessor.read();\n if (pausedResult.done) {\n return false;\n }\n if (pausedResult.value) {\n pausedResult.value.close();\n }\n return true;\n }\n // Composites, timestamps and encodes a single frame; closes the bitmap and frame resources afterwards regardless of encode success.\n async processVideoFrame(videoFrame) {\n const videoSource = requireInitialized(this.videoSource, \"Video source\");\n const config = requireInitialized(this.config, \"Transcode config\");\n const pausedDuration = this.audioState.getPausedDuration();\n const frameTimestamp = this.timestampManager.calculateVideoFrameTimestamp({\n videoFrame,\n pausedDuration,\n recordingStartTime: this.recordingStartTime,\n 
pendingVisibilityUpdatesCount: this.visibilityTracker.getPendingUpdatesCount(),\n processPendingVisibilityUpdates: () => {\n this.visibilityTracker.flushPendingUpdates(pausedDuration);\n },\n isScreenCapture: this.isScreenCapture\n });\n let overlayEnabled = false;\n if (this.overlayConfig?.enabled) {\n overlayEnabled = true;\n }\n const shouldApplyOverlay = this.visibilityTracker.shouldApplyOverlay({\n timestamp: frameTimestamp,\n overlayEnabled,\n frameCount: this.timestampManager.getFrameCount()\n });\n const compositionResult = this.frameCompositor.composeFrame({\n videoFrame,\n overlayConfig: this.overlayConfig,\n shouldApplyOverlay,\n config\n });\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n const frameTiming = this.timestampManager.prepareFrameTiming({\n frameTimestamp,\n keyFrameIntervalSeconds: config.keyFrameInterval,\n lastAudioTimestamp\n });\n const sample = new VideoSample(compositionResult.frameToProcess, {\n timestamp: frameTiming.finalTimestamp,\n duration: frameTiming.frameDuration\n });\n let videoSampleOptions;\n if (frameTiming.isKeyFrame) {\n videoSampleOptions = { keyFrame: true };\n }\n // Encode errors are reported to the main thread but do not abort the pipeline; the frame commit is skipped on failure.\n const addError = await videoSource.add(sample, videoSampleOptions).then(() => null).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add video frame: ${errorMessage}`));\n return error;\n });\n sample.close();\n if (!addError) {\n const commitResult = this.timestampManager.commitFrame({\n finalTimestamp: frameTiming.finalTimestamp,\n isKeyFrame: frameTiming.isKeyFrame,\n lastAudioTimestamp,\n audioProcessingActive: this.audioState.isActive(),\n isScreenCapture: this.isScreenCapture\n });\n if (commitResult.shouldLogDrift) {\n logger.debug(\"[RecorderWorker] AV drift metrics\", {\n frameCount: commitResult.frameCount,\n lastAudioTimestamp,\n lastVideoTimestamp: commitResult.lastVideoTimestamp,\n audioVideoDrift: commitResult.audioVideoDrift,\n isScreenCapture: 
this.isScreenCapture\n });\n }\n }\n if (compositionResult.imageBitmap) {\n compositionResult.imageBitmap.close();\n }\n if (compositionResult.frameToProcess !== videoFrame) {\n compositionResult.frameToProcess.close();\n }\n videoFrame.close();\n }\n // Main video pull loop; exits on stop, pause-stream end, or source stream end. Per-frame errors are logged and the frame closed, but the loop continues.\n async processVideoFrames() {\n if (!(this.videoProcessor && this.videoSource)) {\n return;\n }\n while (this.videoProcessingActive && !this.isStopping) {\n if (this.audioState.getIsPaused()) {\n const shouldContinue = await this.handlePausedVideoFrame();\n if (!shouldContinue) {\n break;\n }\n continue;\n }\n const result = await this.videoProcessor.read();\n if (result.done) {\n break;\n }\n const videoFrame = result.value;\n if (!videoFrame) {\n continue;\n }\n await this.processVideoFrame(videoFrame).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n logger.error(\"[RecorderWorker] Error processing video frame\", errorMessage);\n videoFrame.close();\n });\n }\n }\n // While paused, audio data is dropped.\n handlePausedAudioData(audioData) {\n audioData.close();\n }\n // Copies planar audio data into a single Float32Array laid out channel-major (channel 0 frames first, then channel 1, ...).\n createAudioBuffer(audioData) {\n const numberOfFrames = audioData.numberOfFrames;\n if (numberOfFrames <= 0) {\n throw new Error(ERROR_AUDIO_FRAMES_INVALID);\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n const audioBuffer = new Float32Array(numberOfFrames * numberOfChannels);\n let channelIndex = 0;\n while (channelIndex < numberOfChannels) {\n const startIndex = channelIndex * numberOfFrames;\n const endIndex = startIndex + numberOfFrames;\n const channelBuffer = audioBuffer.subarray(startIndex, endIndex);\n audioData.copyTo(channelBuffer, { planeIndex: channelIndex });\n channelIndex += 1;\n }\n return audioBuffer;\n }\n createAudioSample(audioBuffer, audioTimestamp, sampleRate, numberOfChannels) {\n if (sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n let 
bufferToWrite = audioBuffer;\n if (this.audioState.getIsMuted()) {\n bufferToWrite = new Float32Array(audioBuffer.length);\n }\n return new AudioSample({\n data: bufferToWrite,\n format: WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR,\n numberOfChannels,\n sampleRate,\n timestamp: audioTimestamp\n });\n }\n async processAudioSample(audioData, audioSample, audioTimestamp, duration) {\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n this.audioState.updateLastAudioTimestamp(audioTimestamp, duration);\n audioSample.close();\n audioData.close();\n }\n async processAudioData() {\n if (!(this.audioProcessor && this.audioSource)) {\n return;\n }\n while (this.audioState.isActive() && !this.isStopping) {\n const result = await this.audioProcessor.read();\n if (result.done) {\n this.audioState.setProcessingActive(false);\n break;\n }\n const audioData = result.value;\n if (this.shouldSkipAudioData(audioData)) {\n continue;\n }\n const audioFormat = this.getAudioDataFormat(audioData);\n if (!audioFormat) {\n continue;\n }\n const audioBuffer = this.createAudioBuffer(audioData);\n const normalized = this.normalizeAudioBufferForFormat(audioBuffer, audioFormat);\n const duration = audioFormat.numberOfFrames / audioFormat.sampleRate;\n const audioTimestamp = this.audioState.getLastAudioTimestamp();\n const audioSample = this.createAudioSample(normalized.buffer, audioTimestamp, audioFormat.sampleRate, normalized.numberOfChannels);\n await this.processAudioSample(audioData, audioSample, audioTimestamp, duration);\n }\n }\n handleAudioChunk(message) {\n this.handleAsyncOperation(this.processAudioChunk(message), \"handleAudioChunk\");\n }\n async processAudioChunk(message) {\n if (this.shouldIgnoreMessage()) {\n return;\n }\n if (!this.audioSource) {\n return;\n }\n if 
(!this.audioState.isActive()) {\n return;\n }\n if (this.audioState.getIsPaused()) {\n return;\n }\n if (message.frames <= 0) {\n throw new Error(ERROR_AUDIO_FRAMES_INVALID);\n }\n if (message.sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (message.numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n this.setExpectedAudioFormat(message.sampleRate, message.numberOfChannels);\n if (this.expectedAudioSampleRate !== null && message.sampleRate !== this.expectedAudioSampleRate) {\n logger.warn(\"[RecorderWorker] Audio sample rate changed\", {\n expectedSampleRate: this.expectedAudioSampleRate,\n receivedSampleRate: message.sampleRate\n });\n return;\n }\n let audioBuffer = message.data;\n let numberOfChannels = message.numberOfChannels;\n if (this.expectedAudioChannels !== null) {\n const normalized = this.normalizeAudioBuffer(audioBuffer, message.frames, numberOfChannels, this.expectedAudioChannels);\n audioBuffer = normalized.buffer;\n numberOfChannels = normalized.numberOfChannels;\n }\n const expectedSamples = message.frames * numberOfChannels;\n if (audioBuffer.length < expectedSamples) {\n throw new Error(\"Audio buffer length is shorter than expected\");\n }\n const sampleRate = message.sampleRate;\n const duration = message.frames / sampleRate;\n const audioTimestamp = this.audioState.getAudioTimestamp(message.timestamp);\n if (this.audioState.getIsMuted()) {\n audioBuffer = new Float32Array(audioBuffer.length);\n }\n const audioSample = new AudioSample({\n data: audioBuffer,\n format: WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR,\n numberOfChannels,\n sampleRate,\n timestamp: audioTimestamp\n });\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n 
this.audioState.updateLastAudioTimestamp(audioTimestamp, duration);\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n logger.debug(\"[RecorderWorker] Audio sample processed\", {\n lastAudioTimestamp,\n duration,\n sampleRate,\n numberOfFrames: message.frames\n });\n audioSample.close();\n }\n shouldSkipAudioData(audioData) {\n if (!audioData) {\n return true;\n }\n if (this.audioState.getIsPaused()) {\n this.handlePausedAudioData(audioData);\n return true;\n }\n return false;\n }\n getAudioDataFormat(audioData) {\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n audioData.close();\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n const numberOfFrames = audioData.numberOfFrames;\n const numberOfChannels = audioData.numberOfChannels;\n this.setExpectedAudioFormat(sampleRate, numberOfChannels);\n if (this.expectedAudioSampleRate !== null && sampleRate !== this.expectedAudioSampleRate) {\n logger.warn(\"[RecorderWorker] Audio sample rate changed\", {\n expectedSampleRate: this.expectedAudioSampleRate,\n receivedSampleRate: sampleRate\n });\n audioData.close();\n return null;\n }\n return {\n sampleRate,\n numberOfFrames,\n numberOfChannels\n };\n }\n normalizeAudioBufferForFormat(audioBuffer, audioFormat) {\n let bufferToWrite = audioBuffer;\n let channelsToWrite = audioFormat.numberOfChannels;\n if (this.expectedAudioChannels !== null) {\n const normalized = this.normalizeAudioBuffer(audioBuffer, audioFormat.numberOfFrames, audioFormat.numberOfChannels, this.expectedAudioChannels);\n bufferToWrite = normalized.buffer;\n channelsToWrite = normalized.numberOfChannels;\n }\n return {\n buffer: bufferToWrite,\n numberOfChannels: channelsToWrite\n };\n }\n handlePause() {\n if (!this.audioState.pause()) {\n return;\n }\n this.sendStateChange(\"paused\");\n }\n handleResume() {\n if (!this.audioState.resume()) {\n return;\n }\n this.sendStateChange(\"recording\");\n }\n handleStop() {\n if (this.isStopping) {\n 
logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return Promise.resolve();\n }\n if (this.isFinalized) {\n logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return Promise.resolve();\n }\n this.isStopping = true;\n this.isFinalized = true;\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n return runStopTransition({\n finalizeStopSequence: () => this.finalizeStopSequence(),\n completeStop: () => this.completeStop(),\n recoverStopFailure: () => {\n if (this.isFinalized) {\n this.resetStopStateAfterFailure();\n }\n return this.cleanup().catch((cleanupError) => {\n logger.error(\"[RecorderWorker] Stop failure cleanup failed\", {\n error: extractErrorMessage(cleanupError)\n });\n });\n },\n clearStoppingFlag: () => {\n this.isStopping = false;\n }\n });\n }\n async completeStop() {\n await this.cleanup();\n this.sendStateChange(\"stopped\");\n }\n async finalizeStopSequence() {\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n if (this.output) {\n await this.output.finalize();\n }\n await waitForPendingWritesToDrain({\n getPendingWriteCount: () => this.pendingWriteCount,\n timeoutMilliseconds: STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS2\n });\n }\n resetStopStateAfterFailure() {\n this.isFinalized = false;\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n }\n handleToggleMute() {\n this.audioState.toggleMuted();\n }\n handleUpdateFps(fps) {\n if (fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n const previousFps = this.timestampManager.getFrameRate();\n logger.debug(\"[RecorderWorker] Updating FPS\", {\n fps,\n previousFps\n });\n this.timestampManager.setFrameRate(fps);\n if (this.config) {\n this.config.fps = fps;\n }\n }\n handleUpdateSourceType(isScreenCapture) 
{\n logger.debug(\"[RecorderWorker] Updating source type\", {\n isScreenCapture,\n previousIsScreenCapture: this.isScreenCapture\n });\n this.isScreenCapture = isScreenCapture;\n this.visibilityTracker.setIsScreenCapture(isScreenCapture);\n }\n async handleSwitchSource(videoTrack, videoStream) {\n if (!(videoTrack || videoStream)) {\n throw new Error(\"Video track or stream is required\");\n }\n const frameRate = this.timestampManager.getFrameRate();\n requireDefined(frameRate, \"Frame rate must be set\");\n if (frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (this.videoProcessor) {\n this.videoProcessingActive = false;\n await this.videoProcessor.cancel();\n let drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n while (!drainResult.done) {\n drainResult.value?.close();\n drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n }\n this.videoProcessor = null;\n }\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n const baseVideoTimestamp = this.timestampManager.getBaseVideoTimestamp();\n requireNonNull(baseVideoTimestamp, \"Base video timestamp must be set for source switch\");\n const switchResult = this.timestampManager.handleSourceSwitch(lastAudioTimestamp);\n logger.debug(\"[RecorderWorker] handleSwitchSource - preserving baseVideoTimestamp\", {\n continuationTimestamp: switchResult.continuationTimestamp,\n lastVideoTimestamp: this.timestampManager.getLastVideoTimestamp(),\n frameRate,\n isScreenCapture: this.isScreenCapture,\n baseVideoTimestamp,\n recordingStartTime: this.recordingStartTime,\n lastAudioTimestamp,\n previousVideoTimestamp: switchResult.previousVideoTimestamp,\n minFrameDuration: switchResult.minFrameDuration,\n rawDrift: switchResult.rawDrift,\n driftOffset: switchResult.driftOffset\n });\n if (videoStream) {\n this.setupVideoProcessingFromStream(videoStream);\n return;\n }\n if (videoTrack) {\n 
this.setupVideoProcessing(videoTrack);\n }\n }\n async cleanup() {\n this.bufferTracker.stop();\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n const videoSource = this.videoSource;\n if (videoSource && !this.isFinalized) {\n videoSource.close();\n }\n if (videoSource) {\n this.videoSource = null;\n }\n const audioSource = this.audioSource;\n if (audioSource && !this.isFinalized) {\n audioSource.close();\n }\n if (audioSource) {\n this.audioSource = null;\n }\n const output = this.output;\n if (output && !this.isFinalized) {\n await output.cancel().catch((error) => {\n logger.warn(\"[RecorderWorker] cancel failed (ignored, possibly finalized)\", error);\n });\n this.isFinalized = true;\n }\n if (output) {\n this.output = null;\n }\n this.timestampManager.reset(undefined);\n this.totalSize = 0;\n this.audioState.reset();\n this.frameCompositor.reset();\n this.overlayConfig = null;\n this.recordingStartTime = 0;\n this.isScreenCapture = false;\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.pendingWriteCount = 0;\n this.visibilityTracker.reset(this.recordingStartTime, this.isScreenCapture);\n }\n setExpectedAudioFormat(sampleRate, numberOfChannels) {\n if (this.expectedAudioSampleRate === null) {\n this.expectedAudioSampleRate = sampleRate;\n }\n if (this.expectedAudioChannels === null) {\n this.expectedAudioChannels = numberOfChannels;\n }\n }\n normalizeAudioBuffer(audioBuffer, frames, actualChannels, expectedChannels) {\n if (actualChannels === expectedChannels) {\n return { buffer: audioBuffer, numberOfChannels: actualChannels };\n }\n if (actualChannels === 1 && expectedChannels === STEREO_CHANNEL_COUNT) {\n const expandedBuffer = new Float32Array(frames * STEREO_CHANNEL_COUNT);\n 
expandedBuffer.set(audioBuffer, 0);\n expandedBuffer.set(audioBuffer, frames);\n return {\n buffer: expandedBuffer,\n numberOfChannels: STEREO_CHANNEL_COUNT\n };\n }\n if (actualChannels === STEREO_CHANNEL_COUNT && expectedChannels === 1) {\n const mixedBuffer = new Float32Array(frames);\n let frameIndex = 0;\n while (frameIndex < frames) {\n const leftSample = audioBuffer[frameIndex];\n const rightSample = audioBuffer[frameIndex + frames];\n mixedBuffer[frameIndex] = (leftSample + rightSample) * AUDIO_SAMPLE_AVERAGE_SCALE;\n frameIndex += 1;\n }\n return { buffer: mixedBuffer, numberOfChannels: 1 };\n }\n logger.warn(\"[RecorderWorker] Audio channel mismatch\", {\n expectedChannels,\n receivedChannels: actualChannels\n });\n return { buffer: audioBuffer, numberOfChannels: actualChannels };\n }\n sendReady() {\n const response = { type: \"ready\" };\n self.postMessage(response);\n }\n sendError(error) {\n const errorMessage = extractErrorMessage(error);\n const response = {\n type: \"error\",\n error: errorMessage\n };\n self.postMessage(response);\n }\n sendChunk(data, position) {\n this.totalSize = Math.max(this.totalSize, position + data.length);\n const response = {\n type: \"chunk\",\n data,\n position\n };\n const buffer = data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength);\n self.postMessage(response, [buffer]);\n }\n sendStateChange(state) {\n const response = {\n type: \"stateChange\",\n state\n };\n self.postMessage(response);\n }\n deserializeBitrate(bitrate) {\n if (typeof bitrate === \"number\") {\n return bitrate;\n }\n if (bitrate === \"low\") {\n return QUALITY_LOW;\n }\n if (bitrate === \"medium\") {\n return QUALITY_MEDIUM;\n }\n if (bitrate === \"high\") {\n return QUALITY_HIGH;\n }\n if (bitrate === \"very-high\") {\n return QUALITY_VERY_HIGH;\n }\n return QUALITY_HIGH;\n }\n}\nnew RecorderWorker;\n";
1841
+ export declare const workerCode = "// ../../node_modules/mediabunny/dist/modules/src/misc.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nfunction assert(x) {\n if (!x) {\n throw new Error(\"Assertion failed.\");\n }\n}\nvar last = (arr) => {\n return arr && arr[arr.length - 1];\n};\nvar isU32 = (value) => {\n return value >= 0 && value < 2 ** 32;\n};\n\nclass Bitstream {\n constructor(bytes) {\n this.bytes = bytes;\n this.pos = 0;\n }\n seekToByte(byteOffset) {\n this.pos = 8 * byteOffset;\n }\n readBit() {\n const byteIndex = Math.floor(this.pos / 8);\n const byte = this.bytes[byteIndex] ?? 0;\n const bitIndex = 7 - (this.pos & 7);\n const bit = (byte & 1 << bitIndex) >> bitIndex;\n this.pos++;\n return bit;\n }\n readBits(n) {\n if (n === 1) {\n return this.readBit();\n }\n let result = 0;\n for (let i = 0;i < n; i++) {\n result <<= 1;\n result |= this.readBit();\n }\n return result;\n }\n writeBits(n, value) {\n const end = this.pos + n;\n for (let i = this.pos;i < end; i++) {\n const byteIndex = Math.floor(i / 8);\n let byte = this.bytes[byteIndex];\n const bitIndex = 7 - (i & 7);\n byte &= ~(1 << bitIndex);\n byte |= (value & 1 << end - i - 1) >> end - i - 1 << bitIndex;\n this.bytes[byteIndex] = byte;\n }\n this.pos = end;\n }\n readAlignedByte() {\n if (this.pos % 8 !== 0) {\n throw new Error(\"Bitstream is not byte-aligned.\");\n }\n const byteIndex = this.pos / 8;\n const byte = this.bytes[byteIndex] ?? 
0;\n this.pos += 8;\n return byte;\n }\n skipBits(n) {\n this.pos += n;\n }\n getBitsLeft() {\n return this.bytes.length * 8 - this.pos;\n }\n clone() {\n const clone = new Bitstream(this.bytes);\n clone.pos = this.pos;\n return clone;\n }\n}\nvar readExpGolomb = (bitstream) => {\n let leadingZeroBits = 0;\n while (bitstream.readBits(1) === 0 && leadingZeroBits < 32) {\n leadingZeroBits++;\n }\n if (leadingZeroBits >= 32) {\n throw new Error(\"Invalid exponential-Golomb code.\");\n }\n const result = (1 << leadingZeroBits) - 1 + bitstream.readBits(leadingZeroBits);\n return result;\n};\nvar readSignedExpGolomb = (bitstream) => {\n const codeNum = readExpGolomb(bitstream);\n return (codeNum & 1) === 0 ? -(codeNum >> 1) : codeNum + 1 >> 1;\n};\nvar toUint8Array = (source) => {\n if (source.constructor === Uint8Array) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new Uint8Array(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new Uint8Array(source);\n }\n};\nvar toDataView = (source) => {\n if (source.constructor === DataView) {\n return source;\n } else if (ArrayBuffer.isView(source)) {\n return new DataView(source.buffer, source.byteOffset, source.byteLength);\n } else {\n return new DataView(source);\n }\n};\nvar textEncoder = /* @__PURE__ */ new TextEncoder;\nvar COLOR_PRIMARIES_MAP = {\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n bt2020: 9,\n smpte432: 12\n};\nvar TRANSFER_CHARACTERISTICS_MAP = {\n bt709: 1,\n smpte170m: 6,\n linear: 8,\n \"iec61966-2-1\": 13,\n pq: 16,\n hlg: 18\n};\nvar MATRIX_COEFFICIENTS_MAP = {\n rgb: 0,\n bt709: 1,\n bt470bg: 5,\n smpte170m: 6,\n \"bt2020-ncl\": 9\n};\nvar colorSpaceIsComplete = (colorSpace) => {\n return !!colorSpace && !!colorSpace.primaries && !!colorSpace.transfer && !!colorSpace.matrix && colorSpace.fullRange !== undefined;\n};\nvar isAllowSharedBufferSource = (x) => {\n return x instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && x instanceof 
SharedArrayBuffer || ArrayBuffer.isView(x);\n};\n\nclass AsyncMutex {\n constructor() {\n this.currentPromise = Promise.resolve();\n this.pending = 0;\n }\n async acquire() {\n let resolver;\n const nextPromise = new Promise((resolve) => {\n let resolved = false;\n resolver = () => {\n if (resolved) {\n return;\n }\n resolve();\n this.pending--;\n resolved = true;\n };\n });\n const currentPromiseAlias = this.currentPromise;\n this.currentPromise = nextPromise;\n this.pending++;\n await currentPromiseAlias;\n return resolver;\n }\n}\nvar promiseWithResolvers = () => {\n let resolve;\n let reject;\n const promise = new Promise((res, rej) => {\n resolve = res;\n reject = rej;\n });\n return { promise, resolve, reject };\n};\nvar assertNever = (x) => {\n throw new Error(`Unexpected value: ${x}`);\n};\nvar setUint24 = (view, byteOffset, value, littleEndian) => {\n value = value >>> 0;\n value = value & 16777215;\n if (littleEndian) {\n view.setUint8(byteOffset, value & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value >>> 16 & 255);\n } else {\n view.setUint8(byteOffset, value >>> 16 & 255);\n view.setUint8(byteOffset + 1, value >>> 8 & 255);\n view.setUint8(byteOffset + 2, value & 255);\n }\n};\nvar setInt24 = (view, byteOffset, value, littleEndian) => {\n value = clamp(value, -8388608, 8388607);\n if (value < 0) {\n value = value + 16777216 & 16777215;\n }\n setUint24(view, byteOffset, value, littleEndian);\n};\nvar clamp = (value, min, max) => {\n return Math.max(min, Math.min(max, value));\n};\nvar UNDETERMINED_LANGUAGE = \"und\";\nvar ISO_639_2_REGEX = /^[a-z]{3}$/;\nvar isIso639Dash2LanguageCode = (x) => {\n return ISO_639_2_REGEX.test(x);\n};\nvar SECOND_TO_MICROSECOND_FACTOR = 1e6 * (1 + Number.EPSILON);\nvar computeRationalApproximation = (x, maxDenominator) => {\n const sign = x < 0 ? 
-1 : 1;\n x = Math.abs(x);\n let prevNumerator = 0, prevDenominator = 1;\n let currNumerator = 1, currDenominator = 0;\n let remainder = x;\n while (true) {\n const integer = Math.floor(remainder);\n const nextNumerator = integer * currNumerator + prevNumerator;\n const nextDenominator = integer * currDenominator + prevDenominator;\n if (nextDenominator > maxDenominator) {\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n }\n prevNumerator = currNumerator;\n prevDenominator = currDenominator;\n currNumerator = nextNumerator;\n currDenominator = nextDenominator;\n remainder = 1 / (remainder - integer);\n if (!isFinite(remainder)) {\n break;\n }\n }\n return {\n numerator: sign * currNumerator,\n denominator: currDenominator\n };\n};\n\nclass CallSerializer {\n constructor() {\n this.currentPromise = Promise.resolve();\n }\n call(fn) {\n return this.currentPromise = this.currentPromise.then(fn);\n }\n}\nvar isWebKitCache = null;\nvar isWebKit = () => {\n if (isWebKitCache !== null) {\n return isWebKitCache;\n }\n return isWebKitCache = !!(typeof navigator !== \"undefined\" && (navigator.vendor?.match(/apple/i) || /AppleWebKit/.test(navigator.userAgent) && !/Chrome/.test(navigator.userAgent) || /\\b(iPad|iPhone|iPod)\\b/.test(navigator.userAgent)));\n};\nvar isFirefoxCache = null;\nvar isFirefox = () => {\n if (isFirefoxCache !== null) {\n return isFirefoxCache;\n }\n return isFirefoxCache = typeof navigator !== \"undefined\" && navigator.userAgent?.includes(\"Firefox\");\n};\nvar keyValueIterator = function* (object) {\n for (const key in object) {\n const value = object[key];\n if (value === undefined) {\n continue;\n }\n yield { key, value };\n }\n};\nvar polyfillSymbolDispose = () => {\n Symbol.dispose ??= Symbol(\"Symbol.dispose\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/metadata.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla 
Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass RichImageData {\n constructor(data, mimeType) {\n this.data = data;\n this.mimeType = mimeType;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType must be a string.\");\n }\n }\n}\n\nclass AttachedFile {\n constructor(data, mimeType, name, description) {\n this.data = data;\n this.mimeType = mimeType;\n this.name = name;\n this.description = description;\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (mimeType !== undefined && typeof mimeType !== \"string\") {\n throw new TypeError(\"mimeType, when provided, must be a string.\");\n }\n if (name !== undefined && typeof name !== \"string\") {\n throw new TypeError(\"name, when provided, must be a string.\");\n }\n if (description !== undefined && typeof description !== \"string\") {\n throw new TypeError(\"description, when provided, must be a string.\");\n }\n }\n}\nvar validateMetadataTags = (tags) => {\n if (!tags || typeof tags !== \"object\") {\n throw new TypeError(\"tags must be an object.\");\n }\n if (tags.title !== undefined && typeof tags.title !== \"string\") {\n throw new TypeError(\"tags.title, when provided, must be a string.\");\n }\n if (tags.description !== undefined && typeof tags.description !== \"string\") {\n throw new TypeError(\"tags.description, when provided, must be a string.\");\n }\n if (tags.artist !== undefined && typeof tags.artist !== \"string\") {\n throw new TypeError(\"tags.artist, when provided, must be a string.\");\n }\n if (tags.album !== undefined && typeof tags.album !== \"string\") {\n throw new TypeError(\"tags.album, when provided, must be a string.\");\n }\n if (tags.albumArtist !== undefined && typeof tags.albumArtist !== \"string\") {\n throw 
new TypeError(\"tags.albumArtist, when provided, must be a string.\");\n }\n if (tags.trackNumber !== undefined && (!Number.isInteger(tags.trackNumber) || tags.trackNumber <= 0)) {\n throw new TypeError(\"tags.trackNumber, when provided, must be a positive integer.\");\n }\n if (tags.tracksTotal !== undefined && (!Number.isInteger(tags.tracksTotal) || tags.tracksTotal <= 0)) {\n throw new TypeError(\"tags.tracksTotal, when provided, must be a positive integer.\");\n }\n if (tags.discNumber !== undefined && (!Number.isInteger(tags.discNumber) || tags.discNumber <= 0)) {\n throw new TypeError(\"tags.discNumber, when provided, must be a positive integer.\");\n }\n if (tags.discsTotal !== undefined && (!Number.isInteger(tags.discsTotal) || tags.discsTotal <= 0)) {\n throw new TypeError(\"tags.discsTotal, when provided, must be a positive integer.\");\n }\n if (tags.genre !== undefined && typeof tags.genre !== \"string\") {\n throw new TypeError(\"tags.genre, when provided, must be a string.\");\n }\n if (tags.date !== undefined && (!(tags.date instanceof Date) || Number.isNaN(tags.date.getTime()))) {\n throw new TypeError(\"tags.date, when provided, must be a valid Date.\");\n }\n if (tags.lyrics !== undefined && typeof tags.lyrics !== \"string\") {\n throw new TypeError(\"tags.lyrics, when provided, must be a string.\");\n }\n if (tags.images !== undefined) {\n if (!Array.isArray(tags.images)) {\n throw new TypeError(\"tags.images, when provided, must be an array.\");\n }\n for (const image of tags.images) {\n if (!image || typeof image !== \"object\") {\n throw new TypeError(\"Each image in tags.images must be an object.\");\n }\n if (!(image.data instanceof Uint8Array)) {\n throw new TypeError(\"Each image.data must be a Uint8Array.\");\n }\n if (typeof image.mimeType !== \"string\") {\n throw new TypeError(\"Each image.mimeType must be a string.\");\n }\n if (![\"coverFront\", \"coverBack\", \"unknown\"].includes(image.kind)) {\n throw new TypeError(\"Each 
image.kind must be 'coverFront', 'coverBack', or 'unknown'.\");\n }\n }\n }\n if (tags.comment !== undefined && typeof tags.comment !== \"string\") {\n throw new TypeError(\"tags.comment, when provided, must be a string.\");\n }\n if (tags.raw !== undefined) {\n if (!tags.raw || typeof tags.raw !== \"object\") {\n throw new TypeError(\"tags.raw, when provided, must be an object.\");\n }\n for (const value of Object.values(tags.raw)) {\n if (value !== null && typeof value !== \"string\" && !(value instanceof Uint8Array) && !(value instanceof RichImageData) && !(value instanceof AttachedFile)) {\n throw new TypeError(\"Each value in tags.raw must be a string, Uint8Array, RichImageData, AttachedFile, or null.\");\n }\n }\n }\n};\nvar validateTrackDisposition = (disposition) => {\n if (!disposition || typeof disposition !== \"object\") {\n throw new TypeError(\"disposition must be an object.\");\n }\n if (disposition.default !== undefined && typeof disposition.default !== \"boolean\") {\n throw new TypeError(\"disposition.default must be a boolean.\");\n }\n if (disposition.forced !== undefined && typeof disposition.forced !== \"boolean\") {\n throw new TypeError(\"disposition.forced must be a boolean.\");\n }\n if (disposition.original !== undefined && typeof disposition.original !== \"boolean\") {\n throw new TypeError(\"disposition.original must be a boolean.\");\n }\n if (disposition.commentary !== undefined && typeof disposition.commentary !== \"boolean\") {\n throw new TypeError(\"disposition.commentary must be a boolean.\");\n }\n if (disposition.hearingImpaired !== undefined && typeof disposition.hearingImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.hearingImpaired must be a boolean.\");\n }\n if (disposition.visuallyImpaired !== undefined && typeof disposition.visuallyImpaired !== \"boolean\") {\n throw new TypeError(\"disposition.visuallyImpaired must be a boolean.\");\n }\n};\n\n// 
../../node_modules/mediabunny/dist/modules/src/codec.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar VIDEO_CODECS = [\n \"avc\",\n \"hevc\",\n \"vp9\",\n \"av1\",\n \"vp8\"\n];\nvar PCM_AUDIO_CODECS = [\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n \"pcm-u8\",\n \"pcm-s8\",\n \"ulaw\",\n \"alaw\"\n];\nvar NON_PCM_AUDIO_CODECS = [\n \"aac\",\n \"opus\",\n \"mp3\",\n \"vorbis\",\n \"flac\"\n];\nvar AUDIO_CODECS = [\n ...NON_PCM_AUDIO_CODECS,\n ...PCM_AUDIO_CODECS\n];\nvar SUBTITLE_CODECS = [\n \"webvtt\"\n];\nvar AVC_LEVEL_TABLE = [\n { maxMacroblocks: 99, maxBitrate: 64000, maxDpbMbs: 396, level: 10 },\n { maxMacroblocks: 396, maxBitrate: 192000, maxDpbMbs: 900, level: 11 },\n { maxMacroblocks: 396, maxBitrate: 384000, maxDpbMbs: 2376, level: 12 },\n { maxMacroblocks: 396, maxBitrate: 768000, maxDpbMbs: 2376, level: 13 },\n { maxMacroblocks: 396, maxBitrate: 2000000, maxDpbMbs: 2376, level: 20 },\n { maxMacroblocks: 792, maxBitrate: 4000000, maxDpbMbs: 4752, level: 21 },\n { maxMacroblocks: 1620, maxBitrate: 4000000, maxDpbMbs: 8100, level: 22 },\n { maxMacroblocks: 1620, maxBitrate: 1e7, maxDpbMbs: 8100, level: 30 },\n { maxMacroblocks: 3600, maxBitrate: 14000000, maxDpbMbs: 18000, level: 31 },\n { maxMacroblocks: 5120, maxBitrate: 20000000, maxDpbMbs: 20480, level: 32 },\n { maxMacroblocks: 8192, maxBitrate: 20000000, maxDpbMbs: 32768, level: 40 },\n { maxMacroblocks: 8192, maxBitrate: 50000000, maxDpbMbs: 32768, level: 41 },\n { maxMacroblocks: 8704, maxBitrate: 50000000, maxDpbMbs: 34816, level: 42 },\n { maxMacroblocks: 22080, maxBitrate: 135000000, maxDpbMbs: 110400, level: 50 },\n { maxMacroblocks: 36864, 
maxBitrate: 240000000, maxDpbMbs: 184320, level: 51 },\n { maxMacroblocks: 36864, maxBitrate: 240000000, maxDpbMbs: 184320, level: 52 },\n { maxMacroblocks: 139264, maxBitrate: 240000000, maxDpbMbs: 696320, level: 60 },\n { maxMacroblocks: 139264, maxBitrate: 480000000, maxDpbMbs: 696320, level: 61 },\n { maxMacroblocks: 139264, maxBitrate: 800000000, maxDpbMbs: 696320, level: 62 }\n];\nvar HEVC_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 128000, tier: \"L\", level: 30 },\n { maxPictureSize: 122880, maxBitrate: 1500000, tier: \"L\", level: 60 },\n { maxPictureSize: 245760, maxBitrate: 3000000, tier: \"L\", level: 63 },\n { maxPictureSize: 552960, maxBitrate: 6000000, tier: \"L\", level: 90 },\n { maxPictureSize: 983040, maxBitrate: 1e7, tier: \"L\", level: 93 },\n { maxPictureSize: 2228224, maxBitrate: 12000000, tier: \"L\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, tier: \"H\", level: 120 },\n { maxPictureSize: 2228224, maxBitrate: 20000000, tier: \"L\", level: 123 },\n { maxPictureSize: 2228224, maxBitrate: 50000000, tier: \"H\", level: 123 },\n { maxPictureSize: 8912896, maxBitrate: 25000000, tier: \"L\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 150 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"L\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 153 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"L\", level: 156 },\n { maxPictureSize: 8912896, maxBitrate: 240000000, tier: \"H\", level: 156 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"L\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 180 },\n { maxPictureSize: 35651584, maxBitrate: 120000000, tier: \"L\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 183 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"L\", level: 186 },\n { maxPictureSize: 35651584, 
maxBitrate: 800000000, tier: \"H\", level: 186 }\n];\nvar VP9_LEVEL_TABLE = [\n { maxPictureSize: 36864, maxBitrate: 200000, level: 10 },\n { maxPictureSize: 73728, maxBitrate: 800000, level: 11 },\n { maxPictureSize: 122880, maxBitrate: 1800000, level: 20 },\n { maxPictureSize: 245760, maxBitrate: 3600000, level: 21 },\n { maxPictureSize: 552960, maxBitrate: 7200000, level: 30 },\n { maxPictureSize: 983040, maxBitrate: 12000000, level: 31 },\n { maxPictureSize: 2228224, maxBitrate: 18000000, level: 40 },\n { maxPictureSize: 2228224, maxBitrate: 30000000, level: 41 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, level: 50 },\n { maxPictureSize: 8912896, maxBitrate: 120000000, level: 51 },\n { maxPictureSize: 8912896, maxBitrate: 180000000, level: 52 },\n { maxPictureSize: 35651584, maxBitrate: 180000000, level: 60 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, level: 61 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, level: 62 }\n];\nvar AV1_LEVEL_TABLE = [\n { maxPictureSize: 147456, maxBitrate: 1500000, tier: \"M\", level: 0 },\n { maxPictureSize: 278784, maxBitrate: 3000000, tier: \"M\", level: 1 },\n { maxPictureSize: 665856, maxBitrate: 6000000, tier: \"M\", level: 4 },\n { maxPictureSize: 1065024, maxBitrate: 1e7, tier: \"M\", level: 5 },\n { maxPictureSize: 2359296, maxBitrate: 12000000, tier: \"M\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 30000000, tier: \"H\", level: 8 },\n { maxPictureSize: 2359296, maxBitrate: 20000000, tier: \"M\", level: 9 },\n { maxPictureSize: 2359296, maxBitrate: 50000000, tier: \"H\", level: 9 },\n { maxPictureSize: 8912896, maxBitrate: 30000000, tier: \"M\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 1e8, tier: \"H\", level: 12 },\n { maxPictureSize: 8912896, maxBitrate: 40000000, tier: \"M\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 160000000, tier: \"H\", level: 13 },\n { maxPictureSize: 8912896, maxBitrate: 60000000, tier: \"M\", level: 14 },\n { maxPictureSize: 
8912896, maxBitrate: 240000000, tier: \"H\", level: 14 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 15 },\n { maxPictureSize: 35651584, maxBitrate: 60000000, tier: \"M\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 240000000, tier: \"H\", level: 16 },\n { maxPictureSize: 35651584, maxBitrate: 1e8, tier: \"M\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 480000000, tier: \"H\", level: 17 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 18 },\n { maxPictureSize: 35651584, maxBitrate: 160000000, tier: \"M\", level: 19 },\n { maxPictureSize: 35651584, maxBitrate: 800000000, tier: \"H\", level: 19 }\n];\nvar buildVideoCodecString = (codec, width, height, bitrate) => {\n if (codec === \"avc\") {\n const profileIndication = 100;\n const totalMacroblocks = Math.ceil(width / 16) * Math.ceil(height / 16);\n const levelInfo = AVC_LEVEL_TABLE.find((level) => totalMacroblocks <= level.maxMacroblocks && bitrate <= level.maxBitrate) ?? last(AVC_LEVEL_TABLE);\n const levelIndication = levelInfo ? levelInfo.level : 0;\n const hexProfileIndication = profileIndication.toString(16).padStart(2, \"0\");\n const hexProfileCompatibility = \"00\";\n const hexLevelIndication = levelIndication.toString(16).padStart(2, \"0\");\n return `avc1.${hexProfileIndication}${hexProfileCompatibility}${hexLevelIndication}`;\n } else if (codec === \"hevc\") {\n const profilePrefix = \"\";\n const profileIdc = 1;\n const compatibilityFlags = \"6\";\n const pictureSize = width * height;\n const levelInfo = HEVC_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? 
last(HEVC_LEVEL_TABLE);\n const constraintFlags = \"B0\";\n return \"hev1.\" + `${profilePrefix}${profileIdc}.` + `${compatibilityFlags}.` + `${levelInfo.tier}${levelInfo.level}.` + `${constraintFlags}`;\n } else if (codec === \"vp8\") {\n return \"vp8\";\n } else if (codec === \"vp9\") {\n const profile = \"00\";\n const pictureSize = width * height;\n const levelInfo = VP9_LEVEL_TABLE.find((level) => pictureSize <= level.maxPictureSize && bitrate <= level.maxBitrate) ?? last(VP9_LEVEL_TABLE);\n const bitDepth = \"08\";\n return `vp09.${profile}.${levelInfo.level.toString().padStart(2, \"0\")}.${bitDepth}`;\n } else if (codec === \"av1\") {\n const profile = 0;\n const pictureSize = width * height;\n const levelInfo = AV1_LEVEL_TABLE.find((level2) => pictureSize <= level2.maxPictureSize && bitrate <= level2.maxBitrate) ?? last(AV1_LEVEL_TABLE);\n const level = levelInfo.level.toString().padStart(2, \"0\");\n const bitDepth = \"08\";\n return `av01.${profile}.${level}${levelInfo.tier}.${bitDepth}`;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar generateAv1CodecConfigurationFromCodecString = (codecString) => {\n const parts = codecString.split(\".\");\n const marker = 1;\n const version = 1;\n const firstByte = (marker << 7) + version;\n const profile = Number(parts[1]);\n const levelAndTier = parts[2];\n const level = Number(levelAndTier.slice(0, -1));\n const secondByte = (profile << 5) + level;\n const tier = levelAndTier.slice(-1) === \"H\" ? 1 : 0;\n const bitDepth = Number(parts[3]);\n const highBitDepth = bitDepth === 8 ? 0 : 1;\n const twelveBit = 0;\n const monochrome = parts[4] ? Number(parts[4]) : 0;\n const chromaSubsamplingX = parts[5] ? Number(parts[5][0]) : 1;\n const chromaSubsamplingY = parts[5] ? Number(parts[5][1]) : 1;\n const chromaSamplePosition = parts[5] ? 
Number(parts[5][2]) : 0;\n const thirdByte = (tier << 7) + (highBitDepth << 6) + (twelveBit << 5) + (monochrome << 4) + (chromaSubsamplingX << 3) + (chromaSubsamplingY << 2) + chromaSamplePosition;\n const initialPresentationDelayPresent = 0;\n const fourthByte = initialPresentationDelayPresent;\n return [firstByte, secondByte, thirdByte, fourthByte];\n};\nvar buildAudioCodecString = (codec, numberOfChannels, sampleRate) => {\n if (codec === \"aac\") {\n if (numberOfChannels >= 2 && sampleRate <= 24000) {\n return \"mp4a.40.29\";\n }\n if (sampleRate <= 24000) {\n return \"mp4a.40.5\";\n }\n return \"mp4a.40.2\";\n } else if (codec === \"mp3\") {\n return \"mp3\";\n } else if (codec === \"opus\") {\n return \"opus\";\n } else if (codec === \"vorbis\") {\n return \"vorbis\";\n } else if (codec === \"flac\") {\n return \"flac\";\n } else if (PCM_AUDIO_CODECS.includes(codec)) {\n return codec;\n }\n throw new TypeError(`Unhandled codec '${codec}'.`);\n};\nvar aacFrequencyTable = [\n 96000,\n 88200,\n 64000,\n 48000,\n 44100,\n 32000,\n 24000,\n 22050,\n 16000,\n 12000,\n 11025,\n 8000,\n 7350\n];\nvar aacChannelMap = [-1, 1, 2, 3, 4, 5, 6, 8];\nvar parseAacAudioSpecificConfig = (bytes) => {\n if (!bytes || bytes.byteLength < 2) {\n throw new TypeError(\"AAC description must be at least 2 bytes long.\");\n }\n const bitstream = new Bitstream(bytes);\n let objectType = bitstream.readBits(5);\n if (objectType === 31) {\n objectType = 32 + bitstream.readBits(6);\n }\n const frequencyIndex = bitstream.readBits(4);\n let sampleRate = null;\n if (frequencyIndex === 15) {\n sampleRate = bitstream.readBits(24);\n } else {\n if (frequencyIndex < aacFrequencyTable.length) {\n sampleRate = aacFrequencyTable[frequencyIndex];\n }\n }\n const channelConfiguration = bitstream.readBits(4);\n let numberOfChannels = null;\n if (channelConfiguration >= 1 && channelConfiguration <= 7) {\n numberOfChannels = aacChannelMap[channelConfiguration];\n }\n return {\n objectType,\n 
frequencyIndex,\n sampleRate,\n channelConfiguration,\n numberOfChannels\n };\n};\nvar buildAacAudioSpecificConfig = (config) => {\n let frequencyIndex = aacFrequencyTable.indexOf(config.sampleRate);\n let customSampleRate = null;\n if (frequencyIndex === -1) {\n frequencyIndex = 15;\n customSampleRate = config.sampleRate;\n }\n const channelConfiguration = aacChannelMap.indexOf(config.numberOfChannels);\n if (channelConfiguration === -1) {\n throw new TypeError(`Unsupported number of channels: ${config.numberOfChannels}`);\n }\n let bitCount = 5 + 4 + 4;\n if (config.objectType >= 32) {\n bitCount += 6;\n }\n if (frequencyIndex === 15) {\n bitCount += 24;\n }\n const byteCount = Math.ceil(bitCount / 8);\n const bytes = new Uint8Array(byteCount);\n const bitstream = new Bitstream(bytes);\n if (config.objectType < 32) {\n bitstream.writeBits(5, config.objectType);\n } else {\n bitstream.writeBits(5, 31);\n bitstream.writeBits(6, config.objectType - 32);\n }\n bitstream.writeBits(4, frequencyIndex);\n if (frequencyIndex === 15) {\n bitstream.writeBits(24, customSampleRate);\n }\n bitstream.writeBits(4, channelConfiguration);\n return bytes;\n};\nvar PCM_CODEC_REGEX = /^pcm-([usf])(\\d+)+(be)?$/;\nvar parsePcmCodec = (codec) => {\n assert(PCM_AUDIO_CODECS.includes(codec));\n if (codec === \"ulaw\") {\n return { dataType: \"ulaw\", sampleSize: 1, littleEndian: true, silentValue: 255 };\n } else if (codec === \"alaw\") {\n return { dataType: \"alaw\", sampleSize: 1, littleEndian: true, silentValue: 213 };\n }\n const match = PCM_CODEC_REGEX.exec(codec);\n assert(match);\n let dataType;\n if (match[1] === \"u\") {\n dataType = \"unsigned\";\n } else if (match[1] === \"s\") {\n dataType = \"signed\";\n } else {\n dataType = \"float\";\n }\n const sampleSize = Number(match[2]) / 8;\n const littleEndian = match[3] !== \"be\";\n const silentValue = codec === \"pcm-u8\" ? 
2 ** 7 : 0;\n return { dataType, sampleSize, littleEndian, silentValue };\n};\nvar inferCodecFromCodecString = (codecString) => {\n if (codecString.startsWith(\"avc1\") || codecString.startsWith(\"avc3\")) {\n return \"avc\";\n } else if (codecString.startsWith(\"hev1\") || codecString.startsWith(\"hvc1\")) {\n return \"hevc\";\n } else if (codecString === \"vp8\") {\n return \"vp8\";\n } else if (codecString.startsWith(\"vp09\")) {\n return \"vp9\";\n } else if (codecString.startsWith(\"av01\")) {\n return \"av1\";\n }\n if (codecString.startsWith(\"mp4a.40\") || codecString === \"mp4a.67\") {\n return \"aac\";\n } else if (codecString === \"mp3\" || codecString === \"mp4a.69\" || codecString === \"mp4a.6B\" || codecString === \"mp4a.6b\") {\n return \"mp3\";\n } else if (codecString === \"opus\") {\n return \"opus\";\n } else if (codecString === \"vorbis\") {\n return \"vorbis\";\n } else if (codecString === \"flac\") {\n return \"flac\";\n } else if (codecString === \"ulaw\") {\n return \"ulaw\";\n } else if (codecString === \"alaw\") {\n return \"alaw\";\n } else if (PCM_CODEC_REGEX.test(codecString)) {\n return codecString;\n }\n if (codecString === \"webvtt\") {\n return \"webvtt\";\n }\n return null;\n};\nvar getVideoEncoderConfigExtension = (codec) => {\n if (codec === \"avc\") {\n return {\n avc: {\n format: \"avc\"\n }\n };\n } else if (codec === \"hevc\") {\n return {\n hevc: {\n format: \"hevc\"\n }\n };\n }\n return {};\n};\nvar getAudioEncoderConfigExtension = (codec) => {\n if (codec === \"aac\") {\n return {\n aac: {\n format: \"aac\"\n }\n };\n } else if (codec === \"opus\") {\n return {\n opus: {\n format: \"opus\"\n }\n };\n }\n return {};\n};\nvar VALID_VIDEO_CODEC_STRING_PREFIXES = [\"avc1\", \"avc3\", \"hev1\", \"hvc1\", \"vp8\", \"vp09\", \"av01\"];\nvar AVC_CODEC_STRING_REGEX = /^(avc1|avc3)\\.[0-9a-fA-F]{6}$/;\nvar HEVC_CODEC_STRING_REGEX = /^(hev1|hvc1)\\.(?:[ABC]?\\d+)\\.[0-9a-fA-F]{1,8}\\.[LH]\\d+(?:\\.[0-9a-fA-F]{1,2}){0,6}$/;\nvar 
VP9_CODEC_STRING_REGEX = /^vp09(?:\\.\\d{2}){3}(?:(?:\\.\\d{2}){5})?$/;\nvar AV1_CODEC_STRING_REGEX = /^av01\\.\\d\\.\\d{2}[MH]\\.\\d{2}(?:\\.\\d\\.\\d{3}\\.\\d{2}\\.\\d{2}\\.\\d{2}\\.\\d)?$/;\nvar validateVideoChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Video chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Video chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Video chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_VIDEO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string must be a valid video codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if (!Number.isInteger(metadata.decoderConfig.codedWidth) || metadata.decoderConfig.codedWidth <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedWidth (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.codedHeight) || metadata.decoderConfig.codedHeight <= 0) {\n throw new TypeError(\"Video chunk metadata decoder configuration must specify a valid codedHeight (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Video chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.colorSpace !== undefined) {\n const { colorSpace 
} = metadata.decoderConfig;\n if (typeof colorSpace !== \"object\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace, when provided, must be an object.\");\n }\n const primariesValues = Object.keys(COLOR_PRIMARIES_MAP);\n if (colorSpace.primaries != null && !primariesValues.includes(colorSpace.primaries)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace primaries, when defined, must be one of` + ` ${primariesValues.join(\", \")}.`);\n }\n const transferValues = Object.keys(TRANSFER_CHARACTERISTICS_MAP);\n if (colorSpace.transfer != null && !transferValues.includes(colorSpace.transfer)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace transfer, when defined, must be one of` + ` ${transferValues.join(\", \")}.`);\n }\n const matrixValues = Object.keys(MATRIX_COEFFICIENTS_MAP);\n if (colorSpace.matrix != null && !matrixValues.includes(colorSpace.matrix)) {\n throw new TypeError(`Video chunk metadata decoder configuration colorSpace matrix, when defined, must be one of` + ` ${matrixValues.join(\", \")}.`);\n }\n if (colorSpace.fullRange != null && typeof colorSpace.fullRange !== \"boolean\") {\n throw new TypeError(\"Video chunk metadata decoder configuration colorSpace fullRange, when defined, must be a boolean.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"avc1\") || metadata.decoderConfig.codec.startsWith(\"avc3\")) {\n if (!AVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AVC must be a valid AVC codec string as\" + \" specified in Section 3.4 of RFC 6381.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"hev1\") || metadata.decoderConfig.codec.startsWith(\"hvc1\")) {\n if (!HEVC_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for HEVC must be a valid HEVC codec string 
as\" + \" specified in Section E.3 of ISO 14496-15.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp8\")) {\n if (metadata.decoderConfig.codec !== \"vp8\") {\n throw new TypeError('Video chunk metadata decoder configuration codec string for VP8 must be \"vp8\".');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vp09\")) {\n if (!VP9_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for VP9 must be a valid VP9 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://www.webmproject.org/vp9/mp4/.');\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"av01\")) {\n if (!AV1_CODEC_STRING_REGEX.test(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Video chunk metadata decoder configuration codec string for AV1 must be a valid AV1 codec string as\" + ' specified in Section \"Codecs Parameter String\" of https://aomediacodec.github.io/av1-isobmff/.');\n }\n }\n};\nvar VALID_AUDIO_CODEC_STRING_PREFIXES = [\"mp4a\", \"mp3\", \"opus\", \"vorbis\", \"flac\", \"ulaw\", \"alaw\", \"pcm\"];\nvar validateAudioChunkMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Audio chunk metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Audio chunk metadata must be an object.\");\n }\n if (!metadata.decoderConfig) {\n throw new TypeError(\"Audio chunk metadata must include a decoder configuration.\");\n }\n if (typeof metadata.decoderConfig !== \"object\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must be an object.\");\n }\n if (typeof metadata.decoderConfig.codec !== \"string\") {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a codec string.\");\n }\n if (!VALID_AUDIO_CODEC_STRING_PREFIXES.some((prefix) => metadata.decoderConfig.codec.startsWith(prefix))) {\n throw new TypeError(\"Audio chunk metadata decoder 
configuration codec string must be a valid audio codec string as specified in\" + \" the WebCodecs Codec Registry.\");\n }\n if (!Number.isInteger(metadata.decoderConfig.sampleRate) || metadata.decoderConfig.sampleRate <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid sampleRate (positive integer).\");\n }\n if (!Number.isInteger(metadata.decoderConfig.numberOfChannels) || metadata.decoderConfig.numberOfChannels <= 0) {\n throw new TypeError(\"Audio chunk metadata decoder configuration must specify a valid numberOfChannels (positive integer).\");\n }\n if (metadata.decoderConfig.description !== undefined) {\n if (!isAllowSharedBufferSource(metadata.decoderConfig.description)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when defined, must be an ArrayBuffer or an\" + \" ArrayBuffer view.\");\n }\n }\n if (metadata.decoderConfig.codec.startsWith(\"mp4a\") && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n const validStrings = [\"mp4a.40.2\", \"mp4a.40.02\", \"mp4a.40.5\", \"mp4a.40.05\", \"mp4a.40.29\", \"mp4a.67\"];\n if (!validStrings.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for AAC must be a valid AAC codec string as\" + \" specified in https://www.w3.org/TR/webcodecs-aac-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"mp3\") || metadata.decoderConfig.codec.startsWith(\"mp4a\")) {\n if (metadata.decoderConfig.codec !== \"mp3\" && metadata.decoderConfig.codec !== \"mp4a.69\" && metadata.decoderConfig.codec !== \"mp4a.6B\" && metadata.decoderConfig.codec !== \"mp4a.6b\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for MP3 must be \"mp3\", \"mp4a.69\" or' + ' \"mp4a.6B\".');\n }\n } else if 
(metadata.decoderConfig.codec.startsWith(\"opus\")) {\n if (metadata.decoderConfig.codec !== \"opus\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Opus must be \"opus\".');\n }\n if (metadata.decoderConfig.description && metadata.decoderConfig.description.byteLength < 18) {\n throw new TypeError(\"Audio chunk metadata decoder configuration description, when specified, is expected to be an\" + \" Identification Header as specified in Section 5.1 of RFC 7845.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"vorbis\")) {\n if (metadata.decoderConfig.codec !== \"vorbis\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for Vorbis must be \"vorbis\".');\n }\n if (!metadata.decoderConfig.description) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for Vorbis must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-vorbis-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"flac\")) {\n if (metadata.decoderConfig.codec !== \"flac\") {\n throw new TypeError('Audio chunk metadata decoder configuration codec string for FLAC must be \"flac\".');\n }\n const minDescriptionSize = 4 + 4 + 34;\n if (!metadata.decoderConfig.description || metadata.decoderConfig.description.byteLength < minDescriptionSize) {\n throw new TypeError(\"Audio chunk metadata decoder configuration for FLAC must include a description, which is expected to\" + \" adhere to the format described in https://www.w3.org/TR/webcodecs-flac-codec-registration/.\");\n }\n } else if (metadata.decoderConfig.codec.startsWith(\"pcm\") || metadata.decoderConfig.codec.startsWith(\"ulaw\") || metadata.decoderConfig.codec.startsWith(\"alaw\")) {\n if (!PCM_AUDIO_CODECS.includes(metadata.decoderConfig.codec)) {\n throw new TypeError(\"Audio chunk metadata decoder configuration codec string for PCM must be one of the 
supported PCM\" + ` codecs (${PCM_AUDIO_CODECS.join(\", \")}).`);\n }\n }\n};\nvar validateSubtitleMetadata = (metadata) => {\n if (!metadata) {\n throw new TypeError(\"Subtitle metadata must be provided.\");\n }\n if (typeof metadata !== \"object\") {\n throw new TypeError(\"Subtitle metadata must be an object.\");\n }\n if (!metadata.config) {\n throw new TypeError(\"Subtitle metadata must include a config object.\");\n }\n if (typeof metadata.config !== \"object\") {\n throw new TypeError(\"Subtitle metadata config must be an object.\");\n }\n if (typeof metadata.config.description !== \"string\") {\n throw new TypeError(\"Subtitle metadata config description must be a string.\");\n }\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/codec-data.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar AvcNalUnitType;\n(function(AvcNalUnitType2) {\n AvcNalUnitType2[AvcNalUnitType2[\"NON_IDR_SLICE\"] = 1] = \"NON_IDR_SLICE\";\n AvcNalUnitType2[AvcNalUnitType2[\"SLICE_DPA\"] = 2] = \"SLICE_DPA\";\n AvcNalUnitType2[AvcNalUnitType2[\"SLICE_DPB\"] = 3] = \"SLICE_DPB\";\n AvcNalUnitType2[AvcNalUnitType2[\"SLICE_DPC\"] = 4] = \"SLICE_DPC\";\n AvcNalUnitType2[AvcNalUnitType2[\"IDR\"] = 5] = \"IDR\";\n AvcNalUnitType2[AvcNalUnitType2[\"SEI\"] = 6] = \"SEI\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS\"] = 7] = \"SPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"PPS\"] = 8] = \"PPS\";\n AvcNalUnitType2[AvcNalUnitType2[\"AUD\"] = 9] = \"AUD\";\n AvcNalUnitType2[AvcNalUnitType2[\"SPS_EXT\"] = 13] = \"SPS_EXT\";\n})(AvcNalUnitType || (AvcNalUnitType = {}));\nvar HevcNalUnitType;\n(function(HevcNalUnitType2) {\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_N\"] = 8] = \"RASL_N\";\n HevcNalUnitType2[HevcNalUnitType2[\"RASL_R\"] = 9] = \"RASL_R\";\n 
HevcNalUnitType2[HevcNalUnitType2[\"BLA_W_LP\"] = 16] = \"BLA_W_LP\";\n HevcNalUnitType2[HevcNalUnitType2[\"RSV_IRAP_VCL23\"] = 23] = \"RSV_IRAP_VCL23\";\n HevcNalUnitType2[HevcNalUnitType2[\"VPS_NUT\"] = 32] = \"VPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SPS_NUT\"] = 33] = \"SPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PPS_NUT\"] = 34] = \"PPS_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"AUD_NUT\"] = 35] = \"AUD_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"PREFIX_SEI_NUT\"] = 39] = \"PREFIX_SEI_NUT\";\n HevcNalUnitType2[HevcNalUnitType2[\"SUFFIX_SEI_NUT\"] = 40] = \"SUFFIX_SEI_NUT\";\n})(HevcNalUnitType || (HevcNalUnitType = {}));\nvar iterateNalUnitsInAnnexB = function* (packetData) {\n let i = 0;\n let nalStart = -1;\n while (i < packetData.length - 2) {\n const zeroIndex = packetData.indexOf(0, i);\n if (zeroIndex === -1 || zeroIndex >= packetData.length - 2) {\n break;\n }\n i = zeroIndex;\n let startCodeLength = 0;\n if (i + 3 < packetData.length && packetData[i + 1] === 0 && packetData[i + 2] === 0 && packetData[i + 3] === 1) {\n startCodeLength = 4;\n } else if (packetData[i + 1] === 0 && packetData[i + 2] === 1) {\n startCodeLength = 3;\n }\n if (startCodeLength === 0) {\n i++;\n continue;\n }\n if (nalStart !== -1 && i > nalStart) {\n yield {\n offset: nalStart,\n length: i - nalStart\n };\n }\n nalStart = i + startCodeLength;\n i = nalStart;\n }\n if (nalStart !== -1 && nalStart < packetData.length) {\n yield {\n offset: nalStart,\n length: packetData.length - nalStart\n };\n }\n};\nvar extractNalUnitTypeForAvc = (byte) => {\n return byte & 31;\n};\nvar removeEmulationPreventionBytes = (data) => {\n const result = [];\n const len = data.length;\n for (let i = 0;i < len; i++) {\n if (i + 2 < len && data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 3) {\n result.push(0, 0);\n i += 2;\n } else {\n result.push(data[i]);\n }\n }\n return new Uint8Array(result);\n};\nvar ANNEX_B_START_CODE = new Uint8Array([0, 0, 0, 1]);\nvar 
concatNalUnitsInLengthPrefixed = (nalUnits, lengthSize) => {\n const totalLength = nalUnits.reduce((a, b) => a + lengthSize + b.byteLength, 0);\n const result = new Uint8Array(totalLength);\n let offset = 0;\n for (const nalUnit of nalUnits) {\n const dataView = new DataView(result.buffer, result.byteOffset, result.byteLength);\n switch (lengthSize) {\n case 1:\n dataView.setUint8(offset, nalUnit.byteLength);\n break;\n case 2:\n dataView.setUint16(offset, nalUnit.byteLength, false);\n break;\n case 3:\n setUint24(dataView, offset, nalUnit.byteLength, false);\n break;\n case 4:\n dataView.setUint32(offset, nalUnit.byteLength, false);\n break;\n }\n offset += lengthSize;\n result.set(nalUnit, offset);\n offset += nalUnit.byteLength;\n }\n return result;\n};\nvar extractAvcDecoderConfigurationRecord = (packetData) => {\n try {\n const spsUnits = [];\n const ppsUnits = [];\n const spsExtUnits = [];\n for (const loc of iterateNalUnitsInAnnexB(packetData)) {\n const nalUnit = packetData.subarray(loc.offset, loc.offset + loc.length);\n const type = extractNalUnitTypeForAvc(nalUnit[0]);\n if (type === AvcNalUnitType.SPS) {\n spsUnits.push(nalUnit);\n } else if (type === AvcNalUnitType.PPS) {\n ppsUnits.push(nalUnit);\n } else if (type === AvcNalUnitType.SPS_EXT) {\n spsExtUnits.push(nalUnit);\n }\n }\n if (spsUnits.length === 0) {\n return null;\n }\n if (ppsUnits.length === 0) {\n return null;\n }\n const spsData = spsUnits[0];\n const spsInfo = parseAvcSps(spsData);\n assert(spsInfo !== null);\n const hasExtendedData = spsInfo.profileIdc === 100 || spsInfo.profileIdc === 110 || spsInfo.profileIdc === 122 || spsInfo.profileIdc === 144;\n return {\n configurationVersion: 1,\n avcProfileIndication: spsInfo.profileIdc,\n profileCompatibility: spsInfo.constraintFlags,\n avcLevelIndication: spsInfo.levelIdc,\n lengthSizeMinusOne: 3,\n sequenceParameterSets: spsUnits,\n pictureParameterSets: ppsUnits,\n chromaFormat: hasExtendedData ? 
spsInfo.chromaFormatIdc : null,\n bitDepthLumaMinus8: hasExtendedData ? spsInfo.bitDepthLumaMinus8 : null,\n bitDepthChromaMinus8: hasExtendedData ? spsInfo.bitDepthChromaMinus8 : null,\n sequenceParameterSetExt: hasExtendedData ? spsExtUnits : null\n };\n } catch (error) {\n console.error(\"Error building AVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar serializeAvcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push(record.avcProfileIndication);\n bytes.push(record.profileCompatibility);\n bytes.push(record.avcLevelIndication);\n bytes.push(252 | record.lengthSizeMinusOne & 3);\n bytes.push(224 | record.sequenceParameterSets.length & 31);\n for (const sps of record.sequenceParameterSets) {\n const length = sps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(sps[i]);\n }\n }\n bytes.push(record.pictureParameterSets.length);\n for (const pps of record.pictureParameterSets) {\n const length = pps.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(pps[i]);\n }\n }\n if (record.avcProfileIndication === 100 || record.avcProfileIndication === 110 || record.avcProfileIndication === 122 || record.avcProfileIndication === 144) {\n assert(record.chromaFormat !== null);\n assert(record.bitDepthLumaMinus8 !== null);\n assert(record.bitDepthChromaMinus8 !== null);\n assert(record.sequenceParameterSetExt !== null);\n bytes.push(252 | record.chromaFormat & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.sequenceParameterSetExt.length);\n for (const spsExt of record.sequenceParameterSetExt) {\n const length = spsExt.byteLength;\n bytes.push(length >> 8);\n bytes.push(length & 255);\n for (let i = 0;i < length; i++) {\n bytes.push(spsExt[i]);\n }\n }\n }\n return new 
Uint8Array(bytes);\n};\nvar parseAvcSps = (sps) => {\n try {\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(1);\n bitstream.skipBits(2);\n const nalUnitType = bitstream.readBits(5);\n if (nalUnitType !== 7) {\n return null;\n }\n const profileIdc = bitstream.readAlignedByte();\n const constraintFlags = bitstream.readAlignedByte();\n const levelIdc = bitstream.readAlignedByte();\n readExpGolomb(bitstream);\n let chromaFormatIdc = 1;\n let bitDepthLumaMinus8 = 0;\n let bitDepthChromaMinus8 = 0;\n let separateColourPlaneFlag = 0;\n if (profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244 || profileIdc === 44 || profileIdc === 83 || profileIdc === 86 || profileIdc === 118 || profileIdc === 128) {\n chromaFormatIdc = readExpGolomb(bitstream);\n if (chromaFormatIdc === 3) {\n separateColourPlaneFlag = bitstream.readBits(1);\n }\n bitDepthLumaMinus8 = readExpGolomb(bitstream);\n bitDepthChromaMinus8 = readExpGolomb(bitstream);\n bitstream.skipBits(1);\n const seqScalingMatrixPresentFlag = bitstream.readBits(1);\n if (seqScalingMatrixPresentFlag) {\n for (let i = 0;i < (chromaFormatIdc !== 3 ? 8 : 12); i++) {\n const seqScalingListPresentFlag = bitstream.readBits(1);\n if (seqScalingListPresentFlag) {\n const sizeOfScalingList = i < 6 ? 16 : 64;\n let lastScale = 8;\n let nextScale = 8;\n for (let j = 0;j < sizeOfScalingList; j++) {\n if (nextScale !== 0) {\n const deltaScale = readSignedExpGolomb(bitstream);\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? 
lastScale : nextScale;\n }\n }\n }\n }\n }\n readExpGolomb(bitstream);\n const picOrderCntType = readExpGolomb(bitstream);\n if (picOrderCntType === 0) {\n readExpGolomb(bitstream);\n } else if (picOrderCntType === 1) {\n bitstream.skipBits(1);\n readSignedExpGolomb(bitstream);\n readSignedExpGolomb(bitstream);\n const numRefFramesInPicOrderCntCycle = readExpGolomb(bitstream);\n for (let i = 0;i < numRefFramesInPicOrderCntCycle; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n const picWidthInMbsMinus1 = readExpGolomb(bitstream);\n const picHeightInMapUnitsMinus1 = readExpGolomb(bitstream);\n const codedWidth = 16 * (picWidthInMbsMinus1 + 1);\n const codedHeight = 16 * (picHeightInMapUnitsMinus1 + 1);\n let displayWidth = codedWidth;\n let displayHeight = codedHeight;\n const frameMbsOnlyFlag = bitstream.readBits(1);\n if (!frameMbsOnlyFlag) {\n bitstream.skipBits(1);\n }\n bitstream.skipBits(1);\n const frameCroppingFlag = bitstream.readBits(1);\n if (frameCroppingFlag) {\n const frameCropLeftOffset = readExpGolomb(bitstream);\n const frameCropRightOffset = readExpGolomb(bitstream);\n const frameCropTopOffset = readExpGolomb(bitstream);\n const frameCropBottomOffset = readExpGolomb(bitstream);\n let cropUnitX;\n let cropUnitY;\n const chromaArrayType = separateColourPlaneFlag === 0 ? chromaFormatIdc : 0;\n if (chromaArrayType === 0) {\n cropUnitX = 1;\n cropUnitY = 2 - frameMbsOnlyFlag;\n } else {\n const subWidthC = chromaFormatIdc === 3 ? 1 : 2;\n const subHeightC = chromaFormatIdc === 1 ? 
2 : 1;\n cropUnitX = subWidthC;\n cropUnitY = subHeightC * (2 - frameMbsOnlyFlag);\n }\n displayWidth -= cropUnitX * (frameCropLeftOffset + frameCropRightOffset);\n displayHeight -= cropUnitY * (frameCropTopOffset + frameCropBottomOffset);\n }\n let colourPrimaries = 2;\n let transferCharacteristics = 2;\n let matrixCoefficients = 2;\n let fullRangeFlag = 0;\n let numReorderFrames = null;\n let maxDecFrameBuffering = null;\n const vuiParametersPresentFlag = bitstream.readBits(1);\n if (vuiParametersPresentFlag) {\n const aspectRatioInfoPresentFlag = bitstream.readBits(1);\n if (aspectRatioInfoPresentFlag) {\n const aspectRatioIdc = bitstream.readBits(8);\n if (aspectRatioIdc === 255) {\n bitstream.skipBits(16);\n bitstream.skipBits(16);\n }\n }\n const overscanInfoPresentFlag = bitstream.readBits(1);\n if (overscanInfoPresentFlag) {\n bitstream.skipBits(1);\n }\n const videoSignalTypePresentFlag = bitstream.readBits(1);\n if (videoSignalTypePresentFlag) {\n bitstream.skipBits(3);\n fullRangeFlag = bitstream.readBits(1);\n const colourDescriptionPresentFlag = bitstream.readBits(1);\n if (colourDescriptionPresentFlag) {\n colourPrimaries = bitstream.readBits(8);\n transferCharacteristics = bitstream.readBits(8);\n matrixCoefficients = bitstream.readBits(8);\n }\n }\n const chromaLocInfoPresentFlag = bitstream.readBits(1);\n if (chromaLocInfoPresentFlag) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n const timingInfoPresentFlag = bitstream.readBits(1);\n if (timingInfoPresentFlag) {\n bitstream.skipBits(32);\n bitstream.skipBits(32);\n bitstream.skipBits(1);\n }\n const nalHrdParametersPresentFlag = bitstream.readBits(1);\n if (nalHrdParametersPresentFlag) {\n skipAvcHrdParameters(bitstream);\n }\n const vclHrdParametersPresentFlag = bitstream.readBits(1);\n if (vclHrdParametersPresentFlag) {\n skipAvcHrdParameters(bitstream);\n }\n if (nalHrdParametersPresentFlag || vclHrdParametersPresentFlag) {\n bitstream.skipBits(1);\n }\n 
bitstream.skipBits(1);\n const bitstreamRestrictionFlag = bitstream.readBits(1);\n if (bitstreamRestrictionFlag) {\n bitstream.skipBits(1);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n numReorderFrames = readExpGolomb(bitstream);\n maxDecFrameBuffering = readExpGolomb(bitstream);\n }\n }\n if (numReorderFrames === null) {\n assert(maxDecFrameBuffering === null);\n const constraintSet3Flag = constraintFlags & 16;\n if ((profileIdc === 44 || profileIdc === 86 || profileIdc === 100 || profileIdc === 110 || profileIdc === 122 || profileIdc === 244) && constraintSet3Flag) {\n numReorderFrames = 0;\n maxDecFrameBuffering = 0;\n } else {\n const picWidthInMbs = picWidthInMbsMinus1 + 1;\n const picHeightInMapUnits = picHeightInMapUnitsMinus1 + 1;\n const frameHeightInMbs = (2 - frameMbsOnlyFlag) * picHeightInMapUnits;\n const levelInfo = AVC_LEVEL_TABLE.find((x) => x.level >= levelIdc) ?? last(AVC_LEVEL_TABLE);\n const maxDpbFrames = Math.min(Math.floor(levelInfo.maxDpbMbs / (picWidthInMbs * frameHeightInMbs)), 16);\n numReorderFrames = maxDpbFrames;\n maxDecFrameBuffering = maxDpbFrames;\n }\n }\n assert(maxDecFrameBuffering !== null);\n return {\n profileIdc,\n constraintFlags,\n levelIdc,\n frameMbsOnlyFlag,\n chromaFormatIdc,\n bitDepthLumaMinus8,\n bitDepthChromaMinus8,\n codedWidth,\n codedHeight,\n displayWidth,\n displayHeight,\n colourPrimaries,\n matrixCoefficients,\n transferCharacteristics,\n fullRangeFlag,\n numReorderFrames,\n maxDecFrameBuffering\n };\n } catch (error) {\n console.error(\"Error parsing AVC SPS:\", error);\n return null;\n }\n};\nvar skipAvcHrdParameters = (bitstream) => {\n const cpb_cnt_minus1 = readExpGolomb(bitstream);\n bitstream.skipBits(4);\n bitstream.skipBits(4);\n for (let i = 0;i <= cpb_cnt_minus1; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n bitstream.skipBits(5);\n bitstream.skipBits(5);\n bitstream.skipBits(5);\n 
bitstream.skipBits(5);\n};\nvar extractNalUnitTypeForHevc = (byte) => {\n return byte >> 1 & 63;\n};\nvar parseHevcSps = (sps) => {\n try {\n const bitstream = new Bitstream(removeEmulationPreventionBytes(sps));\n bitstream.skipBits(16);\n bitstream.readBits(4);\n const spsMaxSubLayersMinus1 = bitstream.readBits(3);\n const spsTemporalIdNestingFlag = bitstream.readBits(1);\n const { general_profile_space, general_tier_flag, general_profile_idc, general_profile_compatibility_flags, general_constraint_indicator_flags, general_level_idc } = parseProfileTierLevel(bitstream, spsMaxSubLayersMinus1);\n readExpGolomb(bitstream);\n const chromaFormatIdc = readExpGolomb(bitstream);\n let separateColourPlaneFlag = 0;\n if (chromaFormatIdc === 3) {\n separateColourPlaneFlag = bitstream.readBits(1);\n }\n const picWidthInLumaSamples = readExpGolomb(bitstream);\n const picHeightInLumaSamples = readExpGolomb(bitstream);\n let displayWidth = picWidthInLumaSamples;\n let displayHeight = picHeightInLumaSamples;\n if (bitstream.readBits(1)) {\n const confWinLeftOffset = readExpGolomb(bitstream);\n const confWinRightOffset = readExpGolomb(bitstream);\n const confWinTopOffset = readExpGolomb(bitstream);\n const confWinBottomOffset = readExpGolomb(bitstream);\n let subWidthC = 1;\n let subHeightC = 1;\n const chromaArrayType = separateColourPlaneFlag === 0 ? chromaFormatIdc : 0;\n if (chromaArrayType === 1) {\n subWidthC = 2;\n subHeightC = 2;\n } else if (chromaArrayType === 2) {\n subWidthC = 2;\n subHeightC = 1;\n }\n displayWidth -= (confWinLeftOffset + confWinRightOffset) * subWidthC;\n displayHeight -= (confWinTopOffset + confWinBottomOffset) * subHeightC;\n }\n const bitDepthLumaMinus8 = readExpGolomb(bitstream);\n const bitDepthChromaMinus8 = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n const spsSubLayerOrderingInfoPresentFlag = bitstream.readBits(1);\n const startI = spsSubLayerOrderingInfoPresentFlag ? 
0 : spsMaxSubLayersMinus1;\n let spsMaxNumReorderPics = 0;\n for (let i = startI;i <= spsMaxSubLayersMinus1; i++) {\n readExpGolomb(bitstream);\n spsMaxNumReorderPics = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (bitstream.readBits(1)) {\n if (bitstream.readBits(1)) {\n skipScalingListData(bitstream);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n if (bitstream.readBits(1)) {\n bitstream.skipBits(4);\n bitstream.skipBits(4);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n const numShortTermRefPicSets = readExpGolomb(bitstream);\n skipAllStRefPicSets(bitstream, numShortTermRefPicSets);\n if (bitstream.readBits(1)) {\n const numLongTermRefPicsSps = readExpGolomb(bitstream);\n for (let i = 0;i < numLongTermRefPicsSps; i++) {\n readExpGolomb(bitstream);\n bitstream.skipBits(1);\n }\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n let colourPrimaries = 2;\n let transferCharacteristics = 2;\n let matrixCoefficients = 2;\n let fullRangeFlag = 0;\n let minSpatialSegmentationIdc = 0;\n if (bitstream.readBits(1)) {\n const vui = parseHevcVui(bitstream, spsMaxSubLayersMinus1);\n colourPrimaries = vui.colourPrimaries;\n transferCharacteristics = vui.transferCharacteristics;\n matrixCoefficients = vui.matrixCoefficients;\n fullRangeFlag = vui.fullRangeFlag;\n minSpatialSegmentationIdc = vui.minSpatialSegmentationIdc;\n }\n return {\n displayWidth,\n displayHeight,\n colourPrimaries,\n transferCharacteristics,\n matrixCoefficients,\n fullRangeFlag,\n maxDecFrameBuffering: spsMaxNumReorderPics + 1,\n spsMaxSubLayersMinus1,\n spsTemporalIdNestingFlag,\n generalProfileSpace: general_profile_space,\n generalTierFlag: general_tier_flag,\n generalProfileIdc: general_profile_idc,\n generalProfileCompatibilityFlags: 
general_profile_compatibility_flags,\n generalConstraintIndicatorFlags: general_constraint_indicator_flags,\n generalLevelIdc: general_level_idc,\n chromaFormatIdc,\n bitDepthLumaMinus8,\n bitDepthChromaMinus8,\n minSpatialSegmentationIdc\n };\n } catch (error) {\n console.error(\"Error parsing HEVC SPS:\", error);\n return null;\n }\n};\nvar extractHevcDecoderConfigurationRecord = (packetData) => {\n try {\n const vpsUnits = [];\n const spsUnits = [];\n const ppsUnits = [];\n const seiUnits = [];\n for (const loc of iterateNalUnitsInAnnexB(packetData)) {\n const nalUnit = packetData.subarray(loc.offset, loc.offset + loc.length);\n const type = extractNalUnitTypeForHevc(nalUnit[0]);\n if (type === HevcNalUnitType.VPS_NUT) {\n vpsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.SPS_NUT) {\n spsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.PPS_NUT) {\n ppsUnits.push(nalUnit);\n } else if (type === HevcNalUnitType.PREFIX_SEI_NUT || type === HevcNalUnitType.SUFFIX_SEI_NUT) {\n seiUnits.push(nalUnit);\n }\n }\n if (spsUnits.length === 0 || ppsUnits.length === 0)\n return null;\n const spsInfo = parseHevcSps(spsUnits[0]);\n if (!spsInfo)\n return null;\n let parallelismType = 0;\n if (ppsUnits.length > 0) {\n const pps = ppsUnits[0];\n const ppsBitstream = new Bitstream(removeEmulationPreventionBytes(pps));\n ppsBitstream.skipBits(16);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(3);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n readExpGolomb(ppsBitstream);\n readExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n if (ppsBitstream.readBits(1)) {\n readExpGolomb(ppsBitstream);\n }\n readSignedExpGolomb(ppsBitstream);\n readSignedExpGolomb(ppsBitstream);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n ppsBitstream.skipBits(1);\n const 
tiles_enabled_flag = ppsBitstream.readBits(1);\n const entropy_coding_sync_enabled_flag = ppsBitstream.readBits(1);\n if (!tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 0;\n else if (tiles_enabled_flag && !entropy_coding_sync_enabled_flag)\n parallelismType = 2;\n else if (!tiles_enabled_flag && entropy_coding_sync_enabled_flag)\n parallelismType = 3;\n else\n parallelismType = 0;\n }\n const arrays = [\n ...vpsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.VPS_NUT,\n nalUnits: vpsUnits\n }\n ] : [],\n ...spsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.SPS_NUT,\n nalUnits: spsUnits\n }\n ] : [],\n ...ppsUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: HevcNalUnitType.PPS_NUT,\n nalUnits: ppsUnits\n }\n ] : [],\n ...seiUnits.length ? [\n {\n arrayCompleteness: 1,\n nalUnitType: extractNalUnitTypeForHevc(seiUnits[0][0]),\n nalUnits: seiUnits\n }\n ] : []\n ];\n const record = {\n configurationVersion: 1,\n generalProfileSpace: spsInfo.generalProfileSpace,\n generalTierFlag: spsInfo.generalTierFlag,\n generalProfileIdc: spsInfo.generalProfileIdc,\n generalProfileCompatibilityFlags: spsInfo.generalProfileCompatibilityFlags,\n generalConstraintIndicatorFlags: spsInfo.generalConstraintIndicatorFlags,\n generalLevelIdc: spsInfo.generalLevelIdc,\n minSpatialSegmentationIdc: spsInfo.minSpatialSegmentationIdc,\n parallelismType,\n chromaFormatIdc: spsInfo.chromaFormatIdc,\n bitDepthLumaMinus8: spsInfo.bitDepthLumaMinus8,\n bitDepthChromaMinus8: spsInfo.bitDepthChromaMinus8,\n avgFrameRate: 0,\n constantFrameRate: 0,\n numTemporalLayers: spsInfo.spsMaxSubLayersMinus1 + 1,\n temporalIdNested: spsInfo.spsTemporalIdNestingFlag,\n lengthSizeMinusOne: 3,\n arrays\n };\n return record;\n } catch (error) {\n console.error(\"Error building HEVC Decoder Configuration Record:\", error);\n return null;\n }\n};\nvar parseProfileTierLevel = (bitstream, maxNumSubLayersMinus1) => {\n const 
general_profile_space = bitstream.readBits(2);\n const general_tier_flag = bitstream.readBits(1);\n const general_profile_idc = bitstream.readBits(5);\n let general_profile_compatibility_flags = 0;\n for (let i = 0;i < 32; i++) {\n general_profile_compatibility_flags = general_profile_compatibility_flags << 1 | bitstream.readBits(1);\n }\n const general_constraint_indicator_flags = new Uint8Array(6);\n for (let i = 0;i < 6; i++) {\n general_constraint_indicator_flags[i] = bitstream.readBits(8);\n }\n const general_level_idc = bitstream.readBits(8);\n const sub_layer_profile_present_flag = [];\n const sub_layer_level_present_flag = [];\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n sub_layer_profile_present_flag.push(bitstream.readBits(1));\n sub_layer_level_present_flag.push(bitstream.readBits(1));\n }\n if (maxNumSubLayersMinus1 > 0) {\n for (let i = maxNumSubLayersMinus1;i < 8; i++) {\n bitstream.skipBits(2);\n }\n }\n for (let i = 0;i < maxNumSubLayersMinus1; i++) {\n if (sub_layer_profile_present_flag[i])\n bitstream.skipBits(88);\n if (sub_layer_level_present_flag[i])\n bitstream.skipBits(8);\n }\n return {\n general_profile_space,\n general_tier_flag,\n general_profile_idc,\n general_profile_compatibility_flags,\n general_constraint_indicator_flags,\n general_level_idc\n };\n};\nvar skipScalingListData = (bitstream) => {\n for (let sizeId = 0;sizeId < 4; sizeId++) {\n for (let matrixId = 0;matrixId < (sizeId === 3 ? 
2 : 6); matrixId++) {\n const scaling_list_pred_mode_flag = bitstream.readBits(1);\n if (!scaling_list_pred_mode_flag) {\n readExpGolomb(bitstream);\n } else {\n const coefNum = Math.min(64, 1 << 4 + (sizeId << 1));\n if (sizeId > 1) {\n readSignedExpGolomb(bitstream);\n }\n for (let i = 0;i < coefNum; i++) {\n readSignedExpGolomb(bitstream);\n }\n }\n }\n }\n};\nvar skipAllStRefPicSets = (bitstream, num_short_term_ref_pic_sets) => {\n const NumDeltaPocs = [];\n for (let stRpsIdx = 0;stRpsIdx < num_short_term_ref_pic_sets; stRpsIdx++) {\n NumDeltaPocs[stRpsIdx] = skipStRefPicSet(bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs);\n }\n};\nvar skipStRefPicSet = (bitstream, stRpsIdx, num_short_term_ref_pic_sets, NumDeltaPocs) => {\n let NumDeltaPocsThis = 0;\n let inter_ref_pic_set_prediction_flag = 0;\n let RefRpsIdx = 0;\n if (stRpsIdx !== 0) {\n inter_ref_pic_set_prediction_flag = bitstream.readBits(1);\n }\n if (inter_ref_pic_set_prediction_flag) {\n if (stRpsIdx === num_short_term_ref_pic_sets) {\n const delta_idx_minus1 = readExpGolomb(bitstream);\n RefRpsIdx = stRpsIdx - (delta_idx_minus1 + 1);\n } else {\n RefRpsIdx = stRpsIdx - 1;\n }\n bitstream.readBits(1);\n readExpGolomb(bitstream);\n const numDelta = NumDeltaPocs[RefRpsIdx] ?? 
0;\n for (let j = 0;j <= numDelta; j++) {\n const used_by_curr_pic_flag = bitstream.readBits(1);\n if (!used_by_curr_pic_flag) {\n bitstream.readBits(1);\n }\n }\n NumDeltaPocsThis = NumDeltaPocs[RefRpsIdx];\n } else {\n const num_negative_pics = readExpGolomb(bitstream);\n const num_positive_pics = readExpGolomb(bitstream);\n for (let i = 0;i < num_negative_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n for (let i = 0;i < num_positive_pics; i++) {\n readExpGolomb(bitstream);\n bitstream.readBits(1);\n }\n NumDeltaPocsThis = num_negative_pics + num_positive_pics;\n }\n return NumDeltaPocsThis;\n};\nvar parseHevcVui = (bitstream, sps_max_sub_layers_minus1) => {\n let colourPrimaries = 2;\n let transferCharacteristics = 2;\n let matrixCoefficients = 2;\n let fullRangeFlag = 0;\n let minSpatialSegmentationIdc = 0;\n if (bitstream.readBits(1)) {\n const aspect_ratio_idc = bitstream.readBits(8);\n if (aspect_ratio_idc === 255) {\n bitstream.readBits(16);\n bitstream.readBits(16);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(3);\n fullRangeFlag = bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n colourPrimaries = bitstream.readBits(8);\n transferCharacteristics = bitstream.readBits(8);\n matrixCoefficients = bitstream.readBits(8);\n }\n }\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n bitstream.readBits(1);\n bitstream.readBits(1);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(32);\n bitstream.readBits(32);\n if (bitstream.readBits(1)) {\n readExpGolomb(bitstream);\n }\n if (bitstream.readBits(1)) {\n skipHevcHrdParameters(bitstream, true, sps_max_sub_layers_minus1);\n }\n }\n if (bitstream.readBits(1)) {\n bitstream.readBits(1);\n 
bitstream.readBits(1);\n bitstream.readBits(1);\n minSpatialSegmentationIdc = readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n return {\n colourPrimaries,\n transferCharacteristics,\n matrixCoefficients,\n fullRangeFlag,\n minSpatialSegmentationIdc\n };\n};\nvar skipHevcHrdParameters = (bitstream, commonInfPresentFlag, maxNumSubLayersMinus1) => {\n let nal_hrd_parameters_present_flag = false;\n let vcl_hrd_parameters_present_flag = false;\n let sub_pic_hrd_params_present_flag = false;\n if (commonInfPresentFlag) {\n nal_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n vcl_hrd_parameters_present_flag = bitstream.readBits(1) === 1;\n if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) {\n sub_pic_hrd_params_present_flag = bitstream.readBits(1) === 1;\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(8);\n bitstream.readBits(5);\n bitstream.readBits(1);\n bitstream.readBits(5);\n }\n bitstream.readBits(4);\n bitstream.readBits(4);\n if (sub_pic_hrd_params_present_flag) {\n bitstream.readBits(4);\n }\n bitstream.readBits(5);\n bitstream.readBits(5);\n bitstream.readBits(5);\n }\n }\n for (let i = 0;i <= maxNumSubLayersMinus1; i++) {\n const fixed_pic_rate_general_flag = bitstream.readBits(1) === 1;\n let fixed_pic_rate_within_cvs_flag = true;\n if (!fixed_pic_rate_general_flag) {\n fixed_pic_rate_within_cvs_flag = bitstream.readBits(1) === 1;\n }\n let low_delay_hrd_flag = false;\n if (fixed_pic_rate_within_cvs_flag) {\n readExpGolomb(bitstream);\n } else {\n low_delay_hrd_flag = bitstream.readBits(1) === 1;\n }\n let CpbCnt = 1;\n if (!low_delay_hrd_flag) {\n const cpb_cnt_minus1 = readExpGolomb(bitstream);\n CpbCnt = cpb_cnt_minus1 + 1;\n }\n if (nal_hrd_parameters_present_flag) {\n skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n if (vcl_hrd_parameters_present_flag) {\n 
skipSubLayerHrdParameters(bitstream, CpbCnt, sub_pic_hrd_params_present_flag);\n }\n }\n};\nvar skipSubLayerHrdParameters = (bitstream, CpbCnt, sub_pic_hrd_params_present_flag) => {\n for (let i = 0;i < CpbCnt; i++) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n if (sub_pic_hrd_params_present_flag) {\n readExpGolomb(bitstream);\n readExpGolomb(bitstream);\n }\n bitstream.readBits(1);\n }\n};\nvar serializeHevcDecoderConfigurationRecord = (record) => {\n const bytes = [];\n bytes.push(record.configurationVersion);\n bytes.push((record.generalProfileSpace & 3) << 6 | (record.generalTierFlag & 1) << 5 | record.generalProfileIdc & 31);\n bytes.push(record.generalProfileCompatibilityFlags >>> 24 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 16 & 255);\n bytes.push(record.generalProfileCompatibilityFlags >>> 8 & 255);\n bytes.push(record.generalProfileCompatibilityFlags & 255);\n bytes.push(...record.generalConstraintIndicatorFlags);\n bytes.push(record.generalLevelIdc & 255);\n bytes.push(240 | record.minSpatialSegmentationIdc >> 8 & 15);\n bytes.push(record.minSpatialSegmentationIdc & 255);\n bytes.push(252 | record.parallelismType & 3);\n bytes.push(252 | record.chromaFormatIdc & 3);\n bytes.push(248 | record.bitDepthLumaMinus8 & 7);\n bytes.push(248 | record.bitDepthChromaMinus8 & 7);\n bytes.push(record.avgFrameRate >> 8 & 255);\n bytes.push(record.avgFrameRate & 255);\n bytes.push((record.constantFrameRate & 3) << 6 | (record.numTemporalLayers & 7) << 3 | (record.temporalIdNested & 1) << 2 | record.lengthSizeMinusOne & 3);\n bytes.push(record.arrays.length & 255);\n for (const arr of record.arrays) {\n bytes.push((arr.arrayCompleteness & 1) << 7 | 0 << 6 | arr.nalUnitType & 63);\n bytes.push(arr.nalUnits.length >> 8 & 255);\n bytes.push(arr.nalUnits.length & 255);\n for (const nal of arr.nalUnits) {\n bytes.push(nal.length >> 8 & 255);\n bytes.push(nal.length & 255);\n for (let i = 0;i < nal.length; i++) {\n bytes.push(nal[i]);\n }\n 
}\n }\n return new Uint8Array(bytes);\n};\nvar parseOpusIdentificationHeader = (bytes) => {\n const view = toDataView(bytes);\n const outputChannelCount = view.getUint8(9);\n const preSkip = view.getUint16(10, true);\n const inputSampleRate = view.getUint32(12, true);\n const outputGain = view.getInt16(16, true);\n const channelMappingFamily = view.getUint8(18);\n let channelMappingTable = null;\n if (channelMappingFamily) {\n channelMappingTable = bytes.subarray(19, 19 + 2 + outputChannelCount);\n }\n return {\n outputChannelCount,\n preSkip,\n inputSampleRate,\n outputGain,\n channelMappingFamily,\n channelMappingTable\n };\n};\nvar FlacBlockType;\n(function(FlacBlockType2) {\n FlacBlockType2[FlacBlockType2[\"STREAMINFO\"] = 0] = \"STREAMINFO\";\n FlacBlockType2[FlacBlockType2[\"VORBIS_COMMENT\"] = 4] = \"VORBIS_COMMENT\";\n FlacBlockType2[FlacBlockType2[\"PICTURE\"] = 6] = \"PICTURE\";\n})(FlacBlockType || (FlacBlockType = {}));\n\n// ../../node_modules/mediabunny/dist/modules/src/custom-coder.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar customVideoEncoders = [];\nvar customAudioEncoders = [];\n\n// ../../node_modules/mediabunny/dist/modules/src/packet.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar PLACEHOLDER_DATA = /* @__PURE__ */ new Uint8Array(0);\n\nclass EncodedPacket {\n constructor(data, type, timestamp, duration, sequenceNumber = -1, byteLength, sideData) {\n this.data = data;\n this.type = type;\n this.timestamp = timestamp;\n this.duration = duration;\n this.sequenceNumber = sequenceNumber;\n if (data === PLACEHOLDER_DATA && byteLength === undefined) {\n throw new Error(\"Internal error: byteLength must be explicitly provided when constructing metadata-only packets.\");\n }\n if (byteLength === undefined) {\n byteLength = data.byteLength;\n }\n if (!(data instanceof Uint8Array)) {\n throw new TypeError(\"data must be a Uint8Array.\");\n }\n if (type !== \"key\" && type !== \"delta\") {\n throw new TypeError('type must be either \"key\" or \"delta\".');\n }\n if (!Number.isFinite(timestamp)) {\n throw new TypeError(\"timestamp must be a number.\");\n }\n if (!Number.isFinite(duration) || duration < 0) {\n throw new TypeError(\"duration must be a non-negative number.\");\n }\n if (!Number.isFinite(sequenceNumber)) {\n throw new TypeError(\"sequenceNumber must be a number.\");\n }\n if (!Number.isInteger(byteLength) || byteLength < 0) {\n throw new TypeError(\"byteLength must be a non-negative integer.\");\n }\n if (sideData !== undefined && (typeof sideData !== \"object\" || !sideData)) {\n throw new TypeError(\"sideData, when provided, must be an object.\");\n }\n if (sideData?.alpha !== undefined && !(sideData.alpha instanceof Uint8Array)) {\n throw new TypeError(\"sideData.alpha, when provided, must be a Uint8Array.\");\n }\n if (sideData?.alphaByteLength !== undefined && (!Number.isInteger(sideData.alphaByteLength) || sideData.alphaByteLength < 0)) {\n throw new TypeError(\"sideData.alphaByteLength, when provided, must be a non-negative integer.\");\n }\n this.byteLength = byteLength;\n this.sideData = sideData ?? 
{};\n if (this.sideData.alpha && this.sideData.alphaByteLength === undefined) {\n this.sideData.alphaByteLength = this.sideData.alpha.byteLength;\n }\n }\n get isMetadataOnly() {\n return this.data === PLACEHOLDER_DATA;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n toEncodedVideoChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n alphaToEncodedVideoChunk(type = this.type) {\n if (!this.sideData.alpha) {\n throw new TypeError(\"This packet does not contain alpha side data.\");\n }\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to a video chunk.\");\n }\n if (typeof EncodedVideoChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedVideoChunk.\");\n }\n return new EncodedVideoChunk({\n data: this.sideData.alpha,\n type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n toEncodedAudioChunk() {\n if (this.isMetadataOnly) {\n throw new TypeError(\"Metadata-only packets cannot be converted to an audio chunk.\");\n }\n if (typeof EncodedAudioChunk === \"undefined\") {\n throw new Error(\"Your browser does not support EncodedAudioChunk.\");\n }\n return new EncodedAudioChunk({\n data: this.data,\n type: this.type,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration\n });\n }\n static fromEncodedChunk(chunk, sideData) {\n if (!(chunk instanceof EncodedVideoChunk || chunk instanceof EncodedAudioChunk)) {\n throw 
new TypeError(\"chunk must be an EncodedVideoChunk or EncodedAudioChunk.\");\n }\n const data = new Uint8Array(chunk.byteLength);\n chunk.copyTo(data);\n return new EncodedPacket(data, chunk.type, chunk.timestamp / 1e6, (chunk.duration ?? 0) / 1e6, undefined, undefined, sideData);\n }\n clone(options) {\n if (options !== undefined && (typeof options !== \"object\" || options === null)) {\n throw new TypeError(\"options, when provided, must be an object.\");\n }\n if (options?.data !== undefined && !(options.data instanceof Uint8Array)) {\n throw new TypeError(\"options.data, when provided, must be a Uint8Array.\");\n }\n if (options?.type !== undefined && options.type !== \"key\" && options.type !== \"delta\") {\n throw new TypeError('options.type, when provided, must be either \"key\" or \"delta\".');\n }\n if (options?.timestamp !== undefined && !Number.isFinite(options.timestamp)) {\n throw new TypeError(\"options.timestamp, when provided, must be a number.\");\n }\n if (options?.duration !== undefined && !Number.isFinite(options.duration)) {\n throw new TypeError(\"options.duration, when provided, must be a number.\");\n }\n if (options?.sequenceNumber !== undefined && !Number.isFinite(options.sequenceNumber)) {\n throw new TypeError(\"options.sequenceNumber, when provided, must be a number.\");\n }\n if (options?.sideData !== undefined && (typeof options.sideData !== \"object\" || options.sideData === null)) {\n throw new TypeError(\"options.sideData, when provided, must be an object.\");\n }\n return new EncodedPacket(options?.data ?? this.data, options?.type ?? this.type, options?.timestamp ?? this.timestamp, options?.duration ?? this.duration, options?.sequenceNumber ?? this.sequenceNumber, this.byteLength, options?.sideData ?? 
this.sideData);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/pcm.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar toUlaw = (s16) => {\n const MULAW_MAX = 8191;\n const MULAW_BIAS = 33;\n let number = s16;\n let mask = 4096;\n let sign = 0;\n let position = 12;\n let lsb = 0;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n number += MULAW_BIAS;\n if (number > MULAW_MAX) {\n number = MULAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> position - 4 & 15;\n return ~(sign | position - 5 << 4 | lsb) & 255;\n};\nvar toAlaw = (s16) => {\n const ALAW_MAX = 4095;\n let mask = 2048;\n let sign = 0;\n let position = 11;\n let lsb = 0;\n let number = s16;\n if (number < 0) {\n number = -number;\n sign = 128;\n }\n if (number > ALAW_MAX) {\n number = ALAW_MAX;\n }\n while ((number & mask) !== mask && position >= 5) {\n mask >>= 1;\n position--;\n }\n lsb = number >> (position === 4 ? 1 : position - 4) & 15;\n return (sign | position - 4 << 4 | lsb) ^ 85;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/sample.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\npolyfillSymbolDispose();\nvar lastVideoGcErrorLog = -Infinity;\nvar lastAudioGcErrorLog = -Infinity;\nvar finalizationRegistry = null;\nif (typeof FinalizationRegistry !== \"undefined\") {\n finalizationRegistry = new FinalizationRegistry((value) => {\n const now = Date.now();\n if (value.type === \"video\") {\n if (now - lastVideoGcErrorLog >= 1000) {\n console.error(`A VideoSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your VideoSamples as soon as you're done using them.`);\n lastVideoGcErrorLog = now;\n }\n if (typeof VideoFrame !== \"undefined\" && value.data instanceof VideoFrame) {\n value.data.close();\n }\n } else {\n if (now - lastAudioGcErrorLog >= 1000) {\n console.error(`An AudioSample was garbage collected without first being closed. For proper resource management,` + ` make sure to call close() on all your AudioSamples as soon as you're done using them.`);\n lastAudioGcErrorLog = now;\n }\n if (typeof AudioData !== \"undefined\" && value.data instanceof AudioData) {\n value.data.close();\n }\n }\n });\n}\nvar VIDEO_SAMPLE_PIXEL_FORMATS = [\n \"I420\",\n \"I420P10\",\n \"I420P12\",\n \"I420A\",\n \"I420AP10\",\n \"I420AP12\",\n \"I422\",\n \"I422P10\",\n \"I422P12\",\n \"I422A\",\n \"I422AP10\",\n \"I422AP12\",\n \"I444\",\n \"I444P10\",\n \"I444P12\",\n \"I444A\",\n \"I444AP10\",\n \"I444AP12\",\n \"NV12\",\n \"RGBA\",\n \"RGBX\",\n \"BGRA\",\n \"BGRX\"\n];\nvar VIDEO_SAMPLE_PIXEL_FORMATS_SET = new Set(VIDEO_SAMPLE_PIXEL_FORMATS);\n\nclass VideoSample {\n get displayWidth() {\n return this.rotation % 180 === 0 ? this.codedWidth : this.codedHeight;\n }\n get displayHeight() {\n return this.rotation % 180 === 0 ? 
this.codedHeight : this.codedWidth;\n }\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n get hasAlpha() {\n return this.format && this.format.includes(\"A\");\n }\n constructor(data, init) {\n this._closed = false;\n if (data instanceof ArrayBuffer || typeof SharedArrayBuffer !== \"undefined\" && data instanceof SharedArrayBuffer || ArrayBuffer.isView(data)) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.format === undefined || !VIDEO_SAMPLE_PIXEL_FORMATS_SET.has(init.format)) {\n throw new TypeError(\"init.format must be one of: \" + VIDEO_SAMPLE_PIXEL_FORMATS.join(\", \"));\n }\n if (!Number.isInteger(init.codedWidth) || init.codedWidth <= 0) {\n throw new TypeError(\"init.codedWidth must be a positive integer.\");\n }\n if (!Number.isInteger(init.codedHeight) || init.codedHeight <= 0) {\n throw new TypeError(\"init.codedHeight must be a positive integer.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = toUint8Array(data).slice();\n this._layout = init.layout ?? createDefaultPlaneLayout(init.format, init.codedWidth, init.codedHeight);\n this.format = init.format;\n this.codedWidth = init.codedWidth;\n this.codedHeight = init.codedHeight;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoSampleColorSpace(init.colorSpace);\n } else if (typeof VideoFrame !== \"undefined\" && data instanceof VideoFrame) {\n if (init?.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (init?.timestamp !== undefined && !Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp, when provided, must be a number.\");\n }\n if (init?.duration !== undefined && (!Number.isFinite(init.duration) || init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n this._data = data;\n this._layout = null;\n this.format = data.format;\n this.codedWidth = data.displayWidth;\n this.codedHeight = data.displayHeight;\n this.rotation = init?.rotation ?? 0;\n this.timestamp = init?.timestamp ?? data.timestamp / 1e6;\n this.duration = init?.duration ?? (data.duration ?? 0) / 1e6;\n this.colorSpace = new VideoSampleColorSpace(data.colorSpace);\n } else if (typeof HTMLImageElement !== \"undefined\" && data instanceof HTMLImageElement || typeof SVGImageElement !== \"undefined\" && data instanceof SVGImageElement || typeof ImageBitmap !== \"undefined\" && data instanceof ImageBitmap || typeof HTMLVideoElement !== \"undefined\" && data instanceof HTMLVideoElement || typeof HTMLCanvasElement !== \"undefined\" && data instanceof HTMLCanvasElement || typeof OffscreenCanvas !== \"undefined\" && data instanceof OffscreenCanvas) {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"init must be an object.\");\n }\n if (init.rotation !== undefined && ![0, 90, 180, 270].includes(init.rotation)) {\n throw new TypeError(\"init.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (!Number.isFinite(init.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n if (init.duration !== undefined && (!Number.isFinite(init.duration) || 
init.duration < 0)) {\n throw new TypeError(\"init.duration, when provided, must be a non-negative number.\");\n }\n if (typeof VideoFrame !== \"undefined\") {\n return new VideoSample(new VideoFrame(data, {\n timestamp: Math.trunc(init.timestamp * SECOND_TO_MICROSECOND_FACTOR),\n duration: Math.trunc((init.duration ?? 0) * SECOND_TO_MICROSECOND_FACTOR) || undefined\n }), init);\n }\n let width = 0;\n let height = 0;\n if (\"naturalWidth\" in data) {\n width = data.naturalWidth;\n height = data.naturalHeight;\n } else if (\"videoWidth\" in data) {\n width = data.videoWidth;\n height = data.videoHeight;\n } else if (\"width\" in data) {\n width = Number(data.width);\n height = Number(data.height);\n }\n if (!width || !height) {\n throw new TypeError(\"Could not determine dimensions.\");\n }\n const canvas = new OffscreenCanvas(width, height);\n const context = canvas.getContext(\"2d\", {\n alpha: isFirefox(),\n willReadFrequently: true\n });\n assert(context);\n context.drawImage(data, 0, 0);\n this._data = canvas;\n this._layout = null;\n this.format = \"RGBX\";\n this.codedWidth = width;\n this.codedHeight = height;\n this.rotation = init.rotation ?? 0;\n this.timestamp = init.timestamp;\n this.duration = init.duration ?? 
0;\n this.colorSpace = new VideoSampleColorSpace({\n matrix: \"rgb\",\n primaries: \"bt709\",\n transfer: \"iec61966-2-1\",\n fullRange: true\n });\n } else {\n throw new TypeError(\"Invalid data type: Must be a BufferSource or CanvasImageSource.\");\n }\n finalizationRegistry?.register(this, { type: \"video\", data: this._data }, this);\n }\n clone() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (isVideoFrame(this._data)) {\n return new VideoSample(this._data.clone(), {\n timestamp: this.timestamp,\n duration: this.duration,\n rotation: this.rotation\n });\n } else if (this._data instanceof Uint8Array) {\n assert(this._layout);\n return new VideoSample(this._data, {\n format: this.format,\n layout: this._layout,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n } else {\n return new VideoSample(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.timestamp,\n duration: this.duration,\n colorSpace: this.colorSpace,\n rotation: this.rotation\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isVideoFrame(this._data)) {\n this._data.close();\n } else {\n this._data = null;\n }\n this._closed = true;\n }\n allocationSize(options = {}) {\n validateVideoFrameCopyToOptions(options);\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n if (this.format === null) {\n throw new Error(\"Cannot get allocation size when format is null. 
Sorry!\");\n }\n assert(this._data !== null);\n if (!isVideoFrame(this._data)) {\n if (options.colorSpace || options.format && options.format !== this.format || options.layout || options.rect) {\n const videoFrame = this.toVideoFrame();\n const size = videoFrame.allocationSize(options);\n videoFrame.close();\n return size;\n }\n }\n if (isVideoFrame(this._data)) {\n return this._data.allocationSize(options);\n } else if (this._data instanceof Uint8Array) {\n return this._data.byteLength;\n } else {\n return this.codedWidth * this.codedHeight * 4;\n }\n }\n async copyTo(destination, options = {}) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n validateVideoFrameCopyToOptions(options);\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n if (this.format === null) {\n throw new Error(\"Cannot copy video sample data when format is null. Sorry!\");\n }\n assert(this._data !== null);\n if (!isVideoFrame(this._data)) {\n if (options.colorSpace || options.format && options.format !== this.format || options.layout || options.rect) {\n const videoFrame = this.toVideoFrame();\n const layout = await videoFrame.copyTo(destination, options);\n videoFrame.close();\n return layout;\n }\n }\n if (isVideoFrame(this._data)) {\n return this._data.copyTo(destination, options);\n } else if (this._data instanceof Uint8Array) {\n assert(this._layout);\n const dest = toUint8Array(destination);\n dest.set(this._data);\n return this._layout;\n } else {\n const canvas = this._data;\n const context = canvas.getContext(\"2d\");\n assert(context);\n const imageData = context.getImageData(0, 0, this.codedWidth, this.codedHeight);\n const dest = toUint8Array(destination);\n dest.set(imageData.data);\n return [{\n offset: 0,\n stride: 4 * this.codedWidth\n }];\n }\n }\n toVideoFrame() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== 
null);\n if (isVideoFrame(this._data)) {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n } else if (this._data instanceof Uint8Array) {\n return new VideoFrame(this._data, {\n format: this.format,\n codedWidth: this.codedWidth,\n codedHeight: this.codedHeight,\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined,\n colorSpace: this.colorSpace\n });\n } else {\n return new VideoFrame(this._data, {\n timestamp: this.microsecondTimestamp,\n duration: this.microsecondDuration || undefined\n });\n }\n }\n draw(context, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) {\n let sx = 0;\n let sy = 0;\n let sWidth = this.displayWidth;\n let sHeight = this.displayHeight;\n let dx = 0;\n let dy = 0;\n let dWidth = this.displayWidth;\n let dHeight = this.displayHeight;\n if (arg5 !== undefined) {\n sx = arg1;\n sy = arg2;\n sWidth = arg3;\n sHeight = arg4;\n dx = arg5;\n dy = arg6;\n if (arg7 !== undefined) {\n dWidth = arg7;\n dHeight = arg8;\n } else {\n dWidth = sWidth;\n dHeight = sHeight;\n }\n } else {\n dx = arg1;\n dy = arg2;\n if (arg3 !== undefined) {\n dWidth = arg3;\n dHeight = arg4;\n }\n }\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!Number.isFinite(sx)) {\n throw new TypeError(\"sx must be a number.\");\n }\n if (!Number.isFinite(sy)) {\n throw new TypeError(\"sy must be a number.\");\n }\n if (!Number.isFinite(sWidth) || sWidth < 0) {\n throw new TypeError(\"sWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(sHeight) || sHeight < 0) {\n throw new TypeError(\"sHeight must be a non-negative number.\");\n }\n if 
(!Number.isFinite(dx)) {\n throw new TypeError(\"dx must be a number.\");\n }\n if (!Number.isFinite(dy)) {\n throw new TypeError(\"dy must be a number.\");\n }\n if (!Number.isFinite(dWidth) || dWidth < 0) {\n throw new TypeError(\"dWidth must be a non-negative number.\");\n }\n if (!Number.isFinite(dHeight) || dHeight < 0) {\n throw new TypeError(\"dHeight must be a non-negative number.\");\n }\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n ({ sx, sy, sWidth, sHeight } = this._rotateSourceRegion(sx, sy, sWidth, sHeight, this.rotation));\n const source = this.toCanvasImageSource();\n context.save();\n const centerX = dx + dWidth / 2;\n const centerY = dy + dHeight / 2;\n context.translate(centerX, centerY);\n context.rotate(this.rotation * Math.PI / 180);\n const aspectRatioChange = this.rotation % 180 === 0 ? 1 : dWidth / dHeight;\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.drawImage(source, sx, sy, sWidth, sHeight, -dWidth / 2, -dHeight / 2, dWidth, dHeight);\n context.restore();\n }\n drawWithFit(context, options) {\n if (!(typeof CanvasRenderingContext2D !== \"undefined\" && context instanceof CanvasRenderingContext2D || typeof OffscreenCanvasRenderingContext2D !== \"undefined\" && context instanceof OffscreenCanvasRenderingContext2D)) {\n throw new TypeError(\"context must be a CanvasRenderingContext2D or OffscreenCanvasRenderingContext2D.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (![\"fill\", \"contain\", \"cover\"].includes(options.fit)) {\n throw new TypeError(\"options.fit must be 'fill', 'contain', or 'cover'.\");\n }\n if (options.rotation !== undefined && ![0, 90, 180, 270].includes(options.rotation)) {\n throw new TypeError(\"options.rotation, when provided, must be 0, 90, 180, or 270.\");\n }\n if (options.crop !== undefined) {\n validateCropRectangle(options.crop, \"options.\");\n }\n const canvasWidth = 
context.canvas.width;\n const canvasHeight = context.canvas.height;\n const rotation = options.rotation ?? this.rotation;\n const [rotatedWidth, rotatedHeight] = rotation % 180 === 0 ? [this.codedWidth, this.codedHeight] : [this.codedHeight, this.codedWidth];\n if (options.crop) {\n clampCropRectangle(options.crop, rotatedWidth, rotatedHeight);\n }\n let dx;\n let dy;\n let newWidth;\n let newHeight;\n const { sx, sy, sWidth, sHeight } = this._rotateSourceRegion(options.crop?.left ?? 0, options.crop?.top ?? 0, options.crop?.width ?? rotatedWidth, options.crop?.height ?? rotatedHeight, rotation);\n if (options.fit === \"fill\") {\n dx = 0;\n dy = 0;\n newWidth = canvasWidth;\n newHeight = canvasHeight;\n } else {\n const [sampleWidth, sampleHeight] = options.crop ? [options.crop.width, options.crop.height] : [rotatedWidth, rotatedHeight];\n const scale = options.fit === \"contain\" ? Math.min(canvasWidth / sampleWidth, canvasHeight / sampleHeight) : Math.max(canvasWidth / sampleWidth, canvasHeight / sampleHeight);\n newWidth = sampleWidth * scale;\n newHeight = sampleHeight * scale;\n dx = (canvasWidth - newWidth) / 2;\n dy = (canvasHeight - newHeight) / 2;\n }\n context.save();\n const aspectRatioChange = rotation % 180 === 0 ? 
1 : newWidth / newHeight;\n context.translate(canvasWidth / 2, canvasHeight / 2);\n context.rotate(rotation * Math.PI / 180);\n context.scale(1 / aspectRatioChange, aspectRatioChange);\n context.translate(-canvasWidth / 2, -canvasHeight / 2);\n context.drawImage(this.toCanvasImageSource(), sx, sy, sWidth, sHeight, dx, dy, newWidth, newHeight);\n context.restore();\n }\n _rotateSourceRegion(sx, sy, sWidth, sHeight, rotation) {\n if (rotation === 90) {\n [sx, sy, sWidth, sHeight] = [\n sy,\n this.codedHeight - sx - sWidth,\n sHeight,\n sWidth\n ];\n } else if (rotation === 180) {\n [sx, sy] = [\n this.codedWidth - sx - sWidth,\n this.codedHeight - sy - sHeight\n ];\n } else if (rotation === 270) {\n [sx, sy, sWidth, sHeight] = [\n this.codedWidth - sy - sHeight,\n sx,\n sHeight,\n sWidth\n ];\n }\n return { sx, sy, sWidth, sHeight };\n }\n toCanvasImageSource() {\n if (this._closed) {\n throw new Error(\"VideoSample is closed.\");\n }\n assert(this._data !== null);\n if (this._data instanceof Uint8Array) {\n const videoFrame = this.toVideoFrame();\n queueMicrotask(() => videoFrame.close());\n return videoFrame;\n } else {\n return this._data;\n }\n }\n setRotation(newRotation) {\n if (![0, 90, 180, 270].includes(newRotation)) {\n throw new TypeError(\"newRotation must be 0, 90, 180, or 270.\");\n }\n this.rotation = newRotation;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n setDuration(newDuration) {\n if (!Number.isFinite(newDuration) || newDuration < 0) {\n throw new TypeError(\"newDuration must be a non-negative number.\");\n }\n this.duration = newDuration;\n }\n [Symbol.dispose]() {\n this.close();\n }\n}\n\nclass VideoSampleColorSpace {\n constructor(init) {\n this.primaries = init?.primaries ?? null;\n this.transfer = init?.transfer ?? null;\n this.matrix = init?.matrix ?? null;\n this.fullRange = init?.fullRange ?? 
null;\n }\n toJSON() {\n return {\n primaries: this.primaries,\n transfer: this.transfer,\n matrix: this.matrix,\n fullRange: this.fullRange\n };\n }\n}\nvar isVideoFrame = (x) => {\n return typeof VideoFrame !== \"undefined\" && x instanceof VideoFrame;\n};\nvar clampCropRectangle = (crop, outerWidth, outerHeight) => {\n crop.left = Math.min(crop.left, outerWidth);\n crop.top = Math.min(crop.top, outerHeight);\n crop.width = Math.min(crop.width, outerWidth - crop.left);\n crop.height = Math.min(crop.height, outerHeight - crop.top);\n assert(crop.width >= 0);\n assert(crop.height >= 0);\n};\nvar validateCropRectangle = (crop, prefix) => {\n if (!crop || typeof crop !== \"object\") {\n throw new TypeError(prefix + \"crop, when provided, must be an object.\");\n }\n if (!Number.isInteger(crop.left) || crop.left < 0) {\n throw new TypeError(prefix + \"crop.left must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.top) || crop.top < 0) {\n throw new TypeError(prefix + \"crop.top must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.width) || crop.width < 0) {\n throw new TypeError(prefix + \"crop.width must be a non-negative integer.\");\n }\n if (!Number.isInteger(crop.height) || crop.height < 0) {\n throw new TypeError(prefix + \"crop.height must be a non-negative integer.\");\n }\n};\nvar validateVideoFrameCopyToOptions = (options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.colorSpace !== undefined && ![\"display-p3\", \"srgb\"].includes(options.colorSpace)) {\n throw new TypeError(\"options.colorSpace, when provided, must be 'display-p3' or 'srgb'.\");\n }\n if (options.format !== undefined && typeof options.format !== \"string\") {\n throw new TypeError(\"options.format, when provided, must be a string.\");\n }\n if (options.layout !== undefined) {\n if (!Array.isArray(options.layout)) {\n throw new TypeError(\"options.layout, when provided, must be 
an array.\");\n }\n for (const plane of options.layout) {\n if (!plane || typeof plane !== \"object\") {\n throw new TypeError(\"Each entry in options.layout must be an object.\");\n }\n if (!Number.isInteger(plane.offset) || plane.offset < 0) {\n throw new TypeError(\"plane.offset must be a non-negative integer.\");\n }\n if (!Number.isInteger(plane.stride) || plane.stride < 0) {\n throw new TypeError(\"plane.stride must be a non-negative integer.\");\n }\n }\n }\n if (options.rect !== undefined) {\n if (!options.rect || typeof options.rect !== \"object\") {\n throw new TypeError(\"options.rect, when provided, must be an object.\");\n }\n if (options.rect.x !== undefined && (!Number.isInteger(options.rect.x) || options.rect.x < 0)) {\n throw new TypeError(\"options.rect.x, when provided, must be a non-negative integer.\");\n }\n if (options.rect.y !== undefined && (!Number.isInteger(options.rect.y) || options.rect.y < 0)) {\n throw new TypeError(\"options.rect.y, when provided, must be a non-negative integer.\");\n }\n if (options.rect.width !== undefined && (!Number.isInteger(options.rect.width) || options.rect.width < 0)) {\n throw new TypeError(\"options.rect.width, when provided, must be a non-negative integer.\");\n }\n if (options.rect.height !== undefined && (!Number.isInteger(options.rect.height) || options.rect.height < 0)) {\n throw new TypeError(\"options.rect.height, when provided, must be a non-negative integer.\");\n }\n }\n};\nvar createDefaultPlaneLayout = (format, codedWidth, codedHeight) => {\n const planes = getPlaneConfigs(format);\n const layouts = [];\n let currentOffset = 0;\n for (const plane of planes) {\n const planeWidth = Math.ceil(codedWidth / plane.widthDivisor);\n const planeHeight = Math.ceil(codedHeight / plane.heightDivisor);\n const stride = planeWidth * plane.sampleBytes;\n const planeSize = stride * planeHeight;\n layouts.push({\n offset: currentOffset,\n stride\n });\n currentOffset += planeSize;\n }\n return layouts;\n};\nvar 
getPlaneConfigs = (format) => {\n const yuv = (yBytes, uvBytes, subX, subY, hasAlpha) => {\n const configs = [\n { sampleBytes: yBytes, widthDivisor: 1, heightDivisor: 1 },\n { sampleBytes: uvBytes, widthDivisor: subX, heightDivisor: subY },\n { sampleBytes: uvBytes, widthDivisor: subX, heightDivisor: subY }\n ];\n if (hasAlpha) {\n configs.push({ sampleBytes: yBytes, widthDivisor: 1, heightDivisor: 1 });\n }\n return configs;\n };\n switch (format) {\n case \"I420\":\n return yuv(1, 1, 2, 2, false);\n case \"I420P10\":\n case \"I420P12\":\n return yuv(2, 2, 2, 2, false);\n case \"I420A\":\n return yuv(1, 1, 2, 2, true);\n case \"I420AP10\":\n case \"I420AP12\":\n return yuv(2, 2, 2, 2, true);\n case \"I422\":\n return yuv(1, 1, 2, 1, false);\n case \"I422P10\":\n case \"I422P12\":\n return yuv(2, 2, 2, 1, false);\n case \"I422A\":\n return yuv(1, 1, 2, 1, true);\n case \"I422AP10\":\n case \"I422AP12\":\n return yuv(2, 2, 2, 1, true);\n case \"I444\":\n return yuv(1, 1, 1, 1, false);\n case \"I444P10\":\n case \"I444P12\":\n return yuv(2, 2, 1, 1, false);\n case \"I444A\":\n return yuv(1, 1, 1, 1, true);\n case \"I444AP10\":\n case \"I444AP12\":\n return yuv(2, 2, 1, 1, true);\n case \"NV12\":\n return [\n { sampleBytes: 1, widthDivisor: 1, heightDivisor: 1 },\n { sampleBytes: 2, widthDivisor: 2, heightDivisor: 2 }\n ];\n case \"RGBA\":\n case \"RGBX\":\n case \"BGRA\":\n case \"BGRX\":\n return [\n { sampleBytes: 4, widthDivisor: 1, heightDivisor: 1 }\n ];\n default:\n assertNever(format);\n assert(false);\n }\n};\nvar AUDIO_SAMPLE_FORMATS = new Set([\"f32\", \"f32-planar\", \"s16\", \"s16-planar\", \"s32\", \"s32-planar\", \"u8\", \"u8-planar\"]);\n\nclass AudioSample {\n get microsecondTimestamp() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.timestamp);\n }\n get microsecondDuration() {\n return Math.trunc(SECOND_TO_MICROSECOND_FACTOR * this.duration);\n }\n constructor(init) {\n this._closed = false;\n if (isAudioData(init)) {\n if (init.format 
=== null) {\n throw new TypeError(\"AudioData with null format is not supported.\");\n }\n this._data = init;\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = init.numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp / 1e6;\n this.duration = init.numberOfFrames / init.sampleRate;\n } else {\n if (!init || typeof init !== \"object\") {\n throw new TypeError(\"Invalid AudioDataInit: must be an object.\");\n }\n if (!AUDIO_SAMPLE_FORMATS.has(init.format)) {\n throw new TypeError(\"Invalid AudioDataInit: invalid format.\");\n }\n if (!Number.isFinite(init.sampleRate) || init.sampleRate <= 0) {\n throw new TypeError(\"Invalid AudioDataInit: sampleRate must be > 0.\");\n }\n if (!Number.isInteger(init.numberOfChannels) || init.numberOfChannels === 0) {\n throw new TypeError(\"Invalid AudioDataInit: numberOfChannels must be an integer > 0.\");\n }\n if (!Number.isFinite(init?.timestamp)) {\n throw new TypeError(\"init.timestamp must be a number.\");\n }\n const numberOfFrames = init.data.byteLength / (getBytesPerSample(init.format) * init.numberOfChannels);\n if (!Number.isInteger(numberOfFrames)) {\n throw new TypeError(\"Invalid AudioDataInit: data size is not a multiple of frame size.\");\n }\n this.format = init.format;\n this.sampleRate = init.sampleRate;\n this.numberOfFrames = numberOfFrames;\n this.numberOfChannels = init.numberOfChannels;\n this.timestamp = init.timestamp;\n this.duration = numberOfFrames / init.sampleRate;\n let dataBuffer;\n if (init.data instanceof ArrayBuffer) {\n dataBuffer = new Uint8Array(init.data);\n } else if (ArrayBuffer.isView(init.data)) {\n dataBuffer = new Uint8Array(init.data.buffer, init.data.byteOffset, init.data.byteLength);\n } else {\n throw new TypeError(\"Invalid AudioDataInit: data is not a BufferSource.\");\n }\n const expectedSize = this.numberOfFrames * this.numberOfChannels * getBytesPerSample(this.format);\n if 
(dataBuffer.byteLength < expectedSize) {\n throw new TypeError(\"Invalid AudioDataInit: insufficient data size.\");\n }\n this._data = dataBuffer;\n }\n finalizationRegistry?.register(this, { type: \"audio\", data: this._data }, this);\n }\n allocationSize(options) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const destFormat = options.format ?? this.format;\n const frameOffset = options.frameOffset ?? 0;\n if (frameOffset >= this.numberOfFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = options.frameCount !== undefined ? options.frameCount : this.numberOfFrames - frameOffset;\n if (copyFrameCount > this.numberOfFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const bytesPerSample = getBytesPerSample(destFormat);\n const isPlanar = formatIsPlanar(destFormat);\n if (isPlanar && options.planeIndex >= this.numberOfChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!isPlanar && options.planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const elementCount = isPlanar ? 
copyFrameCount : copyFrameCount * this.numberOfChannels;\n return elementCount * bytesPerSample;\n }\n copyTo(destination, options) {\n if (!isAllowSharedBufferSource(destination)) {\n throw new TypeError(\"destination must be an ArrayBuffer or an ArrayBuffer view.\");\n }\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (!Number.isInteger(options.planeIndex) || options.planeIndex < 0) {\n throw new TypeError(\"planeIndex must be a non-negative integer.\");\n }\n if (options.format !== undefined && !AUDIO_SAMPLE_FORMATS.has(options.format)) {\n throw new TypeError(\"Invalid format.\");\n }\n if (options.frameOffset !== undefined && (!Number.isInteger(options.frameOffset) || options.frameOffset < 0)) {\n throw new TypeError(\"frameOffset must be a non-negative integer.\");\n }\n if (options.frameCount !== undefined && (!Number.isInteger(options.frameCount) || options.frameCount < 0)) {\n throw new TypeError(\"frameCount must be a non-negative integer.\");\n }\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const { planeIndex, format, frameCount: optFrameCount, frameOffset: optFrameOffset } = options;\n const srcFormat = this.format;\n const destFormat = format ?? this.format;\n if (!destFormat)\n throw new Error(\"Destination format not determined\");\n const numFrames = this.numberOfFrames;\n const numChannels = this.numberOfChannels;\n const frameOffset = optFrameOffset ?? 0;\n if (frameOffset >= numFrames) {\n throw new RangeError(\"frameOffset out of range\");\n }\n const copyFrameCount = optFrameCount !== undefined ? 
optFrameCount : numFrames - frameOffset;\n if (copyFrameCount > numFrames - frameOffset) {\n throw new RangeError(\"frameCount out of range\");\n }\n const destBytesPerSample = getBytesPerSample(destFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar && planeIndex >= numChannels) {\n throw new RangeError(\"planeIndex out of range\");\n }\n if (!destIsPlanar && planeIndex !== 0) {\n throw new RangeError(\"planeIndex out of range\");\n }\n const destElementCount = destIsPlanar ? copyFrameCount : copyFrameCount * numChannels;\n const requiredSize = destElementCount * destBytesPerSample;\n if (destination.byteLength < requiredSize) {\n throw new RangeError(\"Destination buffer is too small\");\n }\n const destView = toDataView(destination);\n const writeFn = getWriteFunction(destFormat);\n if (isAudioData(this._data)) {\n if (isWebKit() && numChannels > 2 && destFormat !== srcFormat) {\n doAudioDataCopyToWebKitWorkaround(this._data, destView, srcFormat, destFormat, numChannels, planeIndex, frameOffset, copyFrameCount);\n } else {\n this._data.copyTo(destination, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: destFormat\n });\n }\n } else {\n const uint8Data = this._data;\n const srcView = toDataView(uint8Data);\n const readFn = getReadFunction(srcFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n for (let i = 0;i < copyFrameCount; i++) {\n if (destIsPlanar) {\n const destOffset = i * destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (planeIndex * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + planeIndex) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n } else {\n for (let ch = 0;ch < numChannels; ch++) {\n const destIndex = i * numChannels + ch;\n const destOffset = destIndex * 
destBytesPerSample;\n let srcOffset;\n if (srcIsPlanar) {\n srcOffset = (ch * numFrames + (i + frameOffset)) * srcBytesPerSample;\n } else {\n srcOffset = ((i + frameOffset) * numChannels + ch) * srcBytesPerSample;\n }\n const normalized = readFn(srcView, srcOffset);\n writeFn(destView, destOffset, normalized);\n }\n }\n }\n }\n }\n clone() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n const sample = new AudioSample(this._data.clone());\n sample.setTimestamp(this.timestamp);\n return sample;\n } else {\n return new AudioSample({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.timestamp,\n data: this._data\n });\n }\n }\n close() {\n if (this._closed) {\n return;\n }\n finalizationRegistry?.unregister(this);\n if (isAudioData(this._data)) {\n this._data.close();\n } else {\n this._data = new Uint8Array(0);\n }\n this._closed = true;\n }\n toAudioData() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n if (isAudioData(this._data)) {\n if (this._data.timestamp === this.microsecondTimestamp) {\n return this._data.clone();\n } else {\n if (formatIsPlanar(this.format)) {\n const size = this.allocationSize({ planeIndex: 0, format: this.format });\n const data = new ArrayBuffer(size * this.numberOfChannels);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(new Uint8Array(data, i * size, size), { planeIndex: i, format: this.format });\n }\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n } else {\n const data = new ArrayBuffer(this.allocationSize({ planeIndex: 0, format: this.format }));\n this.copyTo(data, { planeIndex: 0, format: this.format });\n return new AudioData({\n format: this.format,\n 
sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data\n });\n }\n }\n } else {\n return new AudioData({\n format: this.format,\n sampleRate: this.sampleRate,\n numberOfFrames: this.numberOfFrames,\n numberOfChannels: this.numberOfChannels,\n timestamp: this.microsecondTimestamp,\n data: this._data.buffer instanceof ArrayBuffer ? this._data.buffer : this._data.slice()\n });\n }\n }\n toAudioBuffer() {\n if (this._closed) {\n throw new Error(\"AudioSample is closed.\");\n }\n const audioBuffer = new AudioBuffer({\n numberOfChannels: this.numberOfChannels,\n length: this.numberOfFrames,\n sampleRate: this.sampleRate\n });\n const dataBytes = new Float32Array(this.allocationSize({ planeIndex: 0, format: \"f32-planar\" }) / 4);\n for (let i = 0;i < this.numberOfChannels; i++) {\n this.copyTo(dataBytes, { planeIndex: i, format: \"f32-planar\" });\n audioBuffer.copyToChannel(dataBytes, i);\n }\n return audioBuffer;\n }\n setTimestamp(newTimestamp) {\n if (!Number.isFinite(newTimestamp)) {\n throw new TypeError(\"newTimestamp must be a number.\");\n }\n this.timestamp = newTimestamp;\n }\n [Symbol.dispose]() {\n this.close();\n }\n static *_fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n 
audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n yield new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n }\n static fromAudioBuffer(audioBuffer, timestamp) {\n if (!(audioBuffer instanceof AudioBuffer)) {\n throw new TypeError(\"audioBuffer must be an AudioBuffer.\");\n }\n const MAX_FLOAT_COUNT = 48000 * 5;\n const numberOfChannels = audioBuffer.numberOfChannels;\n const sampleRate = audioBuffer.sampleRate;\n const totalFrames = audioBuffer.length;\n const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);\n let currentRelativeFrame = 0;\n let remainingFrames = totalFrames;\n const result = [];\n while (remainingFrames > 0) {\n const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);\n const chunkData = new Float32Array(numberOfChannels * framesToCopy);\n for (let channel = 0;channel < numberOfChannels; channel++) {\n audioBuffer.copyFromChannel(chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy), channel, currentRelativeFrame);\n }\n const audioSample = new AudioSample({\n format: \"f32-planar\",\n sampleRate,\n numberOfFrames: framesToCopy,\n numberOfChannels,\n timestamp: timestamp + currentRelativeFrame / sampleRate,\n data: chunkData\n });\n result.push(audioSample);\n currentRelativeFrame += framesToCopy;\n remainingFrames -= framesToCopy;\n }\n return result;\n }\n}\nvar getBytesPerSample = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return 1;\n case \"s16\":\n case \"s16-planar\":\n return 2;\n case \"s32\":\n case \"s32-planar\":\n return 4;\n case \"f32\":\n case \"f32-planar\":\n return 4;\n default:\n throw new Error(\"Unknown AudioSampleFormat\");\n }\n};\nvar 
formatIsPlanar = (format) => {\n switch (format) {\n case \"u8-planar\":\n case \"s16-planar\":\n case \"s32-planar\":\n case \"f32-planar\":\n return true;\n default:\n return false;\n }\n};\nvar getReadFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset) => (view.getUint8(offset) - 128) / 128;\n case \"s16\":\n case \"s16-planar\":\n return (view, offset) => view.getInt16(offset, true) / 32768;\n case \"s32\":\n case \"s32-planar\":\n return (view, offset) => view.getInt32(offset, true) / 2147483648;\n case \"f32\":\n case \"f32-planar\":\n return (view, offset) => view.getFloat32(offset, true);\n }\n};\nvar getWriteFunction = (format) => {\n switch (format) {\n case \"u8\":\n case \"u8-planar\":\n return (view, offset, value) => view.setUint8(offset, clamp((value + 1) * 127.5, 0, 255));\n case \"s16\":\n case \"s16-planar\":\n return (view, offset, value) => view.setInt16(offset, clamp(Math.round(value * 32767), -32768, 32767), true);\n case \"s32\":\n case \"s32-planar\":\n return (view, offset, value) => view.setInt32(offset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), true);\n case \"f32\":\n case \"f32-planar\":\n return (view, offset, value) => view.setFloat32(offset, value, true);\n }\n};\nvar isAudioData = (x) => {\n return typeof AudioData !== \"undefined\" && x instanceof AudioData;\n};\nvar doAudioDataCopyToWebKitWorkaround = (audioData, destView, srcFormat, destFormat, numChannels, planeIndex, frameOffset, copyFrameCount) => {\n const readFn = getReadFunction(srcFormat);\n const writeFn = getWriteFunction(destFormat);\n const srcBytesPerSample = getBytesPerSample(srcFormat);\n const destBytesPerSample = getBytesPerSample(destFormat);\n const srcIsPlanar = formatIsPlanar(srcFormat);\n const destIsPlanar = formatIsPlanar(destFormat);\n if (destIsPlanar) {\n if (srcIsPlanar) {\n const data = new ArrayBuffer(copyFrameCount * srcBytesPerSample);\n const dataView = toDataView(data);\n 
audioData.copyTo(data, {\n planeIndex,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = i * srcBytesPerSample;\n const destOffset = i * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n } else {\n const data = new ArrayBuffer(copyFrameCount * numChannels * srcBytesPerSample);\n const dataView = toDataView(data);\n audioData.copyTo(data, {\n planeIndex: 0,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = (i * numChannels + planeIndex) * srcBytesPerSample;\n const destOffset = i * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n if (srcIsPlanar) {\n const planeSize = copyFrameCount * srcBytesPerSample;\n const data = new ArrayBuffer(planeSize);\n const dataView = toDataView(data);\n for (let ch = 0;ch < numChannels; ch++) {\n audioData.copyTo(data, {\n planeIndex: ch,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n const srcOffset = i * srcBytesPerSample;\n const destOffset = (i * numChannels + ch) * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n } else {\n const data = new ArrayBuffer(copyFrameCount * numChannels * srcBytesPerSample);\n const dataView = toDataView(data);\n audioData.copyTo(data, {\n planeIndex: 0,\n frameOffset,\n frameCount: copyFrameCount,\n format: srcFormat\n });\n for (let i = 0;i < copyFrameCount; i++) {\n for (let ch = 0;ch < numChannels; ch++) {\n const idx = i * numChannels + ch;\n const srcOffset = idx * srcBytesPerSample;\n const destOffset = idx * destBytesPerSample;\n const sample = readFn(dataView, srcOffset);\n writeFn(destView, destOffset, sample);\n }\n }\n }\n }\n};\n\n// 
../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-misc.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar buildIsobmffMimeType = (info) => {\n const base = info.hasVideo ? \"video/\" : info.hasAudio ? \"audio/\" : \"application/\";\n let string = base + (info.isQuickTime ? \"quicktime\" : \"mp4\");\n if (info.codecStrings.length > 0) {\n const uniqueCodecMimeTypes = [...new Set(info.codecStrings)];\n string += `; codecs=\"${uniqueCodecMimeTypes.join(\", \")}\"`;\n }\n return string;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_BOX_HEADER_SIZE = 8;\nvar MAX_BOX_HEADER_SIZE = 16;\n\n// ../../node_modules/mediabunny/dist/modules/src/adts/adts-reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MIN_ADTS_FRAME_HEADER_SIZE = 7;\nvar MAX_ADTS_FRAME_HEADER_SIZE = 9;\nvar readAdtsFrameHeader = (slice) => {\n const startPos = slice.filePos;\n const bytes = readBytes(slice, 9);\n const bitstream = new Bitstream(bytes);\n const syncword = bitstream.readBits(12);\n if (syncword !== 4095) {\n return null;\n }\n bitstream.skipBits(1);\n const layer = bitstream.readBits(2);\n if (layer !== 0) {\n return null;\n }\n const protectionAbsence = bitstream.readBits(1);\n const objectType = bitstream.readBits(2) + 1;\n const samplingFrequencyIndex = bitstream.readBits(4);\n if (samplingFrequencyIndex === 15) {\n return null;\n }\n bitstream.skipBits(1);\n const channelConfiguration = bitstream.readBits(3);\n if (channelConfiguration === 0) {\n throw new Error(\"ADTS frames with channel configuration 0 are not supported.\");\n }\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n bitstream.skipBits(1);\n const frameLength = bitstream.readBits(13);\n bitstream.skipBits(11);\n const numberOfAacFrames = bitstream.readBits(2) + 1;\n if (numberOfAacFrames !== 1) {\n throw new Error(\"ADTS frames with more than one AAC frame are not supported.\");\n }\n let crcCheck = null;\n if (protectionAbsence === 1) {\n slice.filePos -= 2;\n } else {\n crcCheck = bitstream.readBits(16);\n }\n return {\n objectType,\n samplingFrequencyIndex,\n channelConfiguration,\n frameLength,\n numberOfAacFrames,\n crcCheck,\n startPos\n };\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/reader.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass FileSlice {\n constructor(bytes, view, offset, start, end) {\n this.bytes = bytes;\n this.view = view;\n this.offset = offset;\n this.start = start;\n this.end = end;\n this.bufferPos = start - offset;\n }\n static tempFromBytes(bytes) {\n return new FileSlice(bytes, toDataView(bytes), 0, 0, bytes.length);\n }\n get length() {\n return this.end - this.start;\n }\n get filePos() {\n return this.offset + this.bufferPos;\n }\n set filePos(value) {\n this.bufferPos = value - this.offset;\n }\n get remainingLength() {\n return Math.max(this.end - this.filePos, 0);\n }\n skip(byteCount) {\n this.bufferPos += byteCount;\n }\n slice(filePos, length = this.end - filePos) {\n if (filePos < this.start || filePos + length > this.end) {\n throw new RangeError(\"Slicing outside of original slice.\");\n }\n return new FileSlice(this.bytes, this.view, this.offset, filePos, filePos + length);\n }\n}\nvar checkIsInRange = (slice, bytesToRead) => {\n if (slice.filePos < slice.start || slice.filePos + bytesToRead > slice.end) {\n throw new RangeError(`Tried reading [${slice.filePos}, ${slice.filePos + bytesToRead}), but slice is` + ` [${slice.start}, ${slice.end}). This is likely an internal error, please report it alongside the file` + ` that caused it.`);\n }\n};\nvar readBytes = (slice, length) => {\n checkIsInRange(slice, length);\n const bytes = slice.bytes.subarray(slice.bufferPos, slice.bufferPos + length);\n slice.bufferPos += length;\n return bytes;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/muxer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Muxer {\n constructor(output) {\n this.mutex = new AsyncMutex;\n this.firstMediaStreamTimestamp = null;\n this.trackTimestampInfo = new WeakMap;\n this.output = output;\n }\n onTrackClose(track) {}\n validateAndNormalizeTimestamp(track, timestampInSeconds, isKeyPacket) {\n timestampInSeconds += track.source._timestampOffset;\n let timestampInfo = this.trackTimestampInfo.get(track);\n if (!timestampInfo) {\n if (!isKeyPacket) {\n throw new Error(\"First packet must be a key packet.\");\n }\n timestampInfo = {\n maxTimestamp: timestampInSeconds,\n maxTimestampBeforeLastKeyPacket: timestampInSeconds\n };\n this.trackTimestampInfo.set(track, timestampInfo);\n }\n if (timestampInSeconds < 0) {\n throw new Error(`Timestamps must be non-negative (got ${timestampInSeconds}s).`);\n }\n if (isKeyPacket) {\n timestampInfo.maxTimestampBeforeLastKeyPacket = timestampInfo.maxTimestamp;\n }\n if (timestampInSeconds < timestampInfo.maxTimestampBeforeLastKeyPacket) {\n throw new Error(`Timestamps cannot be smaller than the largest timestamp of the previous GOP (a GOP begins with a key` + ` packet and ends right before the next key packet). Got ${timestampInSeconds}s, but largest` + ` timestamp is ${timestampInfo.maxTimestampBeforeLastKeyPacket}s.`);\n }\n timestampInfo.maxTimestamp = Math.max(timestampInfo.maxTimestamp, timestampInSeconds);\n return timestampInSeconds;\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/subtitles.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar inlineTimestampRegex = /<(?:(\\d{2}):)?(\\d{2}):(\\d{2}).(\\d{3})>/g;\nvar formatSubtitleTimestamp = (timestamp) => {\n const hours = Math.floor(timestamp / (60 * 60 * 1000));\n const minutes = Math.floor(timestamp % (60 * 60 * 1000) / (60 * 1000));\n const seconds = Math.floor(timestamp % (60 * 1000) / 1000);\n const milliseconds = timestamp % 1000;\n return hours.toString().padStart(2, \"0\") + \":\" + minutes.toString().padStart(2, \"0\") + \":\" + seconds.toString().padStart(2, \"0\") + \".\" + milliseconds.toString().padStart(3, \"0\");\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-boxes.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass IsobmffBoxWriter {\n constructor(writer) {\n this.writer = writer;\n this.helper = new Uint8Array(8);\n this.helperView = new DataView(this.helper.buffer);\n this.offsets = new WeakMap;\n }\n writeU32(value) {\n this.helperView.setUint32(0, value, false);\n this.writer.write(this.helper.subarray(0, 4));\n }\n writeU64(value) {\n this.helperView.setUint32(0, Math.floor(value / 2 ** 32), false);\n this.helperView.setUint32(4, value, false);\n this.writer.write(this.helper.subarray(0, 8));\n }\n writeAscii(text) {\n for (let i = 0;i < text.length; i++) {\n this.helperView.setUint8(i % 8, text.charCodeAt(i));\n if (i % 8 === 7)\n this.writer.write(this.helper);\n }\n if (text.length % 8 !== 0) {\n this.writer.write(this.helper.subarray(0, text.length % 8));\n }\n }\n writeBox(box) {\n this.offsets.set(box, this.writer.getPos());\n if (box.contents && !box.children) {\n this.writeBoxHeader(box, box.size ?? 
box.contents.byteLength + 8);\n this.writer.write(box.contents);\n } else {\n const startPos = this.writer.getPos();\n this.writeBoxHeader(box, 0);\n if (box.contents)\n this.writer.write(box.contents);\n if (box.children) {\n for (const child of box.children)\n if (child)\n this.writeBox(child);\n }\n const endPos = this.writer.getPos();\n const size = box.size ?? endPos - startPos;\n this.writer.seek(startPos);\n this.writeBoxHeader(box, size);\n this.writer.seek(endPos);\n }\n }\n writeBoxHeader(box, size) {\n this.writeU32(box.largeSize ? 1 : size);\n this.writeAscii(box.type);\n if (box.largeSize)\n this.writeU64(size);\n }\n measureBoxHeader(box) {\n return 8 + (box.largeSize ? 8 : 0);\n }\n patchBox(box) {\n const boxOffset = this.offsets.get(box);\n assert(boxOffset !== undefined);\n const endPos = this.writer.getPos();\n this.writer.seek(boxOffset);\n this.writeBox(box);\n this.writer.seek(endPos);\n }\n measureBox(box) {\n if (box.contents && !box.children) {\n const headerSize = this.measureBoxHeader(box);\n return headerSize + box.contents.byteLength;\n } else {\n let result = this.measureBoxHeader(box);\n if (box.contents)\n result += box.contents.byteLength;\n if (box.children) {\n for (const child of box.children)\n if (child)\n result += this.measureBox(child);\n }\n return result;\n }\n }\n}\nvar bytes = /* @__PURE__ */ new Uint8Array(8);\nvar view = /* @__PURE__ */ new DataView(bytes.buffer);\nvar u8 = (value) => {\n return [(value % 256 + 256) % 256];\n};\nvar u16 = (value) => {\n view.setUint16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar i16 = (value) => {\n view.setInt16(0, value, false);\n return [bytes[0], bytes[1]];\n};\nvar u24 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[1], bytes[2], bytes[3]];\n};\nvar u32 = (value) => {\n view.setUint32(0, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar i32 = (value) => {\n view.setInt32(0, value, false);\n return [bytes[0], bytes[1], 
bytes[2], bytes[3]];\n};\nvar u64 = (value) => {\n view.setUint32(0, Math.floor(value / 2 ** 32), false);\n view.setUint32(4, value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7]];\n};\nvar fixed_8_8 = (value) => {\n view.setInt16(0, 2 ** 8 * value, false);\n return [bytes[0], bytes[1]];\n};\nvar fixed_16_16 = (value) => {\n view.setInt32(0, 2 ** 16 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar fixed_2_30 = (value) => {\n view.setInt32(0, 2 ** 30 * value, false);\n return [bytes[0], bytes[1], bytes[2], bytes[3]];\n};\nvar variableUnsignedInt = (value, byteLength) => {\n const bytes2 = [];\n let remaining = value;\n do {\n let byte = remaining & 127;\n remaining >>= 7;\n if (bytes2.length > 0) {\n byte |= 128;\n }\n bytes2.push(byte);\n if (byteLength !== undefined) {\n byteLength--;\n }\n } while (remaining > 0 || byteLength);\n return bytes2.reverse();\n};\nvar ascii = (text, nullTerminated = false) => {\n const bytes2 = Array(text.length).fill(null).map((_, i) => text.charCodeAt(i));\n if (nullTerminated)\n bytes2.push(0);\n return bytes2;\n};\nvar lastPresentedSample = (samples) => {\n let result = null;\n for (const sample of samples) {\n if (!result || sample.timestamp > result.timestamp) {\n result = sample;\n }\n }\n return result;\n};\nvar rotationMatrix = (rotationInDegrees) => {\n const theta = rotationInDegrees * (Math.PI / 180);\n const cosTheta = Math.round(Math.cos(theta));\n const sinTheta = Math.round(Math.sin(theta));\n return [\n cosTheta,\n sinTheta,\n 0,\n -sinTheta,\n cosTheta,\n 0,\n 0,\n 0,\n 1\n ];\n};\nvar IDENTITY_MATRIX = /* @__PURE__ */ rotationMatrix(0);\nvar matrixToBytes = (matrix) => {\n return [\n fixed_16_16(matrix[0]),\n fixed_16_16(matrix[1]),\n fixed_2_30(matrix[2]),\n fixed_16_16(matrix[3]),\n fixed_16_16(matrix[4]),\n fixed_2_30(matrix[5]),\n fixed_16_16(matrix[6]),\n fixed_16_16(matrix[7]),\n fixed_2_30(matrix[8])\n ];\n};\nvar box = (type, 
contents, children) => ({\n type,\n contents: contents && new Uint8Array(contents.flat(10)),\n children\n});\nvar fullBox = (type, version, flags, contents, children) => box(type, [u8(version), u24(flags), contents ?? []], children);\nvar ftyp = (details) => {\n const minorVersion = 512;\n if (details.isQuickTime) {\n return box(\"ftyp\", [\n ascii(\"qt \"),\n u32(minorVersion),\n ascii(\"qt \")\n ]);\n }\n if (details.fragmented) {\n return box(\"ftyp\", [\n ascii(\"iso5\"),\n u32(minorVersion),\n ascii(\"iso5\"),\n ascii(\"iso6\"),\n ascii(\"mp41\")\n ]);\n }\n return box(\"ftyp\", [\n ascii(\"isom\"),\n u32(minorVersion),\n ascii(\"isom\"),\n details.holdsAvc ? ascii(\"avc1\") : [],\n ascii(\"mp41\")\n ]);\n};\nvar mdat = (reserveLargeSize) => ({ type: \"mdat\", largeSize: reserveLargeSize });\nvar free = (size) => ({ type: \"free\", size });\nvar moov = (muxer) => box(\"moov\", undefined, [\n mvhd(muxer.creationTime, muxer.trackDatas),\n ...muxer.trackDatas.map((x) => trak(x, muxer.creationTime)),\n muxer.isFragmented ? mvex(muxer.trackDatas) : null,\n udta(muxer)\n]);\nvar mvhd = (creationTime, trackDatas) => {\n const duration = intoTimescale(Math.max(0, ...trackDatas.filter((x) => x.samples.length > 0).map((x) => {\n const lastSample = lastPresentedSample(x.samples);\n return lastSample.timestamp + lastSample.duration;\n })), GLOBAL_TIMESCALE);\n const nextTrackId = Math.max(0, ...trackDatas.map((x) => x.track.id)) + 1;\n const needsU64 = !isU32(creationTime) || !isU32(duration);\n const u32OrU64 = needsU64 ? 
u64 : u32;\n return fullBox(\"mvhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(GLOBAL_TIMESCALE),\n u32OrU64(duration),\n fixed_16_16(1),\n fixed_8_8(1),\n Array(10).fill(0),\n matrixToBytes(IDENTITY_MATRIX),\n Array(24).fill(0),\n u32(nextTrackId)\n ]);\n};\nvar trak = (trackData, creationTime) => {\n const trackMetadata = getTrackMetadata(trackData);\n return box(\"trak\", undefined, [\n tkhd(trackData, creationTime),\n mdia(trackData, creationTime),\n trackMetadata.name !== undefined ? box(\"udta\", undefined, [\n box(\"name\", [\n ...textEncoder.encode(trackMetadata.name)\n ])\n ]) : null\n ]);\n};\nvar tkhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const durationInGlobalTimescale = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, GLOBAL_TIMESCALE);\n const needsU64 = !isU32(creationTime) || !isU32(durationInGlobalTimescale);\n const u32OrU64 = needsU64 ? u64 : u32;\n let matrix;\n if (trackData.type === \"video\") {\n const rotation = trackData.track.metadata.rotation;\n matrix = rotationMatrix(rotation ?? 0);\n } else {\n matrix = IDENTITY_MATRIX;\n }\n let flags = 2;\n if (trackData.track.metadata.disposition?.default !== false) {\n flags |= 1;\n }\n return fullBox(\"tkhd\", +needsU64, flags, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.track.id),\n u32(0),\n u32OrU64(durationInGlobalTimescale),\n Array(8).fill(0),\n u16(0),\n u16(trackData.track.id),\n fixed_8_8(trackData.type === \"audio\" ? 1 : 0),\n u16(0),\n matrixToBytes(matrix),\n fixed_16_16(trackData.type === \"video\" ? trackData.info.width : 0),\n fixed_16_16(trackData.type === \"video\" ? 
trackData.info.height : 0)\n ]);\n};\nvar mdia = (trackData, creationTime) => box(\"mdia\", undefined, [\n mdhd(trackData, creationTime),\n hdlr(true, TRACK_TYPE_TO_COMPONENT_SUBTYPE[trackData.type], TRACK_TYPE_TO_HANDLER_NAME[trackData.type]),\n minf(trackData)\n]);\nvar mdhd = (trackData, creationTime) => {\n const lastSample = lastPresentedSample(trackData.samples);\n const localDuration = intoTimescale(lastSample ? lastSample.timestamp + lastSample.duration : 0, trackData.timescale);\n const needsU64 = !isU32(creationTime) || !isU32(localDuration);\n const u32OrU64 = needsU64 ? u64 : u32;\n return fullBox(\"mdhd\", +needsU64, 0, [\n u32OrU64(creationTime),\n u32OrU64(creationTime),\n u32(trackData.timescale),\n u32OrU64(localDuration),\n u16(getLanguageCodeInt(trackData.track.metadata.languageCode ?? UNDETERMINED_LANGUAGE)),\n u16(0)\n ]);\n};\nvar TRACK_TYPE_TO_COMPONENT_SUBTYPE = {\n video: \"vide\",\n audio: \"soun\",\n subtitle: \"text\"\n};\nvar TRACK_TYPE_TO_HANDLER_NAME = {\n video: \"MediabunnyVideoHandler\",\n audio: \"MediabunnySoundHandler\",\n subtitle: \"MediabunnyTextHandler\"\n};\nvar hdlr = (hasComponentType, handlerType, name, manufacturer = \"\\x00\\x00\\x00\\x00\") => fullBox(\"hdlr\", 0, 0, [\n hasComponentType ? 
ascii(\"mhlr\") : u32(0),\n ascii(handlerType),\n ascii(manufacturer),\n u32(0),\n u32(0),\n ascii(name, true)\n]);\nvar minf = (trackData) => box(\"minf\", undefined, [\n TRACK_TYPE_TO_HEADER_BOX[trackData.type](),\n dinf(),\n stbl(trackData)\n]);\nvar vmhd = () => fullBox(\"vmhd\", 0, 1, [\n u16(0),\n u16(0),\n u16(0),\n u16(0)\n]);\nvar smhd = () => fullBox(\"smhd\", 0, 0, [\n u16(0),\n u16(0)\n]);\nvar nmhd = () => fullBox(\"nmhd\", 0, 0);\nvar TRACK_TYPE_TO_HEADER_BOX = {\n video: vmhd,\n audio: smhd,\n subtitle: nmhd\n};\nvar dinf = () => box(\"dinf\", undefined, [\n dref()\n]);\nvar dref = () => fullBox(\"dref\", 0, 0, [\n u32(1)\n], [\n url()\n]);\nvar url = () => fullBox(\"url \", 0, 1);\nvar stbl = (trackData) => {\n const needsCtts = trackData.compositionTimeOffsetTable.length > 1 || trackData.compositionTimeOffsetTable.some((x) => x.sampleCompositionTimeOffset !== 0);\n return box(\"stbl\", undefined, [\n stsd(trackData),\n stts(trackData),\n needsCtts ? ctts(trackData) : null,\n needsCtts ? 
cslg(trackData) : null,\n stsc(trackData),\n stsz(trackData),\n stco(trackData),\n stss(trackData)\n ]);\n};\nvar stsd = (trackData) => {\n let sampleDescription;\n if (trackData.type === \"video\") {\n sampleDescription = videoSampleDescription(videoCodecToBoxName(trackData.track.source._codec, trackData.info.decoderConfig.codec), trackData);\n } else if (trackData.type === \"audio\") {\n const boxName = audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime);\n assert(boxName);\n sampleDescription = soundSampleDescription(boxName, trackData);\n } else if (trackData.type === \"subtitle\") {\n sampleDescription = subtitleSampleDescription(SUBTITLE_CODEC_TO_BOX_NAME[trackData.track.source._codec], trackData);\n }\n assert(sampleDescription);\n return fullBox(\"stsd\", 0, 0, [\n u32(1)\n ], [\n sampleDescription\n ]);\n};\nvar videoSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1),\n u16(0),\n u16(0),\n Array(12).fill(0),\n u16(trackData.info.width),\n u16(trackData.info.height),\n u32(4718592),\n u32(4718592),\n u32(0),\n u16(1),\n Array(32).fill(0),\n u16(24),\n i16(65535)\n], [\n VIDEO_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData),\n colorSpaceIsComplete(trackData.info.decoderConfig.colorSpace) ? colr(trackData) : null\n]);\nvar colr = (trackData) => box(\"colr\", [\n ascii(\"nclx\"),\n u16(COLOR_PRIMARIES_MAP[trackData.info.decoderConfig.colorSpace.primaries]),\n u16(TRANSFER_CHARACTERISTICS_MAP[trackData.info.decoderConfig.colorSpace.transfer]),\n u16(MATRIX_COEFFICIENTS_MAP[trackData.info.decoderConfig.colorSpace.matrix]),\n u8((trackData.info.decoderConfig.colorSpace.fullRange ? 
1 : 0) << 7)\n]);\nvar avcC = (trackData) => trackData.info.decoderConfig && box(\"avcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar hvcC = (trackData) => trackData.info.decoderConfig && box(\"hvcC\", [\n ...toUint8Array(trackData.info.decoderConfig.description)\n]);\nvar vpcC = (trackData) => {\n if (!trackData.info.decoderConfig) {\n return null;\n }\n const decoderConfig = trackData.info.decoderConfig;\n const parts = decoderConfig.codec.split(\".\");\n const profile = Number(parts[1]);\n const level = Number(parts[2]);\n const bitDepth = Number(parts[3]);\n const chromaSubsampling = parts[4] ? Number(parts[4]) : 1;\n const videoFullRangeFlag = parts[8] ? Number(parts[8]) : Number(decoderConfig.colorSpace?.fullRange ?? 0);\n const thirdByte = (bitDepth << 4) + (chromaSubsampling << 1) + videoFullRangeFlag;\n const colourPrimaries = parts[5] ? Number(parts[5]) : decoderConfig.colorSpace?.primaries ? COLOR_PRIMARIES_MAP[decoderConfig.colorSpace.primaries] : 2;\n const transferCharacteristics = parts[6] ? Number(parts[6]) : decoderConfig.colorSpace?.transfer ? TRANSFER_CHARACTERISTICS_MAP[decoderConfig.colorSpace.transfer] : 2;\n const matrixCoefficients = parts[7] ? Number(parts[7]) : decoderConfig.colorSpace?.matrix ? 
MATRIX_COEFFICIENTS_MAP[decoderConfig.colorSpace.matrix] : 2;\n return fullBox(\"vpcC\", 1, 0, [\n u8(profile),\n u8(level),\n u8(thirdByte),\n u8(colourPrimaries),\n u8(transferCharacteristics),\n u8(matrixCoefficients),\n u16(0)\n ]);\n};\nvar av1C = (trackData) => {\n return box(\"av1C\", generateAv1CodecConfigurationFromCodecString(trackData.info.decoderConfig.codec));\n};\nvar soundSampleDescription = (compressionType, trackData) => {\n let version = 0;\n let contents;\n let sampleSizeInBits = 16;\n if (PCM_AUDIO_CODECS.includes(trackData.track.source._codec)) {\n const codec = trackData.track.source._codec;\n const { sampleSize } = parsePcmCodec(codec);\n sampleSizeInBits = 8 * sampleSize;\n if (sampleSizeInBits > 16) {\n version = 1;\n }\n }\n if (version === 0) {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(sampleSizeInBits),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0)\n ];\n } else {\n contents = [\n Array(6).fill(0),\n u16(1),\n u16(version),\n u16(0),\n u32(0),\n u16(trackData.info.numberOfChannels),\n u16(Math.min(sampleSizeInBits, 16)),\n u16(0),\n u16(0),\n u16(trackData.info.sampleRate < 2 ** 16 ? trackData.info.sampleRate : 0),\n u16(0),\n u32(1),\n u32(sampleSizeInBits / 8),\n u32(trackData.info.numberOfChannels * sampleSizeInBits / 8),\n u32(2)\n ];\n }\n return box(compressionType, contents, [\n audioCodecToConfigurationBox(trackData.track.source._codec, trackData.muxer.isQuickTime)?.(trackData) ?? 
null\n ]);\n};\nvar esds = (trackData) => {\n let objectTypeIndication;\n switch (trackData.track.source._codec) {\n case \"aac\":\n {\n objectTypeIndication = 64;\n }\n ;\n break;\n case \"mp3\":\n {\n objectTypeIndication = 107;\n }\n ;\n break;\n case \"vorbis\":\n {\n objectTypeIndication = 221;\n }\n ;\n break;\n default:\n throw new Error(`Unhandled audio codec: ${trackData.track.source._codec}`);\n }\n let bytes2 = [\n ...u8(objectTypeIndication),\n ...u8(21),\n ...u24(0),\n ...u32(0),\n ...u32(0)\n ];\n if (trackData.info.decoderConfig.description) {\n const description = toUint8Array(trackData.info.decoderConfig.description);\n bytes2 = [\n ...bytes2,\n ...u8(5),\n ...variableUnsignedInt(description.byteLength),\n ...description\n ];\n }\n bytes2 = [\n ...u16(1),\n ...u8(0),\n ...u8(4),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2,\n ...u8(6),\n ...u8(1),\n ...u8(2)\n ];\n bytes2 = [\n ...u8(3),\n ...variableUnsignedInt(bytes2.length),\n ...bytes2\n ];\n return fullBox(\"esds\", 0, 0, bytes2);\n};\nvar wave = (trackData) => {\n return box(\"wave\", undefined, [\n frma(trackData),\n enda(trackData),\n box(\"\\x00\\x00\\x00\\x00\")\n ]);\n};\nvar frma = (trackData) => {\n return box(\"frma\", [\n ascii(audioCodecToBoxName(trackData.track.source._codec, trackData.muxer.isQuickTime))\n ]);\n};\nvar enda = (trackData) => {\n const { littleEndian } = parsePcmCodec(trackData.track.source._codec);\n return box(\"enda\", [\n u16(+littleEndian)\n ]);\n};\nvar dOps = (trackData) => {\n let outputChannelCount = trackData.info.numberOfChannels;\n let preSkip = 3840;\n let inputSampleRate = trackData.info.sampleRate;\n let outputGain = 0;\n let channelMappingFamily = 0;\n let channelMappingTable = new Uint8Array(0);\n const description = trackData.info.decoderConfig?.description;\n if (description) {\n assert(description.byteLength >= 18);\n const bytes2 = toUint8Array(description);\n const header = parseOpusIdentificationHeader(bytes2);\n outputChannelCount = 
header.outputChannelCount;\n preSkip = header.preSkip;\n inputSampleRate = header.inputSampleRate;\n outputGain = header.outputGain;\n channelMappingFamily = header.channelMappingFamily;\n if (header.channelMappingTable) {\n channelMappingTable = header.channelMappingTable;\n }\n }\n return box(\"dOps\", [\n u8(0),\n u8(outputChannelCount),\n u16(preSkip),\n u32(inputSampleRate),\n i16(outputGain),\n u8(channelMappingFamily),\n ...channelMappingTable\n ]);\n};\nvar dfLa = (trackData) => {\n const description = trackData.info.decoderConfig?.description;\n assert(description);\n const bytes2 = toUint8Array(description);\n return fullBox(\"dfLa\", 0, 0, [\n ...bytes2.subarray(4)\n ]);\n};\nvar pcmC = (trackData) => {\n const { littleEndian, sampleSize } = parsePcmCodec(trackData.track.source._codec);\n const formatFlags = +littleEndian;\n return fullBox(\"pcmC\", 0, 0, [\n u8(formatFlags),\n u8(8 * sampleSize)\n ]);\n};\nvar subtitleSampleDescription = (compressionType, trackData) => box(compressionType, [\n Array(6).fill(0),\n u16(1)\n], [\n SUBTITLE_CODEC_TO_CONFIGURATION_BOX[trackData.track.source._codec](trackData)\n]);\nvar vttC = (trackData) => box(\"vttC\", [\n ...textEncoder.encode(trackData.info.config.description)\n]);\nvar stts = (trackData) => {\n return fullBox(\"stts\", 0, 0, [\n u32(trackData.timeToSampleTable.length),\n trackData.timeToSampleTable.map((x) => [\n u32(x.sampleCount),\n u32(x.sampleDelta)\n ])\n ]);\n};\nvar stss = (trackData) => {\n if (trackData.samples.every((x) => x.type === \"key\"))\n return null;\n const keySamples = [...trackData.samples.entries()].filter(([, sample]) => sample.type === \"key\");\n return fullBox(\"stss\", 0, 0, [\n u32(keySamples.length),\n keySamples.map(([index]) => u32(index + 1))\n ]);\n};\nvar stsc = (trackData) => {\n return fullBox(\"stsc\", 0, 0, [\n u32(trackData.compactlyCodedChunkTable.length),\n trackData.compactlyCodedChunkTable.map((x) => [\n u32(x.firstChunk),\n u32(x.samplesPerChunk),\n u32(1)\n 
])\n ]);\n};\nvar stsz = (trackData) => {\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n const { sampleSize } = parsePcmCodec(trackData.track.source._codec);\n return fullBox(\"stsz\", 0, 0, [\n u32(sampleSize * trackData.info.numberOfChannels),\n u32(trackData.samples.reduce((acc, x) => acc + intoTimescale(x.duration, trackData.timescale), 0))\n ]);\n }\n return fullBox(\"stsz\", 0, 0, [\n u32(0),\n u32(trackData.samples.length),\n trackData.samples.map((x) => u32(x.size))\n ]);\n};\nvar stco = (trackData) => {\n if (trackData.finalizedChunks.length > 0 && last(trackData.finalizedChunks).offset >= 2 ** 32) {\n return fullBox(\"co64\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u64(x.offset))\n ]);\n }\n return fullBox(\"stco\", 0, 0, [\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((x) => u32(x.offset))\n ]);\n};\nvar ctts = (trackData) => {\n return fullBox(\"ctts\", 1, 0, [\n u32(trackData.compositionTimeOffsetTable.length),\n trackData.compositionTimeOffsetTable.map((x) => [\n u32(x.sampleCount),\n i32(x.sampleCompositionTimeOffset)\n ])\n ]);\n};\nvar cslg = (trackData) => {\n let leastDecodeToDisplayDelta = Infinity;\n let greatestDecodeToDisplayDelta = -Infinity;\n let compositionStartTime = Infinity;\n let compositionEndTime = -Infinity;\n assert(trackData.compositionTimeOffsetTable.length > 0);\n assert(trackData.samples.length > 0);\n for (let i = 0;i < trackData.compositionTimeOffsetTable.length; i++) {\n const entry = trackData.compositionTimeOffsetTable[i];\n leastDecodeToDisplayDelta = Math.min(leastDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n greatestDecodeToDisplayDelta = Math.max(greatestDecodeToDisplayDelta, entry.sampleCompositionTimeOffset);\n }\n for (let i = 0;i < trackData.samples.length; i++) {\n const sample = trackData.samples[i];\n compositionStartTime = Math.min(compositionStartTime, intoTimescale(sample.timestamp, 
trackData.timescale));\n compositionEndTime = Math.max(compositionEndTime, intoTimescale(sample.timestamp + sample.duration, trackData.timescale));\n }\n const compositionToDtsShift = Math.max(-leastDecodeToDisplayDelta, 0);\n if (compositionEndTime >= 2 ** 31) {\n return null;\n }\n return fullBox(\"cslg\", 0, 0, [\n i32(compositionToDtsShift),\n i32(leastDecodeToDisplayDelta),\n i32(greatestDecodeToDisplayDelta),\n i32(compositionStartTime),\n i32(compositionEndTime)\n ]);\n};\nvar mvex = (trackDatas) => {\n return box(\"mvex\", undefined, trackDatas.map(trex));\n};\nvar trex = (trackData) => {\n return fullBox(\"trex\", 0, 0, [\n u32(trackData.track.id),\n u32(1),\n u32(0),\n u32(0),\n u32(0)\n ]);\n};\nvar moof = (sequenceNumber, trackDatas) => {\n return box(\"moof\", undefined, [\n mfhd(sequenceNumber),\n ...trackDatas.map(traf)\n ]);\n};\nvar mfhd = (sequenceNumber) => {\n return fullBox(\"mfhd\", 0, 0, [\n u32(sequenceNumber)\n ]);\n};\nvar fragmentSampleFlags = (sample) => {\n let byte1 = 0;\n let byte2 = 0;\n const byte3 = 0;\n const byte4 = 0;\n const sampleIsDifferenceSample = sample.type === \"delta\";\n byte2 |= +sampleIsDifferenceSample;\n if (sampleIsDifferenceSample) {\n byte1 |= 1;\n } else {\n byte1 |= 2;\n }\n return byte1 << 24 | byte2 << 16 | byte3 << 8 | byte4;\n};\nvar traf = (trackData) => {\n return box(\"traf\", undefined, [\n tfhd(trackData),\n tfdt(trackData),\n trun(trackData)\n ]);\n};\nvar tfhd = (trackData) => {\n assert(trackData.currentChunk);\n let tfFlags = 0;\n tfFlags |= 8;\n tfFlags |= 16;\n tfFlags |= 32;\n tfFlags |= 131072;\n const referenceSample = trackData.currentChunk.samples[1] ?? 
trackData.currentChunk.samples[0];\n const referenceSampleInfo = {\n duration: referenceSample.timescaleUnitsToNextSample,\n size: referenceSample.size,\n flags: fragmentSampleFlags(referenceSample)\n };\n return fullBox(\"tfhd\", 0, tfFlags, [\n u32(trackData.track.id),\n u32(referenceSampleInfo.duration),\n u32(referenceSampleInfo.size),\n u32(referenceSampleInfo.flags)\n ]);\n};\nvar tfdt = (trackData) => {\n assert(trackData.currentChunk);\n return fullBox(\"tfdt\", 1, 0, [\n u64(intoTimescale(trackData.currentChunk.startTimestamp, trackData.timescale))\n ]);\n};\nvar trun = (trackData) => {\n assert(trackData.currentChunk);\n const allSampleDurations = trackData.currentChunk.samples.map((x) => x.timescaleUnitsToNextSample);\n const allSampleSizes = trackData.currentChunk.samples.map((x) => x.size);\n const allSampleFlags = trackData.currentChunk.samples.map(fragmentSampleFlags);\n const allSampleCompositionTimeOffsets = trackData.currentChunk.samples.map((x) => intoTimescale(x.timestamp - x.decodeTimestamp, trackData.timescale));\n const uniqueSampleDurations = new Set(allSampleDurations);\n const uniqueSampleSizes = new Set(allSampleSizes);\n const uniqueSampleFlags = new Set(allSampleFlags);\n const uniqueSampleCompositionTimeOffsets = new Set(allSampleCompositionTimeOffsets);\n const firstSampleFlagsPresent = uniqueSampleFlags.size === 2 && allSampleFlags[0] !== allSampleFlags[1];\n const sampleDurationPresent = uniqueSampleDurations.size > 1;\n const sampleSizePresent = uniqueSampleSizes.size > 1;\n const sampleFlagsPresent = !firstSampleFlagsPresent && uniqueSampleFlags.size > 1;\n const sampleCompositionTimeOffsetsPresent = uniqueSampleCompositionTimeOffsets.size > 1 || [...uniqueSampleCompositionTimeOffsets].some((x) => x !== 0);\n let flags = 0;\n flags |= 1;\n flags |= 4 * +firstSampleFlagsPresent;\n flags |= 256 * +sampleDurationPresent;\n flags |= 512 * +sampleSizePresent;\n flags |= 1024 * +sampleFlagsPresent;\n flags |= 2048 * 
+sampleCompositionTimeOffsetsPresent;\n return fullBox(\"trun\", 1, flags, [\n u32(trackData.currentChunk.samples.length),\n u32(trackData.currentChunk.offset - trackData.currentChunk.moofOffset || 0),\n firstSampleFlagsPresent ? u32(allSampleFlags[0]) : [],\n trackData.currentChunk.samples.map((_, i) => [\n sampleDurationPresent ? u32(allSampleDurations[i]) : [],\n sampleSizePresent ? u32(allSampleSizes[i]) : [],\n sampleFlagsPresent ? u32(allSampleFlags[i]) : [],\n sampleCompositionTimeOffsetsPresent ? i32(allSampleCompositionTimeOffsets[i]) : []\n ])\n ]);\n};\nvar mfra = (trackDatas) => {\n return box(\"mfra\", undefined, [\n ...trackDatas.map(tfra),\n mfro()\n ]);\n};\nvar tfra = (trackData, trackIndex) => {\n const version = 1;\n return fullBox(\"tfra\", version, 0, [\n u32(trackData.track.id),\n u32(63),\n u32(trackData.finalizedChunks.length),\n trackData.finalizedChunks.map((chunk) => [\n u64(intoTimescale(chunk.samples[0].timestamp, trackData.timescale)),\n u64(chunk.moofOffset),\n u32(trackIndex + 1),\n u32(1),\n u32(1)\n ])\n ]);\n};\nvar mfro = () => {\n return fullBox(\"mfro\", 0, 0, [\n u32(0)\n ]);\n};\nvar vtte = () => box(\"vtte\");\nvar vttc = (payload, timestamp, identifier, settings, sourceId) => box(\"vttc\", undefined, [\n sourceId !== null ? box(\"vsid\", [i32(sourceId)]) : null,\n identifier !== null ? box(\"iden\", [...textEncoder.encode(identifier)]) : null,\n timestamp !== null ? box(\"ctim\", [...textEncoder.encode(formatSubtitleTimestamp(timestamp))]) : null,\n settings !== null ? box(\"sttg\", [...textEncoder.encode(settings)]) : null,\n box(\"payl\", [...textEncoder.encode(payload)])\n]);\nvar vtta = (notes) => box(\"vtta\", [...textEncoder.encode(notes)]);\nvar udta = (muxer) => {\n const boxes = [];\n const metadataFormat = muxer.format._options.metadataFormat ?? 
\"auto\";\n const metadataTags = muxer.output._metadataTags;\n if (metadataFormat === \"mdir\" || metadataFormat === \"auto\" && !muxer.isQuickTime) {\n const metaBox = metaMdir(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"mdta\") {\n const metaBox = metaMdta(metadataTags);\n if (metaBox)\n boxes.push(metaBox);\n } else if (metadataFormat === \"udta\" || metadataFormat === \"auto\" && muxer.isQuickTime) {\n addQuickTimeMetadataTagBoxes(boxes, muxer.output._metadataTags);\n }\n if (boxes.length === 0) {\n return null;\n }\n return box(\"udta\", undefined, boxes);\n};\nvar addQuickTimeMetadataTagBoxes = (boxes, tags) => {\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9nam\", value));\n }\n ;\n break;\n case \"description\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9des\", value));\n }\n ;\n break;\n case \"artist\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9ART\", value));\n }\n ;\n break;\n case \"album\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9alb\", value));\n }\n ;\n break;\n case \"albumArtist\":\n {\n boxes.push(metadataTagStringBoxShort(\"albr\", value));\n }\n ;\n break;\n case \"genre\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9gen\", value));\n }\n ;\n break;\n case \"date\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9day\", value.toISOString().slice(0, 10)));\n }\n ;\n break;\n case \"comment\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9cmt\", value));\n }\n ;\n break;\n case \"lyrics\":\n {\n boxes.push(metadataTagStringBoxShort(\"\u00A9lyr\", value));\n }\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n case \"discNumber\":\n case \"discsTotal\":\n case \"trackNumber\":\n case \"tracksTotal\":\n case \"images\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == 
null || key.length !== 4 || boxes.some((x) => x.type === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n boxes.push(metadataTagStringBoxShort(key, value));\n } else if (value instanceof Uint8Array) {\n boxes.push(box(key, Array.from(value)));\n }\n }\n }\n};\nvar metadataTagStringBoxShort = (name, value) => {\n const encoded = textEncoder.encode(value);\n return box(name, [\n u16(encoded.length),\n u16(getLanguageCodeInt(\"und\")),\n Array.from(encoded)\n ]);\n};\nvar DATA_BOX_MIME_TYPE_MAP = {\n \"image/jpeg\": 13,\n \"image/png\": 14,\n \"image/bmp\": 27\n};\nvar generateMetadataPairs = (tags, isMdta) => {\n const pairs = [];\n for (const { key, value } of keyValueIterator(tags)) {\n switch (key) {\n case \"title\":\n {\n pairs.push({ key: isMdta ? \"title\" : \"\u00A9nam\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"description\":\n {\n pairs.push({ key: isMdta ? \"description\" : \"\u00A9des\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"artist\":\n {\n pairs.push({ key: isMdta ? \"artist\" : \"\u00A9ART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"album\":\n {\n pairs.push({ key: isMdta ? \"album\" : \"\u00A9alb\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"albumArtist\":\n {\n pairs.push({ key: isMdta ? \"album_artist\" : \"aART\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"comment\":\n {\n pairs.push({ key: isMdta ? \"comment\" : \"\u00A9cmt\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"genre\":\n {\n pairs.push({ key: isMdta ? \"genre\" : \"\u00A9gen\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"lyrics\":\n {\n pairs.push({ key: isMdta ? \"lyrics\" : \"\u00A9lyr\", value: dataStringBoxLong(value) });\n }\n ;\n break;\n case \"date\":\n {\n pairs.push({\n key: isMdta ? 
\"date\" : \"\u00A9day\",\n value: dataStringBoxLong(value.toISOString().slice(0, 10))\n });\n }\n ;\n break;\n case \"images\":\n {\n for (const image of value) {\n if (image.kind !== \"coverFront\") {\n continue;\n }\n pairs.push({ key: \"covr\", value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[image.mimeType] ?? 0),\n u32(0),\n Array.from(image.data)\n ]) });\n }\n }\n ;\n break;\n case \"trackNumber\":\n {\n if (isMdta) {\n const string = tags.tracksTotal !== undefined ? `${value}/${tags.tracksTotal}` : value.toString();\n pairs.push({ key: \"track\", value: dataStringBoxLong(string) });\n } else {\n pairs.push({ key: \"trkn\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.tracksTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"discNumber\":\n {\n if (!isMdta) {\n pairs.push({ key: \"disc\", value: box(\"data\", [\n u32(0),\n u32(0),\n u16(0),\n u16(value),\n u16(tags.discsTotal ?? 0),\n u16(0)\n ]) });\n }\n }\n ;\n break;\n case \"tracksTotal\":\n case \"discsTotal\":\n {}\n ;\n break;\n case \"raw\":\n {}\n ;\n break;\n default:\n assertNever(key);\n }\n }\n if (tags.raw) {\n for (const key in tags.raw) {\n const value = tags.raw[key];\n if (value == null || !isMdta && key.length !== 4 || pairs.some((x) => x.key === key)) {\n continue;\n }\n if (typeof value === \"string\") {\n pairs.push({ key, value: dataStringBoxLong(value) });\n } else if (value instanceof Uint8Array) {\n pairs.push({ key, value: box(\"data\", [\n u32(0),\n u32(0),\n Array.from(value)\n ]) });\n } else if (value instanceof RichImageData) {\n pairs.push({ key, value: box(\"data\", [\n u32(DATA_BOX_MIME_TYPE_MAP[value.mimeType] ?? 
0),\n u32(0),\n Array.from(value.data)\n ]) });\n }\n }\n }\n return pairs;\n};\nvar metaMdir = (tags) => {\n const pairs = generateMetadataPairs(tags, false);\n if (pairs.length === 0) {\n return null;\n }\n return fullBox(\"meta\", 0, 0, undefined, [\n hdlr(false, \"mdir\", \"\", \"appl\"),\n box(\"ilst\", undefined, pairs.map((pair) => box(pair.key, undefined, [pair.value])))\n ]);\n};\nvar metaMdta = (tags) => {\n const pairs = generateMetadataPairs(tags, true);\n if (pairs.length === 0) {\n return null;\n }\n return box(\"meta\", undefined, [\n hdlr(false, \"mdta\", \"\"),\n fullBox(\"keys\", 0, 0, [\n u32(pairs.length)\n ], pairs.map((pair) => box(\"mdta\", [\n ...textEncoder.encode(pair.key)\n ]))),\n box(\"ilst\", undefined, pairs.map((pair, i) => {\n const boxName = String.fromCharCode(...u32(i + 1));\n return box(boxName, undefined, [pair.value]);\n }))\n ]);\n};\nvar dataStringBoxLong = (value) => {\n return box(\"data\", [\n u32(1),\n u32(0),\n ...textEncoder.encode(value)\n ]);\n};\nvar videoCodecToBoxName = (codec, fullCodecString) => {\n switch (codec) {\n case \"avc\":\n return fullCodecString.startsWith(\"avc3\") ? 
\"avc3\" : \"avc1\";\n case \"hevc\":\n return \"hvc1\";\n case \"vp8\":\n return \"vp08\";\n case \"vp9\":\n return \"vp09\";\n case \"av1\":\n return \"av01\";\n }\n};\nvar VIDEO_CODEC_TO_CONFIGURATION_BOX = {\n avc: avcC,\n hevc: hvcC,\n vp8: vpcC,\n vp9: vpcC,\n av1: av1C\n};\nvar audioCodecToBoxName = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return \"mp4a\";\n case \"mp3\":\n return \"mp4a\";\n case \"opus\":\n return \"Opus\";\n case \"vorbis\":\n return \"mp4a\";\n case \"flac\":\n return \"fLaC\";\n case \"ulaw\":\n return \"ulaw\";\n case \"alaw\":\n return \"alaw\";\n case \"pcm-u8\":\n return \"raw \";\n case \"pcm-s8\":\n return \"sowt\";\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s16\":\n return \"sowt\";\n case \"pcm-s16be\":\n return \"twos\";\n case \"pcm-s24\":\n return \"in24\";\n case \"pcm-s24be\":\n return \"in24\";\n case \"pcm-s32\":\n return \"in32\";\n case \"pcm-s32be\":\n return \"in32\";\n case \"pcm-f32\":\n return \"fl32\";\n case \"pcm-f32be\":\n return \"fl32\";\n case \"pcm-f64\":\n return \"fl64\";\n case \"pcm-f64be\":\n return \"fl64\";\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return \"ipcm\";\n case \"pcm-s16be\":\n return \"ipcm\";\n case \"pcm-s24\":\n return \"ipcm\";\n case \"pcm-s24be\":\n return \"ipcm\";\n case \"pcm-s32\":\n return \"ipcm\";\n case \"pcm-s32be\":\n return \"ipcm\";\n case \"pcm-f32\":\n return \"fpcm\";\n case \"pcm-f32be\":\n return \"fpcm\";\n case \"pcm-f64\":\n return \"fpcm\";\n case \"pcm-f64be\":\n return \"fpcm\";\n }\n }\n};\nvar audioCodecToConfigurationBox = (codec, isQuickTime) => {\n switch (codec) {\n case \"aac\":\n return esds;\n case \"mp3\":\n return esds;\n case \"opus\":\n return dOps;\n case \"vorbis\":\n return esds;\n case \"flac\":\n return dfLa;\n }\n if (isQuickTime) {\n switch (codec) {\n case \"pcm-s24\":\n return wave;\n case \"pcm-s24be\":\n return wave;\n case \"pcm-s32\":\n return wave;\n case \"pcm-s32be\":\n return 
wave;\n case \"pcm-f32\":\n return wave;\n case \"pcm-f32be\":\n return wave;\n case \"pcm-f64\":\n return wave;\n case \"pcm-f64be\":\n return wave;\n }\n } else {\n switch (codec) {\n case \"pcm-s16\":\n return pcmC;\n case \"pcm-s16be\":\n return pcmC;\n case \"pcm-s24\":\n return pcmC;\n case \"pcm-s24be\":\n return pcmC;\n case \"pcm-s32\":\n return pcmC;\n case \"pcm-s32be\":\n return pcmC;\n case \"pcm-f32\":\n return pcmC;\n case \"pcm-f32be\":\n return pcmC;\n case \"pcm-f64\":\n return pcmC;\n case \"pcm-f64be\":\n return pcmC;\n }\n }\n return null;\n};\nvar SUBTITLE_CODEC_TO_BOX_NAME = {\n webvtt: \"wvtt\"\n};\nvar SUBTITLE_CODEC_TO_CONFIGURATION_BOX = {\n webvtt: vttC\n};\nvar getLanguageCodeInt = (code) => {\n assert(code.length === 3);\n let language = 0;\n for (let i = 0;i < 3; i++) {\n language <<= 5;\n language += code.charCodeAt(i) - 96;\n }\n return language;\n};\n\n// ../../node_modules/mediabunny/dist/modules/src/writer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass Writer {\n constructor() {\n this.ensureMonotonicity = false;\n this.trackedWrites = null;\n this.trackedStart = -1;\n this.trackedEnd = -1;\n }\n start() {}\n maybeTrackWrites(data) {\n if (!this.trackedWrites) {\n return;\n }\n let pos = this.getPos();\n if (pos < this.trackedStart) {\n if (pos + data.byteLength <= this.trackedStart) {\n return;\n }\n data = data.subarray(this.trackedStart - pos);\n pos = 0;\n }\n const neededSize = pos + data.byteLength - this.trackedStart;\n let newLength = this.trackedWrites.byteLength;\n while (newLength < neededSize) {\n newLength *= 2;\n }\n if (newLength !== this.trackedWrites.byteLength) {\n const copy = new Uint8Array(newLength);\n copy.set(this.trackedWrites, 0);\n this.trackedWrites = copy;\n }\n this.trackedWrites.set(data, pos - this.trackedStart);\n this.trackedEnd = Math.max(this.trackedEnd, pos + data.byteLength);\n }\n startTrackingWrites() {\n this.trackedWrites = new Uint8Array(2 ** 10);\n this.trackedStart = this.getPos();\n this.trackedEnd = this.trackedStart;\n }\n stopTrackingWrites() {\n if (!this.trackedWrites) {\n throw new Error(\"Internal error: Can't get tracked writes since nothing was tracked.\");\n }\n const slice = this.trackedWrites.subarray(0, this.trackedEnd - this.trackedStart);\n const result = {\n data: slice,\n start: this.trackedStart,\n end: this.trackedEnd\n };\n this.trackedWrites = null;\n return result;\n }\n}\nvar ARRAY_BUFFER_INITIAL_SIZE = 2 ** 16;\nvar ARRAY_BUFFER_MAX_SIZE = 2 ** 32;\n\nclass BufferTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.maxPos = 0;\n this.target = target;\n this.supportsResize = \"resize\" in new ArrayBuffer(0);\n if (this.supportsResize) {\n try {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE, { maxByteLength: ARRAY_BUFFER_MAX_SIZE });\n } catch {\n this.buffer = new 
ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n this.supportsResize = false;\n }\n } else {\n this.buffer = new ArrayBuffer(ARRAY_BUFFER_INITIAL_SIZE);\n }\n this.bytes = new Uint8Array(this.buffer);\n }\n ensureSize(size) {\n let newLength = this.buffer.byteLength;\n while (newLength < size)\n newLength *= 2;\n if (newLength === this.buffer.byteLength)\n return;\n if (newLength > ARRAY_BUFFER_MAX_SIZE) {\n throw new Error(`ArrayBuffer exceeded maximum size of ${ARRAY_BUFFER_MAX_SIZE} bytes. Please consider using another` + ` target.`);\n }\n if (this.supportsResize) {\n this.buffer.resize(newLength);\n } else {\n const newBuffer = new ArrayBuffer(newLength);\n const newBytes = new Uint8Array(newBuffer);\n newBytes.set(this.bytes, 0);\n this.buffer = newBuffer;\n this.bytes = newBytes;\n }\n }\n write(data) {\n this.maybeTrackWrites(data);\n this.ensureSize(this.pos + data.byteLength);\n this.bytes.set(data, this.pos);\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.maxPos = Math.max(this.maxPos, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {}\n async finalize() {\n this.ensureSize(this.pos);\n this.target.buffer = this.buffer.slice(0, Math.max(this.maxPos, this.pos));\n }\n async close() {}\n getSlice(start, end) {\n return this.bytes.slice(start, end);\n }\n}\nvar DEFAULT_CHUNK_SIZE = 2 ** 24;\nvar MAX_CHUNKS_AT_ONCE = 2;\n\nclass StreamTargetWriter extends Writer {\n constructor(target) {\n super();\n this.pos = 0;\n this.sections = [];\n this.lastWriteEnd = 0;\n this.lastFlushEnd = 0;\n this.writer = null;\n this.chunks = [];\n this.target = target;\n this.chunked = target._options.chunked ?? false;\n this.chunkSize = target._options.chunkSize ?? 
DEFAULT_CHUNK_SIZE;\n }\n start() {\n this.writer = this.target._writable.getWriter();\n }\n write(data) {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n this.maybeTrackWrites(data);\n this.sections.push({\n data: data.slice(),\n start: this.pos\n });\n this.target.onwrite?.(this.pos, this.pos + data.byteLength);\n this.pos += data.byteLength;\n this.lastWriteEnd = Math.max(this.lastWriteEnd, this.pos);\n }\n seek(newPos) {\n this.pos = newPos;\n }\n getPos() {\n return this.pos;\n }\n async flush() {\n if (this.pos > this.lastWriteEnd) {\n const paddingBytesNeeded = this.pos - this.lastWriteEnd;\n this.pos = this.lastWriteEnd;\n this.write(new Uint8Array(paddingBytesNeeded));\n }\n assert(this.writer);\n if (this.sections.length === 0)\n return;\n const chunks = [];\n const sorted = [...this.sections].sort((a, b) => a.start - b.start);\n chunks.push({\n start: sorted[0].start,\n size: sorted[0].data.byteLength\n });\n for (let i = 1;i < sorted.length; i++) {\n const lastChunk = chunks[chunks.length - 1];\n const section = sorted[i];\n if (section.start <= lastChunk.start + lastChunk.size) {\n lastChunk.size = Math.max(lastChunk.size, section.start + section.data.byteLength - lastChunk.start);\n } else {\n chunks.push({\n start: section.start,\n size: section.data.byteLength\n });\n }\n }\n for (const chunk of chunks) {\n chunk.data = new Uint8Array(chunk.size);\n for (const section of this.sections) {\n if (chunk.start <= section.start && section.start < chunk.start + chunk.size) {\n chunk.data.set(section.data, section.start - chunk.start);\n }\n }\n if (this.writer.desiredSize !== null && this.writer.desiredSize <= 0) {\n await this.writer.ready;\n }\n if (this.chunked) {\n this.writeDataIntoChunks(chunk.data, chunk.start);\n this.tryToFlushChunks();\n } else {\n if (this.ensureMonotonicity && chunk.start !== 
this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data,\n position: chunk.start\n });\n this.lastFlushEnd = chunk.start + chunk.data.byteLength;\n }\n }\n this.sections.length = 0;\n }\n writeDataIntoChunks(data, position) {\n let chunkIndex = this.chunks.findIndex((x) => x.start <= position && position < x.start + this.chunkSize);\n if (chunkIndex === -1)\n chunkIndex = this.createChunk(position);\n const chunk = this.chunks[chunkIndex];\n const relativePosition = position - chunk.start;\n const toWrite = data.subarray(0, Math.min(this.chunkSize - relativePosition, data.byteLength));\n chunk.data.set(toWrite, relativePosition);\n const section = {\n start: relativePosition,\n end: relativePosition + toWrite.byteLength\n };\n this.insertSectionIntoChunk(chunk, section);\n if (chunk.written[0].start === 0 && chunk.written[0].end === this.chunkSize) {\n chunk.shouldFlush = true;\n }\n if (this.chunks.length > MAX_CHUNKS_AT_ONCE) {\n for (let i = 0;i < this.chunks.length - 1; i++) {\n this.chunks[i].shouldFlush = true;\n }\n this.tryToFlushChunks();\n }\n if (toWrite.byteLength < data.byteLength) {\n this.writeDataIntoChunks(data.subarray(toWrite.byteLength), position + toWrite.byteLength);\n }\n }\n insertSectionIntoChunk(chunk, section) {\n let low = 0;\n let high = chunk.written.length - 1;\n let index = -1;\n while (low <= high) {\n const mid = Math.floor(low + (high - low + 1) / 2);\n if (chunk.written[mid].start <= section.start) {\n low = mid + 1;\n index = mid;\n } else {\n high = mid - 1;\n }\n }\n chunk.written.splice(index + 1, 0, section);\n if (index === -1 || chunk.written[index].end < section.start)\n index++;\n while (index < chunk.written.length - 1 && chunk.written[index].end >= chunk.written[index + 1].start) {\n chunk.written[index].end = Math.max(chunk.written[index].end, chunk.written[index + 1].end);\n chunk.written.splice(index + 1, 1);\n }\n }\n 
createChunk(includesPosition) {\n const start = Math.floor(includesPosition / this.chunkSize) * this.chunkSize;\n const chunk = {\n start,\n data: new Uint8Array(this.chunkSize),\n written: [],\n shouldFlush: false\n };\n this.chunks.push(chunk);\n this.chunks.sort((a, b) => a.start - b.start);\n return this.chunks.indexOf(chunk);\n }\n tryToFlushChunks(force = false) {\n assert(this.writer);\n for (let i = 0;i < this.chunks.length; i++) {\n const chunk = this.chunks[i];\n if (!chunk.shouldFlush && !force)\n continue;\n for (const section of chunk.written) {\n const position = chunk.start + section.start;\n if (this.ensureMonotonicity && position !== this.lastFlushEnd) {\n throw new Error(\"Internal error: Monotonicity violation.\");\n }\n this.writer.write({\n type: \"write\",\n data: chunk.data.subarray(section.start, section.end),\n position\n });\n this.lastFlushEnd = chunk.start + section.end;\n }\n this.chunks.splice(i--, 1);\n }\n }\n finalize() {\n if (this.chunked) {\n this.tryToFlushChunks(true);\n }\n assert(this.writer);\n return this.writer.close();\n }\n async close() {\n return this.writer?.close();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/target.js\nvar nodeAlias = (() => ({}));\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nclass Target {\n constructor() {\n this._output = null;\n this.onwrite = null;\n }\n}\n\nclass BufferTarget extends Target {\n constructor() {\n super(...arguments);\n this.buffer = null;\n }\n _createWriter() {\n return new BufferTargetWriter(this);\n }\n}\n\nclass StreamTarget extends Target {\n constructor(writable, options = {}) {\n super();\n if (!(writable instanceof WritableStream)) {\n throw new TypeError(\"StreamTarget requires a WritableStream instance.\");\n }\n if (options != null && typeof options !== \"object\") {\n throw new TypeError(\"StreamTarget options, when provided, must be an object.\");\n }\n if (options.chunked !== undefined && typeof options.chunked !== \"boolean\") {\n throw new TypeError(\"options.chunked, when provided, must be a boolean.\");\n }\n if (options.chunkSize !== undefined && (!Number.isInteger(options.chunkSize) || options.chunkSize < 1024)) {\n throw new TypeError(\"options.chunkSize, when provided, must be an integer and not smaller than 1024.\");\n }\n this._writable = writable;\n this._options = options;\n }\n _createWriter() {\n return new StreamTargetWriter(this);\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/isobmff/isobmff-muxer.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. 
If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar GLOBAL_TIMESCALE = 1000;\nvar TIMESTAMP_OFFSET = 2082844800;\nvar getTrackMetadata = (trackData) => {\n const metadata = {};\n const track = trackData.track;\n if (track.metadata.name !== undefined) {\n metadata.name = track.metadata.name;\n }\n return metadata;\n};\nvar intoTimescale = (timeInSeconds, timescale, round = true) => {\n const value = timeInSeconds * timescale;\n return round ? Math.round(value) : value;\n};\n\nclass IsobmffMuxer extends Muxer {\n constructor(output, format) {\n super(output);\n this.auxTarget = new BufferTarget;\n this.auxWriter = this.auxTarget._createWriter();\n this.auxBoxWriter = new IsobmffBoxWriter(this.auxWriter);\n this.mdat = null;\n this.ftypSize = null;\n this.trackDatas = [];\n this.allTracksKnown = promiseWithResolvers();\n this.creationTime = Math.floor(Date.now() / 1000) + TIMESTAMP_OFFSET;\n this.finalizedChunks = [];\n this.nextFragmentNumber = 1;\n this.maxWrittenTimestamp = -Infinity;\n this.format = format;\n this.writer = output._writer;\n this.boxWriter = new IsobmffBoxWriter(this.writer);\n this.isQuickTime = format instanceof MovOutputFormat;\n const fastStartDefault = this.writer instanceof BufferTargetWriter ? \"in-memory\" : false;\n this.fastStart = format._options.fastStart ?? fastStartDefault;\n this.isFragmented = this.fastStart === \"fragmented\";\n if (this.fastStart === \"in-memory\" || this.isFragmented) {\n this.writer.ensureMonotonicity = true;\n }\n this.minimumFragmentDuration = format._options.minimumFragmentDuration ?? 
1;\n }\n async start() {\n const release = await this.mutex.acquire();\n const holdsAvc = this.output._tracks.some((x) => x.type === \"video\" && x.source._codec === \"avc\");\n {\n if (this.format._options.onFtyp) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(ftyp({\n isQuickTime: this.isQuickTime,\n holdsAvc,\n fragmented: this.isFragmented\n }));\n if (this.format._options.onFtyp) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onFtyp(data, start);\n }\n }\n this.ftypSize = this.writer.getPos();\n if (this.fastStart === \"in-memory\") {} else if (this.fastStart === \"reserve\") {\n for (const track of this.output._tracks) {\n if (track.metadata.maximumPacketCount === undefined) {\n throw new Error(\"All tracks must specify maximumPacketCount in their metadata when using\" + \" fastStart: 'reserve'.\");\n }\n }\n } else if (this.isFragmented) {} else {\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n }\n await this.writer.flush();\n release();\n }\n allTracksAreKnown() {\n for (const track of this.output._tracks) {\n if (!track.source._closed && !this.trackDatas.some((x) => x.track === track)) {\n return false;\n }\n }\n return true;\n }\n async getMimeType() {\n await this.allTracksKnown.promise;\n const codecStrings = this.trackDatas.map((trackData) => {\n if (trackData.type === \"video\") {\n return trackData.info.decoderConfig.codec;\n } else if (trackData.type === \"audio\") {\n return trackData.info.decoderConfig.codec;\n } else {\n const map = {\n webvtt: \"wvtt\"\n };\n return map[trackData.track.source._codec];\n }\n });\n return buildIsobmffMimeType({\n isQuickTime: this.isQuickTime,\n hasVideo: this.trackDatas.some((x) => x.type === \"video\"),\n hasAudio: this.trackDatas.some((x) => x.type === \"audio\"),\n codecStrings\n });\n }\n getVideoTrackData(track, packet, meta) {\n const existingTrackData = 
this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateVideoChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const decoderConfig = { ...meta.decoderConfig };\n assert(decoderConfig.codedWidth !== undefined);\n assert(decoderConfig.codedHeight !== undefined);\n let requiresAnnexBTransformation = false;\n if (track.source._codec === \"avc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractAvcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an AVCDecoderConfigurationRecord from the AVC packet. Make sure the packets are\" + \" in Annex B format (as specified in ITU-T-REC-H.264) when not providing a description, or\" + \" provide a description (must be an AVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in AVCC format.\");\n }\n decoderConfig.description = serializeAvcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n } else if (track.source._codec === \"hevc\" && !decoderConfig.description) {\n const decoderConfigurationRecord = extractHevcDecoderConfigurationRecord(packet.data);\n if (!decoderConfigurationRecord) {\n throw new Error(\"Couldn't extract an HEVCDecoderConfigurationRecord from the HEVC packet. Make sure the packets\" + \" are in Annex B format (as specified in ITU-T-REC-H.265) when not providing a description, or\" + \" provide a description (must be an HEVCDecoderConfigurationRecord as specified in ISO 14496-15)\" + \" and ensure the packets are in HEVC format.\");\n }\n decoderConfig.description = serializeHevcDecoderConfigurationRecord(decoderConfigurationRecord);\n requiresAnnexBTransformation = true;\n }\n const timescale = computeRationalApproximation(1 / (track.metadata.frameRate ?? 
57600), 1e6).denominator;\n const newTrackData = {\n muxer: this,\n track,\n type: \"video\",\n info: {\n width: decoderConfig.codedWidth,\n height: decoderConfig.codedHeight,\n decoderConfig,\n requiresAnnexBTransformation\n },\n timescale,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getAudioTrackData(track, packet, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateAudioChunkMetadata(meta);\n assert(meta);\n assert(meta.decoderConfig);\n const decoderConfig = { ...meta.decoderConfig };\n let requiresAdtsStripping = false;\n if (track.source._codec === \"aac\" && !decoderConfig.description) {\n const adtsFrame = readAdtsFrameHeader(FileSlice.tempFromBytes(packet.data));\n if (!adtsFrame) {\n throw new Error(\"Couldn't parse ADTS header from the AAC packet. 
Make sure the packets are in ADTS format\" + \" (as specified in ISO 13818-7) when not providing a description, or provide a description\" + \" (must be an AudioSpecificConfig as specified in ISO 14496-3) and ensure the packets\" + \" are raw AAC data.\");\n }\n const sampleRate = aacFrequencyTable[adtsFrame.samplingFrequencyIndex];\n const numberOfChannels = aacChannelMap[adtsFrame.channelConfiguration];\n if (sampleRate === undefined || numberOfChannels === undefined) {\n throw new Error(\"Invalid ADTS frame header.\");\n }\n decoderConfig.description = buildAacAudioSpecificConfig({\n objectType: adtsFrame.objectType,\n sampleRate,\n numberOfChannels\n });\n requiresAdtsStripping = true;\n }\n const newTrackData = {\n muxer: this,\n track,\n type: \"audio\",\n info: {\n numberOfChannels: meta.decoderConfig.numberOfChannels,\n sampleRate: meta.decoderConfig.sampleRate,\n decoderConfig,\n requiresPcmTransformation: !this.isFragmented && PCM_AUDIO_CODECS.includes(track.source._codec),\n requiresAdtsStripping\n },\n timescale: meta.decoderConfig.sampleRate,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: []\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n getSubtitleTrackData(track, meta) {\n const existingTrackData = this.trackDatas.find((x) => x.track === track);\n if (existingTrackData) {\n return existingTrackData;\n }\n validateSubtitleMetadata(meta);\n assert(meta);\n assert(meta.config);\n const newTrackData = {\n muxer: this,\n track,\n type: \"subtitle\",\n info: {\n config: meta.config\n },\n timescale: 1000,\n samples: [],\n sampleQueue: [],\n timestampProcessingQueue: [],\n timeToSampleTable: [],\n 
compositionTimeOffsetTable: [],\n lastTimescaleUnits: null,\n lastSample: null,\n finalizedChunks: [],\n currentChunk: null,\n compactlyCodedChunkTable: [],\n lastCueEndTimestamp: 0,\n cueQueue: [],\n nextSourceId: 0,\n cueToSourceId: new WeakMap\n };\n this.trackDatas.push(newTrackData);\n this.trackDatas.sort((a, b) => a.track.id - b.track.id);\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n return newTrackData;\n }\n async addEncodedVideoPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getVideoTrackData(track, packet, meta);\n let packetData = packet.data;\n if (trackData.info.requiresAnnexBTransformation) {\n const nalUnits = [...iterateNalUnitsInAnnexB(packetData)].map((loc) => packetData.subarray(loc.offset, loc.offset + loc.length));\n if (nalUnits.length === 0) {\n throw new Error(\"Failed to transform packet data. Make sure all packets are provided in Annex B format, as\" + \" specified in ITU-T-REC-H.264 and ITU-T-REC-H.265.\");\n }\n packetData = concatNalUnitsInLengthPrefixed(nalUnits, 4);\n }\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async addEncodedAudioPacket(track, packet, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getAudioTrackData(track, packet, meta);\n let packetData = packet.data;\n if (trackData.info.requiresAdtsStripping) {\n const adtsFrame = readAdtsFrameHeader(FileSlice.tempFromBytes(packetData));\n if (!adtsFrame) {\n throw new Error(\"Expected ADTS frame, didn't get one.\");\n }\n const headerLength = adtsFrame.crcCheck === null ? 
MIN_ADTS_FRAME_HEADER_SIZE : MAX_ADTS_FRAME_HEADER_SIZE;\n packetData = packetData.subarray(headerLength);\n }\n const timestamp = this.validateAndNormalizeTimestamp(trackData.track, packet.timestamp, packet.type === \"key\");\n const internalSample = this.createSampleForTrack(trackData, packetData, timestamp, packet.duration, packet.type);\n if (trackData.info.requiresPcmTransformation) {\n await this.maybePadWithSilence(trackData, timestamp);\n }\n await this.registerSample(trackData, internalSample);\n } finally {\n release();\n }\n }\n async maybePadWithSilence(trackData, untilTimestamp) {\n const lastSample = last(trackData.samples);\n const lastEndTimestamp = lastSample ? lastSample.timestamp + lastSample.duration : 0;\n const delta = untilTimestamp - lastEndTimestamp;\n const deltaInTimescale = intoTimescale(delta, trackData.timescale);\n if (deltaInTimescale > 0) {\n const { sampleSize, silentValue } = parsePcmCodec(trackData.info.decoderConfig.codec);\n const samplesNeeded = deltaInTimescale * trackData.info.numberOfChannels;\n const data = new Uint8Array(sampleSize * samplesNeeded).fill(silentValue);\n const paddingSample = this.createSampleForTrack(trackData, new Uint8Array(data.buffer), lastEndTimestamp, delta, \"key\");\n await this.registerSample(trackData, paddingSample);\n }\n }\n async addSubtitleCue(track, cue, meta) {\n const release = await this.mutex.acquire();\n try {\n const trackData = this.getSubtitleTrackData(track, meta);\n this.validateAndNormalizeTimestamp(trackData.track, cue.timestamp, true);\n if (track.source._codec === \"webvtt\") {\n trackData.cueQueue.push(cue);\n await this.processWebVTTCues(trackData, cue.timestamp);\n } else {}\n } finally {\n release();\n }\n }\n async processWebVTTCues(trackData, until) {\n while (trackData.cueQueue.length > 0) {\n const timestamps = new Set([]);\n for (const cue of trackData.cueQueue) {\n assert(cue.timestamp <= until);\n assert(trackData.lastCueEndTimestamp <= cue.timestamp + 
cue.duration);\n timestamps.add(Math.max(cue.timestamp, trackData.lastCueEndTimestamp));\n timestamps.add(cue.timestamp + cue.duration);\n }\n const sortedTimestamps = [...timestamps].sort((a, b) => a - b);\n const sampleStart = sortedTimestamps[0];\n const sampleEnd = sortedTimestamps[1] ?? sampleStart;\n if (until < sampleEnd) {\n break;\n }\n if (trackData.lastCueEndTimestamp < sampleStart) {\n this.auxWriter.seek(0);\n const box2 = vtte();\n this.auxBoxWriter.writeBox(box2);\n const body2 = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample2 = this.createSampleForTrack(trackData, body2, trackData.lastCueEndTimestamp, sampleStart - trackData.lastCueEndTimestamp, \"key\");\n await this.registerSample(trackData, sample2);\n trackData.lastCueEndTimestamp = sampleStart;\n }\n this.auxWriter.seek(0);\n for (let i = 0;i < trackData.cueQueue.length; i++) {\n const cue = trackData.cueQueue[i];\n if (cue.timestamp >= sampleEnd) {\n break;\n }\n inlineTimestampRegex.lastIndex = 0;\n const containsTimestamp = inlineTimestampRegex.test(cue.text);\n const endTimestamp = cue.timestamp + cue.duration;\n let sourceId = trackData.cueToSourceId.get(cue);\n if (sourceId === undefined && sampleEnd < endTimestamp) {\n sourceId = trackData.nextSourceId++;\n trackData.cueToSourceId.set(cue, sourceId);\n }\n if (cue.notes) {\n const box3 = vtta(cue.notes);\n this.auxBoxWriter.writeBox(box3);\n }\n const box2 = vttc(cue.text, containsTimestamp ? sampleStart : null, cue.identifier ?? null, cue.settings ?? null, sourceId ?? 
null);\n this.auxBoxWriter.writeBox(box2);\n if (endTimestamp === sampleEnd) {\n trackData.cueQueue.splice(i--, 1);\n }\n }\n const body = this.auxWriter.getSlice(0, this.auxWriter.getPos());\n const sample = this.createSampleForTrack(trackData, body, sampleStart, sampleEnd - sampleStart, \"key\");\n await this.registerSample(trackData, sample);\n trackData.lastCueEndTimestamp = sampleEnd;\n }\n }\n createSampleForTrack(trackData, data, timestamp, duration, type) {\n const sample = {\n timestamp,\n decodeTimestamp: timestamp,\n duration,\n data,\n size: data.byteLength,\n type,\n timescaleUnitsToNextSample: intoTimescale(duration, trackData.timescale)\n };\n return sample;\n }\n processTimestamps(trackData, nextSample) {\n if (trackData.timestampProcessingQueue.length === 0) {\n return;\n }\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n let totalDuration = 0;\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n const duration = intoTimescale(sample.duration, trackData.timescale);\n totalDuration += duration;\n }\n if (trackData.timeToSampleTable.length === 0) {\n trackData.timeToSampleTable.push({\n sampleCount: totalDuration,\n sampleDelta: 1\n });\n } else {\n const lastEntry = last(trackData.timeToSampleTable);\n lastEntry.sampleCount += totalDuration;\n }\n trackData.timestampProcessingQueue.length = 0;\n return;\n }\n const sortedTimestamps = trackData.timestampProcessingQueue.map((x) => x.timestamp).sort((a, b) => a - b);\n for (let i = 0;i < trackData.timestampProcessingQueue.length; i++) {\n const sample = trackData.timestampProcessingQueue[i];\n sample.decodeTimestamp = sortedTimestamps[i];\n if (!this.isFragmented && trackData.lastTimescaleUnits === null) {\n sample.decodeTimestamp = 0;\n }\n const sampleCompositionTimeOffset = intoTimescale(sample.timestamp - sample.decodeTimestamp, trackData.timescale);\n const durationInTimescale = 
intoTimescale(sample.duration, trackData.timescale);\n if (trackData.lastTimescaleUnits !== null) {\n assert(trackData.lastSample);\n const timescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n assert(delta >= 0);\n trackData.lastTimescaleUnits += delta;\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n if (!this.isFragmented) {\n let lastTableEntry = last(trackData.timeToSampleTable);\n assert(lastTableEntry);\n if (lastTableEntry.sampleCount === 1) {\n lastTableEntry.sampleDelta = delta;\n const entryBefore = trackData.timeToSampleTable[trackData.timeToSampleTable.length - 2];\n if (entryBefore && entryBefore.sampleDelta === delta) {\n entryBefore.sampleCount++;\n trackData.timeToSampleTable.pop();\n lastTableEntry = entryBefore;\n }\n } else if (lastTableEntry.sampleDelta !== delta) {\n lastTableEntry.sampleCount--;\n trackData.timeToSampleTable.push(lastTableEntry = {\n sampleCount: 1,\n sampleDelta: delta\n });\n }\n if (lastTableEntry.sampleDelta === durationInTimescale) {\n lastTableEntry.sampleCount++;\n } else {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n }\n const lastCompositionTimeOffsetTableEntry = last(trackData.compositionTimeOffsetTable);\n assert(lastCompositionTimeOffsetTableEntry);\n if (lastCompositionTimeOffsetTableEntry.sampleCompositionTimeOffset === sampleCompositionTimeOffset) {\n lastCompositionTimeOffsetTableEntry.sampleCount++;\n } else {\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n });\n }\n }\n } else {\n trackData.lastTimescaleUnits = intoTimescale(sample.decodeTimestamp, trackData.timescale, false);\n if (!this.isFragmented) {\n trackData.timeToSampleTable.push({\n sampleCount: 1,\n sampleDelta: durationInTimescale\n });\n trackData.compositionTimeOffsetTable.push({\n sampleCount: 1,\n sampleCompositionTimeOffset\n 
});\n }\n }\n trackData.lastSample = sample;\n }\n trackData.timestampProcessingQueue.length = 0;\n assert(trackData.lastSample);\n assert(trackData.lastTimescaleUnits !== null);\n if (nextSample !== undefined && trackData.lastSample.timescaleUnitsToNextSample === 0) {\n assert(nextSample.type === \"key\");\n const timescaleUnits = intoTimescale(nextSample.timestamp, trackData.timescale, false);\n const delta = Math.round(timescaleUnits - trackData.lastTimescaleUnits);\n trackData.lastSample.timescaleUnitsToNextSample = delta;\n }\n }\n async registerSample(trackData, sample) {\n if (sample.type === \"key\") {\n this.processTimestamps(trackData, sample);\n }\n trackData.timestampProcessingQueue.push(sample);\n if (this.isFragmented) {\n trackData.sampleQueue.push(sample);\n await this.interleaveSamples();\n } else if (this.fastStart === \"reserve\") {\n await this.registerSampleFastStartReserve(trackData, sample);\n } else {\n await this.addSampleToTrack(trackData, sample);\n }\n }\n async addSampleToTrack(trackData, sample) {\n if (!this.isFragmented) {\n trackData.samples.push(sample);\n if (this.fastStart === \"reserve\") {\n const maximumPacketCount = trackData.track.metadata.maximumPacketCount;\n assert(maximumPacketCount !== undefined);\n if (trackData.samples.length > maximumPacketCount) {\n throw new Error(`Track #${trackData.track.id} has already reached the maximum packet count` + ` (${maximumPacketCount}). 
Either add less packets or increase the maximum packet count.`);\n }\n }\n }\n let beginNewChunk = false;\n if (!trackData.currentChunk) {\n beginNewChunk = true;\n } else {\n trackData.currentChunk.startTimestamp = Math.min(trackData.currentChunk.startTimestamp, sample.timestamp);\n const currentChunkDuration = sample.timestamp - trackData.currentChunk.startTimestamp;\n if (this.isFragmented) {\n const keyFrameQueuedEverywhere = this.trackDatas.every((otherTrackData) => {\n if (trackData === otherTrackData) {\n return sample.type === \"key\";\n }\n const firstQueuedSample = otherTrackData.sampleQueue[0];\n if (firstQueuedSample) {\n return firstQueuedSample.type === \"key\";\n }\n return otherTrackData.track.source._closed;\n });\n if (currentChunkDuration >= this.minimumFragmentDuration && keyFrameQueuedEverywhere && sample.timestamp > this.maxWrittenTimestamp) {\n beginNewChunk = true;\n await this.finalizeFragment();\n }\n } else {\n beginNewChunk = currentChunkDuration >= 0.5;\n }\n }\n if (beginNewChunk) {\n if (trackData.currentChunk) {\n await this.finalizeCurrentChunk(trackData);\n }\n trackData.currentChunk = {\n startTimestamp: sample.timestamp,\n samples: [],\n offset: null,\n moofOffset: null\n };\n }\n assert(trackData.currentChunk);\n trackData.currentChunk.samples.push(sample);\n if (this.isFragmented) {\n this.maxWrittenTimestamp = Math.max(this.maxWrittenTimestamp, sample.timestamp);\n }\n }\n async finalizeCurrentChunk(trackData) {\n assert(!this.isFragmented);\n if (!trackData.currentChunk)\n return;\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n let sampleCount = trackData.currentChunk.samples.length;\n if (trackData.type === \"audio\" && trackData.info.requiresPcmTransformation) {\n sampleCount = trackData.currentChunk.samples.reduce((acc, sample) => acc + intoTimescale(sample.duration, trackData.timescale), 0);\n }\n if (trackData.compactlyCodedChunkTable.length === 0 || 
last(trackData.compactlyCodedChunkTable).samplesPerChunk !== sampleCount) {\n trackData.compactlyCodedChunkTable.push({\n firstChunk: trackData.finalizedChunks.length,\n samplesPerChunk: sampleCount\n });\n }\n if (this.fastStart === \"in-memory\") {\n trackData.currentChunk.offset = 0;\n return;\n }\n trackData.currentChunk.offset = this.writer.getPos();\n for (const sample of trackData.currentChunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n await this.writer.flush();\n }\n async interleaveSamples(isFinalCall = false) {\n assert(this.isFragmented);\n if (!isFinalCall && !this.allTracksAreKnown()) {\n return;\n }\n outer:\n while (true) {\n let trackWithMinTimestamp = null;\n let minTimestamp = Infinity;\n for (const trackData of this.trackDatas) {\n if (!isFinalCall && trackData.sampleQueue.length === 0 && !trackData.track.source._closed) {\n break outer;\n }\n if (trackData.sampleQueue.length > 0 && trackData.sampleQueue[0].timestamp < minTimestamp) {\n trackWithMinTimestamp = trackData;\n minTimestamp = trackData.sampleQueue[0].timestamp;\n }\n }\n if (!trackWithMinTimestamp) {\n break;\n }\n const sample = trackWithMinTimestamp.sampleQueue.shift();\n await this.addSampleToTrack(trackWithMinTimestamp, sample);\n }\n }\n async finalizeFragment(flushWriter = true) {\n assert(this.isFragmented);\n const fragmentNumber = this.nextFragmentNumber++;\n if (fragmentNumber === 1) {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n const tracksInFragment = this.trackDatas.filter((x) => x.currentChunk);\n const moofBox = moof(fragmentNumber, tracksInFragment);\n const moofOffset = this.writer.getPos();\n const mdatStartPos = moofOffset + this.boxWriter.measureBox(moofBox);\n let 
currentPos = mdatStartPos + MIN_BOX_HEADER_SIZE;\n let fragmentStartTimestamp = Infinity;\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset = currentPos;\n trackData.currentChunk.moofOffset = moofOffset;\n for (const sample of trackData.currentChunk.samples) {\n currentPos += sample.size;\n }\n fragmentStartTimestamp = Math.min(fragmentStartTimestamp, trackData.currentChunk.startTimestamp);\n }\n const mdatSize = currentPos - mdatStartPos;\n const needsLargeMdatSize = mdatSize >= 2 ** 32;\n if (needsLargeMdatSize) {\n for (const trackData of tracksInFragment) {\n trackData.currentChunk.offset += MAX_BOX_HEADER_SIZE - MIN_BOX_HEADER_SIZE;\n }\n }\n if (this.format._options.onMoof) {\n this.writer.startTrackingWrites();\n }\n const newMoofBox = moof(fragmentNumber, tracksInFragment);\n this.boxWriter.writeBox(newMoofBox);\n if (this.format._options.onMoof) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoof(data, start, fragmentStartTimestamp);\n }\n assert(this.writer.getPos() === mdatStartPos);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n const mdatBox = mdat(needsLargeMdatSize);\n mdatBox.size = mdatSize;\n this.boxWriter.writeBox(mdatBox);\n this.writer.seek(mdatStartPos + (needsLargeMdatSize ? 
MAX_BOX_HEADER_SIZE : MIN_BOX_HEADER_SIZE));\n for (const trackData of tracksInFragment) {\n for (const sample of trackData.currentChunk.samples) {\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n for (const trackData of tracksInFragment) {\n trackData.finalizedChunks.push(trackData.currentChunk);\n this.finalizedChunks.push(trackData.currentChunk);\n trackData.currentChunk = null;\n }\n if (flushWriter) {\n await this.writer.flush();\n }\n }\n async registerSampleFastStartReserve(trackData, sample) {\n if (this.allTracksAreKnown()) {\n if (!this.mdat) {\n const moovBox = moov(this);\n const moovSize = this.boxWriter.measureBox(moovBox);\n const reservedSize = moovSize + this.computeSampleTableSizeUpperBound() + 4096;\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize + reservedSize);\n if (this.format._options.onMdat) {\n this.writer.startTrackingWrites();\n }\n this.mdat = mdat(true);\n this.boxWriter.writeBox(this.mdat);\n for (const trackData2 of this.trackDatas) {\n for (const sample2 of trackData2.sampleQueue) {\n await this.addSampleToTrack(trackData2, sample2);\n }\n trackData2.sampleQueue.length = 0;\n }\n }\n await this.addSampleToTrack(trackData, sample);\n } else {\n trackData.sampleQueue.push(sample);\n }\n }\n computeSampleTableSizeUpperBound() {\n assert(this.fastStart === \"reserve\");\n let upperBound = 0;\n for (const trackData of this.trackDatas) {\n const n = trackData.track.metadata.maximumPacketCount;\n assert(n !== undefined);\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += (4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += (4 + 4 + 4) * Math.ceil(2 / 3 * n);\n upperBound += 4 * n;\n upperBound += 8 * n;\n }\n return upperBound;\n }\n async onTrackClose(track) {\n const release = await this.mutex.acquire();\n if (track.type === 
\"subtitle\" && track.source._codec === \"webvtt\") {\n const trackData = this.trackDatas.find((x) => x.track === track);\n if (trackData) {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.allTracksAreKnown()) {\n this.allTracksKnown.resolve();\n }\n if (this.isFragmented) {\n await this.interleaveSamples();\n }\n release();\n }\n async finalize() {\n const release = await this.mutex.acquire();\n this.allTracksKnown.resolve();\n for (const trackData of this.trackDatas) {\n if (trackData.type === \"subtitle\" && trackData.track.source._codec === \"webvtt\") {\n await this.processWebVTTCues(trackData, Infinity);\n }\n }\n if (this.isFragmented) {\n await this.interleaveSamples(true);\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n }\n await this.finalizeFragment(false);\n } else {\n for (const trackData of this.trackDatas) {\n this.processTimestamps(trackData);\n await this.finalizeCurrentChunk(trackData);\n }\n }\n if (this.fastStart === \"in-memory\") {\n this.mdat = mdat(false);\n let mdatSize;\n for (let i = 0;i < 2; i++) {\n const movieBox2 = moov(this);\n const movieBoxSize = this.boxWriter.measureBox(movieBox2);\n mdatSize = this.boxWriter.measureBox(this.mdat);\n let currentChunkPos = this.writer.getPos() + movieBoxSize + mdatSize;\n for (const chunk of this.finalizedChunks) {\n chunk.offset = currentChunkPos;\n for (const { data } of chunk.samples) {\n assert(data);\n currentChunkPos += data.byteLength;\n mdatSize += data.byteLength;\n }\n }\n if (currentChunkPos < 2 ** 32)\n break;\n if (mdatSize >= 2 ** 32)\n this.mdat.largeSize = true;\n }\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n const movieBox = moov(this);\n this.boxWriter.writeBox(movieBox);\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n if (this.format._options.onMdat) {\n 
this.writer.startTrackingWrites();\n }\n this.mdat.size = mdatSize;\n this.boxWriter.writeBox(this.mdat);\n for (const chunk of this.finalizedChunks) {\n for (const sample of chunk.samples) {\n assert(sample.data);\n this.writer.write(sample.data);\n sample.data = null;\n }\n }\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n } else if (this.isFragmented) {\n const startPos = this.writer.getPos();\n const mfraBox = mfra(this.trackDatas);\n this.boxWriter.writeBox(mfraBox);\n const mfraBoxSize = this.writer.getPos() - startPos;\n this.writer.seek(this.writer.getPos() - 4);\n this.boxWriter.writeU32(mfraBoxSize);\n } else {\n assert(this.mdat);\n const mdatPos = this.boxWriter.offsets.get(this.mdat);\n assert(mdatPos !== undefined);\n const mdatSize = this.writer.getPos() - mdatPos;\n this.mdat.size = mdatSize;\n this.mdat.largeSize = mdatSize >= 2 ** 32;\n this.boxWriter.patchBox(this.mdat);\n if (this.format._options.onMdat) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMdat(data, start);\n }\n const movieBox = moov(this);\n if (this.fastStart === \"reserve\") {\n assert(this.ftypSize !== null);\n this.writer.seek(this.ftypSize);\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(movieBox);\n const remainingSpace = this.boxWriter.offsets.get(this.mdat) - this.writer.getPos();\n this.boxWriter.writeBox(free(remainingSpace));\n } else {\n if (this.format._options.onMoov) {\n this.writer.startTrackingWrites();\n }\n this.boxWriter.writeBox(movieBox);\n }\n if (this.format._options.onMoov) {\n const { data, start } = this.writer.stopTrackingWrites();\n this.format._options.onMoov(data, start);\n }\n }\n release();\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/output-format.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form 
is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass OutputFormat {\n getSupportedVideoCodecs() {\n return this.getSupportedCodecs().filter((codec) => VIDEO_CODECS.includes(codec));\n }\n getSupportedAudioCodecs() {\n return this.getSupportedCodecs().filter((codec) => AUDIO_CODECS.includes(codec));\n }\n getSupportedSubtitleCodecs() {\n return this.getSupportedCodecs().filter((codec) => SUBTITLE_CODECS.includes(codec));\n }\n _codecUnsupportedHint(codec) {\n return \"\";\n }\n}\n\nclass IsobmffOutputFormat extends OutputFormat {\n constructor(options = {}) {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"options must be an object.\");\n }\n if (options.fastStart !== undefined && ![false, \"in-memory\", \"reserve\", \"fragmented\"].includes(options.fastStart)) {\n throw new TypeError(\"options.fastStart, when provided, must be false, 'in-memory', 'reserve', or 'fragmented'.\");\n }\n if (options.minimumFragmentDuration !== undefined && (!Number.isFinite(options.minimumFragmentDuration) || options.minimumFragmentDuration < 0)) {\n throw new TypeError(\"options.minimumFragmentDuration, when provided, must be a non-negative number.\");\n }\n if (options.onFtyp !== undefined && typeof options.onFtyp !== \"function\") {\n throw new TypeError(\"options.onFtyp, when provided, must be a function.\");\n }\n if (options.onMoov !== undefined && typeof options.onMoov !== \"function\") {\n throw new TypeError(\"options.onMoov, when provided, must be a function.\");\n }\n if (options.onMdat !== undefined && typeof options.onMdat !== \"function\") {\n throw new TypeError(\"options.onMdat, when provided, must be a function.\");\n }\n if (options.onMoof !== undefined && typeof options.onMoof !== \"function\") {\n throw new TypeError(\"options.onMoof, when provided, must be a function.\");\n }\n if 
(options.metadataFormat !== undefined && ![\"mdir\", \"mdta\", \"udta\", \"auto\"].includes(options.metadataFormat)) {\n throw new TypeError(\"options.metadataFormat, when provided, must be either 'auto', 'mdir', 'mdta', or 'udta'.\");\n }\n super();\n this._options = options;\n }\n getSupportedTrackCounts() {\n const max = 2 ** 32 - 1;\n return {\n video: { min: 0, max },\n audio: { min: 0, max },\n subtitle: { min: 0, max },\n total: { min: 1, max }\n };\n }\n get supportsVideoRotationMetadata() {\n return true;\n }\n _createMuxer(output) {\n return new IsobmffMuxer(output, this);\n }\n}\n\nclass Mp4OutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MP4\";\n }\n get fileExtension() {\n return \".mp4\";\n }\n get mimeType() {\n return \"video/mp4\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...NON_PCM_AUDIO_CODECS,\n \"pcm-s16\",\n \"pcm-s16be\",\n \"pcm-s24\",\n \"pcm-s24be\",\n \"pcm-s32\",\n \"pcm-s32be\",\n \"pcm-f32\",\n \"pcm-f32be\",\n \"pcm-f64\",\n \"pcm-f64be\",\n ...SUBTITLE_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new MovOutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MOV will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\nclass MovOutputFormat extends IsobmffOutputFormat {\n constructor(options) {\n super(options);\n }\n get _name() {\n return \"MOV\";\n }\n get fileExtension() {\n return \".mov\";\n }\n get mimeType() {\n return \"video/quicktime\";\n }\n getSupportedCodecs() {\n return [\n ...VIDEO_CODECS,\n ...AUDIO_CODECS\n ];\n }\n _codecUnsupportedHint(codec) {\n if (new Mp4OutputFormat().getSupportedCodecs().includes(codec)) {\n return \" Switching to MP4 will grant support for this codec.\";\n }\n return \"\";\n }\n}\n\n// ../../node_modules/mediabunny/dist/modules/src/encode.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the 
Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar validateVideoEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!VIDEO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid video codec '${config.codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n if (!(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate must be a positive integer or a quality.\");\n }\n if (config.keyFrameInterval !== undefined && (!Number.isFinite(config.keyFrameInterval) || config.keyFrameInterval < 0)) {\n throw new TypeError(\"config.keyFrameInterval, when provided, must be a non-negative number.\");\n }\n if (config.sizeChangeBehavior !== undefined && ![\"deny\", \"passThrough\", \"fill\", \"contain\", \"cover\"].includes(config.sizeChangeBehavior)) {\n throw new TypeError(\"config.sizeChangeBehavior, when provided, must be 'deny', 'passThrough', 'fill', 'contain'\" + \" or 'cover'.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedChunk, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateVideoEncodingAdditionalOptions(config.codec, config);\n};\nvar validateVideoEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.alpha !== undefined && ![\"discard\", \"keep\"].includes(options.alpha)) {\n throw new TypeError(\"options.alpha, when provided, must be 'discard' or 
'keep'.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.latencyMode !== undefined && ![\"quality\", \"realtime\"].includes(options.latencyMode)) {\n throw new TypeError(\"latencyMode, when provided, must be 'quality' or 'realtime'.\");\n }\n if (options.fullCodecString !== undefined && typeof options.fullCodecString !== \"string\") {\n throw new TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n if (options.hardwareAcceleration !== undefined && ![\"no-preference\", \"prefer-hardware\", \"prefer-software\"].includes(options.hardwareAcceleration)) {\n throw new TypeError(\"hardwareAcceleration, when provided, must be 'no-preference', 'prefer-hardware' or\" + \" 'prefer-software'.\");\n }\n if (options.scalabilityMode !== undefined && typeof options.scalabilityMode !== \"string\") {\n throw new TypeError(\"scalabilityMode, when provided, must be a string.\");\n }\n if (options.contentHint !== undefined && typeof options.contentHint !== \"string\") {\n throw new TypeError(\"contentHint, when provided, must be a string.\");\n }\n};\nvar buildVideoEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toVideoBitrate(options.codec, options.width, options.height) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildVideoCodecString(options.codec, options.width, options.height, resolvedBitrate),\n width: options.width,\n height: options.height,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n alpha: options.alpha ?? 
\"discard\",\n framerate: options.framerate,\n latencyMode: options.latencyMode,\n hardwareAcceleration: options.hardwareAcceleration,\n scalabilityMode: options.scalabilityMode,\n contentHint: options.contentHint,\n ...getVideoEncoderConfigExtension(options.codec)\n };\n};\nvar validateAudioEncodingConfig = (config) => {\n if (!config || typeof config !== \"object\") {\n throw new TypeError(\"Encoding config must be an object.\");\n }\n if (!AUDIO_CODECS.includes(config.codec)) {\n throw new TypeError(`Invalid audio codec '${config.codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n if (config.bitrate === undefined && (!PCM_AUDIO_CODECS.includes(config.codec) || config.codec === \"flac\")) {\n throw new TypeError(\"config.bitrate must be provided for compressed audio codecs.\");\n }\n if (config.bitrate !== undefined && !(config.bitrate instanceof Quality) && (!Number.isInteger(config.bitrate) || config.bitrate <= 0)) {\n throw new TypeError(\"config.bitrate, when provided, must be a positive integer or a quality.\");\n }\n if (config.onEncodedPacket !== undefined && typeof config.onEncodedPacket !== \"function\") {\n throw new TypeError(\"config.onEncodedChunk, when provided, must be a function.\");\n }\n if (config.onEncoderConfig !== undefined && typeof config.onEncoderConfig !== \"function\") {\n throw new TypeError(\"config.onEncoderConfig, when provided, must be a function.\");\n }\n validateAudioEncodingAdditionalOptions(config.codec, config);\n};\nvar validateAudioEncodingAdditionalOptions = (codec, options) => {\n if (!options || typeof options !== \"object\") {\n throw new TypeError(\"Encoding options must be an object.\");\n }\n if (options.bitrateMode !== undefined && ![\"constant\", \"variable\"].includes(options.bitrateMode)) {\n throw new TypeError(\"bitrateMode, when provided, must be 'constant' or 'variable'.\");\n }\n if (options.fullCodecString !== undefined && typeof options.fullCodecString !== \"string\") {\n throw new 
TypeError(\"fullCodecString, when provided, must be a string.\");\n }\n if (options.fullCodecString !== undefined && inferCodecFromCodecString(options.fullCodecString) !== codec) {\n throw new TypeError(`fullCodecString, when provided, must be a string that matches the specified codec (${codec}).`);\n }\n};\nvar buildAudioEncoderConfig = (options) => {\n const resolvedBitrate = options.bitrate instanceof Quality ? options.bitrate._toAudioBitrate(options.codec) : options.bitrate;\n return {\n codec: options.fullCodecString ?? buildAudioCodecString(options.codec, options.numberOfChannels, options.sampleRate),\n numberOfChannels: options.numberOfChannels,\n sampleRate: options.sampleRate,\n bitrate: resolvedBitrate,\n bitrateMode: options.bitrateMode,\n ...getAudioEncoderConfigExtension(options.codec)\n };\n};\n\nclass Quality {\n constructor(factor) {\n this._factor = factor;\n }\n _toVideoBitrate(codec, width, height) {\n const pixels = width * height;\n const codecEfficiencyFactors = {\n avc: 1,\n hevc: 0.6,\n vp9: 0.6,\n av1: 0.4,\n vp8: 1.2\n };\n const referencePixels = 1920 * 1080;\n const referenceBitrate = 3000000;\n const scaleFactor = Math.pow(pixels / referencePixels, 0.95);\n const baseBitrate = referenceBitrate * scaleFactor;\n const codecAdjustedBitrate = baseBitrate * codecEfficiencyFactors[codec];\n const finalBitrate = codecAdjustedBitrate * this._factor;\n return Math.ceil(finalBitrate / 1000) * 1000;\n }\n _toAudioBitrate(codec) {\n if (PCM_AUDIO_CODECS.includes(codec) || codec === \"flac\") {\n return;\n }\n const baseRates = {\n aac: 128000,\n opus: 64000,\n mp3: 160000,\n vorbis: 64000\n };\n const baseBitrate = baseRates[codec];\n if (!baseBitrate) {\n throw new Error(`Unhandled codec: ${codec}`);\n }\n let finalBitrate = baseBitrate * this._factor;\n if (codec === \"aac\") {\n const validRates = [96000, 128000, 160000, 192000];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? 
curr : prev);\n } else if (codec === \"opus\" || codec === \"vorbis\") {\n finalBitrate = Math.max(6000, finalBitrate);\n } else if (codec === \"mp3\") {\n const validRates = [\n 8000,\n 16000,\n 24000,\n 32000,\n 40000,\n 48000,\n 64000,\n 80000,\n 96000,\n 112000,\n 128000,\n 160000,\n 192000,\n 224000,\n 256000,\n 320000\n ];\n finalBitrate = validRates.reduce((prev, curr) => Math.abs(curr - finalBitrate) < Math.abs(prev - finalBitrate) ? curr : prev);\n }\n return Math.round(finalBitrate / 1000) * 1000;\n }\n}\nvar QUALITY_LOW = /* @__PURE__ */ new Quality(0.6);\nvar QUALITY_MEDIUM = /* @__PURE__ */ new Quality(1);\nvar QUALITY_HIGH = /* @__PURE__ */ new Quality(2);\nvar QUALITY_VERY_HIGH = /* @__PURE__ */ new Quality(4);\n\n// ../../node_modules/mediabunny/dist/modules/src/media-source.js\n/*!\n * Copyright (c) 2026-present, Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\n\nclass MediaSource {\n constructor() {\n this._connectedTrack = null;\n this._closingPromise = null;\n this._closed = false;\n this._timestampOffset = 0;\n }\n _ensureValidAdd() {\n if (!this._connectedTrack) {\n throw new Error(\"Source is not connected to an output track.\");\n }\n if (this._connectedTrack.output.state === \"canceled\") {\n throw new Error(\"Output has been canceled.\");\n }\n if (this._connectedTrack.output.state === \"finalizing\" || this._connectedTrack.output.state === \"finalized\") {\n throw new Error(\"Output has been finalized.\");\n }\n if (this._connectedTrack.output.state === \"pending\") {\n throw new Error(\"Output has not started.\");\n }\n if (this._closed) {\n throw new Error(\"Source is closed.\");\n }\n }\n async _start() {}\n async _flushAndClose(forceClose) {}\n close() {\n if (this._closingPromise) {\n return;\n }\n const connectedTrack = this._connectedTrack;\n 
if (!connectedTrack) {\n throw new Error(\"Cannot call close without connecting the source to an output track.\");\n }\n if (connectedTrack.output.state === \"pending\") {\n throw new Error(\"Cannot call close before output has been started.\");\n }\n this._closingPromise = (async () => {\n await this._flushAndClose(false);\n this._closed = true;\n if (connectedTrack.output.state === \"finalizing\" || connectedTrack.output.state === \"finalized\") {\n return;\n }\n connectedTrack.output._muxer.onTrackClose(connectedTrack);\n })();\n }\n async _flushOrWaitForOngoingClose(forceClose) {\n return this._closingPromise ??= (async () => {\n await this._flushAndClose(forceClose);\n this._closed = true;\n })();\n }\n}\n\nclass VideoSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!VIDEO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid video codec '${codec}'. Must be one of: ${VIDEO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass VideoEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastMultipleOfKeyFrameInterval = -1;\n this.codedWidth = null;\n this.codedHeight = null;\n this.resizeCanvas = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.alphaEncoder = null;\n this.splitter = null;\n this.splitterCreationFailed = false;\n this.alphaFrameQueue = [];\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(videoSample, shouldClose, encodeOptions) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.codedWidth !== null && this.codedHeight !== null) {\n if (videoSample.codedWidth !== this.codedWidth || videoSample.codedHeight !== this.codedHeight) {\n const sizeChangeBehavior = 
this.encodingConfig.sizeChangeBehavior ?? \"deny\";\n if (sizeChangeBehavior === \"passThrough\") {} else if (sizeChangeBehavior === \"deny\") {\n throw new Error(`Video sample size must remain constant. Expected ${this.codedWidth}x${this.codedHeight},` + ` got ${videoSample.codedWidth}x${videoSample.codedHeight}. To allow the sample size to` + ` change over time, set \\`sizeChangeBehavior\\` to a value other than 'strict' in the` + ` encoding options.`);\n } else {\n let canvasIsNew = false;\n if (!this.resizeCanvas) {\n if (typeof document !== \"undefined\") {\n this.resizeCanvas = document.createElement(\"canvas\");\n this.resizeCanvas.width = this.codedWidth;\n this.resizeCanvas.height = this.codedHeight;\n } else {\n this.resizeCanvas = new OffscreenCanvas(this.codedWidth, this.codedHeight);\n }\n canvasIsNew = true;\n }\n const context = this.resizeCanvas.getContext(\"2d\", {\n alpha: isFirefox()\n });\n assert(context);\n if (!canvasIsNew) {\n if (isFirefox()) {\n context.fillStyle = \"black\";\n context.fillRect(0, 0, this.codedWidth, this.codedHeight);\n } else {\n context.clearRect(0, 0, this.codedWidth, this.codedHeight);\n }\n }\n videoSample.drawWithFit(context, { fit: sizeChangeBehavior });\n if (shouldClose) {\n videoSample.close();\n }\n videoSample = new VideoSample(this.resizeCanvas, {\n timestamp: videoSample.timestamp,\n duration: videoSample.duration,\n rotation: videoSample.rotation\n });\n shouldClose = true;\n }\n }\n } else {\n this.codedWidth = videoSample.codedWidth;\n this.codedHeight = videoSample.codedHeight;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(videoSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n const keyFrameInterval = this.encodingConfig.keyFrameInterval ?? 
5;\n const multipleOfKeyFrameInterval = Math.floor(videoSample.timestamp / keyFrameInterval);\n const finalEncodeOptions = {\n ...encodeOptions,\n keyFrame: encodeOptions?.keyFrame || keyFrameInterval === 0 || multipleOfKeyFrameInterval !== this.lastMultipleOfKeyFrameInterval\n };\n this.lastMultipleOfKeyFrameInterval = multipleOfKeyFrameInterval;\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = videoSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample, finalEncodeOptions)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n } else {\n assert(this.encoder);\n const videoFrame = videoSample.toVideoFrame();\n if (!this.alphaEncoder) {\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const frameDefinitelyHasNoAlpha = !!videoFrame.format && !videoFrame.format.includes(\"A\");\n if (frameDefinitelyHasNoAlpha || this.splitterCreationFailed) {\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n } else {\n const width = videoFrame.displayWidth;\n const height = videoFrame.displayHeight;\n if (!this.splitter) {\n try {\n this.splitter = new ColorAlphaSplitter(width, height);\n } catch (error) {\n console.error(\"Due to an error, only color data will be encoded.\", error);\n this.splitterCreationFailed = true;\n this.alphaFrameQueue.push(null);\n this.encoder.encode(videoFrame, finalEncodeOptions);\n videoFrame.close();\n }\n }\n if (this.splitter) {\n const colorFrame = this.splitter.extractColor(videoFrame);\n const alphaFrame = this.splitter.extractAlpha(videoFrame);\n this.alphaFrameQueue.push(alphaFrame);\n this.encoder.encode(colorFrame, finalEncodeOptions);\n colorFrame.close();\n videoFrame.close();\n }\n }\n }\n if (shouldClose) 
{\n videoSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n }\n await this.muxer.mutex.currentPromise;\n } finally {\n if (shouldClose) {\n videoSample.close();\n }\n }\n }\n ensureEncoder(videoSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const encoderConfig = buildVideoEncoderConfig({\n width: videoSample.codedWidth,\n height: videoSample.codedHeight,\n ...this.encodingConfig,\n framerate: this.source._connectedTrack?.metadata.frameRate\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customVideoEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else {\n if (typeof VideoEncoder === \"undefined\") {\n throw new Error(\"VideoEncoder is not supported by this browser.\");\n }\n encoderConfig.alpha = \"discard\";\n if (this.encodingConfig.alpha === \"keep\") {\n encoderConfig.latencyMode = \"quality\";\n }\n const hasOddDimension = encoderConfig.width % 2 === 1 || encoderConfig.height % 2 === 1;\n if (hasOddDimension && 
(this.encodingConfig.codec === \"avc\" || this.encodingConfig.codec === \"hevc\")) {\n throw new Error(`The dimensions ${encoderConfig.width}x${encoderConfig.height} are not supported for codec` + ` '${this.encodingConfig.codec}'; both width and height must be even numbers. Make sure to` + ` round your dimensions to the nearest even number.`);\n }\n const support = await VideoEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.width}x${encoderConfig.height}, hardware acceleration:` + ` ${encoderConfig.hardwareAcceleration ?? \"no-preference\"}) is not supported by this browser.` + ` Consider using another codec or changing your video parameters.`);\n }\n const colorChunkQueue = [];\n const nullAlphaChunkQueue = [];\n let encodedAlphaChunkCount = 0;\n let alphaEncoderQueue = 0;\n const addPacket = (colorChunk, alphaChunk, meta) => {\n const sideData = {};\n if (alphaChunk) {\n const alphaData = new Uint8Array(alphaChunk.byteLength);\n alphaChunk.copyTo(alphaData);\n sideData.alpha = alphaData;\n }\n const packet = EncodedPacket.fromEncodedChunk(colorChunk, sideData);\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedVideoPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n this.encoder = new VideoEncoder({\n output: (chunk, meta) => {\n if (!this.alphaEncoder) {\n addPacket(chunk, null, meta);\n return;\n }\n const alphaFrame = this.alphaFrameQueue.shift();\n assert(alphaFrame !== undefined);\n if (alphaFrame) {\n this.alphaEncoder.encode(alphaFrame, {\n keyFrame: chunk.type === \"key\"\n });\n alphaEncoderQueue++;\n alphaFrame.close();\n colorChunkQueue.push({ chunk, meta });\n } else {\n if (alphaEncoderQueue === 0) {\n addPacket(chunk, null, meta);\n } else {\n 
nullAlphaChunkQueue.push(encodedAlphaChunkCount + alphaEncoderQueue);\n colorChunkQueue.push({ chunk, meta });\n }\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.encoder.configure(encoderConfig);\n if (this.encodingConfig.alpha === \"keep\") {\n this.alphaEncoder = new VideoEncoder({\n output: (chunk, meta) => {\n alphaEncoderQueue--;\n const colorChunk = colorChunkQueue.shift();\n assert(colorChunk !== undefined);\n addPacket(colorChunk.chunk, chunk, colorChunk.meta);\n encodedAlphaChunkCount++;\n while (nullAlphaChunkQueue.length > 0 && nullAlphaChunkQueue[0] === encodedAlphaChunkCount) {\n nullAlphaChunkQueue.shift();\n const colorChunk2 = colorChunkQueue.shift();\n assert(colorChunk2 !== undefined);\n addPacket(colorChunk2.chunk, null, colorChunk2.meta);\n }\n },\n error: (error) => {\n error.stack = encoderError.stack;\n this.error ??= error;\n }\n });\n this.alphaEncoder.configure(encoderConfig);\n }\n }\n assert(this.source._connectedTrack);\n this.muxer = this.source._connectedTrack.output._muxer;\n this.encoderInitialized = true;\n })();\n }\n async flushAndClose(forceClose) {\n if (!forceClose)\n this.checkForEncoderError();\n if (this.customEncoder) {\n if (!forceClose) {\n this.customEncoderCallSerializer.call(() => this.customEncoder.flush());\n }\n await this.customEncoderCallSerializer.call(() => this.customEncoder.close());\n } else if (this.encoder) {\n if (!forceClose) {\n await this.encoder.flush();\n await this.alphaEncoder?.flush();\n }\n if (this.encoder.state !== \"closed\") {\n this.encoder.close();\n }\n if (this.alphaEncoder && this.alphaEncoder.state !== \"closed\") {\n this.alphaEncoder.close();\n }\n this.alphaFrameQueue.forEach((x) => x?.close());\n this.splitter?.close();\n }\n if (!forceClose)\n this.checkForEncoderError();\n }\n getQueueSize() {\n if (this.customEncoder) {\n return this.customEncoderQueueSize;\n } else {\n return this.encoder?.encodeQueueSize ?? 
0;\n }\n }\n checkForEncoderError() {\n if (this.error) {\n if (this.errorNeedsNewStack) {\n this.error.stack = new Error().stack;\n }\n throw this.error;\n }\n }\n}\n\nclass ColorAlphaSplitter {\n constructor(initialWidth, initialHeight) {\n this.lastFrame = null;\n if (typeof OffscreenCanvas !== \"undefined\") {\n this.canvas = new OffscreenCanvas(initialWidth, initialHeight);\n } else {\n this.canvas = document.createElement(\"canvas\");\n this.canvas.width = initialWidth;\n this.canvas.height = initialHeight;\n }\n const gl = this.canvas.getContext(\"webgl2\", {\n alpha: true\n });\n if (!gl) {\n throw new Error(\"Couldn't acquire WebGL 2 context.\");\n }\n this.gl = gl;\n this.colorProgram = this.createColorProgram();\n this.alphaProgram = this.createAlphaProgram();\n this.vao = this.createVAO();\n this.sourceTexture = this.createTexture();\n this.alphaResolutionLocation = this.gl.getUniformLocation(this.alphaProgram, \"u_resolution\");\n this.gl.useProgram(this.colorProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.colorProgram, \"u_sourceTexture\"), 0);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform1i(this.gl.getUniformLocation(this.alphaProgram, \"u_sourceTexture\"), 0);\n }\n createVertexShader() {\n return this.createShader(this.gl.VERTEX_SHADER, `#version 300 es\n\t\t\tin vec2 a_position;\n\t\t\tin vec2 a_texCoord;\n\t\t\tout vec2 v_texCoord;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tgl_Position = vec4(a_position, 0.0, 1.0);\n\t\t\t\tv_texCoord = a_texCoord;\n\t\t\t}\n\t\t`);\n }\n createColorProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\tvec4 source = texture(u_sourceTexture, v_texCoord);\n\t\t\t\tfragColor = vec4(source.rgb, 1.0);\n\t\t\t}\n\t\t`);\n const program = 
this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createAlphaProgram() {\n const vertexShader = this.createVertexShader();\n const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, `#version 300 es\n\t\t\tprecision highp float;\n\t\t\t\n\t\t\tuniform sampler2D u_sourceTexture;\n\t\t\tuniform vec2 u_resolution; // The width and height of the canvas\n\t\t\tin vec2 v_texCoord;\n\t\t\tout vec4 fragColor;\n\n\t\t\t// This function determines the value for a single byte in the YUV stream\n\t\t\tfloat getByteValue(float byteOffset) {\n\t\t\t\tfloat width = u_resolution.x;\n\t\t\t\tfloat height = u_resolution.y;\n\n\t\t\t\tfloat yPlaneSize = width * height;\n\n\t\t\t\tif (byteOffset < yPlaneSize) {\n\t\t\t\t\t// This byte is in the luma plane. Find the corresponding pixel coordinates to sample from\n\t\t\t\t\tfloat y = floor(byteOffset / width);\n\t\t\t\t\tfloat x = mod(byteOffset, width);\n\t\t\t\t\t\n\t\t\t\t\t// Add 0.5 to sample the center of the texel\n\t\t\t\t\tvec2 sampleCoord = (vec2(x, y) + 0.5) / u_resolution;\n\t\t\t\t\t\n\t\t\t\t\t// The luma value is the alpha from the source texture\n\t\t\t\t\treturn texture(u_sourceTexture, sampleCoord).a;\n\t\t\t\t} else {\n\t\t\t\t\t// Write a fixed value for chroma and beyond\n\t\t\t\t\treturn 128.0 / 255.0;\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tvoid main() {\n\t\t\t\t// Each fragment writes 4 bytes (R, G, B, A)\n\t\t\t\tfloat pixelIndex = floor(gl_FragCoord.y) * u_resolution.x + floor(gl_FragCoord.x);\n\t\t\t\tfloat baseByteOffset = pixelIndex * 4.0;\n\n\t\t\t\tvec4 result;\n\t\t\t\tfor (int i = 0; i < 4; i++) {\n\t\t\t\t\tfloat currentByteOffset = baseByteOffset + float(i);\n\t\t\t\t\tresult[i] = getByteValue(currentByteOffset);\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfragColor = result;\n\t\t\t}\n\t\t`);\n const program = this.gl.createProgram();\n this.gl.attachShader(program, vertexShader);\n 
this.gl.attachShader(program, fragmentShader);\n this.gl.linkProgram(program);\n return program;\n }\n createShader(type, source) {\n const shader = this.gl.createShader(type);\n this.gl.shaderSource(shader, source);\n this.gl.compileShader(shader);\n if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {\n console.error(\"Shader compile error:\", this.gl.getShaderInfoLog(shader));\n }\n return shader;\n }\n createVAO() {\n const vao = this.gl.createVertexArray();\n this.gl.bindVertexArray(vao);\n const vertices = new Float32Array([\n -1,\n -1,\n 0,\n 1,\n 1,\n -1,\n 1,\n 1,\n -1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 0\n ]);\n const buffer = this.gl.createBuffer();\n this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);\n this.gl.bufferData(this.gl.ARRAY_BUFFER, vertices, this.gl.STATIC_DRAW);\n const positionLocation = this.gl.getAttribLocation(this.colorProgram, \"a_position\");\n const texCoordLocation = this.gl.getAttribLocation(this.colorProgram, \"a_texCoord\");\n this.gl.enableVertexAttribArray(positionLocation);\n this.gl.vertexAttribPointer(positionLocation, 2, this.gl.FLOAT, false, 16, 0);\n this.gl.enableVertexAttribArray(texCoordLocation);\n this.gl.vertexAttribPointer(texCoordLocation, 2, this.gl.FLOAT, false, 16, 8);\n return vao;\n }\n createTexture() {\n const texture = this.gl.createTexture();\n this.gl.bindTexture(this.gl.TEXTURE_2D, texture);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_S, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_WRAP_T, this.gl.CLAMP_TO_EDGE);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MIN_FILTER, this.gl.LINEAR);\n this.gl.texParameteri(this.gl.TEXTURE_2D, this.gl.TEXTURE_MAG_FILTER, this.gl.LINEAR);\n return texture;\n }\n updateTexture(sourceFrame) {\n if (this.lastFrame === sourceFrame) {\n return;\n }\n if (sourceFrame.displayWidth !== this.canvas.width || sourceFrame.displayHeight !== this.canvas.height) {\n this.canvas.width = 
sourceFrame.displayWidth;\n this.canvas.height = sourceFrame.displayHeight;\n }\n this.gl.activeTexture(this.gl.TEXTURE0);\n this.gl.bindTexture(this.gl.TEXTURE_2D, this.sourceTexture);\n this.gl.texImage2D(this.gl.TEXTURE_2D, 0, this.gl.RGBA, this.gl.RGBA, this.gl.UNSIGNED_BYTE, sourceFrame);\n this.lastFrame = sourceFrame;\n }\n extractColor(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.colorProgram);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n return new VideoFrame(this.canvas, {\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? undefined,\n alpha: \"discard\"\n });\n }\n extractAlpha(sourceFrame) {\n this.updateTexture(sourceFrame);\n this.gl.useProgram(this.alphaProgram);\n this.gl.uniform2f(this.alphaResolutionLocation, this.canvas.width, this.canvas.height);\n this.gl.viewport(0, 0, this.canvas.width, this.canvas.height);\n this.gl.clear(this.gl.COLOR_BUFFER_BIT);\n this.gl.bindVertexArray(this.vao);\n this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4);\n const { width, height } = this.canvas;\n const chromaSamples = Math.ceil(width / 2) * Math.ceil(height / 2);\n const yuvSize = width * height + chromaSamples * 2;\n const requiredHeight = Math.ceil(yuvSize / (width * 4));\n let yuv = new Uint8Array(4 * width * requiredHeight);\n this.gl.readPixels(0, 0, width, requiredHeight, this.gl.RGBA, this.gl.UNSIGNED_BYTE, yuv);\n yuv = yuv.subarray(0, yuvSize);\n assert(yuv[width * height] === 128);\n assert(yuv[yuv.length - 1] === 128);\n const init = {\n format: \"I420\",\n codedWidth: width,\n codedHeight: height,\n timestamp: sourceFrame.timestamp,\n duration: sourceFrame.duration ?? 
undefined,\n transfer: [yuv.buffer]\n };\n return new VideoFrame(yuv, init);\n }\n close() {\n this.gl.getExtension(\"WEBGL_lose_context\")?.loseContext();\n this.gl = null;\n }\n}\n\nclass VideoSampleSource extends VideoSource {\n constructor(encodingConfig) {\n validateVideoEncodingConfig(encodingConfig);\n super(encodingConfig.codec);\n this._encoder = new VideoEncoderWrapper(this, encodingConfig);\n }\n add(videoSample, encodeOptions) {\n if (!(videoSample instanceof VideoSample)) {\n throw new TypeError(\"videoSample must be a VideoSample.\");\n }\n return this._encoder.add(videoSample, false, encodeOptions);\n }\n _flushAndClose(forceClose) {\n return this._encoder.flushAndClose(forceClose);\n }\n}\nclass AudioSource extends MediaSource {\n constructor(codec) {\n super();\n this._connectedTrack = null;\n if (!AUDIO_CODECS.includes(codec)) {\n throw new TypeError(`Invalid audio codec '${codec}'. Must be one of: ${AUDIO_CODECS.join(\", \")}.`);\n }\n this._codec = codec;\n }\n}\nclass AudioEncoderWrapper {\n constructor(source, encodingConfig) {\n this.source = source;\n this.encodingConfig = encodingConfig;\n this.ensureEncoderPromise = null;\n this.encoderInitialized = false;\n this.encoder = null;\n this.muxer = null;\n this.lastNumberOfChannels = null;\n this.lastSampleRate = null;\n this.isPcmEncoder = false;\n this.outputSampleSize = null;\n this.writeOutputValue = null;\n this.customEncoder = null;\n this.customEncoderCallSerializer = new CallSerializer;\n this.customEncoderQueueSize = 0;\n this.lastEndSampleIndex = null;\n this.error = null;\n this.errorNeedsNewStack = true;\n }\n async add(audioSample, shouldClose) {\n try {\n this.checkForEncoderError();\n this.source._ensureValidAdd();\n if (this.lastNumberOfChannels !== null && this.lastSampleRate !== null) {\n if (audioSample.numberOfChannels !== this.lastNumberOfChannels || audioSample.sampleRate !== this.lastSampleRate) {\n throw new Error(`Audio parameters must remain constant. 
Expected ${this.lastNumberOfChannels} channels at` + ` ${this.lastSampleRate} Hz, got ${audioSample.numberOfChannels} channels at` + ` ${audioSample.sampleRate} Hz.`);\n }\n } else {\n this.lastNumberOfChannels = audioSample.numberOfChannels;\n this.lastSampleRate = audioSample.sampleRate;\n }\n if (!this.encoderInitialized) {\n if (!this.ensureEncoderPromise) {\n this.ensureEncoder(audioSample);\n }\n if (!this.encoderInitialized) {\n await this.ensureEncoderPromise;\n }\n }\n assert(this.encoderInitialized);\n {\n const startSampleIndex = Math.round(audioSample.timestamp * audioSample.sampleRate);\n const endSampleIndex = Math.round((audioSample.timestamp + audioSample.duration) * audioSample.sampleRate);\n if (this.lastEndSampleIndex === null) {\n this.lastEndSampleIndex = endSampleIndex;\n } else {\n const sampleDiff = startSampleIndex - this.lastEndSampleIndex;\n if (sampleDiff >= 64) {\n const fillSample = new AudioSample({\n data: new Float32Array(sampleDiff * audioSample.numberOfChannels),\n format: \"f32-planar\",\n sampleRate: audioSample.sampleRate,\n numberOfChannels: audioSample.numberOfChannels,\n numberOfFrames: sampleDiff,\n timestamp: this.lastEndSampleIndex / audioSample.sampleRate\n });\n await this.add(fillSample, true);\n }\n this.lastEndSampleIndex += audioSample.numberOfFrames;\n }\n }\n if (this.customEncoder) {\n this.customEncoderQueueSize++;\n const clonedSample = audioSample.clone();\n const promise = this.customEncoderCallSerializer.call(() => this.customEncoder.encode(clonedSample)).then(() => this.customEncoderQueueSize--).catch((error) => this.error ??= error).finally(() => {\n clonedSample.close();\n });\n if (this.customEncoderQueueSize >= 4) {\n await promise;\n }\n await this.muxer.mutex.currentPromise;\n } else if (this.isPcmEncoder) {\n await this.doPcmEncoding(audioSample, shouldClose);\n } else {\n assert(this.encoder);\n const audioData = audioSample.toAudioData();\n this.encoder.encode(audioData);\n audioData.close();\n if 
(shouldClose) {\n audioSample.close();\n }\n if (this.encoder.encodeQueueSize >= 4) {\n await new Promise((resolve) => this.encoder.addEventListener(\"dequeue\", resolve, { once: true }));\n }\n await this.muxer.mutex.currentPromise;\n }\n } finally {\n if (shouldClose) {\n audioSample.close();\n }\n }\n }\n async doPcmEncoding(audioSample, shouldClose) {\n assert(this.outputSampleSize);\n assert(this.writeOutputValue);\n const { numberOfChannels, numberOfFrames, sampleRate, timestamp } = audioSample;\n const CHUNK_SIZE = 2048;\n const outputs = [];\n for (let frame = 0;frame < numberOfFrames; frame += CHUNK_SIZE) {\n const frameCount = Math.min(CHUNK_SIZE, audioSample.numberOfFrames - frame);\n const outputSize = frameCount * numberOfChannels * this.outputSampleSize;\n const outputBuffer = new ArrayBuffer(outputSize);\n const outputView = new DataView(outputBuffer);\n outputs.push({ frameCount, view: outputView });\n }\n const allocationSize = audioSample.allocationSize({ planeIndex: 0, format: \"f32-planar\" });\n const floats = new Float32Array(allocationSize / Float32Array.BYTES_PER_ELEMENT);\n for (let i = 0;i < numberOfChannels; i++) {\n audioSample.copyTo(floats, { planeIndex: i, format: \"f32-planar\" });\n for (let j = 0;j < outputs.length; j++) {\n const { frameCount, view: view2 } = outputs[j];\n for (let k = 0;k < frameCount; k++) {\n this.writeOutputValue(view2, (k * numberOfChannels + i) * this.outputSampleSize, floats[j * CHUNK_SIZE + k]);\n }\n }\n }\n if (shouldClose) {\n audioSample.close();\n }\n const meta = {\n decoderConfig: {\n codec: this.encodingConfig.codec,\n numberOfChannels,\n sampleRate\n }\n };\n for (let i = 0;i < outputs.length; i++) {\n const { frameCount, view: view2 } = outputs[i];\n const outputBuffer = view2.buffer;\n const startFrame = i * CHUNK_SIZE;\n const packet = new EncodedPacket(new Uint8Array(outputBuffer), \"key\", timestamp + startFrame / sampleRate, frameCount / sampleRate);\n 
this.encodingConfig.onEncodedPacket?.(packet, meta);\n await this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta);\n }\n }\n ensureEncoder(audioSample) {\n const encoderError = new Error;\n this.ensureEncoderPromise = (async () => {\n const { numberOfChannels, sampleRate } = audioSample;\n const encoderConfig = buildAudioEncoderConfig({\n numberOfChannels,\n sampleRate,\n ...this.encodingConfig\n });\n this.encodingConfig.onEncoderConfig?.(encoderConfig);\n const MatchingCustomEncoder = customAudioEncoders.find((x) => x.supports(this.encodingConfig.codec, encoderConfig));\n if (MatchingCustomEncoder) {\n this.customEncoder = new MatchingCustomEncoder;\n this.customEncoder.codec = this.encodingConfig.codec;\n this.customEncoder.config = encoderConfig;\n this.customEncoder.onPacket = (packet, meta) => {\n if (!(packet instanceof EncodedPacket)) {\n throw new TypeError(\"The first argument passed to onPacket must be an EncodedPacket.\");\n }\n if (meta !== undefined && (!meta || typeof meta !== \"object\")) {\n throw new TypeError(\"The second argument passed to onPacket must be an object or undefined.\");\n }\n this.encodingConfig.onEncodedPacket?.(packet, meta);\n this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {\n this.error ??= error;\n this.errorNeedsNewStack = false;\n });\n };\n await this.customEncoder.init();\n } else if (PCM_AUDIO_CODECS.includes(this.encodingConfig.codec)) {\n this.initPcmEncoder();\n } else {\n if (typeof AudioEncoder === \"undefined\") {\n throw new Error(\"AudioEncoder is not supported by this browser.\");\n }\n const support = await AudioEncoder.isConfigSupported(encoderConfig);\n if (!support.supported) {\n throw new Error(`This specific encoder configuration (${encoderConfig.codec}, ${encoderConfig.bitrate} bps,` + ` ${encoderConfig.numberOfChannels} channels, ${encoderConfig.sampleRate} Hz) is not` + ` supported by this browser. 
Consider using another codec or changing your audio parameters.`);
        }
        this.encoder = new AudioEncoder({
          output: (chunk, meta) => {
            // Rebuild the AAC AudioSpecificConfig when the emitted
            // decoderConfig description is missing, too short, or reports
            // object type 0; the object type is recovered from the trailing
            // segment of the codec string (e.g. "mp4a.40.2" -> 2).
            if (this.encodingConfig.codec === "aac" && meta?.decoderConfig) {
              let needsDescriptionOverwrite = false;
              if (!meta.decoderConfig.description || meta.decoderConfig.description.byteLength < 2) {
                needsDescriptionOverwrite = true;
              } else {
                const audioSpecificConfig = parseAacAudioSpecificConfig(toUint8Array(meta.decoderConfig.description));
                needsDescriptionOverwrite = audioSpecificConfig.objectType === 0;
              }
              if (needsDescriptionOverwrite) {
                const objectType = Number(last(encoderConfig.codec.split(".")));
                meta.decoderConfig.description = buildAacAudioSpecificConfig({
                  objectType,
                  numberOfChannels: meta.decoderConfig.numberOfChannels,
                  sampleRate: meta.decoderConfig.sampleRate
                });
              }
            }
            const packet = EncodedPacket.fromEncodedChunk(chunk);
            this.encodingConfig.onEncodedPacket?.(packet, meta);
            // Muxer failures are stashed and rethrown by checkForEncoderError.
            this.muxer.addEncodedAudioPacket(this.source._connectedTrack, packet, meta).catch((error) => {
              this.error ??= error;
              this.errorNeedsNewStack = false;
            });
          },
          error: (error) => {
            // Reuse the stack captured at ensureEncoder() time so the error
            // points at caller code rather than an internal callback.
            error.stack = encoderError.stack;
            this.error ??= error;
          }
        });
        this.encoder.configure(encoderConfig);
      }
      assert(this.source._connectedTrack);
      this.muxer = this.source._connectedTrack.output._muxer;
      this.encoderInitialized = true;
    })();
  }
  // Selects a writeOutputValue function converting one float sample [-1, 1]
  // to the configured PCM sample size / signedness / endianness.
  initPcmEncoder() {
    this.isPcmEncoder = true;
    const codec = this.encodingConfig.codec;
    const { dataType, sampleSize, littleEndian } = parsePcmCodec(codec);
    this.outputSampleSize = sampleSize;
    switch (sampleSize) {
      case 1:
        {
          if (dataType === "unsigned") {
            this.writeOutputValue = (view2, byteOffset, value) => view2.setUint8(byteOffset, clamp((value + 1) * 127.5, 0, 255));
          } else if (dataType === "signed") {
            this.writeOutputValue = (view2, byteOffset, value) => {
              view2.setInt8(byteOffset, clamp(Math.round(value * 128), -128, 127));
            };
          } else if (dataType === "ulaw") {
            this.writeOutputValue = (view2, byteOffset, value) => {
              const int16 = clamp(Math.floor(value * 32767), -32768, 32767);
              view2.setUint8(byteOffset, toUlaw(int16));
            };
          } else if (dataType === "alaw") {
            this.writeOutputValue = (view2, byteOffset, value) => {
              const int16 = clamp(Math.floor(value * 32767), -32768, 32767);
              view2.setUint8(byteOffset, toAlaw(int16));
            };
          } else {
            assert(false);
          }
        }
        ;
        break;
      case 2:
        {
          if (dataType === "unsigned") {
            this.writeOutputValue = (view2, byteOffset, value) => view2.setUint16(byteOffset, clamp((value + 1) * 32767.5, 0, 65535), littleEndian);
          } else if (dataType === "signed") {
            this.writeOutputValue = (view2, byteOffset, value) => view2.setInt16(byteOffset, clamp(Math.round(value * 32767), -32768, 32767), littleEndian);
          } else {
            assert(false);
          }
        }
        ;
        break;
      case 3:
        {
          if (dataType === "unsigned") {
            this.writeOutputValue = (view2, byteOffset, value) => setUint24(view2, byteOffset, clamp((value + 1) * 8388607.5, 0, 16777215), littleEndian);
          } else if (dataType === "signed") {
            this.writeOutputValue = (view2, byteOffset, value) => setInt24(view2, byteOffset, clamp(Math.round(value * 8388607), -8388608, 8388607), littleEndian);
          } else {
            assert(false);
          }
        }
        ;
        break;
      case 4:
        {
          if (dataType === "unsigned") {
            this.writeOutputValue = (view2, byteOffset, value) => view2.setUint32(byteOffset, clamp((value + 1) * 2147483647.5, 0, 4294967295), littleEndian);
          } else if (dataType === "signed") {
            this.writeOutputValue = (view2, byteOffset, value) => view2.setInt32(byteOffset, clamp(Math.round(value * 2147483647), -2147483648, 2147483647), littleEndian);
          } else if (dataType === "float") {
            this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat32(byteOffset, value, littleEndian);
          } else {
            assert(false);
          }
        }
        ;
        break;
      case 8:
        {
          if (dataType === "float") {
            this.writeOutputValue = (view2, byteOffset, value) => view2.setFloat64(byteOffset, value, littleEndian);
          } else {
            assert(false);
          }
        }
        ;
        break;
      default:
        {
          assertNever(sampleSize);
          assert(false);
        }
        ;
    }
  }
  // Flushes pending frames and shuts the encoder down; forceClose skips the
  // flush (and the error checks) for the cancellation path.
  async flushAndClose(forceClose) {
    if (!forceClose)
      this.checkForEncoderError();
    if (this.customEncoder) {
      // NOTE(review): the flush() call is intentionally not awaited here; the
      // awaited close() goes through the same call serializer and therefore
      // runs after it — confirm against the serializer's ordering guarantee.
      if (!forceClose) {
        this.customEncoderCallSerializer.call(() => this.customEncoder.flush());
      }
      await this.customEncoderCallSerializer.call(() => this.customEncoder.close());
    } else if (this.encoder) {
      if (!forceClose) {
        await this.encoder.flush();
      }
      if (this.encoder.state !== "closed") {
        this.encoder.close();
      }
    }
    if (!forceClose)
      this.checkForEncoderError();
  }
  getQueueSize() {
    if (this.customEncoder) {
      return this.customEncoderQueueSize;
    } else if (this.isPcmEncoder) {
      // PCM encoding is synchronous; nothing ever queues.
      return 0;
    } else {
      return this.encoder?.encodeQueueSize ?? 0;
    }
  }
  // Rethrows an error captured by an async encoder/muxer callback, optionally
  // replacing its stack with one from the current (caller) context.
  checkForEncoderError() {
    if (this.error) {
      if (this.errorNeedsNewStack) {
        this.error.stack = new Error().stack;
      }
      throw this.error;
    }
  }
}

// Vendored mediabunny: audio source that is fed AudioSamples manually.
class AudioSampleSource extends AudioSource {
  constructor(encodingConfig) {
    validateAudioEncodingConfig(encodingConfig);
    super(encodingConfig.codec);
    this._encoder = new AudioEncoderWrapper(this, encodingConfig);
  }
  add(audioSample) {
    if (!(audioSample instanceof AudioSample)) {
      throw new TypeError("audioSample must be an AudioSample.");
    }
    return this._encoder.add(audioSample, false);
  }
  _flushAndClose(forceClose) {
    return this._encoder.flushAndClose(forceClose);
  }
}
// Vendored mediabunny: base class for subtitle track sources.
class SubtitleSource extends MediaSource {
  constructor(codec) {
    super();
    this._connectedTrack = null;
    if (!SUBTITLE_CODECS.includes(codec)) {
      throw new TypeError(`Invalid subtitle codec '${codec}'. 
Must be one of: ${SUBTITLE_CODECS.join(", ")}.`);
    }
    this._codec = codec;
  }
}

// ../../node_modules/mediabunny/dist/modules/src/output.js
/*!
 * Copyright (c) 2026-present, Vanilagy and contributors
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/.
 */
var ALL_TRACK_TYPES = ["video", "audio", "subtitle"];
// Validates the metadata object shared by all track types (language, name,
// disposition, maximumPacketCount).
var validateBaseTrackMetadata = (metadata) => {
  if (!metadata || typeof metadata !== "object") {
    throw new TypeError("metadata must be an object.");
  }
  if (metadata.languageCode !== undefined && !isIso639Dash2LanguageCode(metadata.languageCode)) {
    throw new TypeError("metadata.languageCode, when provided, must be a three-letter, ISO 639-2/T language code.");
  }
  if (metadata.name !== undefined && typeof metadata.name !== "string") {
    throw new TypeError("metadata.name, when provided, must be a string.");
  }
  if (metadata.disposition !== undefined) {
    validateTrackDisposition(metadata.disposition);
  }
  if (metadata.maximumPacketCount !== undefined && (!Number.isInteger(metadata.maximumPacketCount) || metadata.maximumPacketCount < 0)) {
    throw new TypeError("metadata.maximumPacketCount, when provided, must be a non-negative integer.");
  }
};

// Vendored mediabunny: ties together an OutputFormat, a Target and a set of
// track sources; owns the muxer/writer and the pending -> started ->
// finalizing -> finalized / canceled state machine.
class Output {
  constructor(options) {
    this.state = "pending";
    this._tracks = [];
    this._startPromise = null;
    this._cancelPromise = null;
    this._finalizePromise = null;
    this._mutex = new AsyncMutex;
    this._metadataTags = {};
    if (!options || typeof options !== "object") {
      throw new TypeError("options must be an object.");
    }
    if (!(options.format instanceof OutputFormat)) {
      throw new TypeError("options.format must be an OutputFormat.");
    }
    if (!(options.target instanceof Target)) {
      throw new TypeError("options.target must be a Target.");
    }
    // A Target instance is single-use; claiming it here prevents two outputs
    // from writing to the same destination.
    if (options.target._output) {
      throw new Error("Target is already used for another output.");
    }
    options.target._output = this;
    this.format = options.format;
    this.target = options.target;
    this._writer = options.target._createWriter();
    this._muxer = options.format._createMuxer(this);
  }
  addVideoTrack(source, metadata = {}) {
    if (!(source instanceof VideoSource)) {
      throw new TypeError("source must be a VideoSource.");
    }
    validateBaseTrackMetadata(metadata);
    if (metadata.rotation !== undefined && ![0, 90, 180, 270].includes(metadata.rotation)) {
      throw new TypeError(`Invalid video rotation: ${metadata.rotation}. Has to be 0, 90, 180 or 270.`);
    }
    if (!this.format.supportsVideoRotationMetadata && metadata.rotation) {
      throw new Error(`${this.format._name} does not support video rotation metadata.`);
    }
    if (metadata.frameRate !== undefined && (!Number.isFinite(metadata.frameRate) || metadata.frameRate <= 0)) {
      throw new TypeError(`Invalid video frame rate: ${metadata.frameRate}. Must be a positive number.`);
    }
    this._addTrack("video", source, metadata);
  }
  addAudioTrack(source, metadata = {}) {
    if (!(source instanceof AudioSource)) {
      throw new TypeError("source must be an AudioSource.");
    }
    validateBaseTrackMetadata(metadata);
    this._addTrack("audio", source, metadata);
  }
  addSubtitleTrack(source, metadata = {}) {
    if (!(source instanceof SubtitleSource)) {
      throw new TypeError("source must be a SubtitleSource.");
    }
    validateBaseTrackMetadata(metadata);
    this._addTrack("subtitle", source, metadata);
  }
  setMetadataTags(tags) {
    validateMetadataTags(tags);
    if (this.state !== "pending") {
      throw new Error("Cannot set metadata tags after output has been started or canceled.");
    }
    this._metadataTags = tags;
  }
  // Shared by the three add*Track methods: enforces per-type and total track
  // limits and codec/container compatibility, then links source <-> track.
  _addTrack(type, source, metadata) {
    if (this.state !== "pending") {
      throw new Error("Cannot add track after output has been started or canceled.");
    }
    if (source._connectedTrack) {
      throw new Error("Source is already used for a track.");
    }
    const supportedTrackCounts = this.format.getSupportedTrackCounts();
    const presentTracksOfThisType = this._tracks.reduce((count, track2) => count + (track2.type === type ? 1 : 0), 0);
    const maxCount = supportedTrackCounts[type].max;
    if (presentTracksOfThisType === maxCount) {
      throw new Error(maxCount === 0 ? `${this.format._name} does not support ${type} tracks.` : `${this.format._name} does not support more than ${maxCount} ${type} track` + `${maxCount === 1 ? "" : "s"}.`);
    }
    const maxTotalCount = supportedTrackCounts.total.max;
    if (this._tracks.length === maxTotalCount) {
      throw new Error(`${this.format._name} does not support more than ${maxTotalCount} tracks` + `${maxTotalCount === 1 ? "" : "s"} in total.`);
    }
    // Track ids are 1-based and assigned in insertion order.
    const track = {
      id: this._tracks.length + 1,
      output: this,
      type,
      source,
      metadata
    };
    if (track.type === "video") {
      const supportedVideoCodecs = this.format.getSupportedVideoCodecs();
      if (supportedVideoCodecs.length === 0) {
        throw new Error(`${this.format._name} does not support video tracks.` + this.format._codecUnsupportedHint(track.source._codec));
      } else if (!supportedVideoCodecs.includes(track.source._codec)) {
        throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` video codecs are: ${supportedVideoCodecs.map((codec) => `'${codec}'`).join(", ")}.` + this.format._codecUnsupportedHint(track.source._codec));
      }
    } else if (track.type === "audio") {
      const supportedAudioCodecs = this.format.getSupportedAudioCodecs();
      if (supportedAudioCodecs.length === 0) {
        throw new Error(`${this.format._name} does not support audio tracks.` + this.format._codecUnsupportedHint(track.source._codec));
      } else if (!supportedAudioCodecs.includes(track.source._codec)) {
        throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` audio codecs are: ${supportedAudioCodecs.map((codec) => `'${codec}'`).join(", ")}.` + this.format._codecUnsupportedHint(track.source._codec));
      }
    } else if (track.type === "subtitle") {
      const supportedSubtitleCodecs = this.format.getSupportedSubtitleCodecs();
      if (supportedSubtitleCodecs.length === 0) {
        throw new Error(`${this.format._name} does not support subtitle tracks.` + this.format._codecUnsupportedHint(track.source._codec));
      } else if (!supportedSubtitleCodecs.includes(track.source._codec)) {
        throw new Error(`Codec '${track.source._codec}' cannot be contained within ${this.format._name}. Supported` + ` subtitle codecs are: ${supportedSubtitleCodecs.map((codec) => `'${codec}'`).join(", ")}.` + this.format._codecUnsupportedHint(track.source._codec));
      }
    }
    this._tracks.push(track);
    source._connectedTrack = track;
  }
  // Verifies minimum track counts, then starts writer, muxer and all track
  // sources under the output mutex. Idempotent: repeat calls return the same
  // promise (with a console warning).
  async start() {
    const supportedTrackCounts = this.format.getSupportedTrackCounts();
    for (const trackType of ALL_TRACK_TYPES) {
      const presentTracksOfThisType = this._tracks.reduce((count, track) => count + (track.type === trackType ? 1 : 0), 0);
      const minCount = supportedTrackCounts[trackType].min;
      if (presentTracksOfThisType < minCount) {
        throw new Error(minCount === supportedTrackCounts[trackType].max ? `${this.format._name} requires exactly ${minCount} ${trackType}` + ` track${minCount === 1 ? "" : "s"}.` : `${this.format._name} requires at least ${minCount} ${trackType}` + ` track${minCount === 1 ? "" : "s"}.`);
      }
    }
    const totalMinCount = supportedTrackCounts.total.min;
    if (this._tracks.length < totalMinCount) {
      throw new Error(totalMinCount === supportedTrackCounts.total.max ? `${this.format._name} requires exactly ${totalMinCount} track` + `${totalMinCount === 1 ? "" : "s"}.` : `${this.format._name} requires at least ${totalMinCount} track` + `${totalMinCount === 1 ? "" : "s"}.`);
    }
    if (this.state === "canceled") {
      throw new Error("Output has been canceled.");
    }
    if (this._startPromise) {
      console.warn("Output has already been started.");
      return this._startPromise;
    }
    return this._startPromise = (async () => {
      this.state = "started";
      this._writer.start();
      const release = await this._mutex.acquire();
      await this._muxer.start();
      const promises = this._tracks.map((track) => track.source._start());
      await Promise.all(promises);
      release();
    })();
  }
  getMimeType() {
    return this._muxer.getMimeType();
  }
  // Aborts the output: force-closes every track source and the writer.
  async cancel() {
    if (this._cancelPromise) {
      console.warn("Output has already been canceled.");
      return this._cancelPromise;
    } else if (this.state === "finalizing" || this.state === "finalized") {
      console.warn("Output has already been finalized.");
      return;
    }
    return this._cancelPromise = (async () => {
      this.state = "canceled";
      const release = await this._mutex.acquire();
      const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(true));
      await Promise.all(promises);
      await this._writer.close();
      release();
    })();
  }
  // Gracefully ends the output: flushes every track source, finalizes the
  // muxer, then flushes and finalizes the writer.
  async finalize() {
    if (this.state === "pending") {
      throw new Error("Cannot finalize before starting.");
    }
    if (this.state === "canceled") {
      throw new Error("Cannot finalize after canceling.");
    }
    if (this._finalizePromise) {
      console.warn("Output has already been finalized.");
      return this._finalizePromise;
    }
    return this._finalizePromise = (async () => {
      this.state = "finalizing";
      const release = await this._mutex.acquire();
      const promises = this._tracks.map((x) => x.source._flushOrWaitForOngoingClose(false));
      await Promise.all(promises);
      await this._muxer.finalize();
      await this._writer.flush();
      await this._writer.finalize();
      this.state = "finalized";
      release();
    })();
  }
}
// ../../node_modules/mediabunny/dist/modules/src/index.js
/*!
 * Copyright (c) 2026-present, 
Vanilagy and contributors\n *\n * This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at https://mozilla.org/MPL/2.0/.\n */\nvar MEDIABUNNY_LOADED_SYMBOL = Symbol.for(\"mediabunny loaded\");\nif (globalThis[MEDIABUNNY_LOADED_SYMBOL]) {\n console.error(`[WARNING]\nMediabunny was loaded twice.` + \" This will likely cause Mediabunny not to work correctly.\" + \" Check if multiple dependencies are importing different versions of Mediabunny,\" + \" or if something is being bundled incorrectly.\");\n}\nglobalThis[MEDIABUNNY_LOADED_SYMBOL] = true;\n\n// src/core/utils/error-handler.ts\nfunction extractErrorMessage(error) {\n if (error instanceof Error) {\n return error.message;\n }\n return String(error);\n}\n\n// src/core/utils/logger.ts\nfunction isDebugEnabled() {\n const globalAny = globalThis;\n if (globalAny.__VIDTREO_DEBUG__ === true || globalAny.__VIDTREO_DEV__ === true) {\n return true;\n }\n const envNode = typeof process !== \"undefined\" && process?.env ? 
\"development\" : undefined;\n if (envNode === \"development\" || envNode === \"test\") {\n return true;\n }\n if (typeof localStorage !== \"undefined\") {\n const flag = localStorage.getItem(\"VIDTREO_DEBUG\");\n if (flag === \"true\") {\n return true;\n }\n }\n return false;\n}\nvar isDevelopment = isDebugEnabled();\nvar ANSI_COLORS = {\n reset: \"\\x1B[0m\",\n bright: \"\\x1B[1m\",\n dim: \"\\x1B[2m\",\n red: \"\\x1B[31m\",\n green: \"\\x1B[32m\",\n yellow: \"\\x1B[33m\",\n blue: \"\\x1B[34m\",\n magenta: \"\\x1B[35m\",\n cyan: \"\\x1B[36m\",\n white: \"\\x1B[37m\",\n gray: \"\\x1B[90m\"\n};\nfunction formatMessage(level, message, options) {\n if (!isDevelopment) {\n return \"\";\n }\n const prefix = options?.prefix || `[${level.toUpperCase()}]`;\n const color = options?.color || getDefaultColor(level);\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n return `${colorCode}${prefix}${resetCode} ${message}`;\n}\nfunction getDefaultColor(level) {\n switch (level) {\n case \"error\":\n return \"red\";\n case \"warn\":\n return \"yellow\";\n case \"info\":\n return \"cyan\";\n case \"debug\":\n return \"gray\";\n default:\n return \"white\";\n }\n}\nfunction log(level, message, ...args) {\n if (!isDevelopment) {\n return;\n }\n const formatted = formatMessage(level, message);\n console[level](formatted, ...args);\n}\nvar logger = {\n log: (message, ...args) => {\n log(\"log\", message, ...args);\n },\n info: (message, ...args) => {\n log(\"info\", message, ...args);\n },\n warn: (message, ...args) => {\n log(\"warn\", message, ...args);\n },\n error: (message, ...args) => {\n log(\"error\", message, ...args);\n },\n debug: (message, ...args) => {\n log(\"debug\", message, ...args);\n },\n group: (label, color = \"cyan\") => {\n if (!isDevelopment) {\n return;\n }\n const colorCode = ANSI_COLORS[color];\n const resetCode = ANSI_COLORS.reset;\n console.group(`${colorCode}${label}${resetCode}`);\n },\n groupEnd: () => {\n if 
(!isDevelopment) {\n return;\n }\n console.groupEnd();\n }\n};\n\n// src/core/utils/validation.ts\nfunction requireNonNull(value, message) {\n if (value === null || value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireDefined(value, message) {\n if (value === undefined) {\n throw new Error(message);\n }\n return value;\n}\nfunction requireInitialized(value, componentName) {\n if (value === null || value === undefined) {\n throw new Error(`${componentName} is not initialized`);\n }\n return value;\n}\n\n// src/core/processor/worker/audio-state.ts\nvar MILLISECONDS_PER_SECOND = 1000;\n\nclass AudioState {\n getNowMilliseconds;\n isPaused = false;\n isMuted = false;\n pausedDuration = 0;\n pauseStartedAt = null;\n lastAudioTimestamp = 0;\n isProcessingActive = false;\n constructor(dependencies) {\n this.getNowMilliseconds = dependencies.getNowMilliseconds;\n }\n reset() {\n this.isPaused = false;\n this.isMuted = false;\n this.pausedDuration = 0;\n this.pauseStartedAt = null;\n this.lastAudioTimestamp = 0;\n this.isProcessingActive = false;\n }\n setProcessingActive(isActive) {\n this.isProcessingActive = isActive;\n }\n isActive() {\n return this.isProcessingActive;\n }\n toggleMuted() {\n this.isMuted = !this.isMuted;\n return this.isMuted;\n }\n setMuted(isMuted) {\n this.isMuted = isMuted;\n }\n getIsMuted() {\n return this.isMuted;\n }\n getIsPaused() {\n return this.isPaused;\n }\n getPausedDuration() {\n return this.pausedDuration;\n }\n pause() {\n if (this.isPaused) {\n return false;\n }\n this.pauseStartedAt = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND;\n this.isPaused = true;\n return true;\n }\n resume() {\n if (!this.isPaused) {\n return false;\n }\n const now = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND;\n if (this.pauseStartedAt !== null) {\n this.pausedDuration += now - this.pauseStartedAt;\n }\n this.pauseStartedAt = null;\n this.isPaused = false;\n return true;\n }\n getAudioTimestamp(timestamp) 
{\n if (timestamp >= this.lastAudioTimestamp) {\n return timestamp;\n }\n return this.lastAudioTimestamp;\n }\n updateLastAudioTimestamp(timestamp, duration) {\n this.lastAudioTimestamp = timestamp + duration;\n }\n getLastAudioTimestamp() {\n return this.lastAudioTimestamp;\n }\n}\n\n// src/core/processor/worker/buffer-tracker.ts\nvar BUFFER_UPDATE_INTERVAL_MILLISECONDS = 1000;\nvar BYTES_PER_KILOBYTE = 1024;\nvar FILE_SIZE_PRECISION_FACTOR = 100;\nvar FILE_SIZE_UNITS = [\"Bytes\", \"KB\", \"MB\", \"GB\"];\n\nclass BufferTracker {\n intervalId = null;\n dependencies;\n constructor(dependencies) {\n this.dependencies = dependencies;\n }\n start() {\n if (this.intervalId !== null) {\n return;\n }\n this.intervalId = this.dependencies.setInterval(() => {\n const size = this.dependencies.getBufferSize();\n const formatted = formatFileSize(size);\n this.dependencies.onBufferUpdate(size, formatted);\n }, BUFFER_UPDATE_INTERVAL_MILLISECONDS);\n }\n stop() {\n if (this.intervalId === null) {\n return;\n }\n this.dependencies.clearInterval(this.intervalId);\n this.intervalId = null;\n }\n}\nfunction formatFileSize(bytes2) {\n if (bytes2 === 0) {\n return `0 ${FILE_SIZE_UNITS[0]}`;\n }\n const base = BYTES_PER_KILOBYTE;\n const index = Math.floor(Math.log(bytes2) / Math.log(base));\n const size = Math.round(bytes2 / base ** index * FILE_SIZE_PRECISION_FACTOR) / FILE_SIZE_PRECISION_FACTOR;\n return `${size} ${FILE_SIZE_UNITS[index]}`;\n}\n\n// src/core/processor/worker/rotation-utils.ts\nvar ROTATION_DEGREES_0 = 0;\nvar ROTATION_DEGREES_90 = 90;\nvar ROTATION_DEGREES_180 = 180;\nvar ROTATION_DEGREES_270 = 270;\nvar ROTATION_DEGREES_360 = 360;\nfunction calculateFrameRotationDegrees(input) {\n if (!input.isMobileDevice) {\n return ROTATION_DEGREES_0;\n }\n const targetWidth = input.targetWidth;\n const targetHeight = input.targetHeight;\n if (typeof targetWidth !== \"number\") {\n return ROTATION_DEGREES_0;\n }\n if (typeof targetHeight !== \"number\") {\n return 
ROTATION_DEGREES_0;\n }\n const isTargetPortrait = targetHeight > targetWidth;\n const isFramePortrait = input.frameHeight > input.frameWidth;\n if (isTargetPortrait === isFramePortrait) {\n return ROTATION_DEGREES_0;\n }\n const settingsRotation = resolveRotationHint(input.settingsRotation);\n if (settingsRotation !== null) {\n return settingsRotation;\n }\n const orientationRotation = resolveRotationHint(input.orientationAngle);\n if (orientationRotation !== null) {\n return orientationRotation;\n }\n const windowRotation = resolveRotationHint(input.windowOrientation);\n if (windowRotation !== null) {\n return windowRotation;\n }\n return getFallbackRotationDegrees();\n}\nfunction resolveRotationHint(rotationHint) {\n const normalizedRotation = normalizeRotationDegrees(rotationHint);\n if (normalizedRotation === ROTATION_DEGREES_90) {\n return ROTATION_DEGREES_90;\n }\n if (normalizedRotation === ROTATION_DEGREES_270) {\n return ROTATION_DEGREES_270;\n }\n return null;\n}\nfunction normalizeRotationDegrees(rotationDegrees) {\n if (typeof rotationDegrees !== \"number\") {\n return null;\n }\n const normalizedValue = (rotationDegrees % ROTATION_DEGREES_360 + ROTATION_DEGREES_360) % ROTATION_DEGREES_360;\n const remainder = normalizedValue % ROTATION_DEGREES_90;\n if (remainder !== 0) {\n return null;\n }\n if (normalizedValue === ROTATION_DEGREES_0) {\n return ROTATION_DEGREES_0;\n }\n if (normalizedValue === ROTATION_DEGREES_90) {\n return ROTATION_DEGREES_90;\n }\n if (normalizedValue === ROTATION_DEGREES_180) {\n return ROTATION_DEGREES_180;\n }\n if (normalizedValue === ROTATION_DEGREES_270) {\n return ROTATION_DEGREES_270;\n }\n return null;\n}\nfunction getFallbackRotationDegrees() {\n return ROTATION_DEGREES_90;\n}\n\n// src/core/processor/worker/watermark-utils.ts\nfunction calculateWatermarkTargetSize(videoWidth, imageWidth, imageHeight) {\n const targetWidth = Math.round(videoWidth * 0.07);\n const scaleFactor = targetWidth / imageWidth;\n const 
targetHeight = Math.round(imageHeight * scaleFactor);\n return { width: targetWidth, height: targetHeight };\n}\nfunction getWatermarkPosition(options) {\n const { watermarkWidth, watermarkHeight, videoWidth, videoHeight, position } = options;\n const padding = 20;\n switch (position) {\n case \"top-left\":\n return { x: padding, y: padding };\n case \"top-right\":\n return { x: videoWidth - watermarkWidth - padding, y: padding };\n case \"bottom-left\":\n return { x: padding, y: videoHeight - watermarkHeight - padding };\n case \"bottom-right\":\n return {\n x: videoWidth - watermarkWidth - padding,\n y: videoHeight - watermarkHeight - padding\n };\n case \"center\":\n return {\n x: (videoWidth - watermarkWidth) / 2,\n y: (videoHeight - watermarkHeight) / 2\n };\n default:\n return {\n x: videoWidth - watermarkWidth - padding,\n y: videoHeight - watermarkHeight - padding\n };\n }\n}\n\n// src/core/processor/worker/frame-compositor.ts\nvar DOUBLE_VALUE = 2;\nvar DEFAULT_WATERMARK_OPACITY = 1;\nvar DEFAULT_WATERMARK_BASE_WIDTH = 1280;\nvar OVERLAY_BACKGROUND_OPACITY = 0.6;\nvar OVERLAY_PADDING = 16;\nvar OVERLAY_TEXT_COLOR = \"#ffffff\";\nvar OVERLAY_FONT_SIZE = 16;\nvar OVERLAY_FONT_FAMILY = \"Arial, sans-serif\";\nvar OVERLAY_MIN_WIDTH = 200;\nvar OVERLAY_MIN_HEIGHT = 50;\nvar OVERLAY_COLOR_CHANNEL_VALUE = 20;\nvar OVERLAY_BORDER_RADIUS = 50;\nvar COMPOSITION_CONTEXT_ERROR_MESSAGE = \"Failed to get composition canvas context\";\nvar RECORDER_WORKER_LOG_PREFIX = \"[RecorderWorker]\";\nvar ROTATION_RADIANS_90 = Math.PI * ROTATION_DEGREES_90 / ROTATION_DEGREES_180;\nvar ROTATION_RADIANS_270 = Math.PI * ROTATION_DEGREES_270 / ROTATION_DEGREES_180;\n\nclass FrameCompositor {\n overlayCanvas = null;\n compositionCanvas = null;\n compositionContext = null;\n watermarkCanvas = null;\n frameRotationDegrees = null;\n videoSettings = null;\n viewportMetadata = null;\n isMobileDevice = false;\n logger;\n fetchResource;\n createImageBitmap;\n sendDebugLog;\n 
constructor(dependencies) {\n this.logger = dependencies.logger;\n this.fetchResource = dependencies.fetchResource;\n this.createImageBitmap = dependencies.createImageBitmap;\n this.sendDebugLog = dependencies.sendDebugLog;\n }\n reset() {\n this.overlayCanvas = null;\n this.compositionCanvas = null;\n this.compositionContext = null;\n this.watermarkCanvas = null;\n this.frameRotationDegrees = null;\n this.videoSettings = null;\n this.viewportMetadata = null;\n this.isMobileDevice = false;\n }\n setVideoSettings(settings) {\n this.videoSettings = settings;\n this.frameRotationDegrees = null;\n }\n setViewportMetadata(metadata) {\n this.viewportMetadata = metadata;\n this.frameRotationDegrees = null;\n }\n setIsMobileDevice(isMobileDevice) {\n this.isMobileDevice = isMobileDevice;\n this.frameRotationDegrees = null;\n }\n async prepareWatermark(config) {\n const watermarkConfig = config.watermark;\n if (!watermarkConfig) {\n return;\n }\n if (this.watermarkCanvas) {\n return;\n }\n const url2 = watermarkConfig.url;\n let opacity = DEFAULT_WATERMARK_OPACITY;\n if (typeof watermarkConfig.opacity === \"number\") {\n opacity = watermarkConfig.opacity;\n }\n const response = await this.fetchResource(url2, { mode: \"cors\" }).catch((error) => {\n this.logWatermarkError(url2, error);\n return null;\n });\n if (!response) {\n return;\n }\n if (!response.ok) {\n const httpError = new Error(`HTTP error! status: ${response.status}`);\n this.logWatermarkError(url2, httpError);\n return;\n }\n const blob = await response.blob().catch((error) => {\n this.logWatermarkError(url2, error);\n return null;\n });\n if (!blob) {\n return;\n }\n let isVectorImageFormat = false;\n if (url2.toLowerCase().endsWith(\".svg\")) {\n isVectorImageFormat = true;\n }\n if (blob.type === \"image/svg+xml\") {\n isVectorImageFormat = true;\n }\n if (isVectorImageFormat) {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX} Loading SVG watermark. 
Note: Some environments may not support SVG in createImageBitmap inside workers. If the watermark doesn't appear, consider using a PNG or a Data URL.`);\n }\n const imageBitmap = await this.createImageBitmap(blob).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n const bitmapError = new Error(`Failed to create ImageBitmap from blob (${blob.type}). Errors can happen with SVGs in workers or invalid formats: ${errorMessage}`);\n this.logWatermarkError(url2, bitmapError);\n return null;\n });\n if (!imageBitmap) {\n return;\n }\n let videoWidth = DEFAULT_WATERMARK_BASE_WIDTH;\n if (typeof config.width === \"number\") {\n videoWidth = config.width;\n }\n const { width: targetWidth, height: targetHeight } = calculateWatermarkTargetSize(videoWidth, imageBitmap.width, imageBitmap.height);\n const scaleFactor = targetWidth / imageBitmap.width;\n const canvas = new OffscreenCanvas(targetWidth, targetHeight);\n const context = requireNonNull(canvas.getContext(\"2d\", { willReadFrequently: false }), \"Failed to get watermark canvas context\");\n context.globalAlpha = opacity;\n context.drawImage(imageBitmap, 0, 0, targetWidth, targetHeight);\n context.globalAlpha = 1;\n imageBitmap.close();\n this.watermarkCanvas = canvas;\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Watermark prepared with pre-applied opacity`, {\n width: canvas.width,\n height: canvas.height,\n opacity,\n scaleFactor\n });\n }\n composeFrame(parameters) {\n const compositionPlan = this.getCompositionPlan(parameters);\n if (!compositionPlan.needsComposition) {\n return { frameToProcess: parameters.videoFrame, imageBitmap: null };\n }\n const dimensions = this.getValidFrameDimensions(parameters.videoFrame, compositionPlan.rotationDegrees);\n if (!dimensions) {\n return { frameToProcess: parameters.videoFrame, imageBitmap: null };\n }\n const width = dimensions.width;\n const height = dimensions.height;\n const context = this.ensureCompositionCanvas(width, height);\n 
context.clearRect(0, 0, width, height);\n this.drawVideoFrame({\n context,\n videoFrame: parameters.videoFrame,\n rotationDegrees: compositionPlan.rotationDegrees,\n width,\n height\n });\n this.applyOverlayIfNeeded(context, width, compositionPlan.shouldApplyOverlay, parameters.overlayConfig);\n this.applyWatermarkIfNeeded({\n context,\n videoWidth: width,\n videoHeight: height,\n needsWatermark: compositionPlan.needsWatermark,\n config: parameters.config\n });\n return this.buildCompositionResult(parameters.videoFrame);\n }\n getCompositionPlan(parameters) {\n const rotationDegrees = this.getFrameRotationDegrees(parameters.videoFrame, parameters.config);\n const shouldRotateFrame = rotationDegrees !== ROTATION_DEGREES_0;\n let needsWatermark = false;\n if (parameters.config.watermark && this.watermarkCanvas) {\n needsWatermark = true;\n }\n let needsComposition = false;\n if (parameters.shouldApplyOverlay) {\n needsComposition = true;\n }\n if (needsWatermark) {\n needsComposition = true;\n }\n if (shouldRotateFrame) {\n needsComposition = true;\n }\n return {\n rotationDegrees,\n shouldApplyOverlay: parameters.shouldApplyOverlay,\n needsWatermark,\n needsComposition\n };\n }\n getValidFrameDimensions(videoFrame, rotationDegrees) {\n const dimensions = this.getFrameDimensions(videoFrame, rotationDegrees);\n const width = dimensions.width;\n const height = dimensions.height;\n let hasInvalidDimensions = false;\n if (width <= 0) {\n hasInvalidDimensions = true;\n }\n if (height <= 0) {\n hasInvalidDimensions = true;\n }\n if (hasInvalidDimensions) {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX} Invalid video frame dimensions, skipping composition`, { width, height });\n return null;\n }\n return { width, height };\n }\n applyOverlayIfNeeded(context, videoWidth, shouldApplyOverlay, overlayConfig) {\n if (!(shouldApplyOverlay && overlayConfig)) {\n return;\n }\n if (!this.overlayCanvas) {\n this.overlayCanvas = this.createOverlayCanvas(overlayConfig.text);\n 
this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Overlay canvas created`, {\n overlayWidth: this.overlayCanvas.width,\n overlayHeight: this.overlayCanvas.height\n });\n }\n if (!this.overlayCanvas) {\n return;\n }\n const overlayPosition = this.getOverlayPosition(this.overlayCanvas.width, videoWidth);\n context.drawImage(this.overlayCanvas, overlayPosition.horizontal, overlayPosition.vertical);\n }\n /* Stamp the pre-rendered watermark canvas at its configured position; no-op unless the plan requested it and a watermark canvas plus config exist. */\n applyWatermarkIfNeeded(options) {\n const { context, videoWidth, videoHeight, needsWatermark, config } = options;\n const watermarkCanvas = this.watermarkCanvas;\n if (!(needsWatermark && watermarkCanvas && config.watermark)) {\n return;\n }\n const watermarkPosition = getWatermarkPosition({\n watermarkWidth: watermarkCanvas.width,\n watermarkHeight: watermarkCanvas.height,\n videoWidth,\n videoHeight,\n position: config.watermark.position\n });\n context.drawImage(watermarkCanvas, watermarkPosition.x, watermarkPosition.y);\n }\n /* Snapshot the composition canvas into a new VideoFrame, propagating the source frame's timestamp/duration when they are numeric. The backing ImageBitmap is returned alongside so the caller can close it. */\n buildCompositionResult(videoFrame) {\n const compositionCanvas = requireNonNull(this.compositionCanvas, \"Composition canvas must exist after ensureCompositionCanvas\");\n const imageBitmap = compositionCanvas.transferToImageBitmap();\n let frameInitialization = {};\n if (typeof videoFrame.timestamp === \"number\") {\n frameInitialization = {\n ...frameInitialization,\n timestamp: videoFrame.timestamp\n };\n }\n if (typeof videoFrame.duration === \"number\") {\n frameInitialization = {\n ...frameInitialization,\n duration: videoFrame.duration\n };\n }\n const frameToProcess = new VideoFrame(imageBitmap, frameInitialization);\n return { frameToProcess, imageBitmap };\n }\n /* Render the rounded-rect text badge used as the tab-hidden overlay; the canvas is sized from measured text plus padding. */\n createOverlayCanvas(text) {\n requireDefined(text, \"Overlay text is required\");\n const canvas = new OffscreenCanvas(1, 1);\n const context = requireNonNull(canvas.getContext(\"2d\"), \"Failed to get OffscreenCanvas context\");\n context.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n const textMetrics = context.measureText(text);\n const textWidth = 
textMetrics.width;\n const textHeight = OVERLAY_FONT_SIZE;\n const overlayWidth = Math.max(OVERLAY_MIN_WIDTH, textWidth + OVERLAY_PADDING * DOUBLE_VALUE);\n const overlayHeight = Math.max(OVERLAY_MIN_HEIGHT, textHeight + OVERLAY_PADDING * DOUBLE_VALUE);\n /* Resizing an OffscreenCanvas resets its 2d state, hence font/baseline are reassigned after this. */\n canvas.width = overlayWidth;\n canvas.height = overlayHeight;\n const redValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const greenValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const blueValue = OVERLAY_COLOR_CHANNEL_VALUE;\n const borderRadius = OVERLAY_BORDER_RADIUS;\n context.fillStyle = `rgba(${redValue}, ${greenValue}, ${blueValue}, ${OVERLAY_BACKGROUND_OPACITY})`;\n context.beginPath();\n context.roundRect(0, 0, overlayWidth, overlayHeight, borderRadius);\n context.fill();\n context.fillStyle = OVERLAY_TEXT_COLOR;\n context.font = `${OVERLAY_FONT_SIZE}px ${OVERLAY_FONT_FAMILY}`;\n context.textBaseline = \"middle\";\n context.textAlign = \"center\";\n const textHorizontalPosition = overlayWidth / DOUBLE_VALUE;\n const textVerticalPosition = overlayHeight / DOUBLE_VALUE;\n context.fillText(text, textHorizontalPosition, textVerticalPosition);\n return canvas;\n }\n /* Top-right placement with an OVERLAY_PADDING margin. */\n getOverlayPosition(overlayWidth, videoWidth) {\n return {\n horizontal: videoWidth - overlayWidth - OVERLAY_PADDING,\n vertical: OVERLAY_PADDING\n };\n }\n /* Lazily create (and recreate on size change) the shared composition canvas, caching its 2d context between frames. */\n ensureCompositionCanvas(width, height) {\n if (!this.compositionCanvas) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Composition canvas created`, {\n width,\n height\n });\n return this.compositionContext;\n }\n if (!this.compositionContext) {\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n return this.compositionContext;\n }\n const widthChanged = this.compositionCanvas.width !== 
width;\n const heightChanged = this.compositionCanvas.height !== height;\n let shouldResize = false;\n if (widthChanged) {\n shouldResize = true;\n }\n if (heightChanged) {\n shouldResize = true;\n }\n if (shouldResize) {\n this.compositionCanvas = new OffscreenCanvas(width, height);\n this.compositionContext = requireNonNull(this.compositionCanvas.getContext(\"2d\", { willReadFrequently: false }), COMPOSITION_CONTEXT_ERROR_MESSAGE);\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX} Composition canvas resized`, {\n width,\n height\n });\n return this.compositionContext;\n }\n return this.compositionContext;\n }\n /* Rotation is computed once per source and cached in frameRotationDegrees; the decision inputs are serialized and sent to the debug channel for postmortems. */\n getFrameRotationDegrees(videoFrame, config) {\n if (this.frameRotationDegrees !== null) {\n return this.frameRotationDegrees;\n }\n const rotation = this.determineFrameRotationDegrees(videoFrame, config);\n this.frameRotationDegrees = rotation;\n const rotationLog = JSON.stringify({\n rotationDegrees: rotation,\n configWidth: config.width,\n configHeight: config.height,\n settingsWidth: this.videoSettings?.width,\n settingsHeight: this.videoSettings?.height,\n facingMode: this.videoSettings?.facingMode,\n frameDisplayWidth: videoFrame.displayWidth,\n frameDisplayHeight: videoFrame.displayHeight\n });\n this.sendDebugLog(`${RECORDER_WORKER_LOG_PREFIX} Rotation decision`, rotationLog);\n return rotation;\n }\n /* Gather the device/viewport hints (facing mode, track rotation, orientation angles) that are present and delegate the actual decision to calculateFrameRotationDegrees. */\n determineFrameRotationDegrees(videoFrame, config) {\n const configWidth = config.width;\n const configHeight = config.height;\n let facingMode;\n let settingsRotation;\n if (this.videoSettings) {\n facingMode = this.videoSettings.facingMode;\n settingsRotation = this.videoSettings.rotation;\n }\n let orientationAngle;\n let windowOrientation;\n if (this.viewportMetadata) {\n orientationAngle = this.viewportMetadata.orientationAngle;\n windowOrientation = this.viewportMetadata.windowOrientation;\n }\n return calculateFrameRotationDegrees({\n isMobileDevice: this.isMobileDevice,\n targetWidth: configWidth,\n targetHeight: configHeight,\n frameWidth: 
videoFrame.displayWidth,\n frameHeight: videoFrame.displayHeight,\n facingMode,\n settingsRotation,\n orientationAngle,\n windowOrientation\n });\n }\n /* Output dimensions for a frame: width/height are swapped for 90/270-degree rotations. */\n getFrameDimensions(videoFrame, rotationDegrees) {\n let width = videoFrame.displayWidth;\n let height = videoFrame.displayHeight;\n let shouldSwapDimensions = false;\n if (rotationDegrees === ROTATION_DEGREES_90) {\n shouldSwapDimensions = true;\n }\n if (rotationDegrees === ROTATION_DEGREES_270) {\n shouldSwapDimensions = true;\n }\n if (shouldSwapDimensions) {\n width = videoFrame.displayHeight;\n height = videoFrame.displayWidth;\n }\n return { width, height };\n }\n /* Draw the source frame with the requested rotation. The transform is reset to identity before and after each rotated draw so later overlay/watermark draws are unrotated. */\n drawVideoFrame(parameters) {\n const { context, videoFrame, rotationDegrees, width, height } = parameters;\n const sourceWidth = videoFrame.displayWidth;\n const sourceHeight = videoFrame.displayHeight;\n context.setTransform(1, 0, 0, 1, 0, 0);\n if (rotationDegrees === ROTATION_DEGREES_90) {\n context.translate(width, 0);\n context.rotate(ROTATION_RADIANS_90);\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n context.setTransform(1, 0, 0, 1, 0, 0);\n return;\n }\n if (rotationDegrees === ROTATION_DEGREES_270) {\n context.translate(0, height);\n context.rotate(ROTATION_RADIANS_270);\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n context.setTransform(1, 0, 0, 1, 0, 0);\n return;\n }\n context.drawImage(videoFrame, 0, 0, sourceWidth, sourceHeight);\n }\n /* Log a watermark load/decode failure with a CORS hint; never throws, so a broken watermark cannot abort recording. */\n logWatermarkError(url2, error) {\n const errorMessage = extractErrorMessage(error);\n this.logger.error(`${RECORDER_WORKER_LOG_PREFIX} Failed to load watermark. This is often caused by CORS if the image is on another domain. Try using a Data URL (base64) or ensure the server has Access-Control-Allow-Origin: *.`, {\n url: url2,\n error: errorMessage\n });\n }\n}\n\n// src/core/processor/worker/stop-finalization.ts\nvar STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS = 500;\nvar STOP_PENDING_WRITES_POLL_INTERVAL_MILLISECONDS = 10;\nvar ERROR_STOP_PENDING_WRITES_TIMEOUT = \"stop.pending-writes-timeout\";\nfunction createDefaultNowMilliseconds() {\n return () => performance.now();\n}\nfunction createDefaultWaitMilliseconds() {\n return (milliseconds) => new Promise((resolve) => {\n globalThis.setTimeout(resolve, milliseconds);\n });\n}\n/* Poll getPendingWriteCount every STOP_PENDING_WRITES_POLL_INTERVAL_MILLISECONDS until it reaches zero; throws Error(ERROR_STOP_PENDING_WRITES_TIMEOUT) if writes have not drained within timeoutMilliseconds (default 500 ms). Clock and sleep are injectable for tests. */\nasync function waitForPendingWritesToDrain(dependencies) {\n let getNowMilliseconds = dependencies.getNowMilliseconds;\n if (!getNowMilliseconds) {\n getNowMilliseconds = createDefaultNowMilliseconds();\n }\n let waitMilliseconds = dependencies.waitMilliseconds;\n if (!waitMilliseconds) {\n waitMilliseconds = createDefaultWaitMilliseconds();\n }\n let timeoutMilliseconds = dependencies.timeoutMilliseconds;\n if (timeoutMilliseconds === undefined) {\n timeoutMilliseconds = STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS;\n }\n const startedAtMilliseconds = getNowMilliseconds();\n let pendingWriteCount = dependencies.getPendingWriteCount();\n while (pendingWriteCount > 0) {\n const elapsedMilliseconds = getNowMilliseconds() - startedAtMilliseconds;\n if (elapsedMilliseconds >= timeoutMilliseconds) {\n throw new Error(ERROR_STOP_PENDING_WRITES_TIMEOUT);\n }\n await waitMilliseconds(STOP_PENDING_WRITES_POLL_INTERVAL_MILLISECONDS);\n pendingWriteCount = dependencies.getPendingWriteCount();\n }\n}\n\n// src/core/processor/worker/stop-transition.ts\n/* Run the stop state machine: finalize, then complete; on failure attempt recovery and re-throw the original error; the stopping flag is always cleared. */\nasync function runStopTransition(dependencies) {\n await dependencies.finalizeStopSequence().then(() => dependencies.completeStop()).catch((error) => {\n return dependencies.recoverStopFailure().then(() => {\n throw error;\n });\n }).finally(() => {\n dependencies.clearStoppingFlag();\n });\n}\n\n// 
src/core/processor/worker/timestamp-manager.ts\nvar DEFAULT_FRAME_RATE = 30;\nvar DEFAULT_KEY_FRAME_INTERVAL_SECONDS = 5;\nvar MILLISECONDS_PER_SECOND2 = 1000;\nvar MICROSECONDS_PER_SECOND = 1e6;\nvar MAX_LEAD_SECONDS = 0.05;\nvar MAX_LAG_SECONDS = 0.1;\nvar MAX_DRIFT_CORRECTION_SECONDS = MAX_LAG_SECONDS;\nvar DRIFT_OFFSET_DECAY_FACTOR = 0.5;\nvar DRIFT_LOG_FRAME_INTERVAL = 90;\nvar RECORDER_WORKER_LOG_PREFIX2 = \"[RecorderWorker]\";\n\n/* Owns video frame timing: normalizes raw frame timestamps to a zero-based recording clock, keeps them monotonic, bounds audio/video skew, schedules key frames, and smooths source-switch discontinuities via a decaying drift offset. All timestamps are in seconds. */\nclass TimestampManager {\n frameRate = DEFAULT_FRAME_RATE;\n lastVideoTimestamp = 0;\n baseVideoTimestamp = null;\n frameCount = 0;\n lastKeyFrameTimestamp = 0;\n forceNextKeyFrame = false;\n driftOffset = 0;\n logger;\n getNowMilliseconds;\n constructor(dependencies) {\n this.logger = dependencies.logger;\n this.getNowMilliseconds = dependencies.getNowMilliseconds;\n }\n /* Reset all timing state for a new recording; non-positive or missing frameRate falls back to DEFAULT_FRAME_RATE. */\n reset(frameRate) {\n let resolvedFrameRate = DEFAULT_FRAME_RATE;\n if (typeof frameRate === \"number\" && frameRate > 0) {\n resolvedFrameRate = frameRate;\n }\n this.frameRate = resolvedFrameRate;\n this.lastVideoTimestamp = 0;\n this.baseVideoTimestamp = null;\n this.frameCount = 0;\n this.lastKeyFrameTimestamp = 0;\n this.forceNextKeyFrame = false;\n this.driftOffset = 0;\n }\n setFrameRate(frameRate) {\n this.frameRate = frameRate;\n }\n getFrameRate() {\n return this.frameRate;\n }\n getFrameCount() {\n return this.frameCount;\n }\n getLastVideoTimestamp() {\n return this.lastVideoTimestamp;\n }\n getBaseVideoTimestamp() {\n return this.baseVideoTimestamp;\n }\n /* Map a raw frame timestamp (VideoFrame.timestamp in microseconds, else wall clock) onto the recording timeline: the first frame establishes baseVideoTimestamp (and flushes queued visibility updates); pausedDuration is subtracted; regressions are replaced with previous + one frame interval; negative results clamp to 0. */\n calculateVideoFrameTimestamp(parameters) {\n if (this.frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n let rawTimestamp = this.getNowMilliseconds() / MILLISECONDS_PER_SECOND2;\n const hasTimestamp = typeof parameters.videoFrame.timestamp === \"number\" && parameters.videoFrame.timestamp !== null;\n if (hasTimestamp) {\n rawTimestamp = parameters.videoFrame.timestamp / MICROSECONDS_PER_SECOND;\n }\n if (this.baseVideoTimestamp === null) {\n this.baseVideoTimestamp = rawTimestamp;\n const logData = {\n baseVideoTimestamp: this.baseVideoTimestamp,\n recordingStartTime: parameters.recordingStartTime,\n difference: this.baseVideoTimestamp - parameters.recordingStartTime,\n pendingUpdates: parameters.pendingVisibilityUpdatesCount\n };\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} baseVideoTimestamp set`, logData);\n parameters.processPendingVisibilityUpdates();\n }\n if (this.baseVideoTimestamp === null) {\n throw new Error(\"Base video timestamp must be set\");\n }\n /* frameCount === 0 with a positive lastVideoTimestamp marks the first frame after a source switch: rebase so the new source continues from the previous timeline instead of jumping. */\n if (this.frameCount === 0 && this.lastVideoTimestamp > 0) {\n const originalBase = this.baseVideoTimestamp;\n const offset = rawTimestamp - originalBase;\n this.baseVideoTimestamp = rawTimestamp - this.lastVideoTimestamp;\n const frameTimestamp2 = this.lastVideoTimestamp;\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} First frame after source switch`, {\n rawTimestamp,\n originalBase,\n offset,\n adjustedBaseVideoTimestamp: this.baseVideoTimestamp,\n continuationTimestamp: this.lastVideoTimestamp,\n frameTimestamp: frameTimestamp2,\n isScreenCapture: parameters.isScreenCapture\n });\n return frameTimestamp2;\n }\n const normalizedTimestamp = rawTimestamp - this.baseVideoTimestamp - parameters.pausedDuration;\n let previousTimestamp = 0;\n if (this.lastVideoTimestamp > 0) {\n previousTimestamp = this.lastVideoTimestamp;\n }\n let frameTimestamp = normalizedTimestamp;\n if (normalizedTimestamp < previousTimestamp) {\n frameTimestamp = previousTimestamp + 1 / this.frameRate;\n }\n if (frameTimestamp < 0) {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX2} Negative frame timestamp detected, clamping to zero`, { frameTimestamp, normalizedTimestamp, previousTimestamp });\n return 0;\n }\n if (this.lastVideoTimestamp === 0) {\n this.lastVideoTimestamp = frameTimestamp;\n }\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX2} Frame timestamp calculation`, {\n rawTimestamp,\n baseVideoTimestamp: this.baseVideoTimestamp,\n normalizedTimestamp,\n previousTimestamp,\n frameTimestamp,\n lastVideoTimestamp: this.lastVideoTimestamp,\n isScreenCapture: parameters.isScreenCapture,\n frameCount: this.frameCount\n });\n return frameTimestamp;\n }\n /* Final per-frame timing: apply the decaying drift offset, clamp video lead/lag against audio to MAX_LEAD_SECONDS/MAX_LAG_SECONDS, enforce monotonicity (at least last + 1/fps), and decide key-frame placement from the configured interval, frame count, or an explicit force flag. */\n prepareFrameTiming(parameters) {\n const frameDuration = 1 / this.frameRate;\n let adjustedTimestamp = parameters.frameTimestamp + this.driftOffset;\n if (adjustedTimestamp - parameters.lastAudioTimestamp > MAX_LEAD_SECONDS) {\n adjustedTimestamp = parameters.lastAudioTimestamp + MAX_LEAD_SECONDS;\n }\n if (parameters.lastAudioTimestamp - adjustedTimestamp > MAX_LAG_SECONDS) {\n adjustedTimestamp = parameters.lastAudioTimestamp - MAX_LAG_SECONDS;\n }\n const monotonicTimestamp = this.lastVideoTimestamp + frameDuration;\n let finalTimestamp = adjustedTimestamp;\n if (finalTimestamp < monotonicTimestamp) {\n finalTimestamp = monotonicTimestamp;\n }\n let keyFrameIntervalSeconds = parameters.keyFrameIntervalSeconds;\n if (!(keyFrameIntervalSeconds > 0)) {\n keyFrameIntervalSeconds = DEFAULT_KEY_FRAME_INTERVAL_SECONDS;\n }\n let keyFrameIntervalFrames = Math.round(keyFrameIntervalSeconds * this.frameRate);\n if (keyFrameIntervalFrames < 1) {\n keyFrameIntervalFrames = 1;\n }\n const timeSinceLastKeyFrame = finalTimestamp - this.lastKeyFrameTimestamp;\n let isKeyFrame = false;\n if (this.forceNextKeyFrame) {\n isKeyFrame = true;\n }\n if (timeSinceLastKeyFrame >= keyFrameIntervalSeconds) {\n isKeyFrame = true;\n }\n if (this.frameCount % keyFrameIntervalFrames === 0) {\n isKeyFrame = true;\n }\n this.driftOffset *= DRIFT_OFFSET_DECAY_FACTOR;\n return {\n finalTimestamp,\n frameDuration,\n isKeyFrame\n };\n }\n /* Record an encoded frame: advance counters, remember key-frame time, and report A/V drift for logging every DRIFT_LOG_FRAME_INTERVAL frames while audio is active. */\n commitFrame(parameters) {\n this.frameCount += 1;\n this.lastVideoTimestamp = parameters.finalTimestamp;\n if (parameters.isKeyFrame) {\n this.lastKeyFrameTimestamp = parameters.finalTimestamp;\n this.forceNextKeyFrame = false;\n }\n let shouldLogDrift = false;\n let audioVideoDrift = 0;\n if (this.frameCount % DRIFT_LOG_FRAME_INTERVAL === 0 && parameters.audioProcessingActive) {\n audioVideoDrift = parameters.lastAudioTimestamp - this.lastVideoTimestamp;\n shouldLogDrift = true;\n }\n return {\n shouldLogDrift,\n audioVideoDrift,\n frameCount: this.frameCount,\n lastVideoTimestamp: this.lastVideoTimestamp\n };\n }\n /* Prepare timing for a camera/screen source switch: seed a clamped drift correction toward the audio clock, pick a continuation timestamp past both clocks, reset frameCount (triggering the rebase path above), and force a key frame. */\n handleSourceSwitch(lastAudioTimestamp) {\n if (this.baseVideoTimestamp === null) {\n throw new Error(\"Base video timestamp must be set for source switch\");\n }\n const minFrameDuration = 1 / this.frameRate;\n const rawDrift = lastAudioTimestamp - this.lastVideoTimestamp;\n this.driftOffset = clampValue(rawDrift, -MAX_DRIFT_CORRECTION_SECONDS, MAX_DRIFT_CORRECTION_SECONDS);\n const continuationTimestamp = Math.max(lastAudioTimestamp, this.lastVideoTimestamp) + minFrameDuration;\n const previousVideoTimestamp = this.lastVideoTimestamp;\n this.lastVideoTimestamp = continuationTimestamp;\n this.frameCount = 0;\n this.forceNextKeyFrame = true;\n return {\n continuationTimestamp,\n previousVideoTimestamp,\n minFrameDuration,\n rawDrift,\n driftOffset: this.driftOffset\n };\n }\n}\n/* Clamp value into [min, max]. */\nfunction clampValue(value, min, max) {\n return Math.max(min, Math.min(max, value));\n}\n\n// src/core/processor/worker/types.ts\nvar WORKER_MESSAGE_TYPE_PROBE = \"probe\";\nvar WORKER_MESSAGE_TYPE_AUDIO_CHUNK = \"audioChunk\";\nvar WORKER_RESPONSE_TYPE_PROBE_RESULT = \"probeResult\";\nvar WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR = \"f32-planar\";\n\n// src/core/processor/worker/visibility-tracker.ts\nvar MILLISECONDS_PER_SECOND3 = 1000;\nvar OVERLAY_LOG_FRAME_INTERVAL = 90;\nvar RECORDER_WORKER_LOG_PREFIX3 = \"[RecorderWorker]\";\nvar INITIAL_INTERVAL_INDEX = 0;\nvar INITIAL_LAST_OVERLAY_TIMESTAMP = -1;\n\n/* Tracks tab-hidden intervals on the recording timeline so the compositor can overlay a badge on frames captured while the tab was hidden; updates arriving before the timeline baseline exists are queued. Overlay is never applied to screen capture. */\nclass VisibilityTracker {\n hiddenIntervals = [];\n currentHiddenIntervalStart = null;\n pendingVisibilityUpdates = [];\n recordingStartTime = 0;\n isScreenCapture = false;\n intervalCursor = INITIAL_INTERVAL_INDEX;\n lastOverlayTimestamp = INITIAL_LAST_OVERLAY_TIMESTAMP;\n logger;\n constructor(dependencies) {\n this.logger = dependencies.logger;\n }\n 
reset(recordingStartTime, isScreenCapture) {\n this.hiddenIntervals = [];\n this.currentHiddenIntervalStart = null;\n this.pendingVisibilityUpdates = [];\n this.recordingStartTime = recordingStartTime;\n this.isScreenCapture = isScreenCapture;\n this.intervalCursor = INITIAL_INTERVAL_INDEX;\n this.lastOverlayTimestamp = INITIAL_LAST_OVERLAY_TIMESTAMP;\n }\n setRecordingStartTime(recordingStartTime) {\n this.recordingStartTime = recordingStartTime;\n }\n setIsScreenCapture(isScreenCapture) {\n this.isScreenCapture = isScreenCapture;\n }\n getPendingUpdatesCount() {\n return this.pendingVisibilityUpdates.length;\n }\n /* True when the frame at `timestamp` should carry the hidden-tab overlay: either an open hidden interval covers it, or a completed interval does. Logs a sample every OVERLAY_LOG_FRAME_INTERVAL frames. */\n shouldApplyOverlay(timestamp, frameCount) {\n if (this.isScreenCapture) {\n return false;\n }\n const shouldApplyCurrentInterval = this.currentHiddenIntervalStart !== null && timestamp >= this.currentHiddenIntervalStart;\n let shouldApply = false;\n if (shouldApplyCurrentInterval) {\n shouldApply = true;\n }\n if (!shouldApplyCurrentInterval) {\n shouldApply = this.shouldApplyCompletedIntervalOverlay(timestamp);\n }\n if (frameCount % OVERLAY_LOG_FRAME_INTERVAL === 0) {\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Overlay check`, {\n timestamp,\n shouldApply,\n frameCount,\n intervalsCount: this.hiddenIntervals.length,\n intervalCursor: this.intervalCursor\n });\n }\n return shouldApply;\n }\n /* Scan completed hidden intervals for one containing `timestamp`. intervalCursor makes the scan amortized O(1) for monotonically increasing timestamps; it rewinds when timestamps go backwards. */\n shouldApplyCompletedIntervalOverlay(timestamp) {\n if (this.hiddenIntervals.length === 0) {\n return false;\n }\n if (this.lastOverlayTimestamp !== INITIAL_LAST_OVERLAY_TIMESTAMP && timestamp < this.lastOverlayTimestamp) {\n this.intervalCursor = INITIAL_INTERVAL_INDEX;\n }\n this.lastOverlayTimestamp = timestamp;\n let activeCursor = this.intervalCursor;\n while (activeCursor < this.hiddenIntervals.length) {\n const interval = this.hiddenIntervals[activeCursor];\n if (timestamp < interval.start) {\n break;\n }\n if (timestamp <= interval.end) {\n this.intervalCursor = activeCursor;\n return true;\n }\n activeCursor += 1;\n }\n this.intervalCursor = activeCursor;\n return false;\n }\n /* Visibility change from the page: queued until the video timeline baseline exists, otherwise applied immediately. `timestamp` is in milliseconds. */\n handleUpdateVisibility(isHidden, timestamp, hasBaseVideoTimestamp, pausedDuration) {\n if (!hasBaseVideoTimestamp) {\n this.pendingVisibilityUpdates = [\n ...this.pendingVisibilityUpdates,\n { isHidden, timestamp }\n ];\n return;\n }\n this.processVisibilityUpdate(isHidden, timestamp, pausedDuration);\n }\n /* Replay updates queued before the baseline existed, in arrival order. */\n flushPendingUpdates(pausedDuration) {\n if (this.pendingVisibilityUpdates.length === 0) {\n return;\n }\n for (const update of this.pendingVisibilityUpdates) {\n this.processVisibilityUpdate(update.isHidden, update.timestamp, pausedDuration);\n }\n this.pendingVisibilityUpdates = [];\n }\n /* Convert the millisecond wall-clock timestamp to recording-relative seconds (minus paused time), then open a hidden interval on hide and close/record it on show; zero-or-negative-length intervals are discarded. */\n processVisibilityUpdate(isHidden, timestamp, pausedDuration) {\n const timestampSeconds = timestamp / MILLISECONDS_PER_SECOND3;\n const normalizedTimestamp = timestampSeconds - this.recordingStartTime - pausedDuration;\n if (isHidden && this.currentHiddenIntervalStart === null) {\n this.currentHiddenIntervalStart = Math.max(0, normalizedTimestamp);\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Started hidden interval`, {\n start: this.currentHiddenIntervalStart\n });\n }\n if (isHidden) {\n return;\n }\n if (this.currentHiddenIntervalStart === null) {\n return;\n }\n const endTimestamp = Math.max(0, normalizedTimestamp);\n if (endTimestamp > this.currentHiddenIntervalStart) {\n const interval = {\n start: this.currentHiddenIntervalStart,\n end: endTimestamp\n };\n this.hiddenIntervals = [...this.hiddenIntervals, interval];\n this.logger.debug(`${RECORDER_WORKER_LOG_PREFIX3} Completed hidden interval`, {\n interval,\n duration: endTimestamp - this.currentHiddenIntervalStart,\n totalIntervals: this.hiddenIntervals.length\n });\n } else {\n this.logger.warn(`${RECORDER_WORKER_LOG_PREFIX3} Invalid interval (end <= start), discarding`);\n }\n this.currentHiddenIntervalStart = null;\n }\n}\n\n// src/core/processor/worker/recorder-worker.ts\nvar CHUNK_SIZE = 16 * 1024 * 1024;\nvar DEFAULT_OUTPUT_FORMAT = \"mp4\";\nvar MILLISECONDS_PER_SECOND4 
= 1000;\nvar ERROR_AUDIO_BITRATE_INVALID = \"Audio bitrate must be greater than zero\";\nvar ERROR_AUDIO_SAMPLE_RATE_INVALID = \"Audio sample rate must be greater than zero\";\nvar ERROR_AUDIO_CHANNELS_INVALID = \"Audio channels must be greater than zero\";\nvar ERROR_AUDIO_FRAMES_INVALID = \"Audio frames must be greater than zero\";\nvar STEREO_CHANNEL_COUNT = 2;\nvar AUDIO_SAMPLE_AVERAGE_SCALE = 0.5;\nvar STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS2 = 500;\nvar MP4_FAST_START_DISABLED = false;\nvar VIDEO_LATENCY_MODE_REALTIME = \"realtime\";\nvar VIDEO_CONTENT_HINT_MOTION = \"motion\";\nvar VIDEO_HARDWARE_ACCELERATION_PREFERENCE = \"prefer-hardware\";\n\n/* Worker-side recording engine: receives control messages from the main thread, pulls video frames and audio data, composes/encodes them into a streamed MP4, and posts chunks and state changes back via postMessage. */\nclass RecorderWorker {\n output = null;\n videoSource = null;\n audioSource = null;\n videoProcessor = null;\n audioProcessor = null;\n config = null;\n videoProcessingActive = false;\n isStopping = false;\n isFinalized = false;\n bufferTracker;\n audioState;\n timestampManager;\n frameCompositor;\n overlayConfig = null;\n visibilityTracker;\n recordingStartTime = 0;\n isScreenCapture = false;\n totalSize = 0;\n expectedAudioChannels = null;\n expectedAudioSampleRate = null;\n pendingWriteCount = 0;\n /* Wire up the collaborators (buffer tracking, audio state, visibility, timestamps, compositor) and register the worker message handler. */\n constructor() {\n this.bufferTracker = new BufferTracker({\n getBufferSize: () => this.totalSize,\n onBufferUpdate: (size, formatted) => {\n const response = {\n type: \"bufferUpdate\",\n size,\n formatted\n };\n self.postMessage(response);\n },\n setInterval: (handler, timeout) => self.setInterval(handler, timeout),\n clearInterval: (intervalId) => self.clearInterval(intervalId)\n });\n this.audioState = new AudioState({\n getNowMilliseconds: () => performance.now()\n });\n this.visibilityTracker = new VisibilityTracker({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data)\n }\n });\n this.timestampManager = new TimestampManager({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data)\n },\n getNowMilliseconds: () => performance.now()\n });\n this.frameCompositor = new FrameCompositor({\n logger: {\n debug: (message, data) => logger.debug(message, data),\n warn: (message, data) => logger.warn(message, data),\n error: (message, data) => logger.error(message, data)\n },\n fetchResource: (input, init) => fetch(input, init),\n createImageBitmap: (image) => createImageBitmap(image),\n sendDebugLog: (_message, _payload) => {\n return;\n }\n });\n self.addEventListener(\"message\", this.handleMessage);\n }\n /* Messages are dropped while a stop is in progress or after finalization. */\n shouldIgnoreMessage() {\n if (this.isStopping) {\n return true;\n }\n if (this.isFinalized) {\n return true;\n }\n return false;\n }\n /* Fire-and-forget async entry point: failures are logged and reported to the main thread instead of becoming unhandled rejections. */\n handleAsyncOperation(operation, context) {\n operation.catch((error) => {\n logger.error(`[RecorderWorker] Error in ${context}:`, error);\n this.sendError(error);\n });\n }\n /* Report which required worker APIs exist so the main thread can decide whether worker-based recording is viable. */\n handleProbe() {\n const response = {\n type: WORKER_RESPONSE_TYPE_PROBE_RESULT,\n hasMediaStreamTrackProcessor: typeof MediaStreamTrackProcessor !== \"undefined\",\n hasVideoFrame: typeof VideoFrame !== \"undefined\",\n hasAudioData: typeof AudioData !== \"undefined\",\n hasOffscreenCanvas: typeof OffscreenCanvas !== \"undefined\",\n hasCreateImageBitmap: typeof createImageBitmap !== \"undefined\",\n hasReadableStream: typeof ReadableStream !== \"undefined\"\n };\n self.postMessage(response);\n }\n /* Single dispatch point for all main-thread messages; unknown types are surfaced as errors. */\n handleMessage = (event) => {\n const message = event.data;\n logger.debug(\"[RecorderWorker] Received message:\", { type: message.type });\n switch (message.type) {\n case WORKER_MESSAGE_TYPE_PROBE:\n this.handleProbe();\n return;\n case \"start\":\n this.handleStartMessage(message);\n return;\n case \"pause\":\n this.handlePause();\n return;\n case \"resume\":\n this.handleResume();\n return;\n case \"stop\":\n this.handleStopMessage();\n return;\n case \"toggleMute\":\n this.handleToggleMute();\n return;\n case WORKER_MESSAGE_TYPE_AUDIO_CHUNK:\n this.handleAudioChunk(message);\n return;\n case \"switchSource\":\n this.handleSwitchSourceMessage(message);\n return;\n case \"updateFps\":\n this.handleUpdateFps(message.fps);\n return;\n case \"updateVisibility\":\n this.visibilityTracker.handleUpdateVisibility(message.isHidden, message.timestamp, this.timestampManager.getBaseVideoTimestamp() !== null, this.audioState.getPausedDuration());\n return;\n case \"updateSourceType\":\n this.handleUpdateSourceType(message.isScreenCapture);\n return;\n default:\n this.sendError(new Error(`Unknown message type: ${message.type}`));\n }\n };\n /* Unpack optional track/stream handles and run handleStart with centralized error reporting; ignored once stopping/finalized. */\n handleStartMessage(message) {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] start ignored (stopping/finalized)\");\n return;\n }\n let videoTrack = null;\n if (message.videoTrack) {\n videoTrack = message.videoTrack;\n }\n let videoStream = null;\n if (message.videoStream) {\n videoStream = message.videoStream;\n }\n this.handleAsyncOperation(this.handleStart(message, videoTrack, videoStream), \"handleStart\");\n }\n handleStopMessage() {\n if (this.shouldIgnoreMessage()) {\n logger.debug(\"[RecorderWorker] stop ignored (stopping/finalized)\");\n return;\n }\n this.handleAsyncOperation(this.handleStop(), \"handleStop\");\n }\n handleSwitchSourceMessage(message) {\n let videoTrack = null;\n if (message.videoTrack) {\n videoTrack = message.videoTrack;\n }\n let videoStream = null;\n if (message.videoStream) {\n videoStream = message.videoStream;\n }\n this.handleAsyncOperation(this.handleSwitchSource(videoTrack, videoStream), \"handleSwitchSource\");\n }\n /* Reject obviously invalid transcode configs up front; optional numeric fields are only validated when present. */\n validateConfig(config) {\n requireDefined(config, \"Transcode config is required\");\n if (config.width !== undefined && config.width <= 0) {\n throw new Error(\"Video width must be greater than zero\");\n }\n if (config.height !== undefined && config.height <= 0) {\n throw new Error(\"Video height must be greater than zero\");\n }\n if (config.fps !== undefined && config.fps <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (config.bitrate !== 
undefined && typeof config.bitrate === \"number\" && config.bitrate <= 0) {\n throw new Error(\"Bitrate must be greater than zero\");\n }\n if (config.keyFrameInterval <= 0) {\n throw new Error(\"Key frame interval must be greater than zero\");\n }\n }\n /* Only MP4 output is implemented in the worker path. */\n validateFormat(format) {\n if (format !== \"mp4\") {\n throw new Error(`Format ${format} is not yet supported in worker. Only MP4 is currently supported.`);\n }\n }\n /* Reset all per-recording state (timing, audio, compositor, counters) before a new session. */\n initializeRecordingState(config) {\n this.config = config;\n this.timestampManager.reset(config.fps);\n this.audioState.reset();\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.pendingWriteCount = 0;\n this.videoProcessingActive = false;\n this.frameCompositor.reset();\n this.recordingStartTime = 0;\n this.visibilityTracker.reset(this.recordingStartTime, this.isScreenCapture);\n }\n /* Store the overlay config and establish the recording start time (seconds); falls back to the worker clock when the main thread did not supply one. */\n setupOverlayConfig(overlayConfig) {\n let nextOverlayConfig = null;\n if (overlayConfig) {\n nextOverlayConfig = {\n enabled: overlayConfig.enabled,\n text: overlayConfig.text\n };\n }\n this.overlayConfig = nextOverlayConfig;\n let recordingStartTimeSeconds = performance.now() / MILLISECONDS_PER_SECOND4;\n if (overlayConfig && overlayConfig.recordingStartTime !== undefined) {\n recordingStartTimeSeconds = overlayConfig.recordingStartTime / MILLISECONDS_PER_SECOND4;\n }\n this.recordingStartTime = recordingStartTimeSeconds;\n this.visibilityTracker.setRecordingStartTime(this.recordingStartTime);\n const logData = {\n hasOverlayConfig: !!this.overlayConfig,\n overlayEnabled: this.overlayConfig?.enabled,\n overlayText: this.overlayConfig?.text,\n recordingStartTime: this.recordingStartTime\n };\n logger.debug(\"[RecorderWorker] Overlay config initialized\", logData);\n }\n /* Create the chunked MP4 Output whose StreamTarget forwards each written chunk to the main thread. */\n createOutput() {\n const writable = new WritableStream({\n write: (chunk) => this.handleOutputChunkWrite(chunk)\n });\n this.output = new Output({\n format: new Mp4OutputFormat({\n fastStart: MP4_FAST_START_DISABLED\n }),\n target: new StreamTarget(writable, {\n chunked: true,\n chunkSize: CHUNK_SIZE\n })\n });\n }\n /* Guard against going negative if decrements ever outnumber increments. */\n decrementPendingWriteCount() {\n this.pendingWriteCount -= 1;\n if (this.pendingWriteCount < 0) {\n this.pendingWriteCount = 0;\n }\n }\n /* Track in-flight writes (so stop can wait for them to drain) while posting each chunk to the main thread; the count is decremented on success and failure alike. */\n handleOutputChunkWrite(chunk) {\n this.pendingWriteCount += 1;\n const writeOperation = Promise.resolve().then(() => {\n this.sendChunk(chunk.data, chunk.position);\n });\n return writeOperation.then(() => {\n this.decrementPendingWriteCount();\n }, (error) => {\n this.decrementPendingWriteCount();\n throw error;\n });\n }\n /* Build the realtime, hardware-preferred video sample source from the config and register it as the output's video track. */\n createVideoSource(config) {\n const fps = this.timestampManager.getFrameRate();\n const keyFrameIntervalSeconds = config.keyFrameInterval;\n const videoSourceOptions = {\n codec: config.codec,\n width: config.width,\n height: config.height,\n sizeChangeBehavior: \"contain\",\n alpha: \"discard\",\n bitrateMode: \"variable\",\n latencyMode: VIDEO_LATENCY_MODE_REALTIME,\n contentHint: VIDEO_CONTENT_HINT_MOTION,\n hardwareAcceleration: VIDEO_HARDWARE_ACCELERATION_PREFERENCE,\n keyFrameInterval: keyFrameIntervalSeconds,\n bitrate: this.deserializeBitrate(config.bitrate)\n };\n this.videoSource = new VideoSampleSource(videoSourceOptions);\n const output = requireNonNull(this.output, \"Output must be initialized before adding video track\");\n const trackOptions = {};\n if (fps !== undefined) {\n trackOptions.frameRate = fps;\n }\n output.addVideoTrack(this.videoSource, trackOptions);\n }\n /* Audio path for message-delivered audio chunks: silently skipped when audio config/bitrate/codec are absent, validated when present; records the expected channel count and sample rate for later chunks. */\n setupAudioSource(audioConfig, config) {\n if (!audioConfig) {\n return;\n }\n if (!config.audioBitrate) {\n return;\n }\n if (!config.audioCodec) {\n return;\n }\n if (config.audioBitrate <= 0) {\n throw new Error(ERROR_AUDIO_BITRATE_INVALID);\n }\n if (audioConfig.sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (audioConfig.numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n this.expectedAudioChannels = audioConfig.numberOfChannels;\n this.expectedAudioSampleRate = audioConfig.sampleRate;\n this.audioSource = new AudioSampleSource({\n codec: config.audioCodec,\n bitrate: config.audioBitrate,\n bitrateMode: \"variable\"\n });\n const output = requireNonNull(this.output, \"Output must be initialized before adding audio track\");\n output.addAudioTrack(this.audioSource);\n this.audioState.setProcessingActive(true);\n }\n /* Audio path for a transferred ReadableStream: channel/sample-rate expectations are learned from the stream, and background audio pumping starts immediately. */\n setupAudioStream(audioStream, config) {\n if (!config.audioBitrate) {\n return;\n }\n if (!config.audioCodec) {\n return;\n }\n if (config.audioBitrate <= 0) {\n throw new Error(ERROR_AUDIO_BITRATE_INVALID);\n }\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.audioSource = new AudioSampleSource({\n codec: config.audioCodec,\n bitrate: config.audioBitrate,\n bitrateMode: \"variable\"\n });\n const output = requireNonNull(this.output, \"Output must be initialized before adding audio track\");\n output.addAudioTrack(this.audioSource);\n this.audioProcessor = audioStream.getReader();\n this.audioState.setProcessingActive(true);\n this.processAudioData();\n }\n /* Full recording start sequence: validate config, tear down any previous output, reset state, configure compositor/overlay, create the MP4 output and video/audio sources, then start the output and report readiness. */\n async handleStart(message, videoTrack, videoStream) {\n const audioConfig = message.audioConfig;\n let audioStream = null;\n if (message.audioStream) {\n audioStream = message.audioStream;\n }\n const config = message.config;\n const overlayConfig = message.overlayConfig;\n this.validateConfig(config);\n logger.debug(\"[RecorderWorker] handleStart called\", {\n hasVideoTrack: !!videoTrack,\n hasVideoStream: !!videoStream,\n hasAudioStream: !!audioStream,\n hasAudioConfig: !!audioConfig,\n config: {\n width: config.width,\n height: config.height,\n fps: config.fps,\n bitrate: config.bitrate\n },\n hasOverlayConfig: !!overlayConfig,\n overlayConfig\n });\n this.isStopping = false;\n this.isFinalized = false;\n if (this.output) {\n logger.debug(\"[RecorderWorker] Cleaning up existing output\");\n await this.cleanup();\n }\n this.initializeRecordingState(config);\n if (message.videoSettings) {\n this.frameCompositor.setVideoSettings(message.videoSettings);\n } else {\n this.frameCompositor.setVideoSettings(null);\n }\n if (message.viewportMetadata) {\n this.frameCompositor.setViewportMetadata(message.viewportMetadata);\n } else {\n this.frameCompositor.setViewportMetadata(null);\n }\n this.frameCompositor.setIsMobileDevice(message.isMobileDevice === true);\n this.setupOverlayConfig(overlayConfig);\n let format = config.format;\n if (!format) {\n format = DEFAULT_OUTPUT_FORMAT;\n }\n this.validateFormat(format);\n this.createOutput();\n this.createVideoSource(config);\n if (videoStream) {\n this.setupVideoProcessingFromStream(videoStream);\n }\n if (!videoStream && videoTrack) {\n this.setupVideoProcessing(videoTrack);\n }\n if (audioStream) {\n this.setupAudioStream(audioStream, config);\n } else {\n this.setupAudioSource(audioConfig, config);\n }\n const output = requireNonNull(this.output, \"Output must be initialized before starting\");\n if (this.config?.watermark) {\n /* NOTE(review): prepareWatermark is async and its promise is not awaited here; load failures appear to be handled inside FrameCompositor, but confirm there is no unhandled-rejection path on fetch failure. */\n this.frameCompositor.prepareWatermark(this.config);\n }\n await output.start();\n this.bufferTracker.start();\n this.sendReady();\n this.sendStateChange(\"recording\");\n }\n /* Pull frames from a MediaStreamTrack via MediaStreamTrackProcessor (worker-only API). */\n setupVideoProcessing(videoTrack) {\n if (!this.videoSource) {\n return;\n }\n if (typeof MediaStreamTrackProcessor === \"undefined\") {\n throw new Error(\"MediaStreamTrackProcessor is not available in worker\");\n }\n const processor = new MediaStreamTrackProcessor({ track: videoTrack });\n this.videoProcessor = processor.readable.getReader();\n this.videoProcessingActive = true;\n this.processVideoFrames();\n }\n /* Pull frames from a transferred ReadableStream of VideoFrames. */\n setupVideoProcessingFromStream(videoStream) {\n if (!this.videoSource) {\n return;\n }\n this.videoProcessor = videoStream.getReader();\n this.videoProcessingActive = true;\n this.processVideoFrames();\n }\n /* While paused, keep the reader flowing but discard (close) each frame; returns false when the stream ends. */\n async handlePausedVideoFrame() {\n if (!this.videoProcessor) {\n return false;\n }\n const pausedResult = await this.videoProcessor.read();\n if (pausedResult.done) {\n return false;\n }\n if (pausedResult.value) {\n pausedResult.value.close();\n }\n return true;\n }\n async 
processVideoFrame(videoFrame) {\n const videoSource = requireInitialized(this.videoSource, \"Video source\");\n const config = requireInitialized(this.config, \"Transcode config\");\n const pausedDuration = this.audioState.getPausedDuration();\n const frameTimestamp = this.timestampManager.calculateVideoFrameTimestamp({\n videoFrame,\n pausedDuration,\n recordingStartTime: this.recordingStartTime,\n pendingVisibilityUpdatesCount: this.visibilityTracker.getPendingUpdatesCount(),\n processPendingVisibilityUpdates: () => {\n this.visibilityTracker.flushPendingUpdates(pausedDuration);\n },\n isScreenCapture: this.isScreenCapture\n });\n const overlayConfig = this.overlayConfig;\n let shouldApplyOverlay = false;\n if (overlayConfig?.enabled && !this.isScreenCapture) {\n shouldApplyOverlay = this.visibilityTracker.shouldApplyOverlay(frameTimestamp, this.timestampManager.getFrameCount());\n }\n const compositionResult = this.frameCompositor.composeFrame({\n videoFrame,\n overlayConfig,\n shouldApplyOverlay,\n config\n });\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n const frameTiming = this.timestampManager.prepareFrameTiming({\n frameTimestamp,\n keyFrameIntervalSeconds: config.keyFrameInterval,\n lastAudioTimestamp\n });\n const sample = new VideoSample(compositionResult.frameToProcess, {\n timestamp: frameTiming.finalTimestamp,\n duration: frameTiming.frameDuration\n });\n let videoSampleOptions;\n if (frameTiming.isKeyFrame) {\n videoSampleOptions = { keyFrame: true };\n }\n const addError = await videoSource.add(sample, videoSampleOptions).then(() => null).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add video frame: ${errorMessage}`));\n return error;\n });\n sample.close();\n if (!addError) {\n const commitResult = this.timestampManager.commitFrame({\n finalTimestamp: frameTiming.finalTimestamp,\n isKeyFrame: frameTiming.isKeyFrame,\n lastAudioTimestamp,\n 
audioProcessingActive: this.audioState.isActive(),\n isScreenCapture: this.isScreenCapture\n });\n if (commitResult.shouldLogDrift) {\n logger.debug(\"[RecorderWorker] AV drift metrics\", {\n frameCount: commitResult.frameCount,\n lastAudioTimestamp,\n lastVideoTimestamp: commitResult.lastVideoTimestamp,\n audioVideoDrift: commitResult.audioVideoDrift,\n isScreenCapture: this.isScreenCapture\n });\n }\n }\n if (compositionResult.imageBitmap) {\n compositionResult.imageBitmap.close();\n }\n if (compositionResult.frameToProcess !== videoFrame) {\n compositionResult.frameToProcess.close();\n }\n videoFrame.close();\n }\n async processVideoFrames() {\n if (!(this.videoProcessor && this.videoSource)) {\n return;\n }\n while (this.videoProcessingActive && !this.isStopping) {\n if (this.audioState.getIsPaused()) {\n const shouldContinue = await this.handlePausedVideoFrame();\n if (!shouldContinue) {\n break;\n }\n continue;\n }\n const result = await this.videoProcessor.read();\n if (result.done) {\n break;\n }\n const videoFrame = result.value;\n if (!videoFrame) {\n continue;\n }\n await this.processVideoFrame(videoFrame).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n logger.error(\"[RecorderWorker] Error processing video frame\", errorMessage);\n videoFrame.close();\n });\n }\n }\n handlePausedAudioData(audioData) {\n audioData.close();\n }\n createAudioBuffer(audioData) {\n const numberOfFrames = audioData.numberOfFrames;\n if (numberOfFrames <= 0) {\n throw new Error(ERROR_AUDIO_FRAMES_INVALID);\n }\n const numberOfChannels = audioData.numberOfChannels;\n if (numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n const audioBuffer = new Float32Array(numberOfFrames * numberOfChannels);\n let channelIndex = 0;\n while (channelIndex < numberOfChannels) {\n const startIndex = channelIndex * numberOfFrames;\n const endIndex = startIndex + numberOfFrames;\n const channelBuffer = audioBuffer.subarray(startIndex, endIndex);\n 
audioData.copyTo(channelBuffer, { planeIndex: channelIndex });\n channelIndex += 1;\n }\n return audioBuffer;\n }\n createAudioSample(audioBuffer, audioTimestamp, sampleRate, numberOfChannels) {\n if (sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n let bufferToWrite = audioBuffer;\n if (this.audioState.getIsMuted()) {\n bufferToWrite = new Float32Array(audioBuffer.length);\n }\n return new AudioSample({\n data: bufferToWrite,\n format: WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR,\n numberOfChannels,\n sampleRate,\n timestamp: audioTimestamp\n });\n }\n async processAudioSample(audioData, audioSample, audioTimestamp, duration) {\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n this.audioState.updateLastAudioTimestamp(audioTimestamp, duration);\n audioSample.close();\n audioData.close();\n }\n async processAudioData() {\n if (!(this.audioProcessor && this.audioSource)) {\n return;\n }\n while (this.audioState.isActive() && !this.isStopping) {\n const result = await this.audioProcessor.read();\n if (result.done) {\n this.audioState.setProcessingActive(false);\n break;\n }\n const audioData = result.value;\n if (this.shouldSkipAudioData(audioData)) {\n continue;\n }\n const audioFormat = this.getAudioDataFormat(audioData);\n if (!audioFormat) {\n continue;\n }\n const audioBuffer = this.createAudioBuffer(audioData);\n const normalized = this.normalizeAudioBufferForFormat(audioBuffer, audioFormat);\n const duration = audioFormat.numberOfFrames / audioFormat.sampleRate;\n const audioTimestamp = this.audioState.getLastAudioTimestamp();\n const audioSample = this.createAudioSample(normalized.buffer, audioTimestamp, audioFormat.sampleRate, 
normalized.numberOfChannels);\n await this.processAudioSample(audioData, audioSample, audioTimestamp, duration);\n }\n }\n handleAudioChunk(message) {\n this.handleAsyncOperation(this.processAudioChunk(message), \"handleAudioChunk\");\n }\n async processAudioChunk(message) {\n if (this.shouldIgnoreMessage()) {\n return;\n }\n if (!this.audioSource) {\n return;\n }\n if (!this.audioState.isActive()) {\n return;\n }\n if (this.audioState.getIsPaused()) {\n return;\n }\n if (message.frames <= 0) {\n throw new Error(ERROR_AUDIO_FRAMES_INVALID);\n }\n if (message.sampleRate <= 0) {\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n if (message.numberOfChannels <= 0) {\n throw new Error(ERROR_AUDIO_CHANNELS_INVALID);\n }\n this.setExpectedAudioFormat(message.sampleRate, message.numberOfChannels);\n if (this.expectedAudioSampleRate !== null && message.sampleRate !== this.expectedAudioSampleRate) {\n logger.warn(\"[RecorderWorker] Audio sample rate changed\", {\n expectedSampleRate: this.expectedAudioSampleRate,\n receivedSampleRate: message.sampleRate\n });\n return;\n }\n let audioBuffer = message.data;\n let numberOfChannels = message.numberOfChannels;\n if (this.expectedAudioChannels !== null) {\n const normalized = this.normalizeAudioBuffer(audioBuffer, message.frames, numberOfChannels, this.expectedAudioChannels);\n audioBuffer = normalized.buffer;\n numberOfChannels = normalized.numberOfChannels;\n }\n const expectedSamples = message.frames * numberOfChannels;\n if (audioBuffer.length < expectedSamples) {\n throw new Error(\"Audio buffer length is shorter than expected\");\n }\n const sampleRate = message.sampleRate;\n const duration = message.frames / sampleRate;\n const audioTimestamp = this.audioState.getAudioTimestamp(message.timestamp);\n if (this.audioState.getIsMuted()) {\n audioBuffer = new Float32Array(audioBuffer.length);\n }\n const audioSample = new AudioSample({\n data: audioBuffer,\n format: WORKER_AUDIO_SAMPLE_FORMAT_F32_PLANAR,\n 
numberOfChannels,\n sampleRate,\n timestamp: audioTimestamp\n });\n const audioSource = requireInitialized(this.audioSource, \"Audio source\");\n await audioSource.add(audioSample).catch((error) => {\n const errorMessage = extractErrorMessage(error);\n this.sendError(new Error(`Failed to add audio sample: ${errorMessage}`));\n });\n this.audioState.updateLastAudioTimestamp(audioTimestamp, duration);\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n logger.debug(\"[RecorderWorker] Audio sample processed\", {\n lastAudioTimestamp,\n duration,\n sampleRate,\n numberOfFrames: message.frames\n });\n audioSample.close();\n }\n shouldSkipAudioData(audioData) {\n if (!audioData) {\n return true;\n }\n if (this.audioState.getIsPaused()) {\n this.handlePausedAudioData(audioData);\n return true;\n }\n return false;\n }\n getAudioDataFormat(audioData) {\n const sampleRate = audioData.sampleRate;\n if (sampleRate <= 0) {\n audioData.close();\n throw new Error(ERROR_AUDIO_SAMPLE_RATE_INVALID);\n }\n const numberOfFrames = audioData.numberOfFrames;\n const numberOfChannels = audioData.numberOfChannels;\n this.setExpectedAudioFormat(sampleRate, numberOfChannels);\n if (this.expectedAudioSampleRate !== null && sampleRate !== this.expectedAudioSampleRate) {\n logger.warn(\"[RecorderWorker] Audio sample rate changed\", {\n expectedSampleRate: this.expectedAudioSampleRate,\n receivedSampleRate: sampleRate\n });\n audioData.close();\n return null;\n }\n return {\n sampleRate,\n numberOfFrames,\n numberOfChannels\n };\n }\n normalizeAudioBufferForFormat(audioBuffer, audioFormat) {\n let bufferToWrite = audioBuffer;\n let channelsToWrite = audioFormat.numberOfChannels;\n if (this.expectedAudioChannels !== null) {\n const normalized = this.normalizeAudioBuffer(audioBuffer, audioFormat.numberOfFrames, audioFormat.numberOfChannels, this.expectedAudioChannels);\n bufferToWrite = normalized.buffer;\n channelsToWrite = normalized.numberOfChannels;\n }\n return {\n buffer: 
bufferToWrite,\n numberOfChannels: channelsToWrite\n };\n }\n handlePause() {\n if (!this.audioState.pause()) {\n return;\n }\n this.sendStateChange(\"paused\");\n }\n handleResume() {\n if (!this.audioState.resume()) {\n return;\n }\n this.sendStateChange(\"recording\");\n }\n handleStop() {\n if (this.isStopping) {\n logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return Promise.resolve();\n }\n if (this.isFinalized) {\n logger.debug(\"[RecorderWorker] handleStop ignored (stopping/finalized)\");\n return Promise.resolve();\n }\n this.isStopping = true;\n this.isFinalized = true;\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n return runStopTransition({\n finalizeStopSequence: () => this.finalizeStopSequence(),\n completeStop: () => this.completeStop(),\n recoverStopFailure: () => {\n if (this.isFinalized) {\n this.resetStopStateAfterFailure();\n }\n return this.cleanup().catch((cleanupError) => {\n logger.error(\"[RecorderWorker] Stop failure cleanup failed\", {\n error: extractErrorMessage(cleanupError)\n });\n });\n },\n clearStoppingFlag: () => {\n this.isStopping = false;\n }\n });\n }\n async completeStop() {\n await this.cleanup();\n this.sendStateChange(\"stopped\");\n }\n async finalizeStopSequence() {\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n if (this.output) {\n await this.output.finalize();\n }\n await waitForPendingWritesToDrain({\n getPendingWriteCount: () => this.pendingWriteCount,\n timeoutMilliseconds: STOP_PENDING_WRITES_TIMEOUT_MILLISECONDS2\n });\n }\n resetStopStateAfterFailure() {\n this.isFinalized = false;\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n }\n handleToggleMute() {\n this.audioState.toggleMuted();\n }\n handleUpdateFps(fps) {\n if (fps <= 0) {\n throw new 
Error(\"Frame rate must be greater than zero\");\n }\n const previousFps = this.timestampManager.getFrameRate();\n logger.debug(\"[RecorderWorker] Updating FPS\", {\n fps,\n previousFps\n });\n this.timestampManager.setFrameRate(fps);\n if (this.config) {\n this.config.fps = fps;\n }\n }\n handleUpdateSourceType(isScreenCapture) {\n logger.debug(\"[RecorderWorker] Updating source type\", {\n isScreenCapture,\n previousIsScreenCapture: this.isScreenCapture\n });\n this.isScreenCapture = isScreenCapture;\n this.visibilityTracker.setIsScreenCapture(isScreenCapture);\n }\n async handleSwitchSource(videoTrack, videoStream) {\n if (!(videoTrack || videoStream)) {\n throw new Error(\"Video track or stream is required\");\n }\n const frameRate = this.timestampManager.getFrameRate();\n requireDefined(frameRate, \"Frame rate must be set\");\n if (frameRate <= 0) {\n throw new Error(\"Frame rate must be greater than zero\");\n }\n if (this.videoProcessor) {\n this.videoProcessingActive = false;\n await this.videoProcessor.cancel();\n let drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n while (!drainResult.done) {\n drainResult.value?.close();\n drainResult = await this.videoProcessor.read().catch(() => ({ done: true }));\n }\n this.videoProcessor = null;\n }\n const lastAudioTimestamp = this.audioState.getLastAudioTimestamp();\n const baseVideoTimestamp = this.timestampManager.getBaseVideoTimestamp();\n requireNonNull(baseVideoTimestamp, \"Base video timestamp must be set for source switch\");\n const switchResult = this.timestampManager.handleSourceSwitch(lastAudioTimestamp);\n logger.debug(\"[RecorderWorker] handleSwitchSource - preserving baseVideoTimestamp\", {\n continuationTimestamp: switchResult.continuationTimestamp,\n lastVideoTimestamp: this.timestampManager.getLastVideoTimestamp(),\n frameRate,\n isScreenCapture: this.isScreenCapture,\n baseVideoTimestamp,\n recordingStartTime: this.recordingStartTime,\n lastAudioTimestamp,\n 
previousVideoTimestamp: switchResult.previousVideoTimestamp,\n minFrameDuration: switchResult.minFrameDuration,\n rawDrift: switchResult.rawDrift,\n driftOffset: switchResult.driftOffset\n });\n if (videoStream) {\n this.setupVideoProcessingFromStream(videoStream);\n return;\n }\n if (videoTrack) {\n this.setupVideoProcessing(videoTrack);\n }\n }\n async cleanup() {\n this.bufferTracker.stop();\n this.videoProcessingActive = false;\n this.audioState.setProcessingActive(false);\n if (this.videoProcessor) {\n await this.videoProcessor.cancel();\n this.videoProcessor = null;\n }\n if (this.audioProcessor) {\n await this.audioProcessor.cancel();\n this.audioProcessor = null;\n }\n const videoSource = this.videoSource;\n if (videoSource && !this.isFinalized) {\n videoSource.close();\n }\n if (videoSource) {\n this.videoSource = null;\n }\n const audioSource = this.audioSource;\n if (audioSource && !this.isFinalized) {\n audioSource.close();\n }\n if (audioSource) {\n this.audioSource = null;\n }\n const output = this.output;\n if (output && !this.isFinalized) {\n await output.cancel().catch((error) => {\n logger.warn(\"[RecorderWorker] cancel failed (ignored, possibly finalized)\", error);\n });\n this.isFinalized = true;\n }\n if (output) {\n this.output = null;\n }\n this.timestampManager.reset(undefined);\n this.totalSize = 0;\n this.audioState.reset();\n this.frameCompositor.reset();\n this.overlayConfig = null;\n this.recordingStartTime = 0;\n this.isScreenCapture = false;\n this.expectedAudioChannels = null;\n this.expectedAudioSampleRate = null;\n this.pendingWriteCount = 0;\n this.visibilityTracker.reset(this.recordingStartTime, this.isScreenCapture);\n }\n setExpectedAudioFormat(sampleRate, numberOfChannels) {\n if (this.expectedAudioSampleRate === null) {\n this.expectedAudioSampleRate = sampleRate;\n }\n if (this.expectedAudioChannels === null) {\n this.expectedAudioChannels = numberOfChannels;\n }\n }\n normalizeAudioBuffer(audioBuffer, frames, 
actualChannels, expectedChannels) {\n if (actualChannels === expectedChannels) {\n return { buffer: audioBuffer, numberOfChannels: actualChannels };\n }\n if (actualChannels === 1 && expectedChannels === STEREO_CHANNEL_COUNT) {\n const expandedBuffer = new Float32Array(frames * STEREO_CHANNEL_COUNT);\n expandedBuffer.set(audioBuffer, 0);\n expandedBuffer.set(audioBuffer, frames);\n return {\n buffer: expandedBuffer,\n numberOfChannels: STEREO_CHANNEL_COUNT\n };\n }\n if (actualChannels === STEREO_CHANNEL_COUNT && expectedChannels === 1) {\n const mixedBuffer = new Float32Array(frames);\n let frameIndex = 0;\n while (frameIndex < frames) {\n const leftSample = audioBuffer[frameIndex];\n const rightSample = audioBuffer[frameIndex + frames];\n mixedBuffer[frameIndex] = (leftSample + rightSample) * AUDIO_SAMPLE_AVERAGE_SCALE;\n frameIndex += 1;\n }\n return { buffer: mixedBuffer, numberOfChannels: 1 };\n }\n logger.warn(\"[RecorderWorker] Audio channel mismatch\", {\n expectedChannels,\n receivedChannels: actualChannels\n });\n return { buffer: audioBuffer, numberOfChannels: actualChannels };\n }\n sendReady() {\n const response = { type: \"ready\" };\n self.postMessage(response);\n }\n sendError(error) {\n const errorMessage = extractErrorMessage(error);\n const response = {\n type: \"error\",\n error: errorMessage\n };\n self.postMessage(response);\n }\n sendChunk(data, position) {\n this.totalSize = Math.max(this.totalSize, position + data.length);\n const response = {\n type: \"chunk\",\n data,\n position\n };\n const buffer = data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength);\n self.postMessage(response, [buffer]);\n }\n sendStateChange(state) {\n const response = {\n type: \"stateChange\",\n state\n };\n self.postMessage(response);\n }\n deserializeBitrate(bitrate) {\n if (typeof bitrate === \"number\") {\n return bitrate;\n }\n if (bitrate === \"low\") {\n return QUALITY_LOW;\n }\n if (bitrate === \"medium\") {\n return QUALITY_MEDIUM;\n }\n 
if (bitrate === \"high\") {\n return QUALITY_HIGH;\n }\n if (bitrate === \"very-high\") {\n return QUALITY_VERY_HIGH;\n }\n return QUALITY_HIGH;\n }\n}\nnew RecorderWorker;\n";
1763
1842
 
1764
1843
  import type { WatermarkPosition } from "../types";
1765
1844
  /**
@@ -1943,17 +2022,16 @@ export declare class VisibilityTracker {
1943
2022
  private pendingVisibilityUpdates;
1944
2023
  private recordingStartTime;
1945
2024
  private isScreenCapture;
2025
+ private intervalCursor;
2026
+ private lastOverlayTimestamp;
1946
2027
  private readonly logger;
1947
2028
  constructor(dependencies: VisibilityTrackerDependencies);
1948
2029
  reset(recordingStartTime: number, isScreenCapture: boolean): void;
1949
2030
  setRecordingStartTime(recordingStartTime: number): void;
1950
2031
  setIsScreenCapture(isScreenCapture: boolean): void;
1951
2032
  getPendingUpdatesCount(): number;
1952
- shouldApplyOverlay(parameters: {
1953
- timestamp: number;
1954
- overlayEnabled: boolean;
1955
- frameCount: number;
1956
- }): boolean;
2033
+ shouldApplyOverlay(timestamp: number, frameCount: number): boolean;
2034
+ private shouldApplyCompletedIntervalOverlay;
1957
2035
  handleUpdateVisibility(isHidden: boolean, timestamp: number, hasBaseVideoTimestamp: boolean, pausedDuration: number): void;
1958
2036
  flushPendingUpdates(pausedDuration: number): void;
1959
2037
  private processVisibilityUpdate;
@@ -2025,6 +2103,47 @@ export declare function getAudioCodecForFormat(format: OutputFormat, overrideCod
2025
2103
 
2026
2104
  export {};
2027
2105
 
2106
+ import type { Quality, VideoCodec } from "mediabunny";
2107
+ import type { FormatCompatibilityPolicy } from "../config/config-constants";
2108
+ type VideoCodecCheckOptions = {
2109
+ width?: number;
2110
+ height?: number;
2111
+ bitrate?: number | Quality;
2112
+ hardwareAcceleration: "no-preference" | "prefer-hardware" | "prefer-software";
2113
+ };
2114
+ type AudioCodecCheckOptions = {
2115
+ bitrate?: number | Quality;
2116
+ };
2117
+ type MediabunnyModule = {
2118
+ canEncodeVideo?: (codec: VideoCodec, options: VideoCodecCheckOptions) => Promise<boolean>;
2119
+ getFirstEncodableAudioCodec?: (codecs: AudioCodec[], options: AudioCodecCheckOptions) => Promise<string | null>;
2120
+ };
2121
+ type LoadMediabunnyModuleDependency = () => Promise<MediabunnyModule | null>;
2122
+ export type CodecPolicyResolverDependencies = {
2123
+ loadMediabunnyModule: LoadMediabunnyModuleDependency;
2124
+ };
2125
+ export type ResolveVideoCodecOptions = {
2126
+ format: OutputFormat;
2127
+ overrideCodec: NonNullable<TranscodeConfig["codec"]> | undefined;
2128
+ policy: FormatCompatibilityPolicy;
2129
+ width: number | undefined;
2130
+ height: number | undefined;
2131
+ bitrate: number | Quality | undefined;
2132
+ shouldThrowIfNoCodecAvailable?: boolean;
2133
+ dependencies?: Partial<CodecPolicyResolverDependencies>;
2134
+ };
2135
+ export declare function resolveVideoCodecFromPolicy(options: ResolveVideoCodecOptions): Promise<NonNullable<TranscodeConfig["codec"]>>;
2136
+ export type ResolveAudioCodecOptions = {
2137
+ format: OutputFormat;
2138
+ overrideCodec: AudioCodec | undefined;
2139
+ policy: FormatCompatibilityPolicy;
2140
+ bitrate: number | Quality | undefined;
2141
+ shouldThrowIfNoCodecAvailable?: boolean;
2142
+ dependencies?: Partial<CodecPolicyResolverDependencies>;
2143
+ };
2144
+ export declare function resolveAudioCodecFromPolicy(options: ResolveAudioCodecOptions): Promise<AudioCodec>;
2145
+ export {};
2146
+
2028
2147
  declare const VIDEO_PATH_WORKER_TRACK = "worker-track";
2029
2148
  declare const VIDEO_PATH_MAIN_THREAD_STREAM = "main-thread-stream";
2030
2149
  declare const VIDEO_PATH_UNAVAILABLE = "unavailable";