avbridge 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/CHANGELOG.md +120 -0
  2. package/LICENSE +21 -0
  3. package/README.md +415 -0
  4. package/dist/avi-M5B4SHRM.cjs +164 -0
  5. package/dist/avi-M5B4SHRM.cjs.map +1 -0
  6. package/dist/avi-POCGZ4JX.js +162 -0
  7. package/dist/avi-POCGZ4JX.js.map +1 -0
  8. package/dist/chunk-5ISVAODK.js +80 -0
  9. package/dist/chunk-5ISVAODK.js.map +1 -0
  10. package/dist/chunk-F7YS2XOA.cjs +2966 -0
  11. package/dist/chunk-F7YS2XOA.cjs.map +1 -0
  12. package/dist/chunk-FKM7QBZU.js +2957 -0
  13. package/dist/chunk-FKM7QBZU.js.map +1 -0
  14. package/dist/chunk-J5MCMN3S.js +27 -0
  15. package/dist/chunk-J5MCMN3S.js.map +1 -0
  16. package/dist/chunk-L4NPOJ36.cjs +180 -0
  17. package/dist/chunk-L4NPOJ36.cjs.map +1 -0
  18. package/dist/chunk-NZU7W256.cjs +29 -0
  19. package/dist/chunk-NZU7W256.cjs.map +1 -0
  20. package/dist/chunk-PQTZS7OA.js +147 -0
  21. package/dist/chunk-PQTZS7OA.js.map +1 -0
  22. package/dist/chunk-WD2ZNQA7.js +177 -0
  23. package/dist/chunk-WD2ZNQA7.js.map +1 -0
  24. package/dist/chunk-Y5FYF5KG.cjs +153 -0
  25. package/dist/chunk-Y5FYF5KG.cjs.map +1 -0
  26. package/dist/chunk-Z2FJ5TJC.cjs +82 -0
  27. package/dist/chunk-Z2FJ5TJC.cjs.map +1 -0
  28. package/dist/element.cjs +433 -0
  29. package/dist/element.cjs.map +1 -0
  30. package/dist/element.d.cts +158 -0
  31. package/dist/element.d.ts +158 -0
  32. package/dist/element.js +431 -0
  33. package/dist/element.js.map +1 -0
  34. package/dist/index.cjs +576 -0
  35. package/dist/index.cjs.map +1 -0
  36. package/dist/index.d.cts +80 -0
  37. package/dist/index.d.ts +80 -0
  38. package/dist/index.js +554 -0
  39. package/dist/index.js.map +1 -0
  40. package/dist/libav-http-reader-FPYDBMYK.cjs +16 -0
  41. package/dist/libav-http-reader-FPYDBMYK.cjs.map +1 -0
  42. package/dist/libav-http-reader-NQJVY273.js +3 -0
  43. package/dist/libav-http-reader-NQJVY273.js.map +1 -0
  44. package/dist/libav-import-2JURFHEW.js +8 -0
  45. package/dist/libav-import-2JURFHEW.js.map +1 -0
  46. package/dist/libav-import-GST2AMPL.cjs +30 -0
  47. package/dist/libav-import-GST2AMPL.cjs.map +1 -0
  48. package/dist/libav-loader-KA2MAWLM.js +3 -0
  49. package/dist/libav-loader-KA2MAWLM.js.map +1 -0
  50. package/dist/libav-loader-ZHOERPHW.cjs +12 -0
  51. package/dist/libav-loader-ZHOERPHW.cjs.map +1 -0
  52. package/dist/player-BBwbCkdL.d.cts +365 -0
  53. package/dist/player-BBwbCkdL.d.ts +365 -0
  54. package/dist/source-SC6ZEQYR.cjs +28 -0
  55. package/dist/source-SC6ZEQYR.cjs.map +1 -0
  56. package/dist/source-ZFS4H7J3.js +3 -0
  57. package/dist/source-ZFS4H7J3.js.map +1 -0
  58. package/dist/variant-routing-GOHB2RZN.cjs +12 -0
  59. package/dist/variant-routing-GOHB2RZN.cjs.map +1 -0
  60. package/dist/variant-routing-JOBWXYKD.js +3 -0
  61. package/dist/variant-routing-JOBWXYKD.js.map +1 -0
  62. package/package.json +95 -0
  63. package/src/classify/index.ts +1 -0
  64. package/src/classify/rules.ts +214 -0
  65. package/src/convert/index.ts +2 -0
  66. package/src/convert/remux.ts +522 -0
  67. package/src/convert/transcode.ts +329 -0
  68. package/src/diagnostics.ts +99 -0
  69. package/src/element/avbridge-player.ts +576 -0
  70. package/src/element.ts +19 -0
  71. package/src/events.ts +71 -0
  72. package/src/index.ts +42 -0
  73. package/src/libav-stubs.d.ts +24 -0
  74. package/src/player.ts +455 -0
  75. package/src/plugins/builtin.ts +37 -0
  76. package/src/plugins/registry.ts +32 -0
  77. package/src/probe/avi.ts +242 -0
  78. package/src/probe/index.ts +59 -0
  79. package/src/probe/mediabunny.ts +194 -0
  80. package/src/strategies/fallback/audio-output.ts +293 -0
  81. package/src/strategies/fallback/clock.ts +7 -0
  82. package/src/strategies/fallback/decoder.ts +660 -0
  83. package/src/strategies/fallback/index.ts +170 -0
  84. package/src/strategies/fallback/libav-import.ts +27 -0
  85. package/src/strategies/fallback/libav-loader.ts +190 -0
  86. package/src/strategies/fallback/variant-routing.ts +43 -0
  87. package/src/strategies/fallback/video-renderer.ts +216 -0
  88. package/src/strategies/hybrid/decoder.ts +641 -0
  89. package/src/strategies/hybrid/index.ts +139 -0
  90. package/src/strategies/native.ts +107 -0
  91. package/src/strategies/remux/annexb.ts +112 -0
  92. package/src/strategies/remux/index.ts +79 -0
  93. package/src/strategies/remux/mse.ts +234 -0
  94. package/src/strategies/remux/pipeline.ts +254 -0
  95. package/src/subtitles/index.ts +91 -0
  96. package/src/subtitles/render.ts +62 -0
  97. package/src/subtitles/srt.ts +62 -0
  98. package/src/subtitles/vtt.ts +5 -0
  99. package/src/types-shim.d.ts +3 -0
  100. package/src/types.ts +360 -0
  101. package/src/util/codec-strings.ts +86 -0
  102. package/src/util/libav-http-reader.ts +315 -0
  103. package/src/util/source.ts +274 -0
@@ -0,0 +1,254 @@
1
+ import type { MediaContext } from "../../types.js";
2
+ import { MseSink } from "./mse.js";
3
+ import {
4
+ avbridgeVideoToMediabunny,
5
+ avbridgeAudioToMediabunny,
6
+ buildMediabunnySourceFromInput,
7
+ } from "../../probe/mediabunny.js";
8
+
9
+ /**
10
+ * Remux pipeline built against mediabunny's real API.
11
+ *
12
+ * Key design notes:
13
+ *
14
+ * - mediabunny's fMP4 muxer is a streaming muxer that requires monotonically
15
+ * increasing timestamps. It cannot accept out-of-order packets after a seek.
16
+ * Therefore, on each seek we create a **fresh** Output + sources + StreamTarget.
17
+ * The MseSink handles the SourceBuffer reset via `invalidate()`.
18
+ *
19
+ * - Backpressure is enforced at two levels: in the WritableStream write handler
20
+ * (limits append queue depth and total buffered time) and in the pump loop
21
+ * (limits buffered-ahead and total buffered time). Without this, long files
22
+ * dump gigabytes into the SourceBuffer and exhaust memory.
23
+ */
24
/**
 * Control surface returned by {@link createRemuxPipeline}.
 */
export interface RemuxPipeline {
  /**
   * Begin pumping packets into the MSE sink.
   * @param fromTime Optional start offset in seconds (defaults to 0).
   * @param autoPlay When true, playback is started once media is appended.
   */
  start(fromTime?: number, autoPlay?: boolean): Promise<void>;
  /**
   * Restart the pump from `time`. The returned promise resolves once the new
   * pump loop has been kicked off, not when the seek itself completes.
   */
  seek(time: number, autoPlay?: boolean): Promise<void>;
  /** Tear down the output, input, and MSE sink; stops any running pump loop. */
  destroy(): Promise<void>;
  /** Snapshot of pump counters (packets, bytes, fragments) for diagnostics. */
  stats(): Record<string, unknown>;
}
30
+
31
/**
 * Build a remux pipeline: read encoded packets from the source container via
 * mediabunny, rewrap them into fragmented MP4, and append the fragments to an
 * MSE SourceBuffer attached to `video` (via MseSink).
 *
 * @param ctx   Probed media context; the first video track (required) and the
 *              first audio track (optional) are remuxed.
 * @param video Element the MseSink attaches its MediaSource to.
 * @returns A {@link RemuxPipeline} handle.
 * @throws If the source has no video track, the video codec has no mediabunny
 *         output mapping, or the selected tracks cannot be located in the input.
 */
export async function createRemuxPipeline(
  ctx: MediaContext,
  video: HTMLVideoElement,
): Promise<RemuxPipeline> {
  // Dynamic import keeps mediabunny out of the main bundle until needed.
  const mb = await import("mediabunny");

  // Only the first video/audio track is remuxed; additional tracks are ignored.
  const videoTrackInfo = ctx.videoTracks[0];
  const audioTrackInfo = ctx.audioTracks[0];
  if (!videoTrackInfo) throw new Error("remux: source has no video track");

  // Map avbridge codec names back to mediabunny's enum strings.
  const mbVideoCodec = avbridgeVideoToMediabunny(videoTrackInfo.codec);
  if (!mbVideoCodec) {
    throw new Error(`remux: video codec "${videoTrackInfo.codec}" is not supported by mediabunny output`);
  }
  // Audio is best-effort: an unmappable audio codec simply drops the track.
  const mbAudioCodec = audioTrackInfo ? avbridgeAudioToMediabunny(audioTrackInfo.codec) : null;

  // Open the input. URL sources go through mediabunny's UrlSource so the
  // muxer streams via Range requests instead of buffering the whole file.
  const input = new mb.Input({
    source: await buildMediabunnySourceFromInput(mb, ctx.source),
    formats: mb.ALL_FORMATS,
  });
  const allTracks = await input.getTracks();
  // Match tracks by the ids recorded at probe time; the isVideoTrack/
  // isAudioTrack checks double as type guards for mediabunny's track union.
  const inputVideo = allTracks.find((t) => t.id === videoTrackInfo.id && t.isVideoTrack());
  const inputAudio = audioTrackInfo
    ? allTracks.find((t) => t.id === audioTrackInfo.id && t.isAudioTrack())
    : null;
  if (!inputVideo || !inputVideo.isVideoTrack()) {
    throw new Error("remux: video track not found in input");
  }
  if (audioTrackInfo && (!inputAudio || !inputAudio.isAudioTrack())) {
    throw new Error("remux: audio track not found in input");
  }

  // Pull WebCodecs decoder configs once — used as `meta` on the first packet.
  const videoConfig = await inputVideo.getDecoderConfig();
  const audioConfig = inputAudio && inputAudio.isAudioTrack() ? await inputAudio.getDecoderConfig() : null;

  // Packet sinks (input side) — reused across seeks.
  const videoSink = new mb.EncodedPacketSink(inputVideo);
  const audioSink = inputAudio?.isAudioTrack() ? new mb.EncodedPacketSink(inputAudio) : null;

  // MSE sink — created lazily on first output write, reused across seeks.
  let sink: MseSink | null = null;
  const stats = { videoPackets: 0, audioPackets: 0, bytesWritten: 0, fragments: 0 };

  let destroyed = false;
  // Generation counter: each start()/seek() bumps it, and a running pump loop
  // compares its own token against it to notice it has been superseded.
  let pumpToken = 0;
  // Deferred seek/autoplay captured before the sink exists; applied by the
  // write handler when it lazily constructs the MseSink.
  let pendingAutoPlay = false;
  let pendingStartTime = 0;

  // The current Output instance. Recreated on each seek because mediabunny's
  // fMP4 muxer requires monotonically increasing timestamps.
  let currentOutput: InstanceType<typeof mb.Output> | null = null;

  /**
   * Create a fresh mediabunny Output wired to the MSE sink. Called once at
   * start and again on each seek.
   */
  function createOutput() {
    // Cancel the previous output if it exists. Fire-and-forget: we only need
    // the old muxer to stop writing, not to finish cleanly.
    if (currentOutput) {
      try { void currentOutput.cancel(); } catch { /* ignore */ }
    }

    // Memoized so getMimeType() is only asked once per Output.
    let mimePromise: Promise<string> | null = null;

    const writable = new WritableStream<{
      type: "write";
      data: Uint8Array<ArrayBuffer>;
      position: number;
    }>({
      write: async (chunk) => {
        if (destroyed) return;
        if (!sink) {
          // First write: derive the MSE MIME string from the muxer and build
          // the sink against it.
          const mime = await (mimePromise ??= output.getMimeType());
          sink = new MseSink({ mime, video });
          await sink.ready();
          // Apply deferred seek + autoPlay for the initial start.
          if (pendingStartTime > 0) {
            sink.invalidate(pendingStartTime);
          }
          sink.setPlayOnSeek(pendingAutoPlay);
        }
        // Backpressure: wait for the SourceBuffer append queue to drain.
        // Thresholds (queue depth 10, 60 s ahead, 120 s total) are looser than
        // the pump loop's so the write side only stalls as a second line of
        // defense. 500 ms polling is coarse but cheap.
        while (sink && !destroyed && (sink.queueLength() > 10 || sink.bufferedAhead() > 60 || sink.totalBuffered() > 120)) {
          await new Promise((r) => setTimeout(r, 500));
        }
        if (destroyed) return;
        sink.append(chunk.data);
        stats.bytesWritten += chunk.data.byteLength;
        stats.fragments++;
      },
    });

    const target = new mb.StreamTarget(writable);
    const output = new mb.Output({
      format: new mb.Mp4OutputFormat({ fastStart: "fragmented" }),
      target,
    });

    // Build the output sources. Video is always present (validated above).
    const videoSource = new mb.EncodedVideoPacketSource(mbVideoCodec!);
    output.addVideoTrack(videoSource);

    type AudioSourceCtorArg = ConstructorParameters<typeof mb.EncodedAudioPacketSource>[0];
    let audioSource: InstanceType<typeof mb.EncodedAudioPacketSource> | null = null;
    if (mbAudioCodec && inputAudio?.isAudioTrack()) {
      audioSource = new mb.EncodedAudioPacketSource(mbAudioCodec as AudioSourceCtorArg);
      output.addAudioTrack(audioSource);
    }

    currentOutput = output;
    return { output, videoSource, audioSource };
  }

  /**
   * Pump packets from the input sinks into a fresh Output, interleaving video
   * and audio by timestamp. Exits when superseded (token mismatch), destroyed,
   * or both iterators are exhausted.
   */
  async function pumpLoop(token: number, fromTime: number) {
    const { output, videoSource, audioSource } = createOutput();

    await output.start();

    // Find the starting key packet so we never push partial GOPs.
    const startVideoPacket =
      fromTime > 0
        ? (await videoSink.getKeyPacket(fromTime)) ?? (await videoSink.getFirstPacket())
        : await videoSink.getFirstPacket();
    if (!startVideoPacket) return;

    // Audio has no GOP structure, so a plain getPacket(fromTime) suffices.
    const startAudioPacket = audioSink
      ? (audioSink && fromTime > 0
        ? (await audioSink.getPacket(fromTime)) ?? (await audioSink.getFirstPacket())
        : await audioSink.getFirstPacket())
      : null;

    const videoIter = videoSink.packets(startVideoPacket);
    const audioIter = audioSink && startAudioPacket ? audioSink.packets(startAudioPacket) : null;

    let vNext = await videoIter.next();
    // With no audio track, start from an already-done iterator result so the
    // merge loop below degenerates to video-only.
    let aNext = audioIter ? await audioIter.next() : { done: true as const, value: undefined };
    // First packet on each track carries the decoder config as `meta`.
    let firstVideo = true;
    let firstAudio = true;

    while (!destroyed && pumpToken === token && (!vNext.done || !aNext.done)) {
      // Backpressure: pause pumping when we've buffered enough. These limits
      // are tighter than the write handler's, so this loop stalls first.
      while (
        !destroyed &&
        pumpToken === token &&
        sink &&
        (sink.bufferedAhead() > 30 || sink.queueLength() > 20 || sink.totalBuffered() > 90)
      ) {
        await new Promise((r) => setTimeout(r, 500));
      }
      if (destroyed || pumpToken !== token) break;

      // Two-way merge by timestamp; an exhausted side sorts to +Infinity.
      const vTs = !vNext.done ? vNext.value.timestamp : Number.POSITIVE_INFINITY;
      const aTs = !aNext.done ? aNext.value.timestamp : Number.POSITIVE_INFINITY;

      if (!vNext.done && vTs <= aTs) {
        await videoSource.add(
          vNext.value,
          firstVideo && videoConfig ? { decoderConfig: videoConfig } : undefined,
        );
        firstVideo = false;
        stats.videoPackets++;
        vNext = await videoIter.next();
      } else if (audioIter && audioSource && !aNext.done) {
        await audioSource.add(
          aNext.value,
          firstAudio && audioConfig ? { decoderConfig: audioConfig } : undefined,
        );
        firstAudio = false;
        stats.audioPackets++;
        aNext = await audioIter.next();
      } else {
        // Only audio remains but there is no audio output source — done.
        break;
      }
    }

    // Only finalize if this loop is still the current generation; a superseded
    // loop must not end the stream the new loop is feeding.
    if (!destroyed && pumpToken === token) {
      await output.finalize();
      sink?.endOfStream();
    }
  }

  return {
    async start(fromTime = 0, autoPlay = false) {
      // Store autoPlay/seekTime so the MseSink (created lazily on first
      // write) can apply the deferred seek and auto-play.
      pendingAutoPlay = autoPlay;
      pendingStartTime = fromTime;
      // Fire-and-forget: start() resolves immediately; pump errors are logged
      // and the sink is torn down rather than propagated to the caller.
      pumpLoop(++pumpToken, fromTime).catch((err) => {
        // eslint-disable-next-line no-console
        console.error("[avbridge] remux pipeline failed:", err);
        try { sink?.destroy(); } catch { /* ignore */ }
      });
    },
    async seek(time, autoPlay = false) {
      if (sink) {
        // Existing sink: reset the SourceBuffer and retarget playback.
        sink.setPlayOnSeek(autoPlay);
        sink.invalidate(time);
      } else {
        // Sink not created yet — defer, same as start().
        pendingAutoPlay = autoPlay;
        pendingStartTime = time;
      }
      // Bumping the token makes any in-flight pump loop stop on its next check.
      pumpLoop(++pumpToken, time).catch((err) => {
        // eslint-disable-next-line no-console
        console.error("[avbridge] remux pipeline reseek failed:", err);
      });
    },
    async destroy() {
      destroyed = true;
      pumpToken++;
      try { if (currentOutput) await currentOutput.cancel(); } catch { /* ignore */ }
      try { await input.dispose(); } catch { /* ignore */ }
      sink?.destroy();
    },
    stats() {
      return { ...stats, decoderType: "remux" };
    },
  };
}
254
+
@@ -0,0 +1,91 @@
1
+ import type { SubtitleTrackInfo } from "../types.js";
2
+ import { srtToVtt } from "./srt.js";
3
+ import { isVtt } from "./vtt.js";
4
+
5
+ export { srtToVtt } from "./srt.js";
6
+ export { SubtitleOverlay } from "./render.js";
7
+
8
+ /**
9
+ * Discover sidecar `.srt` / `.vtt` files next to the source. Requires the
10
+ * caller to pass a `FileSystemDirectoryHandle` (e.g. via the File System
11
+ * Access API). Without that handle we can't enumerate sibling files.
12
+ */
13
/** One subtitle file found next to the media source. */
export interface DiscoveredSidecar {
  /** Object URL for the sidecar file's contents (created via URL.createObjectURL). */
  url: string;
  /** Container format, detected from the file extension. */
  format: "srt" | "vtt";
  /** 2–3 letter language tag extracted from the filename (e.g. "en"), if any. */
  language?: string;
}
18
+
19
+ export async function discoverSidecar(
20
+ file: File,
21
+ directory: FileSystemDirectoryHandle,
22
+ ): Promise<DiscoveredSidecar[]> {
23
+ const baseName = file.name.replace(/\.[^.]+$/, "");
24
+ const found: DiscoveredSidecar[] = [];
25
+
26
+ // Walk the directory and look for `${baseName}*.srt` / `*.vtt`.
27
+ for await (const [name, handle] of (directory as unknown as AsyncIterable<[string, FileSystemHandle]>)) {
28
+ if (handle.kind !== "file") continue;
29
+ if (!name.startsWith(baseName)) continue;
30
+ const lower = name.toLowerCase();
31
+ let format: "srt" | "vtt" | null = null;
32
+ if (lower.endsWith(".srt")) format = "srt";
33
+ else if (lower.endsWith(".vtt")) format = "vtt";
34
+ if (!format) continue;
35
+
36
+ const sidecarFile = await (handle as FileSystemFileHandle).getFile();
37
+ const url = URL.createObjectURL(sidecarFile);
38
+
39
+ // Try to extract a language tag (eg. movie.en.srt → "en").
40
+ const langMatch = name.slice(baseName.length).match(/[._-]([a-z]{2,3})(?:[._-]|\.)/i);
41
+ found.push({
42
+ url,
43
+ format,
44
+ language: langMatch?.[1],
45
+ });
46
+ }
47
+
48
+ return found;
49
+ }
50
+
51
+ /**
52
+ * Attach `<track>` elements for each subtitle to the player's `<video>`. SRT
53
+ * sources are converted to VTT first via blob URLs because `<track>` only
54
+ * accepts WebVTT.
55
+ */
56
+ export async function attachSubtitleTracks(
57
+ video: HTMLVideoElement,
58
+ tracks: SubtitleTrackInfo[],
59
+ ): Promise<void> {
60
+ // Clear existing dynamically-attached tracks.
61
+ for (const t of Array.from(video.querySelectorAll("track[data-avbridge]"))) {
62
+ t.remove();
63
+ }
64
+
65
+ for (const t of tracks) {
66
+ if (!t.sidecarUrl) continue;
67
+ let url = t.sidecarUrl;
68
+ if (t.format === "srt") {
69
+ const res = await fetch(t.sidecarUrl);
70
+ const text = await res.text();
71
+ const vtt = srtToVtt(text);
72
+ const blob = new Blob([vtt], { type: "text/vtt" });
73
+ url = URL.createObjectURL(blob);
74
+ } else if (t.format === "vtt") {
75
+ // Validate quickly so a malformed file fails loudly here.
76
+ const res = await fetch(t.sidecarUrl);
77
+ const text = await res.text();
78
+ if (!isVtt(text)) {
79
+ // eslint-disable-next-line no-console
80
+ console.warn("[avbridge] subtitle missing WEBVTT header:", t.sidecarUrl);
81
+ }
82
+ }
83
+ const track = document.createElement("track");
84
+ track.kind = "subtitles";
85
+ track.src = url;
86
+ track.srclang = t.language ?? "und";
87
+ track.label = t.language ?? `Subtitle ${t.id}`;
88
+ track.dataset.avbridge = "true";
89
+ video.appendChild(track);
90
+ }
91
+ }
@@ -0,0 +1,62 @@
1
+ /**
2
+ * Custom subtitle overlay for the fallback strategy. We don't have a `<video>`
3
+ * with text tracks here, so we render cues into a positioned div ourselves.
4
+ *
5
+ * v1 only handles plain-text WebVTT cues with `HH:MM:SS.mmm` timing. Cue
6
+ * settings, voice tags, and styling are ignored.
7
+ */
8
+
9
/** One parsed subtitle cue: show `text` while playback time is within [start, end]. */
interface Cue {
  /** Cue start time, in seconds. */
  start: number;
  /** Cue end time, in seconds. */
  end: number;
  /** Plain cue text; may contain newlines for multi-line cues. */
  text: string;
}
14
+
15
+ export class SubtitleOverlay {
16
+ private el: HTMLDivElement;
17
+ private cues: Cue[] = [];
18
+
19
+ constructor(parent: HTMLElement) {
20
+ this.el = document.createElement("div");
21
+ this.el.style.cssText =
22
+ "position:absolute;left:0;right:0;bottom:8%;text-align:center;color:white;text-shadow:0 0 4px black;font-family:sans-serif;font-size:1.4em;pointer-events:none;";
23
+ parent.appendChild(this.el);
24
+ }
25
+
26
+ loadVtt(text: string): void {
27
+ this.cues = parseVtt(text);
28
+ }
29
+
30
+ update(currentTime: number): void {
31
+ const active = this.cues.find((c) => currentTime >= c.start && currentTime <= c.end);
32
+ this.el.textContent = active?.text ?? "";
33
+ }
34
+
35
+ destroy(): void {
36
+ this.el.remove();
37
+ this.cues = [];
38
+ }
39
+ }
40
+
41
+ function parseVtt(text: string): Cue[] {
42
+ const cues: Cue[] = [];
43
+ const blocks = text.replace(/\r\n/g, "\n").split(/\n{2,}/);
44
+ for (const block of blocks) {
45
+ const lines = block.split("\n").filter(Boolean);
46
+ if (lines.length === 0 || lines[0] === "WEBVTT") continue;
47
+ const timingIdx = lines.findIndex((l) => l.includes("-->"));
48
+ if (timingIdx < 0) continue;
49
+ const m = /(\d{2}):(\d{2}):(\d{2})\.(\d{3})\s*-->\s*(\d{2}):(\d{2}):(\d{2})\.(\d{3})/.exec(
50
+ lines[timingIdx],
51
+ );
52
+ if (!m) continue;
53
+ const t = (h: string, mm: string, s: string, ms: string) =>
54
+ Number(h) * 3600 + Number(mm) * 60 + Number(s) + Number(ms) / 1000;
55
+ cues.push({
56
+ start: t(m[1], m[2], m[3], m[4]),
57
+ end: t(m[5], m[6], m[7], m[8]),
58
+ text: lines.slice(timingIdx + 1).join("\n"),
59
+ });
60
+ }
61
+ return cues;
62
+ }
@@ -0,0 +1,62 @@
1
+ /**
2
+ * SRT → WebVTT converter.
3
+ *
4
+ * SRT cues:
5
+ *
6
+ * 1
7
+ * 00:00:20,000 --> 00:00:24,400
8
+ * Subtitle text, possibly multiple lines.
9
+ *
10
+ * WebVTT cues:
11
+ *
12
+ * WEBVTT
13
+ *
14
+ * 00:00:20.000 --> 00:00:24.400
15
+ * Subtitle text, possibly multiple lines.
16
+ *
17
+ * The differences in v1 are:
18
+ * - leading `WEBVTT` magic line
19
+ * - `,` → `.` for milliseconds
20
+ * - cue index lines are stripped (WebVTT allows them but SRT-style ints can
21
+ * confuse some parsers; we drop them)
22
+ * - BOM is stripped
23
+ */
24
+ export function srtToVtt(srt: string): string {
25
+ // Strip BOM
26
+ if (srt.charCodeAt(0) === 0xfeff) srt = srt.slice(1);
27
+ // Normalize line endings
28
+ const normalized = srt.replace(/\r\n/g, "\n").replace(/\r/g, "\n").trim();
29
+
30
+ const blocks = normalized.split(/\n{2,}/);
31
+ const out: string[] = ["WEBVTT", ""];
32
+
33
+ for (const block of blocks) {
34
+ const lines = block.split("\n");
35
+ // Drop the leading numeric index, if present.
36
+ if (lines.length > 0 && /^\d+$/.test(lines[0].trim())) {
37
+ lines.shift();
38
+ }
39
+ if (lines.length === 0) continue;
40
+
41
+ const timing = lines.shift()!;
42
+ const vttTiming = convertTiming(timing);
43
+ if (!vttTiming) continue; // skip malformed cue
44
+
45
+ out.push(vttTiming);
46
+ for (const l of lines) out.push(l);
47
+ out.push("");
48
+ }
49
+
50
+ return out.join("\n");
51
+ }
52
+
53
+ function convertTiming(line: string): string | null {
54
+ // SRT: HH:MM:SS,mmm --> HH:MM:SS,mmm (optional cue settings after)
55
+ const m = /^(\d{1,2}):(\d{2}):(\d{2})[,.](\d{1,3})\s*-->\s*(\d{1,2}):(\d{2}):(\d{2})[,.](\d{1,3})(.*)$/.exec(
56
+ line.trim(),
57
+ );
58
+ if (!m) return null;
59
+ const fmt = (h: string, mm: string, s: string, ms: string) =>
60
+ `${h.padStart(2, "0")}:${mm}:${s}.${ms.padEnd(3, "0").slice(0, 3)}`;
61
+ return `${fmt(m[1], m[2], m[3], m[4])} --> ${fmt(m[5], m[6], m[7], m[8])}${m[9] ?? ""}`;
62
+ }
@@ -0,0 +1,5 @@
1
+ /** Light validation for incoming VTT — we do not parse cues, just confirm header. */
2
+ export function isVtt(text: string): boolean {
3
+ const trimmed = text.replace(/^\ufeff/, "").trimStart();
4
+ return trimmed.startsWith("WEBVTT");
5
+ }
@@ -0,0 +1,3 @@
1
+ // Type shims live in src/libav-stubs.d.ts (referenced via tsconfig `paths`).
2
+ // Keep this file as a placeholder for any future global ambient declarations.
3
+