@editframe/assets 0.18.3-beta.0 → 0.18.7-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Probe.d.ts +441 -29
- package/dist/Probe.js +156 -21
- package/dist/generateTrackFragmentIndexMediabunny.d.ts +3 -0
- package/dist/generateTrackFragmentIndexMediabunny.js +343 -0
- package/dist/generateTrackMediabunny.d.ts +8 -0
- package/dist/generateTrackMediabunny.js +69 -0
- package/dist/idempotentTask.js +33 -35
- package/dist/index.d.ts +2 -2
- package/dist/index.js +2 -2
- package/dist/tasks/findOrCreateCaptions.js +1 -1
- package/dist/tasks/generateTrack.d.ts +1 -2
- package/dist/tasks/generateTrack.js +5 -32
- package/dist/tasks/generateTrackFragmentIndex.js +11 -75
- package/dist/truncateDecimal.d.ts +1 -0
- package/dist/truncateDecimal.js +5 -0
- package/package.json +2 -14
- package/src/tasks/generateTrack.test.ts +90 -0
- package/src/tasks/generateTrack.ts +7 -48
- package/src/tasks/generateTrackFragmentIndex.test.ts +115 -0
- package/src/tasks/generateTrackFragmentIndex.ts +27 -98
- package/types.json +1 -1
- package/dist/DecoderManager.d.ts +0 -62
- package/dist/DecoderManager.js +0 -114
- package/dist/EncodedAsset.d.ts +0 -143
- package/dist/EncodedAsset.js +0 -443
- package/dist/FrameBuffer.d.ts +0 -62
- package/dist/FrameBuffer.js +0 -89
- package/dist/MP4File.d.ts +0 -37
- package/dist/MP4File.js +0 -209
- package/dist/MP4SampleAnalyzer.d.ts +0 -59
- package/dist/MP4SampleAnalyzer.js +0 -119
- package/dist/SeekStrategy.d.ts +0 -82
- package/dist/SeekStrategy.js +0 -101
- package/dist/memoize.js +0 -11
- package/dist/mp4FileWritable.d.ts +0 -3
- package/dist/mp4FileWritable.js +0 -19
package/dist/Probe.js
CHANGED
@@ -1,8 +1,9 @@
-import
+import { truncateDecimal } from "./truncateDecimal.js";
 import { exec, spawn } from "node:child_process";
 import { promisify } from "node:util";
 import { createReadStream } from "node:fs";
 import * as z$1 from "zod";
+import debug from "debug";
 const execPromise = promisify(exec);
 const log = debug("ef:assets:probe");
 const AudioStreamSchema = z$1.object({
@@ -64,37 +65,56 @@ const ProbeFormatSchema = z$1.object({
 const DataStreamSchema = z$1.object({
 index: z$1.number(),
 codec_type: z$1.literal("data"),
-duration: z$1.string().optional()
+duration: z$1.string().optional(),
+duration_ts: z$1.number().optional(),
+start_pts: z$1.number().optional()
 });
 const StreamSchema = z$1.discriminatedUnion("codec_type", [
 AudioStreamSchema,
 VideoStreamSchema,
 DataStreamSchema
 ]);
+const PacketSchema = z$1.object({
+stream_index: z$1.number(),
+pts: z$1.number(),
+pts_time: z$1.coerce.number(),
+dts: z$1.number(),
+dts_time: z$1.coerce.number()
+});
 const ProbeSchema = z$1.object({
 streams: z$1.array(StreamSchema),
 format: ProbeFormatSchema
 });
-
-
-
+const PacketProbeSchema = z$1.object({
+packets: z$1.array(PacketSchema),
+format: ProbeFormatSchema,
+streams: z$1.array(StreamSchema)
+});
+const buildProbeArgs = (options) => {
+return [
+"-v",
+"error",
+"-show_format",
+"-show_streams",
+"-of",
+"json",
+...options.showPackets ? ["-show_entries", "packet=stream_index,pts,pts_time,dts,dts_time"] : []
+];
+};
+var FFProbeRunner = class {
+static async probePath(absolutePath, includePackets) {
+const probeCommand = `ffprobe ${buildProbeArgs({ showPackets: includePackets }).join(" ")} ${absolutePath}`;
 log("Probing", probeCommand);
 const probeResult = await execPromise(probeCommand);
 log("Probe result", probeResult.stdout);
 log("Probe stderr", probeResult.stderr);
-
-return new Probe(absolutePath, json);
+return JSON.parse(probeResult.stdout);
 }
-static async probeStream(stream) {
+static async probeStream(stream, includePackets) {
 const probe = spawn("ffprobe", [
 "-i",
 "-",
-
-"error",
-"-show_format",
-"-show_streams",
-"-of",
-"json"
+...buildProbeArgs({ showPackets: includePackets })
 ], { stdio: [
 "pipe",
 "pipe",
@@ -126,24 +146,21 @@ var Probe = class Probe {
 probe.stdout.on("end", () => {
 try {
 const buffer = Buffer.concat(chunks).toString("utf8");
-log("Got probe from stream", buffer);
 resolve(JSON.parse(buffer));
 } catch (error) {
 reject(error);
 }
 });
 }), processExit]);
-return
+return json;
 } finally {
 stream.unpipe(probe.stdin);
 probe.stdin.end();
 stream.destroy();
 }
 }
-
-
-this.data = ProbeSchema.parse(rawData);
-}
+};
+var ProbeBase = class {
 get audioStreams() {
 return this.data.streams.filter((stream) => stream.codec_type === "audio");
 }
@@ -186,6 +203,26 @@ var Probe = class Probe {
 get mustProcess() {
 return this.mustReencodeAudio || this.mustReencodeVideo || this.mustRemux;
 }
+get audioTimebase() {
+const audioStream = this.audioStreams[0];
+if (!audioStream) return null;
+const [num, den] = audioStream.time_base.split("/").map(Number);
+if (num === void 0 || den === void 0) return null;
+return {
+num,
+den
+};
+}
+get videoTimebase() {
+const videoStream = this.videoStreams[0];
+if (!videoStream) return null;
+const [num, den] = videoStream.time_base.split("/").map(Number);
+if (num === void 0 || den === void 0) return null;
+return {
+num,
+den
+};
+}
 get ffmpegAudioInputOptions() {
 if (!this.hasAudio) return [];
 if (this.isMp3) return ["-c:a", "mp3"];
@@ -223,6 +260,9 @@ var Probe = class Probe {
 "filter_units=remove_types=6"
 ];
 }
+constructor(absolutePath) {
+this.absolutePath = absolutePath;
+}
 createConformingReadstream() {
 if (this.absolutePath === "pipe:0") throw new Error("Cannot create conforming readstream from pipe");
 if (!this.mustProcess) return createReadStream(this.absolutePath);
@@ -241,6 +281,7 @@ var Probe = class Probe {
 ...this.ffmpegVideoOutputOptions,
 "-f",
 "mp4",
+"-bitexact",
 ...fragmenterArgs,
 "pipe:1"
 ];
@@ -260,6 +301,7 @@ var Probe = class Probe {
 "copy",
 "-f",
 "mp4",
+"-bitexact",
 ...fragmenterArgs,
 "pipe:1"
 ];
@@ -281,5 +323,98 @@ var Probe = class Probe {
 });
 return ffmpegFragmenter.stdout;
 }
+createTrackReadstream(trackIndex) {
+if (this.absolutePath === "pipe:0") throw new Error("Cannot create track readstream from pipe");
+const track = this.data.streams[trackIndex];
+if (!track) throw new Error(`Track ${trackIndex} not found`);
+const isAudioTrack = track.codec_type === "audio";
+const isVideoTrack = track.codec_type === "video";
+if (!isAudioTrack && !isVideoTrack) throw new Error(`Track ${trackIndex} is not audio or video`);
+const fragmenterArgs = isAudioTrack ? [
+"-movflags",
+"empty_moov+default_base_moof",
+"-frag_duration",
+"4000000"
+] : ["-movflags", "frag_keyframe+empty_moov+default_base_moof"];
+const ffmpegArgs = [
+...this.ffmpegAudioInputOptions,
+...this.ffmpegVideoInputOptions,
+"-i",
+this.absolutePath,
+"-map",
+`0:${trackIndex}`,
+"-c",
+"copy",
+"-f",
+"mp4",
+"-bitexact",
+...fragmenterArgs,
+"pipe:1"
+];
+log("Creating track stream", ffmpegArgs);
+const ffmpegProcess = spawn("ffmpeg", ffmpegArgs, { stdio: [
+"ignore",
+"pipe",
+"pipe"
+] });
+ffmpegProcess.stderr.on("data", (data) => {
+log(`TRACK ${trackIndex}: `, data.toString());
+});
+ffmpegProcess.on("error", (error) => {
+ffmpegProcess.stdout.emit("error", error);
+});
+return ffmpegProcess.stdout;
+}
+};
+var Probe = class Probe extends ProbeBase {
+static async probePath(absolutePath) {
+const json = await FFProbeRunner.probePath(absolutePath, false);
+return new Probe(absolutePath, json);
+}
+static async probeStream(stream) {
+const json = await FFProbeRunner.probeStream(stream, false);
+return new Probe("pipe:0", json);
+}
+constructor(absolutePath, rawData) {
+super(absolutePath);
+this.data = ProbeSchema.parse(rawData);
+}
+};
+var PacketProbe = class PacketProbe extends ProbeBase {
+static async probePath(absolutePath) {
+const json = await FFProbeRunner.probePath(absolutePath, true);
+return new PacketProbe(absolutePath, json);
+}
+static async probeStream(stream) {
+const json = await FFProbeRunner.probeStream(stream, true);
+return new PacketProbe("pipe:0", json);
+}
+constructor(absolutePath, rawData) {
+super(absolutePath);
+this.data = PacketProbeSchema.parse(rawData);
+}
+get packets() {
+return this.data.packets;
+}
+get bestEffortAudioDuration() {
+const stream = this.audioStreams[0];
+if (!stream) throw new Error("No audio stream found");
+return truncateDecimal(((stream.duration_ts ?? 0) - (stream.start_pts ?? 0)) / (this.audioTimebase?.den ?? 0), 5);
+}
+get videoPacketDuration() {
+const videoStream = this.videoStreams[0];
+if (!videoStream) return [];
+const videoPackets = this.packets.filter((packet) => packet.stream_index === videoStream.index);
+const frameRate = videoStream.r_frame_rate;
+const [num, den] = frameRate.split("/").map(Number);
+if (!num || !den) return [];
+const packetDuration = den / num;
+if (videoPackets.length === 0) return [];
+const ptsTimes = videoPackets.map((p) => p.pts_time);
+const minPts = Math.min(...ptsTimes);
+const maxPts = Math.max(...ptsTimes);
+const totalDuration = maxPts - minPts + packetDuration;
+return truncateDecimal(Math.round(totalDuration * 1e4) / 1e4, 5);
+}
 };
-export { Probe };
+export { PacketProbe, Probe };
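
For orientation, a minimal usage sketch of the new PacketProbe class added above (not taken from the package's documentation). The import path is written as if from a sibling module inside dist/, mirroring how the other dist files import "./Probe.js"; the media path is a placeholder.

    import { PacketProbe } from "./Probe.js";

    // Hypothetical input file; PacketProbe.probePath runs ffprobe with the packet entries enabled.
    const probe = await PacketProbe.probePath("/tmp/example.mp4");
    // Packet rows validated by PacketProbeSchema (stream_index, pts, pts_time, dts, dts_time).
    console.log(probe.packets.length);
    // New duration helpers: stream duration_ts/start_pts vs. the packet pts_time span.
    console.log(probe.bestEffortAudioDuration, probe.videoPacketDuration);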
package/dist/generateTrackFragmentIndexMediabunny.d.ts
ADDED
@@ -0,0 +1,3 @@
+import { Readable } from 'node:stream';
+import { TrackFragmentIndex } from './Probe.js';
+export declare const generateTrackFragmentIndexMediabunny: (inputStream: Readable, startTimeOffsetMs?: number, trackIdMapping?: Record<number, number>) => Promise<Record<number, TrackFragmentIndex>>;
package/dist/generateTrackFragmentIndexMediabunny.js
ADDED
@@ -0,0 +1,343 @@
+import debug from "debug";
+import { Transform, Writable } from "node:stream";
+import { pipeline } from "node:stream/promises";
+import { EncodedPacketSink, Input, MP4, StreamSource } from "mediabunny";
+const log = debug("ef:generateTrackFragmentIndexMediabunny");
+/**
+* Streaming MP4 box parser that detects box boundaries without loading entire file into memory
+*/
+var StreamingBoxParser = class extends Transform {
+constructor() {
+super({ objectMode: false });
+this.buffer = Buffer.alloc(0);
+this.globalOffset = 0;
+this.fragments = [];
+this.currentMoof = null;
+this.initSegmentEnd = 0;
+this.foundBoxes = [];
+}
+_transform(chunk, _encoding, callback) {
+this.buffer = Buffer.concat([this.buffer, chunk]);
+this.parseBoxes();
+this.push(chunk);
+callback();
+}
+parseBoxes() {
+let bufferOffset = 0;
+while (this.buffer.length - bufferOffset >= 8) {
+const size = this.buffer.readUInt32BE(bufferOffset);
+const type = this.buffer.subarray(bufferOffset + 4, bufferOffset + 8).toString("ascii");
+if (size === 0 || size < 8 || this.buffer.length < bufferOffset + size) break;
+const box = {
+type,
+offset: this.globalOffset + bufferOffset,
+size,
+headerSize: 8
+};
+log(`Found box: ${box.type} at offset ${box.offset}, size ${box.size}`);
+this.foundBoxes.push(box);
+this.handleBox(box);
+bufferOffset += size;
+}
+this.globalOffset += bufferOffset;
+this.buffer = this.buffer.subarray(bufferOffset);
+}
+handleBox(box) {
+switch (box.type) {
+case "ftyp":
+case "moov":
+this.initSegmentEnd = Math.max(this.initSegmentEnd, box.offset + box.size);
+break;
+case "moof":
+this.currentMoof = box;
+break;
+case "mdat":
+if (this.currentMoof) {
+this.fragments.push({
+type: "media",
+offset: this.currentMoof.offset,
+size: box.offset + box.size - this.currentMoof.offset,
+moofOffset: this.currentMoof.offset,
+mdatOffset: box.offset
+});
+this.currentMoof = null;
+} else this.fragments.push({
+type: "media",
+offset: box.offset,
+size: box.size,
+mdatOffset: box.offset
+});
+break;
+}
+}
+_flush(callback) {
+this.parseBoxes();
+if (this.initSegmentEnd > 0) this.fragments.unshift({
+type: "init",
+offset: 0,
+size: this.initSegmentEnd
+});
+callback();
+}
+getFragments() {
+return this.fragments;
+}
+};
+function extractFragmentData(mediabunnyChunks, initFragment, mediaFragment) {
+const extractBytes = (offset, size) => {
+const buffer = Buffer.alloc(size);
+let written = 0;
+let currentOffset = 0;
+for (const chunk of mediabunnyChunks) {
+if (currentOffset + chunk.length <= offset) {
+currentOffset += chunk.length;
+continue;
+}
+if (currentOffset >= offset + size) break;
+const chunkStart = Math.max(0, offset - currentOffset);
+const chunkEnd = Math.min(chunk.length, offset + size - currentOffset);
+const copySize = chunkEnd - chunkStart;
+chunk.copy(buffer, written, chunkStart, chunkEnd);
+written += copySize;
+currentOffset += chunk.length;
+if (written >= size) break;
+}
+return new Uint8Array(buffer.buffer, buffer.byteOffset, written);
+};
+if (!initFragment) return extractBytes(mediaFragment.offset, mediaFragment.size);
+const initData = extractBytes(initFragment.offset, initFragment.size);
+const mediaData = extractBytes(mediaFragment.offset, mediaFragment.size);
+const combined = new Uint8Array(initData.length + mediaData.length);
+combined.set(initData, 0);
+combined.set(mediaData, initData.length);
+return combined;
+}
+const generateTrackFragmentIndexMediabunny = async (inputStream, startTimeOffsetMs, trackIdMapping) => {
+const parser = new StreamingBoxParser();
+const mediabunnyChunks = [];
+let totalSize = 0;
+const mediabunnyDest = new Writable({ write(chunk, _encoding, callback) {
+mediabunnyChunks.push(chunk);
+totalSize += chunk.length;
+callback();
+} });
+await pipeline(inputStream, parser, mediabunnyDest);
+const fragments = parser.getFragments();
+if (totalSize === 0) return {};
+const source = new StreamSource({
+read: async (start, end) => {
+const size = end - start;
+const buffer = Buffer.alloc(size);
+let written = 0;
+let currentOffset = 0;
+for (const chunk of mediabunnyChunks) {
+if (currentOffset + chunk.length <= start) {
+currentOffset += chunk.length;
+continue;
+}
+if (currentOffset >= end) break;
+const chunkStart = Math.max(0, start - currentOffset);
+const chunkEnd = Math.min(chunk.length, end - currentOffset);
+const copySize = chunkEnd - chunkStart;
+chunk.copy(buffer, written, chunkStart, chunkEnd);
+written += copySize;
+currentOffset += chunk.length;
+if (written >= size) break;
+}
+return new Uint8Array(buffer.buffer, buffer.byteOffset, written);
+},
+getSize: async () => totalSize
+});
+let input;
+let videoTracks;
+let audioTracks;
+try {
+input = new Input({
+formats: [MP4],
+source
+});
+videoTracks = await input.getVideoTracks();
+audioTracks = await input.getAudioTracks();
+} catch (error) {
+console.warn("Failed to parse with Mediabunny:", error);
+return {};
+}
+const trackIndexes = {};
+const initFragment = fragments.find((f) => f.type === "init");
+const mediaFragments = fragments.filter((f) => f.type === "media");
+const videoFragmentTimings = [];
+const audioFragmentTimings = [];
+for (let fragmentIndex = 0; fragmentIndex < mediaFragments.length; fragmentIndex++) {
+const fragment = mediaFragments[fragmentIndex];
+const fragmentData = extractFragmentData(mediabunnyChunks, initFragment, fragment);
+const fragmentSource = new StreamSource({
+read: async (start, end) => fragmentData.subarray(start, end),
+getSize: async () => fragmentData.length
+});
+try {
+const fragmentInput = new Input({
+formats: [MP4],
+source: fragmentSource
+});
+const fragmentVideoTracks = await fragmentInput.getVideoTracks();
+if (fragmentVideoTracks.length > 0) {
+const track = fragmentVideoTracks[0];
+const sink = new EncodedPacketSink(track);
+const packets = [];
+for await (const packet of sink.packets()) packets.push({
+timestamp: packet.timestamp,
+duration: packet.duration
+});
+if (packets.length > 0) {
+const firstPacket = packets[0];
+const lastPacket = packets[packets.length - 1];
+const actualDuration = lastPacket.timestamp + lastPacket.duration - firstPacket.timestamp;
+videoFragmentTimings.push({
+fragmentIndex,
+cts: Math.round(firstPacket.timestamp * track.timeResolution),
+dts: Math.round(firstPacket.timestamp * track.timeResolution),
+duration: Math.round(actualDuration * track.timeResolution),
+sampleCount: packets.length,
+timescale: track.timeResolution
+});
+}
+}
+const fragmentAudioTracks = await fragmentInput.getAudioTracks();
+if (fragmentAudioTracks.length > 0) {
+const track = fragmentAudioTracks[0];
+const sink = new EncodedPacketSink(track);
+const packets = [];
+for await (const packet of sink.packets()) packets.push({
+timestamp: packet.timestamp,
+duration: packet.duration
+});
+if (packets.length > 0) {
+const firstPacket = packets[0];
+const lastPacket = packets[packets.length - 1];
+const actualDuration = lastPacket.timestamp + lastPacket.duration - firstPacket.timestamp;
+audioFragmentTimings.push({
+fragmentIndex,
+cts: Math.round(firstPacket.timestamp * track.timeResolution),
+dts: Math.round(firstPacket.timestamp * track.timeResolution),
+duration: Math.round(actualDuration * track.timeResolution),
+sampleCount: packets.length,
+timescale: track.timeResolution
+});
+}
+}
+} catch (error) {
+console.warn(`Failed to parse fragment ${fragmentIndex}:`, error);
+continue;
+}
+}
+for (const track of videoTracks) {
+const sink = new EncodedPacketSink(track);
+const segments = [];
+let sampleCount = 0;
+let totalDuration = 0;
+const allPackets = [];
+for await (const packet of sink.packets()) {
+allPackets.push({
+timestamp: packet.timestamp,
+duration: packet.duration
+});
+sampleCount++;
+}
+let trackStartTimeOffsetMs;
+if (allPackets.length > 0) {
+const firstPacketTime = allPackets[0].timestamp;
+if (Math.abs(firstPacketTime) > .01) trackStartTimeOffsetMs = firstPacketTime * 1e3;
+}
+if (startTimeOffsetMs !== void 0) trackStartTimeOffsetMs = startTimeOffsetMs;
+const timescale = Math.round(track.timeResolution);
+for (const timing of videoFragmentTimings) {
+const fragment = mediaFragments[timing.fragmentIndex];
+segments.push({
+cts: timing.cts,
+dts: timing.dts,
+duration: timing.duration,
+offset: fragment.offset,
+size: fragment.size
+});
+totalDuration += timing.duration / timescale;
+}
+let width = 1920;
+let height = 1080;
+try {
+const decoderConfig = await track.getDecoderConfig();
+if (decoderConfig) {
+width = decoderConfig.codedWidth ?? decoderConfig.width ?? width;
+height = decoderConfig.codedHeight ?? decoderConfig.height ?? height;
+}
+} catch (e) {}
+const finalTrackId = trackIdMapping?.[track.id] ?? track.id;
+trackIndexes[finalTrackId] = {
+track: finalTrackId,
+type: "video",
+width,
+height,
+timescale: Math.round(track.timeResolution),
+sample_count: sampleCount,
+codec: await track.getCodecParameterString() || "",
+duration: Math.round(totalDuration * track.timeResolution),
+startTimeOffsetMs: trackStartTimeOffsetMs,
+initSegment: {
+offset: 0,
+size: initFragment?.size || 0
+},
+segments
+};
+}
+for (const track of audioTracks) {
+const sink = new EncodedPacketSink(track);
+const segments = [];
+let sampleCount = 0;
+let totalDuration = 0;
+const allPackets = [];
+for await (const packet of sink.packets()) {
+allPackets.push({
+timestamp: packet.timestamp,
+duration: packet.duration
+});
+sampleCount++;
+}
+let trackStartTimeOffsetMs;
+if (allPackets.length > 0) {
+const firstPacketTime = allPackets[0].timestamp;
+if (Math.abs(firstPacketTime) > .01) trackStartTimeOffsetMs = firstPacketTime * 1e3;
+}
+if (startTimeOffsetMs !== void 0) trackStartTimeOffsetMs = startTimeOffsetMs;
+const timescale = Math.round(track.timeResolution);
+for (const timing of audioFragmentTimings) {
+const fragment = mediaFragments[timing.fragmentIndex];
+segments.push({
+cts: timing.cts,
+dts: timing.dts,
+duration: timing.duration,
+offset: fragment.offset,
+size: fragment.size
+});
+totalDuration += timing.duration / timescale;
+}
+const finalTrackId = trackIdMapping?.[track.id] ?? track.id;
+trackIndexes[finalTrackId] = {
+track: finalTrackId,
+type: "audio",
+channel_count: track.numberOfChannels,
+sample_rate: track.sampleRate,
+sample_size: 16,
+sample_count: sampleCount,
+timescale: Math.round(track.timeResolution),
+codec: await track.getCodecParameterString() || "",
+duration: Math.round(totalDuration * track.timeResolution),
+startTimeOffsetMs: trackStartTimeOffsetMs,
+initSegment: {
+offset: 0,
+size: initFragment?.size || 0
+},
+segments
+};
+}
+return trackIndexes;
+};
+export { generateTrackFragmentIndexMediabunny };
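
A minimal sketch of how the new generateTrackFragmentIndexMediabunny helper above might be called, assuming a fragmented-MP4 Readable as input (signature per the .d.ts above; the file path and track-id mapping are hypothetical).

    import { createReadStream } from "node:fs";
    import { generateTrackFragmentIndexMediabunny } from "./generateTrackFragmentIndexMediabunny.js";

    // Hypothetical fragmented MP4, e.g. the output of Probe#createTrackReadstream.
    const input = createReadStream("/tmp/example.track-1.mp4");
    // Optional arguments: a startTimeOffsetMs override and a mediabunny-track-id -> output-track-id mapping.
    const indexes = await generateTrackFragmentIndexMediabunny(input, undefined, { 1: 1 });
    for (const index of Object.values(indexes)) {
      console.log(index.type, index.timescale, index.segments.length);
    }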
package/dist/generateTrackMediabunny.d.ts
ADDED
@@ -0,0 +1,8 @@
+import { PassThrough } from 'node:stream';
+export declare const generateTrackFromPathMediabunny: (absolutePath: string, trackId: number) => Promise<{
+stream: PassThrough;
+fragmentIndex: Promise<Record<number, import('./Probe.js').TrackFragmentIndex>>;
+}>;
+export declare const generateTrackTaskMediabunny: (rootDir: string, absolutePath: string, trackId: number) => Promise<import('./idempotentTask.js').TaskResult>;
+export declare const generateTrackMediabunny: (cacheRoot: string, absolutePath: string, url: string) => Promise<import('./idempotentTask.js').TaskResult>;
+export declare const generateTrackWithIndexMediabunny: (absolutePath: string, trackId: number) => Promise<PassThrough>;
package/dist/generateTrackMediabunny.js
ADDED
@@ -0,0 +1,69 @@
+import { Probe } from "./Probe.js";
+import { idempotentTask } from "./idempotentTask.js";
+import { generateTrackFragmentIndexMediabunny } from "./generateTrackFragmentIndexMediabunny.js";
+import debug from "debug";
+import { basename } from "node:path";
+import { PassThrough } from "node:stream";
+const log = debug("ef:generateTrackMediabunny");
+const generateTrackFromPathMediabunny = async (absolutePath, trackId) => {
+log(`Generating track ${trackId} for ${absolutePath}`);
+const probe = await Probe.probePath(absolutePath);
+const streamIndex = trackId - 1;
+if (streamIndex < 0 || streamIndex >= probe.streams.length) throw new Error(`Track ${trackId} not found (valid tracks: 1-${probe.streams.length})`);
+const trackStream = probe.createTrackReadstream(streamIndex);
+const outputStream = new PassThrough();
+const indexStream = new PassThrough();
+trackStream.pipe(outputStream, { end: false });
+trackStream.pipe(indexStream);
+let sourceStreamEnded = false;
+trackStream.on("end", () => {
+sourceStreamEnded = true;
+});
+trackStream.on("error", (error) => {
+outputStream.destroy(error);
+indexStream.destroy(error);
+});
+const trackIdMapping = { 1: trackId };
+const fragmentIndexPromise = generateTrackFragmentIndexMediabunny(indexStream, void 0, trackIdMapping);
+fragmentIndexPromise.then(() => {
+if (sourceStreamEnded) outputStream.end();
+else trackStream.once("end", () => {
+outputStream.end();
+});
+}).catch((error) => {
+outputStream.destroy(error);
+});
+return {
+stream: outputStream,
+fragmentIndex: fragmentIndexPromise
+};
+};
+const generateTrackTaskMediabunny = idempotentTask({
+label: "track-mediabunny",
+filename: (absolutePath, trackId) => `${basename(absolutePath)}.track-${trackId}.mp4`,
+runner: async (absolutePath, trackId) => {
+const result = await generateTrackFromPathMediabunny(absolutePath, trackId);
+const finalStream = new PassThrough();
+let streamEnded = false;
+let fragmentIndexCompleted = false;
+const checkCompletion = () => {
+if (streamEnded && fragmentIndexCompleted) finalStream.end();
+};
+result.stream.pipe(finalStream, { end: false });
+result.stream.on("end", () => {
+streamEnded = true;
+checkCompletion();
+});
+result.stream.on("error", (error) => {
+finalStream.destroy(error);
+});
+result.fragmentIndex.then(() => {
+fragmentIndexCompleted = true;
+checkCompletion();
+}).catch((error) => {
+finalStream.destroy(error);
+});
+return finalStream;
+}
+});
+export { generateTrackFromPathMediabunny };
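
And a sketch of the stream-plus-index pair returned by generateTrackFromPathMediabunny above; trackId is 1-based per the "valid tracks: 1-N" error message, and the paths are placeholders.

    import { createWriteStream } from "node:fs";
    import { generateTrackFromPathMediabunny } from "./generateTrackMediabunny.js";

    // Hypothetical source file; track 1 maps to ffmpeg stream index 0 inside the helper.
    const { stream, fragmentIndex } = await generateTrackFromPathMediabunny("/tmp/example.mp4", 1);
    stream.pipe(createWriteStream("/tmp/example.track-1.mp4"));
    // Resolves once the teed copy of the track stream has been fully indexed.
    const index = await fragmentIndex;
    console.log(Object.keys(index));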