@editframe/assets 0.18.21-beta.0 → 0.18.22-beta.0

@@ -1,8 +1,25 @@
+ import { PacketProbe } from "./Probe.js";
  import debug from "debug";
- import { Transform, Writable } from "node:stream";
+ import { Readable, Transform, Writable } from "node:stream";
  import { pipeline } from "node:stream/promises";
- import { EncodedPacketSink, Input, MP4, StreamSource } from "mediabunny";
  const log = debug("ef:generateFragmentIndex");
+ function constructH264CodecString(codecTagString, profile, level) {
+ if (codecTagString !== "avc1" || !profile || level === void 0) return codecTagString;
+ const profileMap = {
+ "Baseline": 66,
+ "Main": 77,
+ "High": 100,
+ "High 10": 110,
+ "High 422": 122,
+ "High 444": 244
+ };
+ const profileIdc = profileMap[profile];
+ if (!profileIdc) return codecTagString;
+ const profileHex = profileIdc.toString(16).padStart(2, "0");
+ const constraintFlags = "00";
+ const levelHex = level.toString(16).padStart(2, "0");
+ return `${codecTagString}.${profileHex}${constraintFlags}${levelHex}`;
+ }
  /**
  * Streaming MP4 box parser that detects box boundaries without loading entire file into memory
  */
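
Note: the new `constructH264CodecString` helper assembles an RFC 6381 `avc1.PPCCLL` string (profile_idc, constraint byte, level_idc as hex pairs) from ffprobe-style `profile`/`level` fields, with the constraint byte hard-coded to `00`. A quick sanity check with illustrative inputs (not taken from the diff):

constructH264CodecString("avc1", "High", 41);     // => "avc1.640029" (100 -> 0x64, level 4.1 -> 41 -> 0x29)
constructH264CodecString("avc1", "Baseline", 30); // => "avc1.42001e" (66 -> 0x42, 30 -> 0x1e)
constructH264CodecString("hvc1", "Main", 120);    // => "hvc1" (only avc1 tags are rewritten)
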
@@ -61,12 +78,7 @@ var StreamingBoxParser = class extends Transform {
  mdatOffset: box.offset
  });
  this.currentMoof = null;
- } else this.fragments.push({
- type: "media",
- offset: box.offset,
- size: box.size,
- mdatOffset: box.offset
- });
+ } else log(`Found non-fragmented mdat at offset ${box.offset}, skipping for fragment index`);
  break;
  }
  }
@@ -83,34 +95,18 @@ var StreamingBoxParser = class extends Transform {
  return this.fragments;
  }
  };
- function extractFragmentData(chunks, initFragment, mediaFragment) {
- const extractBytes = (offset, size) => {
- const buffer = Buffer.alloc(size);
- let written = 0;
- let currentOffset = 0;
- for (const chunk of chunks) {
- if (currentOffset + chunk.length <= offset) {
- currentOffset += chunk.length;
- continue;
- }
- if (currentOffset >= offset + size) break;
- const chunkStart = Math.max(0, offset - currentOffset);
- const chunkEnd = Math.min(chunk.length, offset + size - currentOffset);
- const copySize = chunkEnd - chunkStart;
- chunk.copy(buffer, written, chunkStart, chunkEnd);
- written += copySize;
- currentOffset += chunk.length;
- if (written >= size) break;
+ function createFragmentStream(fragmentData) {
+ let offset = 0;
+ return new Readable({ read() {
+ if (offset >= fragmentData.length) {
+ this.push(null);
+ return;
  }
- return new Uint8Array(buffer.buffer, buffer.byteOffset, written);
- };
- if (!initFragment) return extractBytes(mediaFragment.offset, mediaFragment.size);
- const initData = extractBytes(initFragment.offset, initFragment.size);
- const mediaData = extractBytes(mediaFragment.offset, mediaFragment.size);
- const combined = new Uint8Array(initData.length + mediaData.length);
- combined.set(initData, 0);
- combined.set(mediaData, initData.length);
- return combined;
+ const chunkSize = Math.min(64 * 1024, fragmentData.length - offset);
+ const chunk = fragmentData.slice(offset, offset + chunkSize);
+ offset += chunkSize;
+ this.push(Buffer.from(chunk));
+ } });
  }
  const generateFragmentIndex = async (inputStream, startTimeOffsetMs, trackIdMapping) => {
  const parser = new StreamingBoxParser();
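
For context, `createFragmentStream` replaces `extractFragmentData`: instead of splicing init + media fragment bytes into one buffer, it wraps an in-memory `Uint8Array` in a Node `Readable` that pushes 64 KiB chunks until the buffer is exhausted. A minimal usage sketch with a hypothetical buffer size:

// a 150 KiB buffer yields 64 KiB + 64 KiB + 22 KiB chunks, then EOF (null)
const stream = createFragmentStream(new Uint8Array(150 * 1024));
stream.on("data", (chunk) => console.log(chunk.length)); // 65536, 65536, 22528
stream.on("end", () => console.log("done"));
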
@@ -124,166 +120,123 @@ const generateFragmentIndex = async (inputStream, startTimeOffsetMs, trackIdMapp
  await pipeline(inputStream, parser, dest);
  const fragments = parser.getFragments();
  if (totalSize === 0) return {};
- const source = new StreamSource({
- read: async (start, end) => {
- const size = end - start;
- const buffer = Buffer.alloc(size);
- let written = 0;
- let currentOffset = 0;
- for (const chunk of chunks) {
- if (currentOffset + chunk.length <= start) {
- currentOffset += chunk.length;
- continue;
- }
- if (currentOffset >= end) break;
- const chunkStart = Math.max(0, start - currentOffset);
- const chunkEnd = Math.min(chunk.length, end - currentOffset);
- const copySize = chunkEnd - chunkStart;
- chunk.copy(buffer, written, chunkStart, chunkEnd);
- written += copySize;
- currentOffset += chunk.length;
- if (written >= size) break;
- }
- return new Uint8Array(buffer.buffer, buffer.byteOffset, written);
- },
- getSize: async () => totalSize
- });
- let input;
- let videoTracks;
- let audioTracks;
+ const completeData = Buffer.concat(chunks);
+ const completeStream = createFragmentStream(new Uint8Array(completeData.buffer, completeData.byteOffset, completeData.byteLength));
+ let probe;
  try {
- input = new Input({
- formats: [MP4],
- source
- });
- videoTracks = await input.getVideoTracks();
- audioTracks = await input.getAudioTracks();
+ probe = await PacketProbe.probeStream(completeStream);
  } catch (error) {
- console.warn("Failed to parse with parser:", error);
+ console.warn("Failed to probe stream with ffprobe:", error);
  return {};
  }
+ const videoStreams = probe.videoStreams;
+ const audioStreams = probe.audioStreams;
  const trackIndexes = {};
  const initFragment = fragments.find((f) => f.type === "init");
  const mediaFragments = fragments.filter((f) => f.type === "media");
- const videoFragmentTimings = [];
- const audioFragmentTimings = [];
+ const fragmentTimingData = [];
  for (let fragmentIndex = 0; fragmentIndex < mediaFragments.length; fragmentIndex++) {
  const fragment = mediaFragments[fragmentIndex];
- const fragmentData = extractFragmentData(chunks, initFragment, fragment);
- const fragmentSource = new StreamSource({
- read: async (start, end) => fragmentData.subarray(start, end),
- getSize: async () => fragmentData.length
+ const fragmentStart = fragment.offset;
+ const fragmentEnd = fragment.offset + fragment.size;
+ const videoPackets = probe.packets.filter((packet) => {
+ const stream = videoStreams.find((s) => s.index === packet.stream_index);
+ return stream?.codec_type === "video" && packet.pos !== void 0 && packet.pos >= fragmentStart && packet.pos < fragmentEnd;
+ }).map((packet) => ({
+ pts: packet.pts,
+ dts: packet.dts,
+ duration: packet.duration,
+ isKeyframe: packet.flags?.includes("K") ?? false
+ }));
+ const audioPackets = probe.packets.filter((packet) => {
+ const stream = audioStreams.find((s) => s.index === packet.stream_index);
+ return stream?.codec_type === "audio" && packet.pos !== void 0 && packet.pos >= fragmentStart && packet.pos < fragmentEnd;
+ }).map((packet) => ({
+ pts: packet.pts,
+ dts: packet.dts,
+ duration: packet.duration
+ }));
+ fragmentTimingData.push({
+ fragmentIndex,
+ videoPackets,
+ audioPackets
  });
- try {
- const fragmentInput = new Input({
- formats: [MP4],
- source: fragmentSource
- });
- const fragmentVideoTracks = await fragmentInput.getVideoTracks();
- if (fragmentVideoTracks.length > 0) {
- const track = fragmentVideoTracks[0];
- const sink = new EncodedPacketSink(track);
- const packets = [];
- for await (const packet of sink.packets()) packets.push({
- timestamp: packet.timestamp,
- duration: packet.duration
- });
- if (packets.length > 0) {
- const firstPacket = packets[0];
- const lastPacket = packets[packets.length - 1];
- const actualDuration = lastPacket.timestamp + lastPacket.duration - firstPacket.timestamp;
- videoFragmentTimings.push({
- fragmentIndex,
- cts: Math.round(firstPacket.timestamp * track.timeResolution),
- dts: Math.round(firstPacket.timestamp * track.timeResolution),
- duration: Math.round(actualDuration * track.timeResolution),
- sampleCount: packets.length,
- timescale: track.timeResolution
- });
- }
- }
- const fragmentAudioTracks = await fragmentInput.getAudioTracks();
- if (fragmentAudioTracks.length > 0) {
- const track = fragmentAudioTracks[0];
- const sink = new EncodedPacketSink(track);
- const packets = [];
- for await (const packet of sink.packets()) packets.push({
- timestamp: packet.timestamp,
- duration: packet.duration
- });
- if (packets.length > 0) {
- const firstPacket = packets[0];
- const lastPacket = packets[packets.length - 1];
- const actualDuration = lastPacket.timestamp + lastPacket.duration - firstPacket.timestamp;
- audioFragmentTimings.push({
- fragmentIndex,
- cts: Math.round(firstPacket.timestamp * track.timeResolution),
- dts: Math.round(firstPacket.timestamp * track.timeResolution),
- duration: Math.round(actualDuration * track.timeResolution),
- sampleCount: packets.length,
- timescale: track.timeResolution
- });
- }
- }
- } catch (error) {
- console.warn(`Failed to parse fragment ${fragmentIndex}:`, error);
- continue;
- }
  }
- for (const track of videoTracks) {
- const sink = new EncodedPacketSink(track);
+ for (const videoStream of videoStreams) {
  const segments = [];
- let sampleCount = 0;
- let totalDuration = 0;
- const allPackets = [];
- for await (const packet of sink.packets()) {
- allPackets.push({
- timestamp: packet.timestamp,
- duration: packet.duration
- });
- sampleCount++;
+ const totalVideoPackets = probe.packets.filter((p) => p.stream_index === videoStream.index);
+ const keyframePackets = totalVideoPackets.filter((p) => p.flags?.includes("K"));
+ const totalSampleCount = keyframePackets.length;
+ log(`Complete stream has ${totalVideoPackets.length} video packets, ${keyframePackets.length} keyframes for stream ${videoStream.index}`);
+ const timebase = probe.videoTimebase;
+ if (!timebase) {
+ console.warn("No timebase found for video stream");
+ continue;
  }
+ const timescale = Math.round(timebase.den / timebase.num);
  let trackStartTimeOffsetMs;
- if (allPackets.length > 0) {
- const firstPacketTime = allPackets[0].timestamp;
- if (Math.abs(firstPacketTime) > .01) trackStartTimeOffsetMs = firstPacketTime * 1e3;
+ const allVideoPackets = probe.packets.filter((p) => p.stream_index === videoStream.index);
+ if (allVideoPackets.length > 0) {
+ const firstPacketTime = allVideoPackets[0].dts_time;
+ log(`First video packet dts_time: ${firstPacketTime}, pts_time: ${allVideoPackets[0].pts_time}`);
+ const presentationTime = allVideoPackets[0].pts_time;
+ if (Math.abs(presentationTime) > .01) trackStartTimeOffsetMs = presentationTime * 1e3;
  }
  if (startTimeOffsetMs !== void 0) trackStartTimeOffsetMs = startTimeOffsetMs;
- const timescale = Math.round(track.timeResolution);
- let accumulatedDts = 0;
- let accumulatedCts = 0;
- for (const timing of videoFragmentTimings) {
- const fragment = mediaFragments[timing.fragmentIndex];
+ log(`Processing ${fragmentTimingData.length} fragments for video stream ${videoStream.index}`);
+ for (const fragmentData of fragmentTimingData) {
+ const fragment = mediaFragments[fragmentData.fragmentIndex];
+ const videoPackets = fragmentData.videoPackets;
+ log(`Fragment ${fragmentData.fragmentIndex}: ${videoPackets.length} video packets`);
+ if (videoPackets.length === 0) {
+ log(`Skipping fragment ${fragmentData.fragmentIndex} - no video packets`);
+ continue;
+ }
+ const firstPacket = videoPackets[0];
+ const keyframe = videoPackets.find((p) => p.isKeyframe) || firstPacket;
+ const segmentCts = Math.round(keyframe.pts * timescale / timebase.den);
+ const segmentDts = Math.round(keyframe.dts * timescale / timebase.den);
+ const nextFragmentData = fragmentTimingData[fragmentData.fragmentIndex + 1];
+ const nextKeyframe = nextFragmentData?.videoPackets.find((p) => p.isKeyframe);
+ let segmentDuration;
+ if (nextKeyframe) {
+ const nextSegmentCts = Math.round(nextKeyframe.pts * timescale / timebase.den);
+ segmentDuration = nextSegmentCts - segmentCts;
+ } else {
+ const allVideoPackets$1 = probe.packets.filter((p) => {
+ const stream = videoStreams.find((s) => s.index === p.stream_index);
+ return stream?.codec_type === "video";
+ }).sort((a, b) => a.pts - b.pts);
+ const lastPacket = allVideoPackets$1[allVideoPackets$1.length - 1];
+ const streamEnd = Math.round((lastPacket.pts + (lastPacket.duration || 0)) * timescale / timebase.den);
+ segmentDuration = streamEnd - segmentCts;
+ }
  segments.push({
- cts: accumulatedCts,
- dts: accumulatedDts,
- duration: timing.duration,
+ cts: segmentCts,
+ dts: segmentDts,
+ duration: segmentDuration,
  offset: fragment.offset,
  size: fragment.size
  });
- accumulatedDts += timing.duration;
- accumulatedCts += timing.duration;
- totalDuration += timing.duration / timescale;
  }
- let width = 1920;
- let height = 1080;
- try {
- const decoderConfig = await track.getDecoderConfig();
- if (decoderConfig) {
- width = decoderConfig.codedWidth ?? decoderConfig.width ?? width;
- height = decoderConfig.codedHeight ?? decoderConfig.height ?? height;
- }
- } catch (e) {}
- const finalTrackId = trackIdMapping?.[track.id] ?? track.id;
+ let totalDuration = 0;
+ if (totalVideoPackets.length > 0) {
+ const firstPacket = totalVideoPackets[0];
+ const lastPacket = totalVideoPackets[totalVideoPackets.length - 1];
+ const firstPts = Math.round(firstPacket.pts * timescale / timebase.den);
+ const lastPts = Math.round(lastPacket.pts * timescale / timebase.den);
+ totalDuration = lastPts - firstPts;
+ }
+ const finalTrackId = trackIdMapping?.[videoStream.index] ?? videoStream.index + 1;
  trackIndexes[finalTrackId] = {
  track: finalTrackId,
  type: "video",
- width,
- height,
- timescale: Math.round(track.timeResolution),
- sample_count: sampleCount,
- codec: await track.getCodecParameterString() || "",
- duration: Math.round(totalDuration * track.timeResolution),
+ width: videoStream.coded_width || videoStream.width,
+ height: videoStream.coded_height || videoStream.height,
+ timescale,
+ sample_count: totalSampleCount,
+ codec: constructH264CodecString(videoStream.codec_tag_string, videoStream.profile, videoStream.level),
+ duration: totalDuration,
  startTimeOffsetMs: trackStartTimeOffsetMs,
  initSegment: {
  offset: 0,
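
Timing now comes from ffprobe packets instead of mediabunny's `timeResolution`: the track timescale is derived from the stream timebase, and `pts * timescale / timebase.den` converts packet timestamps into timescale ticks. A worked example under an assumed (common) 1/90000 video timebase:

// assumed timebase, e.g. ffprobe time_base "1/90000"
const timebase = { num: 1, den: 90000 };
const timescale = Math.round(timebase.den / timebase.num); // 90000
// for num === 1 the conversion reduces to the identity, so cts stays in ticks
const pts = 180000; // 2 seconds at 90 kHz
const segmentCts = Math.round(pts * timescale / timebase.den); // 180000
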
@@ -292,52 +245,70 @@ const generateFragmentIndex = async (inputStream, startTimeOffsetMs, trackIdMapp
  segments
  };
  }
- for (const track of audioTracks) {
- const sink = new EncodedPacketSink(track);
+ for (const audioStream of audioStreams) {
  const segments = [];
- let sampleCount = 0;
- let totalDuration = 0;
- const allPackets = [];
- for await (const packet of sink.packets()) {
- allPackets.push({
- timestamp: packet.timestamp,
- duration: packet.duration
- });
- sampleCount++;
+ const totalAudioPackets = probe.packets.filter((p) => p.stream_index === audioStream.index);
+ const totalSampleCount = totalAudioPackets.length;
+ const timebase = probe.audioTimebase;
+ if (!timebase) {
+ console.warn("No timebase found for audio stream");
+ continue;
  }
+ const timescale = Math.round(timebase.den / timebase.num);
  let trackStartTimeOffsetMs;
- if (allPackets.length > 0) {
- const firstPacketTime = allPackets[0].timestamp;
- if (Math.abs(firstPacketTime) > .01) trackStartTimeOffsetMs = firstPacketTime * 1e3;
+ const allAudioPackets = probe.packets.filter((p) => p.stream_index === audioStream.index);
+ if (allAudioPackets.length > 0) {
+ const presentationTime = allAudioPackets[0].pts_time;
+ if (Math.abs(presentationTime) > .01) trackStartTimeOffsetMs = presentationTime * 1e3;
  }
  if (startTimeOffsetMs !== void 0) trackStartTimeOffsetMs = startTimeOffsetMs;
- const timescale = Math.round(track.timeResolution);
- let accumulatedDts = 0;
- let accumulatedCts = 0;
- for (const timing of audioFragmentTimings) {
- const fragment = mediaFragments[timing.fragmentIndex];
+ log(`Processing ${fragmentTimingData.length} fragments for audio stream ${audioStream.index}`);
+ for (const fragmentData of fragmentTimingData) {
+ const fragment = mediaFragments[fragmentData.fragmentIndex];
+ const audioPackets = fragmentData.audioPackets;
+ log(`Fragment ${fragmentData.fragmentIndex}: ${audioPackets.length} audio packets`);
+ if (audioPackets.length === 0) {
+ log(`Skipping fragment ${fragmentData.fragmentIndex} - no audio packets`);
+ continue;
+ }
+ const firstPacket = audioPackets[0];
+ const segmentCts = Math.round(firstPacket.pts * timescale / timebase.den);
+ const segmentDts = Math.round(firstPacket.dts * timescale / timebase.den);
+ const nextFragmentData = fragmentTimingData[fragmentData.fragmentIndex + 1];
+ const nextFirstPacket = nextFragmentData?.audioPackets[0];
+ let segmentDuration;
+ if (nextFirstPacket) {
+ const nextSegmentCts = Math.round(nextFirstPacket.pts * timescale / timebase.den);
+ segmentDuration = nextSegmentCts - segmentCts;
+ } else {
+ const allAudioPackets$1 = probe.packets.filter((p) => {
+ const stream = audioStreams.find((s) => s.index === p.stream_index);
+ return stream?.codec_type === "audio";
+ }).sort((a, b) => a.pts - b.pts);
+ const lastPacket = allAudioPackets$1[allAudioPackets$1.length - 1];
+ const streamEnd = Math.round((lastPacket.pts + (lastPacket.duration || 0)) * timescale / timebase.den);
+ segmentDuration = streamEnd - segmentCts;
+ }
  segments.push({
- cts: accumulatedCts,
- dts: accumulatedDts,
- duration: timing.duration,
+ cts: segmentCts,
+ dts: segmentDts,
+ duration: segmentDuration,
  offset: fragment.offset,
  size: fragment.size
  });
- accumulatedDts += timing.duration;
- accumulatedCts += timing.duration;
- totalDuration += timing.duration / timescale;
  }
- const finalTrackId = trackIdMapping?.[track.id] ?? track.id;
+ const totalDuration = segments.reduce((sum, seg) => sum + seg.duration, 0);
+ const finalTrackId = trackIdMapping?.[audioStream.index] ?? audioStream.index + 1;
  trackIndexes[finalTrackId] = {
  track: finalTrackId,
  type: "audio",
- channel_count: track.numberOfChannels,
- sample_rate: track.sampleRate,
- sample_size: 16,
- sample_count: sampleCount,
- timescale: Math.round(track.timeResolution),
- codec: await track.getCodecParameterString() || "",
- duration: Math.round(totalDuration * track.timeResolution),
+ channel_count: audioStream.channels,
+ sample_rate: Number(audioStream.sample_rate),
+ sample_size: audioStream.bits_per_sample,
+ sample_count: totalSampleCount,
+ timescale,
+ codec: audioStream.codec_tag_string || audioStream.codec_name || "",
+ duration: totalDuration,
  startTimeOffsetMs: trackStartTimeOffsetMs,
  initSegment: {
  offset: 0,
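
The audio path mirrors the video path but keys each segment on the fragment's first packet rather than a keyframe, and totals duration by summing segment durations. Under an assumed 1/44100 AAC timebase, the derived timescale equals the sample rate:

const timebase = { num: 1, den: 44100 }; // assumed AAC timebase
const timescale = Math.round(timebase.den / timebase.num); // 44100 ticks/s
// a standard AAC-LC frame is 1024 samples, i.e. 1024 ticks here
const packetDurationMs = (1024 / timescale) * 1000; // ~23.2 ms per packet
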
@@ -25,7 +25,7 @@ const generateTrackFragmentIndexFromPath = async (absolutePath) => {
  const trackId = streamIndex + 1;
  log(`Processing track ${trackId} (${stream.codec_type})`);
  const trackStream = probe.createTrackReadstream(streamIndex);
- const trackIdMapping = { 1: trackId };
+ const trackIdMapping = { 0: trackId };
  const singleTrackIndexes = await generateFragmentIndex(trackStream, startTimeOffsetMs, trackIdMapping);
  Object.assign(trackFragmentIndexes, singleTrackIndexes);
  }
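
The `{ 1: trackId }` to `{ 0: trackId }` fix follows from the probe switch: ffprobe stream indexes are 0-based, while the old mediabunny track IDs were 1-based, and `generateFragmentIndex` now resolves IDs via `trackIdMapping?.[stream.index] ?? stream.index + 1`. Illustrative resolution with a hypothetical original track ID:

const streamIndex = 0; // the only stream in a single-track file
const trackIdMapping = { 0: 3 }; // remap back to original track 3
const finalTrackId = trackIdMapping?.[streamIndex] ?? streamIndex + 1; // => 3
// with no mapping supplied, stream 0 would fall back to track ID 1
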
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@editframe/assets",
- "version": "0.18.21-beta.0",
+ "version": "0.18.22-beta.0",
  "description": "",
  "exports": {
  ".": {
@@ -46,10 +46,8 @@ describe("generateTrackFragmentIndex", () => {
  assert.isNumber(track.sample_rate, `Audio track ${trackId} should have sample_rate`);
  assert.isNumber(track.sample_size, `Audio track ${trackId} should have sample_size`);
  }
-
- console.log(`Track ${trackId} (${track.type}): ${track.segments.length} segments, ${track.sample_count} samples`);
  }
- }, 20000);
+ });

  test("should handle single track files", async () => {
  const fragmentIndex = await generateTrackFragmentIndexFromPath("test-assets/frame-count.mp4");
@@ -60,9 +58,7 @@ describe("generateTrackFragmentIndex", () => {
  const track = fragmentIndex[trackIds[0]!]!;
  assert.equal(track.type, "video", "Should be video track");
  assert.isAbove(track.segments.length, 0, "Should have segments");
-
- console.log(`Single track: ${track.segments.length} segments, ${track.sample_count} samples`);
- }, 15000);
+ });

  test("should generate consistent results with original implementation", async () => {
  // Test that the new implementation produces similar structure to the old one
@@ -87,8 +83,6 @@ describe("generateTrackFragmentIndex", () => {
  assert.isAbove(audioTrack.channel_count, 0, "Audio should have channels");
  assert.isAbove(audioTrack.sample_rate, 0, "Audio should have sample rate");
  assert.isAbove(audioTrack.segments.length, 0, "Audio should have segments");
-
- console.log(`Consistent results: video ${videoTrack.segments.length} segments, audio ${audioTrack.segments.length} segments`);
  }, 20000);

  test("should preserve timing offset detection", async () => {
@@ -98,12 +92,8 @@ describe("generateTrackFragmentIndex", () => {
  const trackIds = Object.keys(fragmentIndex).map(Number);
  const track = fragmentIndex[trackIds[0]!]!;

- if (track.type === 'video' && track.startTimeOffsetMs !== undefined) {
- assert.isNumber(track.startTimeOffsetMs, "Should have timing offset");
- console.log(`Detected timing offset: ${track.startTimeOffsetMs}ms`);
- } else {
- console.log("No timing offset detected (expected for this file)");
- }
+ assert.equal(track.startTimeOffsetMs, 200);
+ assert.equal(track.type, "video");

  // Should still have valid timing data
  assert.isAbove(track.duration, 0, "Should have positive duration");
@@ -49,7 +49,7 @@ export const generateTrackFragmentIndexFromPath = async (

  // Generate single-track file and its fragment index
  const trackStream = probe.createTrackReadstream(streamIndex);
- const trackIdMapping = { 1: trackId }; // Map single-track ID 1 to original track ID
+ const trackIdMapping = { 0: trackId }; // Map single-track stream index 0 to original track ID

  const singleTrackIndexes = await generateFragmentIndex(
  trackStream,