@invintusmedia/tomp4 1.2.0 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/tomp4.js +653 -172
- package/package.json +5 -2
- package/src/fmp4/converter.js +643 -161
- package/src/fmp4/utils.js +13 -6
- package/src/hls-clip.js +459 -0
- package/src/index.d.ts +413 -0
- package/src/index.js +20 -4
- package/src/mp4-clip.js +132 -0
- package/src/muxers/fmp4.js +493 -0
- package/src/muxers/mp4.js +14 -7
- package/src/thumbnail.js +2 -2
- package/src/ts-to-mp4.js +8 -9
package/src/fmp4/utils.js
CHANGED
|
@@ -85,22 +85,29 @@ export function createBox(type, ...payloads) {
|
|
|
85
85
|
* Parse tfhd (track fragment header) box
|
|
86
86
|
* Extracts track ID and default sample values
|
|
87
87
|
* @param {Uint8Array} tfhdData - tfhd box data
|
|
88
|
-
* @
|
|
88
|
+
* @param {{defaultSampleDuration?: number, defaultSampleSize?: number, defaultSampleFlags?: number}} defaults - Defaults (e.g. from trex)
|
|
89
|
+
* @returns {{trackId: number, flags: number, baseDataOffset: number, defaultSampleDuration: number, defaultSampleSize: number, defaultSampleFlags: number}}
|
|
89
90
|
*/
|
|
90
|
-
/**
 * Parse a tfhd (track fragment header) box.
 * Extracts track ID and default sample values.
 * @param {Uint8Array} tfhdData - tfhd box data (full box, starting at the size field)
 * @param {{defaultSampleDuration?: number, defaultSampleSize?: number, defaultSampleFlags?: number}} defaults - Defaults (e.g. from trex)
 * @returns {{trackId: number, flags: number, baseDataOffset: number, defaultSampleDuration: number, defaultSampleSize: number, defaultSampleFlags: number}}
 */
export function parseTfhd(tfhdData, defaults = {}) {
  const view = new DataView(tfhdData.buffer, tfhdData.byteOffset, tfhdData.byteLength);

  // 24-bit tf_flags live in bytes 9..11, after size(4) + type(4) + version(1).
  const flags = (tfhdData[9] << 16) | (tfhdData[10] << 8) | tfhdData[11];

  // Seed the result with caller-provided defaults; optional fields below
  // override them when the corresponding flag bit is present.
  const result = {
    trackId: view.getUint32(12),
    flags,
    baseDataOffset: 0,
    defaultSampleDuration: defaults.defaultSampleDuration || 0,
    defaultSampleSize: defaults.defaultSampleSize || 0,
    defaultSampleFlags: defaults.defaultSampleFlags || 0,
  };

  let cursor = 16;
  if (flags & 0x1) {
    // base-data-offset is a 64-bit field
    result.baseDataOffset = Number(view.getBigUint64(cursor));
    cursor += 8;
  }
  if (flags & 0x2) cursor += 4; // sample-description-index (skipped)
  if (flags & 0x8) {
    result.defaultSampleDuration = view.getUint32(cursor);
    cursor += 4;
  }
  if (flags & 0x10) {
    result.defaultSampleSize = view.getUint32(cursor);
    cursor += 4;
  }
  if (flags & 0x20) {
    result.defaultSampleFlags = view.getUint32(cursor);
    cursor += 4;
  }

  return result;
}
|
|
105
112
|
|
|
106
113
|
/**
|
package/src/hls-clip.js
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* HLS-to-HLS Clipper
|
|
3
|
+
*
|
|
4
|
+
* Clips an HLS stream to a time range, producing a new HLS stream with
|
|
5
|
+
* CMAF (fMP4) segments. Boundary segments are pre-clipped with edit lists
|
|
6
|
+
* for frame-accurate start/end. Middle segments are remuxed on-demand
|
|
7
|
+
* from the original CDN source.
|
|
8
|
+
*
|
|
9
|
+
* @module hls-clip
|
|
10
|
+
*
|
|
11
|
+
* @example
|
|
12
|
+
* const clip = await clipHls('https://example.com/stream.m3u8', {
|
|
13
|
+
* startTime: 30,
|
|
14
|
+
* endTime: 90,
|
|
15
|
+
* });
|
|
16
|
+
*
|
|
17
|
+
* clip.masterPlaylist // modified m3u8 text
|
|
18
|
+
* clip.getMediaPlaylist(0) // variant media playlist
|
|
19
|
+
* clip.getInitSegment(0) // fMP4 init segment (Uint8Array)
|
|
20
|
+
* await clip.getSegment(0, 0) // fMP4 media segment (Uint8Array)
|
|
21
|
+
*/
|
|
22
|
+
|
|
23
|
+
import { parseHls, isHlsUrl, parsePlaylistText, toAbsoluteUrl } from './hls.js';
|
|
24
|
+
import { TSParser, getCodecInfo } from './parsers/mpegts.js';
|
|
25
|
+
import { createInitSegment, createFragment } from './muxers/fmp4.js';
|
|
26
|
+
|
|
27
|
+
// ── constants ─────────────────────────────────────────────
|
|
28
|
+
|
|
29
|
+
const PTS_PER_SECOND = 90000; // MPEG-TS PTS/DTS timestamps run on a 90 kHz clock
|
|
30
|
+
|
|
31
|
+
// ── helpers ───────────────────────────────────────────────
|
|
32
|
+
|
|
33
|
+
/**
 * True when the access unit contains an IDR slice (H.264 NAL type 5),
 * i.e. a frame decodable without any earlier reference frames.
 * @param {{nalUnits: Uint8Array[]}} accessUnit
 * @returns {boolean}
 */
function isKeyframe(accessUnit) {
  return accessUnit.nalUnits.some((nalUnit) => (nalUnit[0] & 0x1F) === 5);
}
|
|
39
|
+
|
|
40
|
+
/**
 * Scan parsed video access units for the first SPS (NAL type 7) and
 * PPS (NAL type 8) NAL units; stop as soon as both are found.
 * @param {{videoAccessUnits: Array<{nalUnits: Uint8Array[]}>}} parser
 * @returns {{sps: ?Uint8Array, pps: ?Uint8Array}} nulls for anything not found
 */
function extractCodecInfo(parser) {
  let sps = null;
  let pps = null;
  scan:
  for (const accessUnit of parser.videoAccessUnits) {
    for (const nal of accessUnit.nalUnits) {
      const nalType = nal[0] & 0x1F;
      if (nalType === 7 && sps === null) {
        sps = nal;
      } else if (nalType === 8 && pps === null) {
        pps = nal;
      }
      if (sps !== null && pps !== null) break scan;
    }
  }
  return { sps, pps };
}
|
|
52
|
+
|
|
53
|
+
/**
 * Parse a full MPEG-TS segment into demuxed access units.
 * @param {Uint8Array} tsData - raw transport-stream bytes
 * @returns {TSParser} finalized parser exposing videoAccessUnits / audioAccessUnits
 */
function parseTs(tsData) {
  const tsParser = new TSParser();
  tsParser.parse(tsData);
  tsParser.finalize(); // flush any buffered partial access units
  return tsParser;
}
|
|
62
|
+
|
|
63
|
+
/**
 * Remux parsed TS data into an fMP4 fragment.
 * Normalizes timestamps to start at the given base times.
 * @param {TSParser} parser - parsed TS segment (video/audio access units)
 * @param {number} sequenceNumber - mfhd sequence number for the fragment
 * @param {number} videoBaseTime - video baseMediaDecodeTime (90 kHz ticks)
 * @param {number} audioBaseTime - audio baseMediaDecodeTime (audio timescale units)
 * @param {number} audioTimescale - audio track timescale (usually the sample rate)
 * @returns {Uint8Array} moof+mdat fragment bytes
 */
function remuxToFragment(parser, sequenceNumber, videoBaseTime, audioBaseTime, audioTimescale) {
  const fragmentOptions = {
    videoSamples: parser.videoAccessUnits,
    audioSamples: parser.audioAccessUnits,
    sequenceNumber,
    videoTimescale: PTS_PER_SECOND,
    audioTimescale,
    videoBaseTime,
    audioBaseTime,
    audioSampleDuration: 1024, // samples per AAC frame
  };
  return createFragment(fragmentOptions);
}
|
|
79
|
+
|
|
80
|
+
/**
 * Clip a parsed TS segment at the start (frame-accurate with preroll)
 * and/or at the end. Returns clipped access units + timing metadata.
 *
 * Decoding must begin on a keyframe, so the clip starts at the last
 * keyframe at/before startTime; the gap between that keyframe and the
 * requested start is reported as prerollPts so the caller can hide it
 * (e.g. via an edit list).
 *
 * NOTE(review): mutates the access units held by `parser` in place when
 * normalizing timestamps — the parser must not be reused afterwards.
 *
 * @param {object} parser - parsed TS data ({videoAccessUnits, audioAccessUnits})
 * @param {number} [startTime] - clip start in seconds, relative to the segment (default 0)
 * @param {number} [endTime] - clip end in seconds, relative to the segment (default: no end clip)
 * @returns {?{videoSamples: object[], audioSamples: object[], prerollPts: number,
 *   playbackDuration: number, mediaDuration: number}} null when no video remains
 */
function clipSegment(parser, startTime, endTime) {
  const startPts = (startTime !== undefined ? startTime : 0) * PTS_PER_SECOND;
  const endPts = (endTime !== undefined ? endTime : Infinity) * PTS_PER_SECOND;
  const videoAUs = parser.videoAccessUnits;
  const audioAUs = parser.audioAccessUnits;

  // Find keyframe at or before startTime
  // (if none exists before startPts, falls back to index 0 even if AU 0
  // is not a keyframe — assumes segments begin on a keyframe; verify)
  let keyframeIdx = 0;
  for (let i = 0; i < videoAUs.length; i++) {
    if (videoAUs[i].pts > startPts) break;
    if (isKeyframe(videoAUs[i])) keyframeIdx = i;
  }

  // Find end index (first AU at/after endPts; exclusive bound for slice)
  let endIdx = videoAUs.length;
  for (let i = keyframeIdx; i < videoAUs.length; i++) {
    if (videoAUs[i].pts >= endPts) { endIdx = i; break; }
  }

  const clippedVideo = videoAUs.slice(keyframeIdx, endIdx);
  if (clippedVideo.length === 0) return null;

  const keyframePts = clippedVideo[0].pts;
  // How far the requested start lies past the keyframe (90 kHz ticks);
  // 0 when the keyframe is exactly at (or after) the requested start.
  const prerollPts = Math.max(0, startPts - keyframePts);

  // Clip audio from keyframe (for A/V sync, matching the fix in ts-to-mp4.js)
  // Audio is allowed to run at most 1 s past the last video PTS.
  const lastVideoPts = clippedVideo[clippedVideo.length - 1].pts;
  const audioEndPts = Math.min(endPts, lastVideoPts + PTS_PER_SECOND);
  const clippedAudio = audioAUs.filter(au => au.pts >= keyframePts && au.pts < audioEndPts);

  // Normalize timestamps to start at 0
  // (in-place mutation — see NOTE above; audio pts assumed to share the
  // 90 kHz video timebase here)
  const offset = keyframePts;
  for (const au of clippedVideo) { au.pts -= offset; au.dts -= offset; }
  for (const au of clippedAudio) { au.pts -= offset; }

  // Calculate durations
  // Single-frame fallback 3003 ≈ one frame at 29.97 fps in 90 kHz ticks —
  // assumes ~NTSC frame rate; TODO confirm against actual stream fps.
  const videoDuration = clippedVideo.length > 1
    ? clippedVideo[clippedVideo.length - 1].dts - clippedVideo[0].dts +
      (clippedVideo[1].dts - clippedVideo[0].dts) // add one frame for last
    : 3003;
  // playbackDuration excludes the hidden preroll; mediaDuration is the
  // full decoded span starting at the keyframe.
  const playbackDuration = (videoDuration - prerollPts) / PTS_PER_SECOND;

  return {
    videoSamples: clippedVideo,
    audioSamples: clippedAudio,
    prerollPts,
    playbackDuration: Math.max(0, playbackDuration),
    mediaDuration: videoDuration / PTS_PER_SECOND,
  };
}
|
|
134
|
+
|
|
135
|
+
// ── HlsClipResult ─────────────────────────────────────────
|
|
136
|
+
|
|
137
|
+
/**
 * Result of an HLS clip operation.
 *
 * Holds one entry per quality variant; boundary segments are pre-clipped
 * fMP4 fragments kept in memory, middle segments are fetched from the
 * original CDN URLs and remuxed lazily by getSegment().
 */
class HlsClipResult {
  /**
   * @param {object} init
   * @param {Array} init.variants - per-variant { bandwidth, resolution, initSegment, segments }
   * @param {number} init.duration - clip length in seconds (endTime - startTime)
   * @param {number} init.startTime - clip start in the source timeline (seconds)
   * @param {number} init.endTime - clip end in the source timeline (seconds)
   */
  constructor({ variants, duration, startTime, endTime }) {
    this._variants = variants; // array of VariantClip
    this.duration = duration;
    this.startTime = startTime;
    this.endTime = endTime;
  }

  /** Number of quality variants */
  get variantCount() {
    return this._variants.length;
  }

  /** Master playlist m3u8 text */
  get masterPlaylist() {
    // A single variant needs no master playlist — return its media playlist.
    if (this._variants.length === 1) {
      return this.getMediaPlaylist(0);
    }
    let m3u8 = '#EXTM3U\n';
    for (let i = 0; i < this._variants.length; i++) {
      const v = this._variants[i];
      const res = v.resolution ? `,RESOLUTION=${v.resolution}` : '';
      m3u8 += `#EXT-X-STREAM-INF:BANDWIDTH=${v.bandwidth}${res}\n`;
      // Synthetic URIs — the host app is expected to serve variant-<i>.m3u8
      // via getMediaPlaylist(i).
      m3u8 += `variant-${i}.m3u8\n`;
    }
    return m3u8;
  }

  /**
   * Get CMAF media playlist for a variant.
   * @param {number} variantIndex
   * @returns {string} m3u8 text
   * @throws {Error} when the variant index does not exist
   */
  getMediaPlaylist(variantIndex = 0) {
    const variant = this._variants[variantIndex];
    if (!variant) throw new Error(`Variant ${variantIndex} not found`);

    // TARGETDURATION must be >= every segment duration (rounded up).
    const maxDur = Math.max(...variant.segments.map(s => s.duration));

    let m3u8 = '#EXTM3U\n';
    m3u8 += '#EXT-X-VERSION:7\n';
    m3u8 += `#EXT-X-TARGETDURATION:${Math.ceil(maxDur)}\n`;
    m3u8 += '#EXT-X-PLAYLIST-TYPE:VOD\n';
    m3u8 += '#EXT-X-MEDIA-SEQUENCE:0\n';
    // fMP4 playlists reference the init segment via EXT-X-MAP; the host app
    // serves init-<variantIndex>.m4s from getInitSegment(variantIndex).
    m3u8 += `#EXT-X-MAP:URI="init-${variantIndex}.m4s"\n`;

    for (let i = 0; i < variant.segments.length; i++) {
      const seg = variant.segments[i];
      m3u8 += `#EXTINF:${seg.duration.toFixed(6)},\n`;
      m3u8 += `segment-${variantIndex}-${i}.m4s\n`;
    }
    m3u8 += '#EXT-X-ENDLIST\n';
    return m3u8;
  }

  /**
   * Get the CMAF init segment for a variant.
   * @param {number} variantIndex
   * @returns {Uint8Array}
   */
  getInitSegment(variantIndex = 0) {
    return this._variants[variantIndex]?.initSegment ?? null;
  }

  /**
   * Get a media segment as fMP4 data.
   * Boundary segments are returned from memory (pre-clipped).
   * Middle segments are fetched from CDN and remuxed on-demand.
   *
   * NOTE(review): remuxed middle segments are not cached — each call
   * refetches and remuxes; consider memoizing if called repeatedly.
   *
   * @param {number} variantIndex
   * @param {number} segmentIndex
   * @returns {Promise<Uint8Array>}
   * @throws {Error} on unknown variant/segment index or failed fetch
   */
  async getSegment(variantIndex = 0, segmentIndex = 0) {
    const variant = this._variants[variantIndex];
    if (!variant) throw new Error(`Variant ${variantIndex} not found`);
    const seg = variant.segments[segmentIndex];
    if (!seg) throw new Error(`Segment ${segmentIndex} not found`);

    // Pre-clipped boundary segments are already in memory
    if (seg.data) return seg.data;

    // Middle segment: fetch from CDN, remux TS → fMP4
    const resp = await fetch(seg.originalUrl);
    if (!resp.ok) throw new Error(`Segment fetch failed: ${resp.status}`);
    const tsData = new Uint8Array(await resp.arrayBuffer());

    const parser = parseTs(tsData);
    const audioTimescale = parser.audioSampleRate || 48000;

    // Normalize timestamps: subtract the segment's original start PTS,
    // then add the segment's position in the clip timeline
    // (audio pts is rebased by the video's first PTS — assumes both
    // elementary streams share the 90 kHz TS timebase; verify)
    const firstVideoPts = parser.videoAccessUnits[0]?.pts ?? 0;
    for (const au of parser.videoAccessUnits) { au.pts -= firstVideoPts; au.dts -= firstVideoPts; }
    for (const au of parser.audioAccessUnits) { au.pts -= firstVideoPts; }

    // Base decode times place this fragment at its offset in the clip
    // timeline, in each track's own timescale.
    const videoBaseTime = Math.round(seg.timelineOffset * PTS_PER_SECOND);
    const audioBaseTime = Math.round(seg.timelineOffset * audioTimescale);

    const fragment = remuxToFragment(
      parser, segmentIndex + 1,
      videoBaseTime, audioBaseTime, audioTimescale
    );

    return fragment;
  }

  /**
   * Get all segment data for a variant (fetches middle segments).
   * Useful for downloading the full clip.
   * Segments are fetched sequentially, in playlist order.
   * @param {number} variantIndex
   * @returns {Promise<Uint8Array[]>}
   */
  async getAllSegments(variantIndex = 0) {
    const variant = this._variants[variantIndex];
    const results = [];
    for (let i = 0; i < variant.segments.length; i++) {
      results.push(await this.getSegment(variantIndex, i));
    }
    return results;
  }
}
|
|
259
|
+
|
|
260
|
+
// ── main function ─────────────────────────────────────────
|
|
261
|
+
|
|
262
|
+
/**
 * Clip an HLS stream to a time range, producing a new HLS stream
 * with CMAF (fMP4) segments.
 *
 * Boundary segments are downloaded, frame-accurately clipped, and kept in
 * memory; middle segments are listed by URL and remuxed on demand by
 * HlsClipResult.getSegment().
 *
 * @param {string|object} source - HLS URL (master or media playlist), or an
 *   already-parsed stream object from parseHls()
 * @param {object} options
 * @param {number} options.startTime - Start time in seconds
 * @param {number} options.endTime - End time in seconds
 * @param {string|number} [options.quality] - 'highest', 'lowest', or bandwidth (default: all)
 * @param {function} [options.onProgress] - Progress callback
 * @returns {Promise<HlsClipResult>}
 * @throws {Error} when startTime/endTime are missing, a playlist or segment
 *   fetch fails, no segments overlap the range, or SPS/PPS cannot be found
 */
export async function clipHls(source, options = {}) {
  const { startTime, endTime, quality, onProgress: log = () => {} } = options;
  if (startTime === undefined || endTime === undefined) {
    throw new Error('clipHls requires both startTime and endTime');
  }

  // Download a TS segment, failing loudly on HTTP errors. The original code
  // skipped the resp.ok check for boundary segments, inconsistent with
  // HlsClipResult.getSegment and the media-playlist fetch below.
  const fetchTsBytes = async (url) => {
    const resp = await fetch(url);
    if (!resp.ok) throw new Error(`Segment fetch failed: ${resp.status}`);
    return new Uint8Array(await resp.arrayBuffer());
  };

  log('Parsing HLS playlist...');
  const stream = typeof source === 'string' ? await parseHls(source, { onProgress: log }) : source;

  // Resolve variants to process
  let variantsToProcess = [];

  if (stream.isMaster) {
    const sorted = stream.qualities; // sorted by bandwidth desc
    if (quality === 'highest') {
      variantsToProcess = [sorted[0]];
    } else if (quality === 'lowest') {
      variantsToProcess = [sorted[sorted.length - 1]];
    } else if (typeof quality === 'number') {
      stream.select(quality);
      variantsToProcess = [stream.selected];
    } else {
      variantsToProcess = sorted; // all variants
    }
  } else {
    // Single media playlist — treat as one variant
    variantsToProcess = [{ url: null, bandwidth: 0, resolution: null, _segments: stream.segments, _initSegmentUrl: stream.initSegmentUrl }];
  }

  log(`Processing ${variantsToProcess.length} variant(s)...`);

  const variants = [];
  for (let vi = 0; vi < variantsToProcess.length; vi++) {
    const variant = variantsToProcess[vi];
    log(`Variant ${vi}: ${variant.resolution || variant.bandwidth || 'default'}`);

    // Get segment list for this variant
    let segments;
    if (variant._segments) {
      segments = variant._segments;
    } else {
      const mediaResp = await fetch(variant.url);
      if (!mediaResp.ok) throw new Error(`Failed to fetch media playlist: ${mediaResp.status}`);
      const mediaText = await mediaResp.text();
      const parsed = parsePlaylistText(mediaText, variant.url);
      segments = parsed.segments;
    }

    if (!segments.length) throw new Error('No segments found');

    // Find segments overlapping the [startTime, endTime) window
    const overlapping = segments.filter(seg => seg.endTime > startTime && seg.startTime < endTime);
    if (!overlapping.length) throw new Error('No segments overlap the clip range');

    const firstSeg = overlapping[0];
    const lastSeg = overlapping[overlapping.length - 1];
    const isSingleSegment = overlapping.length === 1;

    log(`Segments: ${overlapping.length} (${firstSeg.startTime.toFixed(1)}s – ${lastSeg.endTime.toFixed(1)}s)`);

    // Download and parse boundary segments to get codec info + pre-clip.
    // First and last boundary fetches are independent — run them in parallel.
    log('Downloading boundary segments...');
    const [firstTsData, lastTsData] = await Promise.all([
      fetchTsBytes(firstSeg.url),
      isSingleSegment ? Promise.resolve(null) : fetchTsBytes(lastSeg.url),
    ]);
    const firstParser = parseTs(firstTsData);
    const lastParser = lastTsData ? parseTs(lastTsData) : null;

    // Extract codec info from first segment
    const { sps, pps } = extractCodecInfo(firstParser);
    if (!sps || !pps) throw new Error('Could not extract SPS/PPS from video');
    const audioSampleRate = firstParser.audioSampleRate || 48000;
    const audioChannels = firstParser.audioChannels || 2;
    const hasAudio = firstParser.audioAccessUnits.length > 0;
    const audioTimescale = audioSampleRate;

    // Create CMAF init segment
    const initSegment = createInitSegment({
      sps, pps, audioSampleRate, audioChannels, hasAudio,
      videoTimescale: PTS_PER_SECOND,
      audioTimescale,
    });

    // Build the clip segment list
    const clipSegments = [];
    let timelineOffset = 0; // seconds into the clip timeline

    // ── First segment (clipped at start, possibly also at end) ──
    // Convert absolute times to segment-relative times (TS PTS starts at ~0 per segment)
    const firstRelStart = startTime - firstSeg.startTime;
    const firstRelEnd = isSingleSegment ? endTime - firstSeg.startTime : undefined;
    const firstClipped = clipSegment(firstParser, firstRelStart, firstRelEnd);
    if (!firstClipped) throw new Error('First segment clip produced no samples');

    const firstFragment = createFragment({
      videoSamples: firstClipped.videoSamples,
      audioSamples: firstClipped.audioSamples,
      sequenceNumber: 1,
      videoTimescale: PTS_PER_SECOND,
      audioTimescale,
      videoBaseTime: 0,
      audioBaseTime: 0,
      audioSampleDuration: 1024,
    });

    clipSegments.push({
      duration: firstClipped.playbackDuration,
      data: firstFragment, // pre-clipped, in memory
      originalUrl: null,
      timelineOffset: 0,
      isBoundary: true,
    });
    // Advance by the full decoded span (keyframe onward), not the
    // playback duration, so later base times line up with decode times.
    timelineOffset += firstClipped.mediaDuration;

    // ── Middle segments (pass-through, remuxed on demand) ──
    for (let i = 1; i < overlapping.length - 1; i++) {
      const seg = overlapping[i];
      const segDuration = seg.duration;
      clipSegments.push({
        duration: segDuration,
        data: null, // fetched on demand
        originalUrl: seg.url,
        timelineOffset,
        isBoundary: false,
      });
      timelineOffset += segDuration;
    }

    // ── Last segment (clipped at end, if different from first) ──
    if (!isSingleSegment && lastParser) {
      const lastRelEnd = endTime - lastSeg.startTime;
      const lastClipped = clipSegment(lastParser, undefined, lastRelEnd);
      if (lastClipped && lastClipped.videoSamples.length > 0) {
        const lastSeqNum = overlapping.length;
        const lastVideoBaseTime = Math.round(timelineOffset * PTS_PER_SECOND);
        const lastAudioBaseTime = Math.round(timelineOffset * audioTimescale);

        const lastFragment = createFragment({
          videoSamples: lastClipped.videoSamples,
          audioSamples: lastClipped.audioSamples,
          sequenceNumber: lastSeqNum,
          videoTimescale: PTS_PER_SECOND,
          audioTimescale,
          videoBaseTime: lastVideoBaseTime,
          audioBaseTime: lastAudioBaseTime,
          audioSampleDuration: 1024,
        });

        clipSegments.push({
          duration: lastClipped.playbackDuration,
          data: lastFragment,
          originalUrl: null,
          timelineOffset,
          isBoundary: true,
        });
      }
    }

    const totalDuration = clipSegments.reduce((sum, s) => sum + s.duration, 0);
    log(`Clip ready: ${totalDuration.toFixed(2)}s (${clipSegments.length} segments)`);

    variants.push({
      bandwidth: variant.bandwidth || 0,
      resolution: variant.resolution || null,
      initSegment,
      segments: clipSegments,
    });
  }

  const clipDuration = endTime - startTime;
  return new HlsClipResult({
    variants,
    duration: clipDuration,
    startTime,
    endTime,
  });
}
|
|
457
|
+
|
|
458
|
+
export { HlsClipResult };
|
|
459
|
+
export default clipHls;
|