@invintusmedia/tomp4 1.5.0 → 1.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/tomp4.js +2 -2
- package/package.json +1 -1
- package/src/hls-clip.js +97 -132
- package/src/index.js +1 -1
- package/src/codecs/smart-render.js +0 -447
package/dist/tomp4.js
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* toMp4.js v1.5.
|
|
2
|
+
* toMp4.js v1.5.1
|
|
3
3
|
* Convert MPEG-TS and fMP4 to standard MP4
|
|
4
4
|
* https://github.com/TVWIT/toMp4.js
|
|
5
5
|
* MIT License
|
|
@@ -1186,7 +1186,7 @@
|
|
|
1186
1186
|
toMp4.isMpegTs = isMpegTs;
|
|
1187
1187
|
toMp4.isFmp4 = isFmp4;
|
|
1188
1188
|
toMp4.isStandardMp4 = isStandardMp4;
|
|
1189
|
-
toMp4.version = '1.5.
|
|
1189
|
+
toMp4.version = '1.5.1';
|
|
1190
1190
|
|
|
1191
1191
|
return toMp4;
|
|
1192
1192
|
});
|
package/package.json
CHANGED
package/src/hls-clip.js
CHANGED
|
@@ -1,14 +1,16 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* HLS-to-HLS Clipper
|
|
3
3
|
*
|
|
4
|
-
* Clips an HLS stream to a time range, producing a new HLS
|
|
5
|
-
*
|
|
6
|
-
*
|
|
4
|
+
* Clips an HLS stream to a time range, producing a new HLS playlist.
|
|
5
|
+
* Middle segments use original CDN URLs (completely untouched).
|
|
6
|
+
* Boundary segments are re-muxed from the keyframe nearest to the
|
|
7
|
+
* requested start/end times.
|
|
7
8
|
*
|
|
8
|
-
*
|
|
9
|
-
*
|
|
9
|
+
* The result includes `prerollDuration` — the time between the actual
|
|
10
|
+
* start (keyframe) and the requested start. The player should seek
|
|
11
|
+
* past this on load for frame-accurate playback:
|
|
10
12
|
*
|
|
11
|
-
*
|
|
13
|
+
* video.currentTime = clip.prerollDuration;
|
|
12
14
|
*
|
|
13
15
|
* @module hls-clip
|
|
14
16
|
*
|
|
@@ -18,35 +20,17 @@
|
|
|
18
20
|
* endTime: 90,
|
|
19
21
|
* });
|
|
20
22
|
*
|
|
21
|
-
* clip.
|
|
22
|
-
* clip.getMediaPlaylist(0)
|
|
23
|
-
* await clip.getSegment(0, 0) // boundary
|
|
23
|
+
* clip.prerollDuration // seconds to seek past for frame accuracy
|
|
24
|
+
* clip.getMediaPlaylist(0) // HLS playlist
|
|
25
|
+
* await clip.getSegment(0, 0) // boundary TS data
|
|
24
26
|
*/
|
|
25
27
|
|
|
26
28
|
import { parseHls, parsePlaylistText } from './hls.js';
|
|
27
29
|
import { TSParser } from './parsers/mpegts.js';
|
|
28
30
|
import { TSMuxer } from './muxers/mpegts.js';
|
|
29
|
-
import { smartRender, isSmartRenderSupported } from './codecs/smart-render.js';
|
|
30
31
|
|
|
31
32
|
const PTS_PER_SECOND = 90000;
|
|
32
33
|
|
|
33
|
-
/** Wrap raw AAC frame data with a 7-byte ADTS header. */
|
|
34
|
-
function wrapADTS(aacData, sampleRate, channels) {
|
|
35
|
-
const RATES = [96000,88200,64000,48000,44100,32000,24000,22050,16000,12000,11025,8000,7350];
|
|
36
|
-
const sampleRateIndex = RATES.indexOf(sampleRate);
|
|
37
|
-
const frameLength = aacData.length + 7;
|
|
38
|
-
const adts = new Uint8Array(7 + aacData.length);
|
|
39
|
-
adts[0] = 0xFF;
|
|
40
|
-
adts[1] = 0xF1; // MPEG-4, Layer 0, no CRC
|
|
41
|
-
adts[2] = ((1) << 6) | ((sampleRateIndex < 0 ? 4 : sampleRateIndex) << 2) | ((channels >> 2) & 1); // AAC-LC
|
|
42
|
-
adts[3] = ((channels & 3) << 6) | ((frameLength >> 11) & 3);
|
|
43
|
-
adts[4] = (frameLength >> 3) & 0xFF;
|
|
44
|
-
adts[5] = ((frameLength & 7) << 5) | 0x1F;
|
|
45
|
-
adts[6] = 0xFC;
|
|
46
|
-
adts.set(aacData, 7);
|
|
47
|
-
return adts;
|
|
48
|
-
}
|
|
49
|
-
|
|
50
34
|
// ── helpers ───────────────────────────────────────────────
|
|
51
35
|
|
|
52
36
|
function parseTs(tsData) {
|
|
@@ -63,13 +47,25 @@ function isKeyframe(au) {
|
|
|
63
47
|
return false;
|
|
64
48
|
}
|
|
65
49
|
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
50
|
+
function wrapADTS(aacData, sampleRate, channels) {
|
|
51
|
+
const RATES = [96000,88200,64000,48000,44100,32000,24000,22050,16000,12000,11025,8000,7350];
|
|
52
|
+
const sri = RATES.indexOf(sampleRate);
|
|
53
|
+
const len = aacData.length + 7;
|
|
54
|
+
const adts = new Uint8Array(len);
|
|
55
|
+
adts[0] = 0xFF;
|
|
56
|
+
adts[1] = 0xF1;
|
|
57
|
+
adts[2] = (1 << 6) | ((sri < 0 ? 4 : sri) << 2) | ((channels >> 2) & 1);
|
|
58
|
+
adts[3] = ((channels & 3) << 6) | ((len >> 11) & 3);
|
|
59
|
+
adts[4] = (len >> 3) & 0xFF;
|
|
60
|
+
adts[5] = ((len & 7) << 5) | 0x1F;
|
|
61
|
+
adts[6] = 0xFC;
|
|
62
|
+
adts.set(aacData, 7);
|
|
63
|
+
return adts;
|
|
64
|
+
}
|
|
65
|
+
|
|
69
66
|
function muxToTs(videoAUs, audioAUs, audioSampleRate, audioChannels) {
|
|
70
67
|
const muxer = new TSMuxer();
|
|
71
68
|
|
|
72
|
-
// Extract SPS/PPS for the muxer
|
|
73
69
|
let sps = null, pps = null;
|
|
74
70
|
for (const au of videoAUs) {
|
|
75
71
|
for (const nal of au.nalUnits) {
|
|
@@ -82,17 +78,13 @@ function muxToTs(videoAUs, audioAUs, audioSampleRate, audioChannels) {
|
|
|
82
78
|
if (sps && pps) muxer.setSpsPps(sps, pps);
|
|
83
79
|
muxer.setHasAudio(audioAUs.length > 0);
|
|
84
80
|
|
|
85
|
-
// Add audio samples (wrap raw AAC with ADTS headers)
|
|
86
81
|
const sr = audioSampleRate || 48000;
|
|
87
82
|
const ch = audioChannels || 2;
|
|
88
83
|
for (const au of audioAUs) {
|
|
89
|
-
// Check if already has ADTS header
|
|
90
84
|
const hasADTS = au.data.length > 1 && au.data[0] === 0xFF && (au.data[1] & 0xF0) === 0xF0;
|
|
91
|
-
|
|
92
|
-
muxer.addAudioSample(adtsData, au.pts);
|
|
85
|
+
muxer.addAudioSample(hasADTS ? au.data : wrapADTS(au.data, sr, ch), au.pts);
|
|
93
86
|
}
|
|
94
87
|
|
|
95
|
-
// Add video samples
|
|
96
88
|
for (const au of videoAUs) {
|
|
97
89
|
muxer.addVideoNalUnits(au.nalUnits, isKeyframe(au), au.pts, au.dts);
|
|
98
90
|
}
|
|
@@ -101,54 +93,79 @@ function muxToTs(videoAUs, audioAUs, audioSampleRate, audioChannels) {
|
|
|
101
93
|
}
|
|
102
94
|
|
|
103
95
|
/**
|
|
104
|
-
* Clip a parsed TS segment at
|
|
105
|
-
*
|
|
106
|
-
* Falls back to keyframe-accurate when WebCodecs is unavailable.
|
|
96
|
+
* Clip a parsed TS segment. Starts at nearest keyframe, ends at endTime.
|
|
97
|
+
* Returns the preroll (time from keyframe to requested start).
|
|
107
98
|
*/
|
|
108
|
-
|
|
109
|
-
const
|
|
99
|
+
function clipSegment(parser, startTime, endTime) {
|
|
100
|
+
const startPts = (startTime !== undefined ? startTime : 0) * PTS_PER_SECOND;
|
|
101
|
+
const endPts = (endTime !== undefined ? endTime : Infinity) * PTS_PER_SECOND;
|
|
102
|
+
const videoAUs = parser.videoAccessUnits;
|
|
103
|
+
const audioAUs = parser.audioAccessUnits;
|
|
104
|
+
|
|
105
|
+
if (videoAUs.length === 0) return null;
|
|
106
|
+
|
|
107
|
+
// Find keyframe at or before startTime
|
|
108
|
+
let keyframeIdx = 0;
|
|
109
|
+
for (let i = 0; i < videoAUs.length; i++) {
|
|
110
|
+
if (videoAUs[i].pts > startPts) break;
|
|
111
|
+
if (isKeyframe(videoAUs[i])) keyframeIdx = i;
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
// Find end
|
|
115
|
+
let endIdx = videoAUs.length;
|
|
116
|
+
for (let i = keyframeIdx; i < videoAUs.length; i++) {
|
|
117
|
+
if (videoAUs[i].pts >= endPts) { endIdx = i; break; }
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
const clipped = videoAUs.slice(keyframeIdx, endIdx);
|
|
121
|
+
if (clipped.length === 0) return null;
|
|
110
122
|
|
|
111
|
-
|
|
123
|
+
const keyframePts = clipped[0].pts;
|
|
124
|
+
const prerollPts = Math.max(0, startPts - keyframePts);
|
|
112
125
|
|
|
113
|
-
//
|
|
114
|
-
const
|
|
115
|
-
const
|
|
116
|
-
const
|
|
117
|
-
? result.videoAUs[1].dts - result.videoAUs[0].dts
|
|
118
|
-
: 3003;
|
|
119
|
-
const duration = (lastPts - firstPts + frameDuration) / PTS_PER_SECOND;
|
|
126
|
+
// Audio from keyframe (same timeline as video for A/V sync)
|
|
127
|
+
const lastVideoPts = clipped[clipped.length - 1].pts;
|
|
128
|
+
const audioEndPts = Math.min(endPts, lastVideoPts + PTS_PER_SECOND);
|
|
129
|
+
const clippedAudio = audioAUs.filter(au => au.pts >= keyframePts && au.pts < audioEndPts);
|
|
120
130
|
|
|
121
|
-
// Normalize
|
|
122
|
-
const offset =
|
|
123
|
-
for (const au of
|
|
124
|
-
for (const au of
|
|
131
|
+
// Normalize to PTS 0
|
|
132
|
+
const offset = keyframePts;
|
|
133
|
+
for (const au of clipped) { au.pts -= offset; au.dts -= offset; }
|
|
134
|
+
for (const au of clippedAudio) { au.pts -= offset; }
|
|
125
135
|
|
|
126
|
-
|
|
127
|
-
const
|
|
136
|
+
const frameDur = clipped.length > 1 ? clipped[1].dts - clipped[0].dts : 3003;
|
|
137
|
+
const duration = (clipped[clipped.length - 1].dts - clipped[0].dts + frameDur) / PTS_PER_SECOND;
|
|
138
|
+
|
|
139
|
+
const tsData = muxToTs(clipped, clippedAudio, parser.audioSampleRate, parser.audioChannels);
|
|
128
140
|
|
|
129
141
|
return {
|
|
130
142
|
data: tsData,
|
|
131
143
|
duration,
|
|
132
|
-
|
|
133
|
-
smartRenderedFrames: result.smartRenderedFrames || 0,
|
|
144
|
+
preroll: prerollPts / PTS_PER_SECOND,
|
|
134
145
|
};
|
|
135
146
|
}
|
|
136
147
|
|
|
137
148
|
// ── HlsClipResult ─────────────────────────────────────────
|
|
138
149
|
|
|
139
150
|
class HlsClipResult {
|
|
140
|
-
|
|
151
|
+
/**
|
|
152
|
+
* @param {object} opts
|
|
153
|
+
* @param {number} opts.prerollDuration - Seconds to seek past for frame accuracy
|
|
154
|
+
*/
|
|
155
|
+
constructor({ variants, duration, startTime, endTime, prerollDuration }) {
|
|
141
156
|
this._variants = variants;
|
|
142
157
|
this.duration = duration;
|
|
143
158
|
this.startTime = startTime;
|
|
144
159
|
this.endTime = endTime;
|
|
160
|
+
/** Seconds between the keyframe start and the requested startTime.
|
|
161
|
+
* The player should set video.currentTime = prerollDuration on load. */
|
|
162
|
+
this.prerollDuration = prerollDuration;
|
|
145
163
|
}
|
|
146
164
|
|
|
147
165
|
get variantCount() {
|
|
148
166
|
return this._variants.length;
|
|
149
167
|
}
|
|
150
168
|
|
|
151
|
-
/** Master playlist m3u8 text */
|
|
152
169
|
get masterPlaylist() {
|
|
153
170
|
if (this._variants.length === 1) return this.getMediaPlaylist(0);
|
|
154
171
|
let m3u8 = '#EXTM3U\n';
|
|
@@ -161,11 +178,6 @@ class HlsClipResult {
|
|
|
161
178
|
return m3u8;
|
|
162
179
|
}
|
|
163
180
|
|
|
164
|
-
/**
|
|
165
|
-
* Get media playlist for a variant.
|
|
166
|
-
* Boundary segments use custom URLs (served from memory).
|
|
167
|
-
* Middle segments use original CDN URLs.
|
|
168
|
-
*/
|
|
169
181
|
getMediaPlaylist(variantIndex = 0) {
|
|
170
182
|
const variant = this._variants[variantIndex];
|
|
171
183
|
if (!variant) throw new Error(`Variant ${variantIndex} not found`);
|
|
@@ -181,70 +193,37 @@ class HlsClipResult {
|
|
|
181
193
|
for (let i = 0; i < variant.segments.length; i++) {
|
|
182
194
|
const seg = variant.segments[i];
|
|
183
195
|
m3u8 += `#EXTINF:${seg.duration.toFixed(6)},\n`;
|
|
184
|
-
|
|
185
|
-
// Middle segment: original CDN URL (untouched)
|
|
186
|
-
m3u8 += `${seg.originalUrl}\n`;
|
|
187
|
-
} else {
|
|
188
|
-
// Boundary segment: served from memory
|
|
189
|
-
m3u8 += `segment-${variantIndex}-${i}.ts\n`;
|
|
190
|
-
}
|
|
196
|
+
m3u8 += seg.originalUrl || `segment-${variantIndex}-${i}.ts\n`;
|
|
191
197
|
}
|
|
192
198
|
m3u8 += '#EXT-X-ENDLIST\n';
|
|
193
199
|
return m3u8;
|
|
194
200
|
}
|
|
195
201
|
|
|
196
|
-
/**
|
|
197
|
-
* Get a segment's TS data.
|
|
198
|
-
* Boundary segments: return from memory.
|
|
199
|
-
* Middle segments: return null (use originalUrl from playlist).
|
|
200
|
-
*/
|
|
201
202
|
async getSegment(variantIndex = 0, segmentIndex = 0) {
|
|
202
203
|
const variant = this._variants[variantIndex];
|
|
203
204
|
if (!variant) throw new Error(`Variant ${variantIndex} not found`);
|
|
204
205
|
const seg = variant.segments[segmentIndex];
|
|
205
206
|
if (!seg) throw new Error(`Segment ${segmentIndex} not found`);
|
|
206
|
-
|
|
207
207
|
if (seg.data) return seg.data;
|
|
208
|
-
|
|
209
|
-
// Middle segment: fetch from CDN (for cases where caller needs the data)
|
|
210
208
|
if (seg.originalUrl) {
|
|
211
209
|
const resp = await fetch(seg.originalUrl);
|
|
212
210
|
if (!resp.ok) throw new Error(`Segment fetch failed: ${resp.status}`);
|
|
213
211
|
return new Uint8Array(await resp.arrayBuffer());
|
|
214
212
|
}
|
|
215
|
-
|
|
216
213
|
return null;
|
|
217
214
|
}
|
|
218
|
-
|
|
219
|
-
/**
|
|
220
|
-
* Get all segment data (fetches middle segments from CDN).
|
|
221
|
-
*/
|
|
222
|
-
async getAllSegments(variantIndex = 0) {
|
|
223
|
-
const variant = this._variants[variantIndex];
|
|
224
|
-
const results = [];
|
|
225
|
-
for (let i = 0; i < variant.segments.length; i++) {
|
|
226
|
-
results.push(await this.getSegment(variantIndex, i));
|
|
227
|
-
}
|
|
228
|
-
return results;
|
|
229
|
-
}
|
|
230
215
|
}
|
|
231
216
|
|
|
232
|
-
// ── main
|
|
217
|
+
// ── main ──────────────────────────────────────────────────
|
|
233
218
|
|
|
234
219
|
/**
|
|
235
220
|
* Clip an HLS stream to a time range.
|
|
236
221
|
*
|
|
237
|
-
*
|
|
238
|
-
*
|
|
239
|
-
* -
|
|
222
|
+
* Boundary segments start at the nearest keyframe. The result includes
|
|
223
|
+
* `prerollDuration` — the player should seek to this time on load for
|
|
224
|
+
* frame-accurate start.
|
|
240
225
|
*
|
|
241
|
-
*
|
|
242
|
-
* @param {object} options
|
|
243
|
-
* @param {number} options.startTime - Start time in seconds
|
|
244
|
-
* @param {number} options.endTime - End time in seconds
|
|
245
|
-
* @param {string|number} [options.quality] - 'highest', 'lowest', or bandwidth
|
|
246
|
-
* @param {function} [options.onProgress] - Progress callback
|
|
247
|
-
* @returns {Promise<HlsClipResult>}
|
|
226
|
+
* Middle segments use original CDN URLs (completely untouched).
|
|
248
227
|
*/
|
|
249
228
|
export async function clipHls(source, options = {}) {
|
|
250
229
|
const { startTime, endTime, quality, onProgress: log = () => {} } = options;
|
|
@@ -255,7 +234,6 @@ export async function clipHls(source, options = {}) {
|
|
|
255
234
|
log('Parsing HLS playlist...');
|
|
256
235
|
const stream = typeof source === 'string' ? await parseHls(source, { onProgress: log }) : source;
|
|
257
236
|
|
|
258
|
-
// Resolve variants
|
|
259
237
|
let variantsToProcess = [];
|
|
260
238
|
if (stream.isMaster) {
|
|
261
239
|
const sorted = stream.qualities;
|
|
@@ -264,40 +242,30 @@ export async function clipHls(source, options = {}) {
|
|
|
264
242
|
else if (typeof quality === 'number') { stream.select(quality); variantsToProcess = [stream.selected]; }
|
|
265
243
|
else variantsToProcess = sorted;
|
|
266
244
|
} else {
|
|
267
|
-
variantsToProcess = [{
|
|
268
|
-
url: null, bandwidth: 0, resolution: null,
|
|
269
|
-
_segments: stream.segments, _initSegmentUrl: stream.initSegmentUrl,
|
|
270
|
-
}];
|
|
245
|
+
variantsToProcess = [{ url: null, bandwidth: 0, resolution: null, _segments: stream.segments }];
|
|
271
246
|
}
|
|
272
247
|
|
|
273
248
|
log(`Processing ${variantsToProcess.length} variant(s)...`);
|
|
274
|
-
if (isSmartRenderSupported()) {
|
|
275
|
-
log('Smart rendering: enabled (WebCodecs)');
|
|
276
|
-
} else {
|
|
277
|
-
log('Smart rendering: unavailable (keyframe-accurate fallback)');
|
|
278
|
-
}
|
|
279
249
|
|
|
280
250
|
const variants = [];
|
|
251
|
+
let prerollDuration = 0;
|
|
281
252
|
|
|
282
253
|
for (let vi = 0; vi < variantsToProcess.length; vi++) {
|
|
283
254
|
const variant = variantsToProcess[vi];
|
|
284
255
|
log(`Variant ${vi}: ${variant.resolution || variant.bandwidth || 'default'}`);
|
|
285
256
|
|
|
286
|
-
// Get segment list
|
|
287
257
|
let segments;
|
|
288
258
|
if (variant._segments) {
|
|
289
259
|
segments = variant._segments;
|
|
290
260
|
} else {
|
|
291
|
-
const
|
|
292
|
-
if (!
|
|
293
|
-
const
|
|
294
|
-
const parsed = parsePlaylistText(mediaText, variant.url);
|
|
261
|
+
const resp = await fetch(variant.url);
|
|
262
|
+
if (!resp.ok) throw new Error(`Failed to fetch media playlist: ${resp.status}`);
|
|
263
|
+
const parsed = parsePlaylistText(await resp.text(), variant.url);
|
|
295
264
|
segments = parsed.segments;
|
|
296
265
|
}
|
|
297
266
|
|
|
298
267
|
if (!segments.length) throw new Error('No segments found');
|
|
299
268
|
|
|
300
|
-
// Find overlapping segments
|
|
301
269
|
const overlapping = segments.filter(seg => seg.endTime > startTime && seg.startTime < endTime);
|
|
302
270
|
if (!overlapping.length) throw new Error('No segments overlap the clip range');
|
|
303
271
|
|
|
@@ -307,56 +275,52 @@ export async function clipHls(source, options = {}) {
|
|
|
307
275
|
|
|
308
276
|
log(`Segments: ${overlapping.length} (${firstSeg.startTime.toFixed(1)}s – ${lastSeg.endTime.toFixed(1)}s)`);
|
|
309
277
|
|
|
310
|
-
// Download and clip boundary
|
|
278
|
+
// Download and clip first boundary segment
|
|
311
279
|
log('Downloading boundary segments...');
|
|
312
280
|
const firstData = new Uint8Array(await (await fetch(firstSeg.url)).arrayBuffer());
|
|
313
281
|
const firstParser = parseTs(firstData);
|
|
314
|
-
|
|
315
282
|
const firstRelStart = startTime - firstSeg.startTime;
|
|
316
283
|
const firstRelEnd = isSingleSegment ? endTime - firstSeg.startTime : undefined;
|
|
317
|
-
const firstClipped =
|
|
284
|
+
const firstClipped = clipSegment(firstParser, firstRelStart, firstRelEnd);
|
|
318
285
|
if (!firstClipped) throw new Error('First segment clip produced no samples');
|
|
319
286
|
|
|
287
|
+
// Preroll from the first variant (all variants have similar GOP structure)
|
|
288
|
+
if (vi === 0) prerollDuration = firstClipped.preroll;
|
|
289
|
+
|
|
320
290
|
const clipSegments = [];
|
|
321
291
|
|
|
322
|
-
// First segment (boundary, in memory)
|
|
323
292
|
clipSegments.push({
|
|
324
293
|
duration: firstClipped.duration,
|
|
325
294
|
data: firstClipped.data,
|
|
326
295
|
originalUrl: null,
|
|
327
|
-
isBoundary: true,
|
|
328
|
-
smartRendered: firstClipped.smartRendered,
|
|
329
296
|
});
|
|
330
297
|
|
|
331
|
-
// Middle segments
|
|
298
|
+
// Middle segments: original CDN URLs
|
|
332
299
|
for (let i = 1; i < overlapping.length - 1; i++) {
|
|
333
300
|
clipSegments.push({
|
|
334
301
|
duration: overlapping[i].duration,
|
|
335
302
|
data: null,
|
|
336
303
|
originalUrl: overlapping[i].url,
|
|
337
|
-
isBoundary: false,
|
|
338
304
|
});
|
|
339
305
|
}
|
|
340
306
|
|
|
341
|
-
// Last
|
|
307
|
+
// Last boundary segment
|
|
342
308
|
if (!isSingleSegment) {
|
|
343
309
|
const lastData = new Uint8Array(await (await fetch(lastSeg.url)).arrayBuffer());
|
|
344
310
|
const lastParser = parseTs(lastData);
|
|
345
311
|
const lastRelEnd = endTime - lastSeg.startTime;
|
|
346
|
-
const lastClipped =
|
|
312
|
+
const lastClipped = clipSegment(lastParser, undefined, lastRelEnd);
|
|
347
313
|
if (lastClipped && lastClipped.data) {
|
|
348
314
|
clipSegments.push({
|
|
349
315
|
duration: lastClipped.duration,
|
|
350
316
|
data: lastClipped.data,
|
|
351
317
|
originalUrl: null,
|
|
352
|
-
isBoundary: true,
|
|
353
|
-
smartRendered: lastClipped.smartRendered,
|
|
354
318
|
});
|
|
355
319
|
}
|
|
356
320
|
}
|
|
357
321
|
|
|
358
322
|
const totalDuration = clipSegments.reduce((sum, s) => sum + s.duration, 0);
|
|
359
|
-
log(`Clip ready: ${totalDuration.toFixed(2)}s (${clipSegments.length} segments)`);
|
|
323
|
+
log(`Clip ready: ${totalDuration.toFixed(2)}s (${clipSegments.length} segments, preroll: ${firstClipped.preroll.toFixed(2)}s)`);
|
|
360
324
|
|
|
361
325
|
variants.push({
|
|
362
326
|
bandwidth: variant.bandwidth || 0,
|
|
@@ -370,6 +334,7 @@ export async function clipHls(source, options = {}) {
|
|
|
370
334
|
duration: endTime - startTime,
|
|
371
335
|
startTime,
|
|
372
336
|
endTime,
|
|
337
|
+
prerollDuration,
|
|
373
338
|
});
|
|
374
339
|
}
|
|
375
340
|
|
package/src/index.js
CHANGED
|
@@ -1,447 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Smart Rendering via WebCodecs
|
|
3
|
-
*
|
|
4
|
-
* Re-encodes the boundary GOP of an HLS segment using the browser's
|
|
5
|
-
* native WebCodecs API. Decodes preroll frames, re-encodes the target
|
|
6
|
-
* frame as a new keyframe, and re-encodes subsequent frames until the
|
|
7
|
-
* next original keyframe. Original compressed data is used from the
|
|
8
|
-
* next keyframe onward.
|
|
9
|
-
*
|
|
10
|
-
* Falls back to keyframe-accurate clipping when WebCodecs is unavailable
|
|
11
|
-
* (e.g., Node.js).
|
|
12
|
-
*
|
|
13
|
-
* @module codecs/smart-render
|
|
14
|
-
*/
|
|
15
|
-
|
|
16
|
-
/**
|
|
17
|
-
* Check if WebCodecs is available in the current environment.
|
|
18
|
-
*/
|
|
19
|
-
export function isSmartRenderSupported() {
|
|
20
|
-
return typeof VideoDecoder !== 'undefined' && typeof VideoEncoder !== 'undefined';
|
|
21
|
-
}
|
|
22
|
-
|
|
23
|
-
/**
|
|
24
|
-
* Smart-render a TS segment to produce a frame-accurate cut.
|
|
25
|
-
*
|
|
26
|
-
* Decodes from the keyframe before targetTime, re-encodes frames from
|
|
27
|
-
* targetTime onward as new H.264 NAL units (starting with an IDR keyframe),
|
|
28
|
-
* and uses original data from the next keyframe onward.
|
|
29
|
-
*
|
|
30
|
-
* @param {object} parser - Parsed TS segment (TSParser output)
|
|
31
|
-
* @param {number} targetStartTime - Start time in seconds (relative to segment)
|
|
32
|
-
* @param {object} [options]
|
|
33
|
-
* @param {number} [options.endTime] - End time in seconds (relative to segment)
|
|
34
|
-
* @param {number} [options.bitrate] - Encoding bitrate (default: auto from source)
|
|
35
|
-
* @returns {Promise<object>} { videoAUs, audioAUs, actualStartTime }
|
|
36
|
-
*/
|
|
37
|
-
export async function smartRender(parser, targetStartTime, options = {}) {
|
|
38
|
-
if (!isSmartRenderSupported()) {
|
|
39
|
-
return keyframeAccurateFallback(parser, targetStartTime, options);
|
|
40
|
-
}
|
|
41
|
-
|
|
42
|
-
const { endTime = Infinity } = options;
|
|
43
|
-
const PTS = 90000;
|
|
44
|
-
const targetPts = targetStartTime * PTS;
|
|
45
|
-
const endPts = endTime * PTS;
|
|
46
|
-
const videoAUs = parser.videoAccessUnits;
|
|
47
|
-
const audioAUs = parser.audioAccessUnits;
|
|
48
|
-
|
|
49
|
-
if (videoAUs.length === 0) {
|
|
50
|
-
return { videoAUs: [], audioAUs: [], actualStartTime: targetStartTime };
|
|
51
|
-
}
|
|
52
|
-
|
|
53
|
-
// Find keyframe at or before targetTime
|
|
54
|
-
let keyframeIdx = 0;
|
|
55
|
-
for (let i = 0; i < videoAUs.length; i++) {
|
|
56
|
-
if (videoAUs[i].pts > targetPts) break;
|
|
57
|
-
if (_isKeyframe(videoAUs[i])) keyframeIdx = i;
|
|
58
|
-
}
|
|
59
|
-
|
|
60
|
-
// Find target frame (first frame at or after targetTime)
|
|
61
|
-
let targetIdx = keyframeIdx;
|
|
62
|
-
for (let i = keyframeIdx; i < videoAUs.length; i++) {
|
|
63
|
-
if (videoAUs[i].pts >= targetPts) { targetIdx = i; break; }
|
|
64
|
-
}
|
|
65
|
-
|
|
66
|
-
// If target IS the keyframe, no smart rendering needed
|
|
67
|
-
if (targetIdx === keyframeIdx) {
|
|
68
|
-
return keyframeAccurateFallback(parser, targetStartTime, options);
|
|
69
|
-
}
|
|
70
|
-
|
|
71
|
-
// Find next keyframe after target
|
|
72
|
-
let nextKeyframeIdx = videoAUs.length;
|
|
73
|
-
for (let i = targetIdx + 1; i < videoAUs.length; i++) {
|
|
74
|
-
if (_isKeyframe(videoAUs[i])) { nextKeyframeIdx = i; break; }
|
|
75
|
-
}
|
|
76
|
-
|
|
77
|
-
// Find end frame
|
|
78
|
-
let endIdx = videoAUs.length;
|
|
79
|
-
for (let i = 0; i < videoAUs.length; i++) {
|
|
80
|
-
if (videoAUs[i].pts >= endPts) { endIdx = i; break; }
|
|
81
|
-
}
|
|
82
|
-
|
|
83
|
-
// Extract SPS/PPS for decoder configuration
|
|
84
|
-
let sps = null, pps = null;
|
|
85
|
-
for (const au of videoAUs) {
|
|
86
|
-
for (const nal of au.nalUnits) {
|
|
87
|
-
const t = nal[0] & 0x1F;
|
|
88
|
-
if (t === 7 && !sps) sps = nal;
|
|
89
|
-
if (t === 8 && !pps) pps = nal;
|
|
90
|
-
}
|
|
91
|
-
if (sps && pps) break;
|
|
92
|
-
}
|
|
93
|
-
if (!sps || !pps) {
|
|
94
|
-
return keyframeAccurateFallback(parser, targetStartTime, options);
|
|
95
|
-
}
|
|
96
|
-
|
|
97
|
-
// Parse dimensions from SPS (simplified — just need width/height for encoder config)
|
|
98
|
-
const { width, height } = _parseSPSDimensions(sps);
|
|
99
|
-
|
|
100
|
-
// Estimate bitrate from the original segment
|
|
101
|
-
let totalBytes = 0;
|
|
102
|
-
for (const au of videoAUs) {
|
|
103
|
-
for (const nal of au.nalUnits) totalBytes += nal.length;
|
|
104
|
-
}
|
|
105
|
-
const segDuration = videoAUs.length > 1
|
|
106
|
-
? (videoAUs[videoAUs.length - 1].pts - videoAUs[0].pts) / PTS
|
|
107
|
-
: 1;
|
|
108
|
-
const estimatedBitrate = options.bitrate || Math.round((totalBytes * 8) / segDuration);
|
|
109
|
-
|
|
110
|
-
try {
|
|
111
|
-
// ── Step 1: Decode preroll frames using VideoDecoder ──
|
|
112
|
-
const decodedFrames = await _decodeFrames(videoAUs, keyframeIdx, Math.min(nextKeyframeIdx, endIdx), sps, pps, width, height);
|
|
113
|
-
|
|
114
|
-
// ── Step 2: Re-encode from targetIdx onward using VideoEncoder ──
|
|
115
|
-
const reEncodedNALs = await _encodeFrames(
|
|
116
|
-
decodedFrames, targetIdx - keyframeIdx, Math.min(nextKeyframeIdx, endIdx) - keyframeIdx,
|
|
117
|
-
width, height, estimatedBitrate
|
|
118
|
-
);
|
|
119
|
-
|
|
120
|
-
// ── Step 3: Build output access units ──
|
|
121
|
-
const outputVideo = [];
|
|
122
|
-
const targetPtsActual = videoAUs[targetIdx].pts;
|
|
123
|
-
|
|
124
|
-
// Re-encoded frames (targetIdx to nextKeyframeIdx)
|
|
125
|
-
for (let i = 0; i < reEncodedNALs.length; i++) {
|
|
126
|
-
const srcIdx = targetIdx + i;
|
|
127
|
-
if (srcIdx >= endIdx) break;
|
|
128
|
-
outputVideo.push({
|
|
129
|
-
nalUnits: i === 0
|
|
130
|
-
? [sps, pps, ...reEncodedNALs[i]] // First frame gets SPS/PPS
|
|
131
|
-
: reEncodedNALs[i],
|
|
132
|
-
pts: videoAUs[srcIdx].pts,
|
|
133
|
-
dts: videoAUs[srcIdx].dts,
|
|
134
|
-
});
|
|
135
|
-
}
|
|
136
|
-
|
|
137
|
-
// Original frames from next keyframe onward
|
|
138
|
-
for (let i = nextKeyframeIdx; i < endIdx; i++) {
|
|
139
|
-
outputVideo.push(videoAUs[i]);
|
|
140
|
-
}
|
|
141
|
-
|
|
142
|
-
// Clip audio to match
|
|
143
|
-
const audioStartPts = targetPtsActual;
|
|
144
|
-
const audioEndPts = endIdx < videoAUs.length ? videoAUs[endIdx - 1].pts + PTS : Infinity;
|
|
145
|
-
const outputAudio = audioAUs.filter(au => au.pts >= audioStartPts && au.pts < audioEndPts);
|
|
146
|
-
|
|
147
|
-
// Clean up decoded frames
|
|
148
|
-
for (const frame of decodedFrames) {
|
|
149
|
-
if (frame && typeof frame.close === 'function') frame.close();
|
|
150
|
-
}
|
|
151
|
-
|
|
152
|
-
return {
|
|
153
|
-
videoAUs: outputVideo,
|
|
154
|
-
audioAUs: outputAudio,
|
|
155
|
-
actualStartTime: targetPtsActual / PTS,
|
|
156
|
-
smartRenderedFrames: reEncodedNALs.length,
|
|
157
|
-
originalFrames: Math.max(0, endIdx - nextKeyframeIdx),
|
|
158
|
-
};
|
|
159
|
-
} catch (e) {
|
|
160
|
-
// WebCodecs failed — fall back to keyframe-accurate
|
|
161
|
-
console.warn('Smart render failed, falling back to keyframe-accurate:', e.message);
|
|
162
|
-
return keyframeAccurateFallback(parser, targetStartTime, options);
|
|
163
|
-
}
|
|
164
|
-
}
|
|
165
|
-
|
|
166
|
-
// ── WebCodecs decode ──────────────────────────────────────
|
|
167
|
-
|
|
168
|
-
async function _decodeFrames(videoAUs, startIdx, endIdx, sps, pps, width, height) {
|
|
169
|
-
const frames = [];
|
|
170
|
-
let resolveFrame;
|
|
171
|
-
|
|
172
|
-
const decoder = new VideoDecoder({
|
|
173
|
-
output(frame) {
|
|
174
|
-
frames.push(frame);
|
|
175
|
-
if (resolveFrame) resolveFrame();
|
|
176
|
-
},
|
|
177
|
-
error(e) {
|
|
178
|
-
console.error('VideoDecoder error:', e);
|
|
179
|
-
},
|
|
180
|
-
});
|
|
181
|
-
|
|
182
|
-
// Build avcC description for decoder config
|
|
183
|
-
const description = _buildAvcCDescription(sps, pps);
|
|
184
|
-
|
|
185
|
-
decoder.configure({
|
|
186
|
-
codec: 'avc1.' + _avcProfileString(sps),
|
|
187
|
-
codedWidth: width,
|
|
188
|
-
codedHeight: height,
|
|
189
|
-
description,
|
|
190
|
-
optimizeForLatency: true,
|
|
191
|
-
});
|
|
192
|
-
|
|
193
|
-
// Feed frames from keyframe to endIdx
|
|
194
|
-
for (let i = startIdx; i < endIdx; i++) {
|
|
195
|
-
const au = videoAUs[i];
|
|
196
|
-
const isKey = _isKeyframe(au);
|
|
197
|
-
|
|
198
|
-
// Convert NAL units to AVCC format (4-byte length prefix)
|
|
199
|
-
const avccData = _nalUnitsToAVCC(au.nalUnits);
|
|
200
|
-
|
|
201
|
-
const chunk = new EncodedVideoChunk({
|
|
202
|
-
type: isKey ? 'key' : 'delta',
|
|
203
|
-
timestamp: au.pts, // microseconds for WebCodecs? No, we use our PTS
|
|
204
|
-
data: avccData,
|
|
205
|
-
});
|
|
206
|
-
|
|
207
|
-
const framePromise = new Promise(r => { resolveFrame = r; });
|
|
208
|
-
decoder.decode(chunk);
|
|
209
|
-
await framePromise;
|
|
210
|
-
}
|
|
211
|
-
|
|
212
|
-
await decoder.flush();
|
|
213
|
-
decoder.close();
|
|
214
|
-
|
|
215
|
-
return frames;
|
|
216
|
-
}
|
|
217
|
-
|
|
218
|
-
// ── WebCodecs encode ──────────────────────────────────────
|
|
219
|
-
|
|
220
|
-
async function _encodeFrames(decodedFrames, startOffset, endOffset, width, height, bitrate) {
|
|
221
|
-
const encodedNALs = [];
|
|
222
|
-
let resolveChunk;
|
|
223
|
-
|
|
224
|
-
const encoder = new VideoEncoder({
|
|
225
|
-
output(chunk, metadata) {
|
|
226
|
-
// Extract H.264 NAL units from the encoded chunk
|
|
227
|
-
const buffer = new Uint8Array(chunk.byteLength);
|
|
228
|
-
chunk.copyTo(buffer);
|
|
229
|
-
|
|
230
|
-
// The encoder output is in AVCC format — convert to NAL units
|
|
231
|
-
const nals = _avccToNALUnits(buffer);
|
|
232
|
-
encodedNALs.push(nals);
|
|
233
|
-
if (resolveChunk) resolveChunk();
|
|
234
|
-
},
|
|
235
|
-
error(e) {
|
|
236
|
-
console.error('VideoEncoder error:', e);
|
|
237
|
-
},
|
|
238
|
-
});
|
|
239
|
-
|
|
240
|
-
encoder.configure({
|
|
241
|
-
codec: 'avc1.640028', // High profile, level 4.0
|
|
242
|
-
width,
|
|
243
|
-
height,
|
|
244
|
-
bitrate,
|
|
245
|
-
framerate: 30,
|
|
246
|
-
latencyMode: 'quality',
|
|
247
|
-
avc: { format: 'annexb' }, // Get Annex B output (start codes)
|
|
248
|
-
});
|
|
249
|
-
|
|
250
|
-
for (let i = startOffset; i < Math.min(endOffset, decodedFrames.length); i++) {
|
|
251
|
-
const frame = decodedFrames[i];
|
|
252
|
-
if (!frame) continue;
|
|
253
|
-
|
|
254
|
-
const chunkPromise = new Promise(r => { resolveChunk = r; });
|
|
255
|
-
encoder.encode(frame, { keyFrame: i === startOffset }); // First frame = keyframe
|
|
256
|
-
await chunkPromise;
|
|
257
|
-
}
|
|
258
|
-
|
|
259
|
-
await encoder.flush();
|
|
260
|
-
encoder.close();
|
|
261
|
-
|
|
262
|
-
return encodedNALs;
|
|
263
|
-
}
|
|
264
|
-
|
|
265
|
-
// ── Keyframe-accurate fallback ────────────────────────────
|
|
266
|
-
|
|
267
|
-
/**
 * Keyframe-accurate clip fallback: selects video access units from the
 * latest keyframe at or before `targetStartTime` up to (not including)
 * the first AU at or past `options.endTime`, plus the matching audio AUs.
 *
 * @param {{videoAccessUnits: Array, audioAccessUnits: Array}} parser
 *   Parsed stream; AUs carry a 90 kHz `pts`.
 * @param {number} targetStartTime - Requested start, in seconds.
 * @param {{endTime?: number}} [options] - Requested end in seconds (default Infinity).
 * @returns {{videoAUs: Array, audioAUs: Array, actualStartTime: number,
 *   smartRenderedFrames: number, originalFrames: number}}
 */
function keyframeAccurateFallback(parser, targetStartTime, options = {}) {
  const { endTime = Infinity } = options;
  const PTS = 90000; // MPEG-TS 90 kHz clock
  const targetPts = targetStartTime * PTS;
  const endPts = endTime * PTS;
  const videoAUs = parser.videoAccessUnits;
  const audioAUs = parser.audioAccessUnits;

  if (videoAUs.length === 0) {
    // Return the same result shape as the non-empty path so callers can
    // always read smartRenderedFrames / originalFrames.
    return {
      videoAUs: [],
      audioAUs: [],
      actualStartTime: targetStartTime,
      smartRenderedFrames: 0,
      originalFrames: 0,
    };
  }

  // Latest keyframe at or before the requested start time.
  let keyframeIdx = 0;
  for (let i = 0; i < videoAUs.length; i++) {
    if (videoAUs[i].pts > targetPts) break;
    if (_isKeyframe(videoAUs[i])) keyframeIdx = i;
  }

  // First access unit at or past the requested end time.
  let endIdx = videoAUs.length;
  for (let i = 0; i < videoAUs.length; i++) {
    if (videoAUs[i].pts >= endPts) { endIdx = i; break; }
  }

  const clippedVideo = videoAUs.slice(keyframeIdx, endIdx);
  const startPts = clippedVideo.length > 0 ? clippedVideo[0].pts : 0;
  // NOTE(review): pads one full second (90000 ticks) of audio past the last
  // retained video AU — presumably to avoid truncating trailing audio rather
  // than an exact frame duration; confirm against muxer expectations.
  const endVideoPts = endIdx < videoAUs.length ? videoAUs[endIdx - 1].pts + PTS : Infinity;
  const clippedAudio = audioAUs.filter(au => au.pts >= startPts && au.pts < endVideoPts);

  return {
    videoAUs: clippedVideo,
    audioAUs: clippedAudio,
    actualStartTime: startPts / PTS,
    smartRenderedFrames: 0, // this path never re-encodes
    originalFrames: clippedVideo.length,
  };
}
|
|
305
|
-
|
|
306
|
-
// ── Helpers ───────────────────────────────────────────────
|
|
307
|
-
|
|
308
|
-
/**
 * True when an access unit contains an IDR slice (H.264 NAL type 5).
 *
 * @param {{nalUnits: Uint8Array[]}} au - Access unit with raw NAL payloads.
 * @returns {boolean}
 */
function _isKeyframe(au) {
  // NAL unit type lives in the low 5 bits of the first payload byte.
  return au.nalUnits.some(nal => (nal[0] & 0x1F) === 5);
}
|
|
314
|
-
|
|
315
|
-
/**
 * Minimal H.264 SPS parser — recovers only the coded picture dimensions.
 * Falls back to 1920x1080 defaults if the SPS is missing, too short, or
 * fails to parse.
 *
 * @param {Uint8Array} sps - Raw SPS NAL payload (including the NAL header byte).
 * @returns {{width: number, height: number}}
 */
function _parseSPSDimensions(sps) {
  let width = 1920, height = 1080;
  if (!sps || sps.length < 4) return { width, height };

  try {
    let offset = 1; // skip NAL header byte
    const profile = sps[offset++];
    offset += 2; // constraint flags + level_idc

    let bitPos = offset * 8;
    const getBit = () => (sps[Math.floor(bitPos / 8)] >> (7 - (bitPos++ % 8))) & 1;
    // Unsigned Exp-Golomb, ue(v).
    const readUE = () => {
      let z = 0;
      while (bitPos < sps.length * 8 && getBit() === 0) z++;
      let v = (1 << z) - 1;
      for (let i = 0; i < z; i++) v += getBit() << (z - 1 - i);
      return v;
    };
    // Signed Exp-Golomb, se(v): codes 0, 1, -1, 2, -2, ...
    const readSE = () => {
      const v = readUE();
      return (v & 1) ? (v + 1) >> 1 : -(v >> 1);
    };

    readUE(); // seq_parameter_set_id
    if ([100, 110, 122, 244, 44, 83, 86, 118, 128].includes(profile)) {
      // High-profile family carries chroma/bit-depth/scaling-list fields.
      const cf = readUE(); // chroma_format_idc
      if (cf === 3) getBit(); // separate_colour_plane_flag
      readUE(); readUE(); getBit(); // bit depths + qpprime flag
      if (getBit()) { // seq_scaling_matrix_present_flag
        for (let i = 0; i < (cf !== 3 ? 8 : 12); i++) {
          if (getBit()) { // seq_scaling_list_present_flag[i]
            const s = i < 6 ? 16 : 64;
            let ls = 8, ns = 8;
            for (let j = 0; j < s; j++) {
              if (ns !== 0) {
                // delta_scale is se(v); reading it as ue(v) gives the wrong
                // value, which can derail the nextScale === 0 early-out and
                // misalign every field that follows.
                const ds = readSE();
                ns = (ls + ds + 256) % 256;
              }
              ls = ns === 0 ? ls : ns;
            }
          }
        }
      }
    }
    readUE(); // log2_max_frame_num_minus4
    const pocType = readUE();
    if (pocType === 0) readUE(); // log2_max_pic_order_cnt_lsb_minus4
    else if (pocType === 1) {
      getBit(); // delta_pic_order_always_zero_flag
      readSE(); // offset_for_non_ref_pic
      readSE(); // offset_for_top_to_bottom_field
      const n = readUE();
      for (let i = 0; i < n; i++) readSE(); // offset_for_ref_frame[i]
    }
    readUE(); // max_num_ref_frames
    getBit(); // gaps_in_frame_num_value_allowed_flag

    const mbW = readUE() + 1; // pic_width_in_mbs_minus1 + 1
    const mbH = readUE() + 1; // pic_height_in_map_units_minus1 + 1
    const frameMbsOnly = getBit();
    if (!frameMbsOnly) getBit(); // mb_adaptive_frame_field_flag
    getBit(); // direct_8x8_inference_flag

    let cropL = 0, cropR = 0, cropT = 0, cropB = 0;
    if (getBit()) { cropL = readUE(); cropR = readUE(); cropT = readUE(); cropB = readUE(); }

    // Crop units assume 4:2:0 chroma (CropUnitX = 2, CropUnitY = 2 * (2 - frameMbsOnly)).
    width = mbW * 16 - (cropL + cropR) * 2;
    height = (2 - frameMbsOnly) * mbH * 16 - (cropT + cropB) * (frameMbsOnly ? 2 : 4);
  } catch (e) { /* malformed SPS — keep 1080p defaults */ }

  return { width, height };
}
|
|
363
|
-
|
|
364
|
-
/**
 * Hex suffix for an "avc1.XXXXXX" codec string: profile_idc,
 * constraint flags, and level_idc taken from SPS bytes 1..3.
 *
 * @param {Uint8Array} sps - Raw SPS NAL payload (byte 0 is the NAL header).
 * @returns {string} Six lowercase hex digits.
 */
function _avcProfileString(sps) {
  let hex = '';
  for (let i = 1; i <= 3; i++) {
    hex += sps[i].toString(16).padStart(2, '0');
  }
  return hex;
}
|
|
367
|
-
|
|
368
|
-
/**
 * Builds an AVCDecoderConfigurationRecord (avcC) carrying exactly one SPS
 * and one PPS, with 4-byte NAL length prefixes.
 *
 * @param {Uint8Array} sps - Raw SPS NAL payload.
 * @param {Uint8Array} pps - Raw PPS NAL payload.
 * @returns {Uint8Array} avcC box payload.
 */
function _buildAvcCDescription(sps, pps) {
  const spsLen = sps.length;
  const ppsLen = pps.length;
  const out = new Uint8Array(11 + spsLen + ppsLen);

  out[0] = 1;       // configurationVersion
  out[1] = sps[1];  // AVCProfileIndication
  out[2] = sps[2];  // profile_compatibility
  out[3] = sps[3];  // AVCLevelIndication
  out[4] = 0xFF;    // reserved + lengthSizeMinusOne = 3 (4-byte lengths)
  out[5] = 0xE1;    // reserved + numOfSequenceParameterSets = 1

  // SPS: 16-bit big-endian length, then payload.
  out[6] = (spsLen >> 8) & 0xFF;
  out[7] = spsLen & 0xFF;
  out.set(sps, 8);

  // PPS: count, 16-bit big-endian length, then payload.
  out[8 + spsLen] = 1;
  out[9 + spsLen] = (ppsLen >> 8) & 0xFF;
  out[10 + spsLen] = ppsLen & 0xFF;
  out.set(pps, 11 + spsLen);

  return out;
}
|
|
379
|
-
|
|
380
|
-
/**
 * Packs coded-slice NAL units into AVCC format (4-byte big-endian length
 * prefix before each NAL). SPS/PPS/AUD/SEI units are dropped — the decoder
 * configuration record carries parameter sets instead.
 *
 * @param {Uint8Array[]} nalUnits - Raw NAL payloads.
 * @returns {Uint8Array} Length-prefixed slice data.
 */
function _nalUnitsToAVCC(nalUnits) {
  // Single pass: keep non-IDR (1) and IDR (5) slices, tally output size.
  const slices = [];
  let total = 0;
  for (const nal of nalUnits) {
    const type = nal[0] & 0x1F;
    if (type === 1 || type === 5) {
      slices.push(nal);
      total += 4 + nal.length;
    }
  }

  const out = new Uint8Array(total);
  let pos = 0;
  for (const nal of slices) {
    const len = nal.length;
    out[pos] = (len >>> 24) & 0xFF;
    out[pos + 1] = (len >>> 16) & 0xFF;
    out[pos + 2] = (len >>> 8) & 0xFF;
    out[pos + 3] = len & 0xFF;
    out.set(nal, pos + 4);
    pos += 4 + len;
  }
  return out;
}
|
|
399
|
-
|
|
400
|
-
/**
 * Splits a byte buffer into raw NAL unit payloads, accepting either
 * Annex B framing (0x000001 / 0x00000001 start codes) or AVCC framing
 * (4-byte big-endian length prefixes).
 *
 * NOTE(review): the format sniff is heuristic — two leading zero bytes are
 * taken to mean Annex B, so an AVCC buffer whose first NAL length is below
 * 0x10000 would also start 00 00 and be routed down the Annex B path;
 * presumably inputs here are always Annex B (see encoder config elsewhere
 * in this module) — confirm.
 *
 * @param {Uint8Array} data - Framed NAL stream.
 * @returns {Uint8Array[]} NAL payloads without start codes / length prefixes.
 */
function _avccToNALUnits(data) {
  // Parse Annex B format (start codes) or AVCC (length-prefixed)
  const nals = [];

  // Check for Annex B (0x00000001 or 0x000001)
  if (data.length >= 4 && data[0] === 0 && data[1] === 0) {
    let i = 0;
    // - 3: a start code needs at least 3 bytes; nothing shorter can begin one.
    while (i < data.length - 3) {
      // Find start code — try the 3-byte form first, then the 4-byte form.
      let scLen = 0;
      if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 1) scLen = 3;
      else if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0 && data[i + 3] === 1) scLen = 4;

      if (scLen > 0) {
        const nalStart = i + scLen;
        // Find next start code; the NAL runs until it (or end of buffer).
        let nalEnd = data.length;
        for (let j = nalStart + 1; j < data.length - 2; j++) {
          // Matches 00 00 01 directly, or 00 00 00 01 by treating it as
          // 00 00 followed by 00 01 (j then lands on the 4-byte code's start).
          if (data[j] === 0 && data[j + 1] === 0 && (data[j + 2] === 1 || (data[j + 2] === 0 && j + 3 < data.length && data[j + 3] === 1))) {
            nalEnd = j;
            break;
          }
        }
        if (nalEnd > nalStart) {
          nals.push(data.slice(nalStart, nalEnd));
        }
        // Jump straight to the next start code (or EOF).
        i = nalEnd;
      } else {
        // Not at a start code — scan forward one byte.
        i++;
      }
    }
  } else {
    // AVCC format (4-byte length prefix)
    const view = new DataView(data.buffer, data.byteOffset, data.byteLength);
    let offset = 0;
    while (offset + 4 < data.length) {
      const len = view.getUint32(offset);
      // Skip zero-length or truncated entries but still advance past them.
      if (len > 0 && offset + 4 + len <= data.length) {
        nals.push(data.slice(offset + 4, offset + 4 + len));
      }
      offset += 4 + len;
    }
  }

  return nals;
}
|
|
446
|
-
|
|
447
|
-
export default smartRender;
|