@invintusmedia/tomp4 1.4.3 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/tomp4.js +2 -2
- package/package.json +1 -1
- package/src/codecs/smart-render.js +374 -96
- package/src/hls-clip.js +177 -433
- package/src/index.js +1 -6
- package/src/codecs/REFERENCE.md +0 -885
- package/src/codecs/h264-cabac-init.js +0 -546
- package/src/codecs/h264-cabac.js +0 -322
- package/src/codecs/h264-cavlc-tables.js +0 -628
- package/src/codecs/h264-decoder.js +0 -940
- package/src/codecs/h264-encoder.js +0 -502
- package/src/codecs/h264-intra.js +0 -292
- package/src/codecs/h264-sps-pps.js +0 -483
- package/src/codecs/h264-tables.js +0 -217
- package/src/codecs/h264-transform.js +0 -268
package/dist/tomp4.js
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* toMp4.js v1.
|
|
2
|
+
* toMp4.js v1.5.0
|
|
3
3
|
* Convert MPEG-TS and fMP4 to standard MP4
|
|
4
4
|
* https://github.com/TVWIT/toMp4.js
|
|
5
5
|
* MIT License
|
|
@@ -1186,7 +1186,7 @@
|
|
|
1186
1186
|
toMp4.isMpegTs = isMpegTs;
|
|
1187
1187
|
toMp4.isFmp4 = isFmp4;
|
|
1188
1188
|
toMp4.isStandardMp4 = isStandardMp4;
|
|
1189
|
-
toMp4.version = '1.
|
|
1189
|
+
toMp4.version = '1.5.0';
|
|
1190
1190
|
|
|
1191
1191
|
return toMp4;
|
|
1192
1192
|
});
|
package/package.json
CHANGED
|
@@ -1,41 +1,48 @@
|
|
|
1
1
|
/**
|
|
2
|
-
* Smart Rendering
|
|
2
|
+
* Smart Rendering via WebCodecs
|
|
3
3
|
*
|
|
4
|
-
* Re-encodes the boundary GOP of an HLS segment
|
|
5
|
-
*
|
|
6
|
-
*
|
|
7
|
-
*
|
|
4
|
+
* Re-encodes the boundary GOP of an HLS segment using the browser's
|
|
5
|
+
* native WebCodecs API. Decodes preroll frames, re-encodes the target
|
|
6
|
+
* frame as a new keyframe, and re-encodes subsequent frames until the
|
|
7
|
+
* next original keyframe. Original compressed data is used from the
|
|
8
|
+
* next keyframe onward.
|
|
9
|
+
*
|
|
10
|
+
* Falls back to keyframe-accurate clipping when WebCodecs is unavailable
|
|
11
|
+
* (e.g., Node.js).
|
|
8
12
|
*
|
|
9
13
|
* @module codecs/smart-render
|
|
10
14
|
*/
|
|
11
15
|
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
16
|
+
/**
 * Report whether the WebCodecs API is usable in the current environment
 * (browsers generally yes; plain Node.js no).
 * @returns {boolean} true when both VideoDecoder and VideoEncoder exist
 */
export function isSmartRenderSupported() {
  const hasDecoder = typeof VideoDecoder !== 'undefined';
  const hasEncoder = typeof VideoEncoder !== 'undefined';
  return hasDecoder && hasEncoder;
}
|
|
15
22
|
|
|
16
23
|
/**
 * Smart-render a TS segment to produce a frame-accurate cut.
 *
 * Decodes from the keyframe before targetTime, re-encodes frames from
 * targetTime onward as new H.264 NAL units (starting with an IDR keyframe),
 * and uses original compressed data from the next keyframe onward.
 *
 * Falls back to keyframe-accurate clipping when WebCodecs is unavailable,
 * when the target frame already is a keyframe, when SPS/PPS cannot be
 * found, or when decode/encode fails.
 *
 * @param {object} parser - Parsed TS segment (TSParser output)
 * @param {number} targetStartTime - Start time in seconds (relative to segment)
 * @param {object} [options]
 * @param {number} [options.endTime] - End time in seconds (relative to segment)
 * @param {number} [options.bitrate] - Encoding bitrate (default: auto from source)
 * @returns {Promise<object>} { videoAUs, audioAUs, actualStartTime, smartRenderedFrames, originalFrames }
 */
export async function smartRender(parser, targetStartTime, options = {}) {
  if (!isSmartRenderSupported()) {
    return keyframeAccurateFallback(parser, targetStartTime, options);
  }

  const { endTime = Infinity } = options;
  const PTS = 90000; // MPEG-TS 90 kHz clock
  const targetPts = targetStartTime * PTS;
  const endPts = endTime * PTS;
  const videoAUs = parser.videoAccessUnits;
  const audioAUs = parser.audioAccessUnits;

  if (videoAUs.length === 0) {
    return { videoAUs: [], audioAUs: [], actualStartTime: targetStartTime };
  }

  // Find keyframe at or before targetTime
  let keyframeIdx = 0;
  for (let i = 0; i < videoAUs.length; i++) {
    if (videoAUs[i].pts > targetPts) break;
    if (_isKeyframe(videoAUs[i])) keyframeIdx = i;
  }

  // Find target frame (first frame at or after targetTime)
  let targetIdx = keyframeIdx;
  for (let i = keyframeIdx; i < videoAUs.length; i++) {
    if (videoAUs[i].pts >= targetPts) { targetIdx = i; break; }
  }

  // If target IS the keyframe, no smart rendering needed
  if (targetIdx === keyframeIdx) {
    return keyframeAccurateFallback(parser, targetStartTime, options);
  }

  // Find next keyframe after target
  let nextKeyframeIdx = videoAUs.length;
  for (let i = targetIdx + 1; i < videoAUs.length; i++) {
    if (_isKeyframe(videoAUs[i])) { nextKeyframeIdx = i; break; }
  }

  // Find end frame (first frame at or after endTime)
  let endIdx = videoAUs.length;
  for (let i = 0; i < videoAUs.length; i++) {
    if (videoAUs[i].pts >= endPts) { endIdx = i; break; }
  }

  // Extract SPS/PPS for decoder configuration
  let sps = null, pps = null;
  for (const au of videoAUs) {
    for (const nal of au.nalUnits) {
      const t = nal[0] & 0x1F;
      if (t === 7 && !sps) sps = nal;
      if (t === 8 && !pps) pps = nal;
    }
    if (sps && pps) break;
  }
  if (!sps || !pps) {
    return keyframeAccurateFallback(parser, targetStartTime, options);
  }

  // Parse dimensions from SPS (just need width/height for codec config)
  const { width, height } = _parseSPSDimensions(sps);

  // Estimate bitrate from the original segment so the re-encoded GOP
  // roughly matches the source quality
  let totalBytes = 0;
  for (const au of videoAUs) {
    for (const nal of au.nalUnits) totalBytes += nal.length;
  }
  const segDuration = videoAUs.length > 1
    ? (videoAUs[videoAUs.length - 1].pts - videoAUs[0].pts) / PTS
    : 1;
  const estimatedBitrate = options.bitrate || Math.round((totalBytes * 8) / segDuration);

  let decodedFrames = null;
  try {
    // ── Step 1: Decode preroll frames using VideoDecoder ──
    decodedFrames = await _decodeFrames(videoAUs, keyframeIdx, Math.min(nextKeyframeIdx, endIdx), sps, pps, width, height);

    // ── Step 2: Re-encode from targetIdx onward using VideoEncoder ──
    const reEncodedNALs = await _encodeFrames(
      decodedFrames, targetIdx - keyframeIdx, Math.min(nextKeyframeIdx, endIdx) - keyframeIdx,
      width, height, estimatedBitrate
    );

    // ── Step 3: Build output access units ──
    const outputVideo = [];
    const targetPtsActual = videoAUs[targetIdx].pts;

    // Re-encoded frames (targetIdx to nextKeyframeIdx)
    for (let i = 0; i < reEncodedNALs.length; i++) {
      const srcIdx = targetIdx + i;
      if (srcIdx >= endIdx) break;
      outputVideo.push({
        nalUnits: i === 0
          ? [sps, pps, ...reEncodedNALs[i]] // First frame gets SPS/PPS
          : reEncodedNALs[i],
        pts: videoAUs[srcIdx].pts,
        dts: videoAUs[srcIdx].dts,
      });
    }

    // Original frames from next keyframe onward
    for (let i = nextKeyframeIdx; i < endIdx; i++) {
      outputVideo.push(videoAUs[i]);
    }

    // Clip audio to match the video range
    const audioStartPts = targetPtsActual;
    const audioEndPts = endIdx < videoAUs.length ? videoAUs[endIdx - 1].pts + PTS : Infinity;
    const outputAudio = audioAUs.filter(au => au.pts >= audioStartPts && au.pts < audioEndPts);

    return {
      videoAUs: outputVideo,
      audioAUs: outputAudio,
      actualStartTime: targetPtsActual / PTS,
      smartRenderedFrames: reEncodedNALs.length,
      originalFrames: Math.max(0, endIdx - nextKeyframeIdx),
    };
  } catch (e) {
    // WebCodecs failed — fall back to keyframe-accurate
    console.warn('Smart render failed, falling back to keyframe-accurate:', e && e.message ? e.message : e);
    return keyframeAccurateFallback(parser, targetStartTime, options);
  } finally {
    // BUGFIX: close decoded VideoFrames on BOTH success and failure paths.
    // The original closed them only on the success path, leaking
    // frame-backing resources whenever encoding threw.
    if (decodedFrames) {
      for (const frame of decodedFrames) {
        if (frame && typeof frame.close === 'function') frame.close();
      }
    }
  }
}
|
|
165
|
+
|
|
166
|
+
// ── WebCodecs decode ──────────────────────────────────────

/**
 * Decode access units [startIdx, endIdx) with a WebCodecs VideoDecoder.
 *
 * BUGFIX: the original awaited one decoded frame per submitted chunk,
 * which deadlocks whenever the decoder buffers frames internally (frame
 * reordering) and hangs forever on a decode error, because the error
 * callback only logged and never resolved the pending promise. This
 * version queues every chunk, lets flush() deliver all frames, and
 * surfaces decoder errors as a rejected promise so smartRender() can
 * fall back. Timestamps are also converted to microseconds, the unit
 * WebCodecs expects (the original fed raw 90 kHz ticks).
 *
 * @param {Array} videoAUs - Access units ({ pts, dts, nalUnits })
 * @param {number} startIdx - First AU to decode (the preroll keyframe)
 * @param {number} endIdx - Exclusive end index
 * @param {Uint8Array} sps - Sequence parameter set NAL
 * @param {Uint8Array} pps - Picture parameter set NAL
 * @param {number} width - Coded width for the decoder config
 * @param {number} height - Coded height for the decoder config
 * @returns {Promise<VideoFrame[]>} decoded frames, in output order
 */
async function _decodeFrames(videoAUs, startIdx, endIdx, sps, pps, width, height) {
  const frames = [];
  let decodeError = null;

  const decoder = new VideoDecoder({
    output(frame) {
      frames.push(frame);
    },
    error(e) {
      decodeError = e;
    },
  });

  // Build avcC description for decoder config
  const description = _buildAvcCDescription(sps, pps);

  decoder.configure({
    codec: 'avc1.' + _avcProfileString(sps),
    codedWidth: width,
    codedHeight: height,
    description,
    optimizeForLatency: true,
  });

  // Queue every chunk from the keyframe to endIdx; do NOT await
  // per-chunk output (the decoder may legally buffer frames)
  for (let i = startIdx; i < endIdx; i++) {
    if (decodeError) break;
    const au = videoAUs[i];
    // Convert NAL units to AVCC format (4-byte length prefix)
    const avccData = _nalUnitsToAVCC(au.nalUnits);
    decoder.decode(new EncodedVideoChunk({
      type: _isKeyframe(au) ? 'key' : 'delta',
      // WebCodecs timestamps are microseconds; convert from 90 kHz ticks
      timestamp: Math.round((au.pts * 1000) / 90),
      data: avccData,
    }));
  }

  try {
    await decoder.flush();
  } catch (e) {
    if (!decodeError) decodeError = e;
  }
  if (decoder.state !== 'closed') decoder.close();

  if (decodeError) {
    // Release any frames we did receive before propagating the failure
    for (const frame of frames) {
      if (frame && typeof frame.close === 'function') frame.close();
    }
    throw decodeError;
  }

  return frames;
}
|
|
217
|
+
|
|
218
|
+
// ── WebCodecs encode ──────────────────────────────────────

/**
 * Re-encode decoded frames [startOffset, endOffset) with VideoEncoder.
 *
 * BUGFIX: the original awaited one output chunk per encode() call, which
 * deadlocks when the encoder buffers input before emitting output, and
 * hangs forever on an encoder error (the error callback only logged).
 * All frames are queued first; flush() delivers the chunks in submission
 * order, and encoder errors reject the promise. Also, the original
 * requested a keyframe only at index === startOffset, so if that slot
 * was null no keyframe was requested at all; the keyframe is now
 * requested on the first frame actually submitted.
 *
 * @param {VideoFrame[]} decodedFrames - Frames from _decodeFrames
 * @param {number} startOffset - First frame to re-encode (becomes the new IDR)
 * @param {number} endOffset - Exclusive end index
 * @param {number} width - Encoder frame width
 * @param {number} height - Encoder frame height
 * @param {number} bitrate - Target bitrate in bits/second
 * @returns {Promise<Uint8Array[][]>} one array of NAL units per encoded frame
 */
async function _encodeFrames(decodedFrames, startOffset, endOffset, width, height, bitrate) {
  const encodedNALs = [];
  let encodeError = null;

  const encoder = new VideoEncoder({
    output(chunk) {
      // Copy the chunk payload and split it into H.264 NAL units
      const buffer = new Uint8Array(chunk.byteLength);
      chunk.copyTo(buffer);
      encodedNALs.push(_avccToNALUnits(buffer));
    },
    error(e) {
      encodeError = e;
    },
  });

  encoder.configure({
    codec: 'avc1.640028', // High profile, level 4.0
    width,
    height,
    bitrate,
    framerate: 30,
    latencyMode: 'quality',
    avc: { format: 'annexb' }, // Get Annex B output (start codes)
  });

  let needKeyframe = true;
  for (let i = startOffset; i < Math.min(endOffset, decodedFrames.length); i++) {
    if (encodeError) break;
    const frame = decodedFrames[i];
    if (!frame) continue;
    // First submitted frame becomes the new IDR keyframe
    encoder.encode(frame, { keyFrame: needKeyframe });
    needKeyframe = false;
  }

  try {
    await encoder.flush();
  } catch (e) {
    if (!encodeError) encodeError = e;
  }
  if (encoder.state !== 'closed') encoder.close();

  if (encodeError) throw encodeError;

  return encodedNALs;
}
|
|
264
|
+
|
|
265
|
+
// ── Keyframe-accurate fallback ────────────────────────────

/**
 * Clip a segment on keyframe boundaries only (no re-encoding).
 *
 * Used when WebCodecs is unavailable or smart rendering fails: the cut
 * starts at the last keyframe at or before targetStartTime, so playback
 * is valid but the start is not frame-accurate.
 *
 * @param {object} parser - Parsed TS segment (TSParser output)
 * @param {number} targetStartTime - Start time in seconds (relative to segment)
 * @param {object} [options]
 * @param {number} [options.endTime] - End time in seconds (relative to segment)
 * @returns {object} { videoAUs, audioAUs, actualStartTime, smartRenderedFrames, originalFrames }
 */
function keyframeAccurateFallback(parser, targetStartTime, options = {}) {
  const { endTime = Infinity } = options;
  const PTS = 90000;
  const targetPts = targetStartTime * PTS;
  const endPts = endTime * PTS;
  const videoAUs = parser.videoAccessUnits;
  const audioAUs = parser.audioAccessUnits;

  if (videoAUs.length === 0) {
    return { videoAUs: [], audioAUs: [], actualStartTime: targetStartTime };
  }

  // Last keyframe (IDR slice, NAL type 5) whose pts does not exceed target
  let keyframeIdx = 0;
  for (let i = 0; i < videoAUs.length && videoAUs[i].pts <= targetPts; i++) {
    if (videoAUs[i].nalUnits.some((nal) => (nal[0] & 0x1F) === 5)) {
      keyframeIdx = i;
    }
  }

  // First frame at or past the requested end time (exclusive bound)
  const firstPastEnd = videoAUs.findIndex((au) => au.pts >= endPts);
  const endIdx = firstPastEnd === -1 ? videoAUs.length : firstPastEnd;

  const clippedVideo = videoAUs.slice(keyframeIdx, endIdx);
  const startPts = clippedVideo.length > 0 ? clippedVideo[0].pts : 0;
  const endVideoPts = endIdx < videoAUs.length ? videoAUs[endIdx - 1].pts + PTS : Infinity;
  const clippedAudio = audioAUs.filter((au) => au.pts >= startPts && au.pts < endVideoPts);

  return {
    videoAUs: clippedVideo,
    audioAUs: clippedAudio,
    actualStartTime: startPts / PTS,
    smartRenderedFrames: 0,
    originalFrames: clippedVideo.length,
  };
}
|
|
161
305
|
|
|
306
|
+
// ── Helpers ───────────────────────────────────────────────

/**
 * True when the access unit contains an IDR slice (H.264 NAL type 5).
 * @param {{ nalUnits: Uint8Array[] }} au
 * @returns {boolean}
 */
function _isKeyframe(au) {
  return au.nalUnits.some((nal) => (nal[0] & 0x1F) === 5);
}
|
|
168
314
|
|
|
315
|
+
/**
 * Parse coded width/height from an H.264 sequence parameter set.
 *
 * Reads just enough of the SPS (Rec. ITU-T H.264 §7.3.2.1.1) to reach
 * pic_width_in_mbs_minus1 / pic_height_in_map_units_minus1 and the
 * frame-cropping fields. Returns 1920x1080 defaults when the SPS is
 * missing, too short, or malformed.
 *
 * BUGFIX: delta_scale inside scaling_list() is a SIGNED Exp-Golomb
 * value (se(v)); the original read it as unsigned ue(v) (its own
 * comment said "readSE actually"). The wrong value changes nextScale,
 * which controls whether further delta_scale values are read at all, so
 * an SPS carrying custom scaling matrices could desynchronize the bit
 * reader and yield garbage dimensions.
 *
 * NOTE(review): assumes `sps` is the raw RBSP — emulation-prevention
 * bytes (00 00 03) are not stripped here; confirm the demuxer removes
 * them before this is called.
 *
 * @param {Uint8Array} sps - SPS NAL unit (header byte + payload)
 * @returns {{ width: number, height: number }}
 */
function _parseSPSDimensions(sps) {
  let width = 1920, height = 1080;
  if (!sps || sps.length < 4) return { width, height };

  try {
    let offset = 1;                 // skip NAL header byte
    const profile = sps[offset++];  // profile_idc
    offset += 2;                    // constraint flags + level_idc

    let bitPos = offset * 8;
    const getBit = () => (sps[Math.floor(bitPos / 8)] >> (7 - (bitPos++ % 8))) & 1;
    // Unsigned Exp-Golomb: count leading zeros, then read that many bits
    const readUE = () => {
      let z = 0;
      while (bitPos < sps.length * 8 && getBit() === 0) z++;
      let v = (1 << z) - 1;
      for (let i = 0; i < z; i++) v += getBit() << (z - 1 - i);
      return v;
    };
    // Signed Exp-Golomb: ue(v) code k maps to (-1)^(k+1) * ceil(k/2)
    const readSE = () => {
      const v = readUE();
      return (v & 1) ? (v + 1) >> 1 : -(v >> 1);
    };
    // scaling_list(): values are discarded, but the right number of
    // bits must be consumed to keep the reader in sync
    const skipScalingList = (size) => {
      let lastScale = 8, nextScale = 8;
      for (let j = 0; j < size; j++) {
        if (nextScale !== 0) {
          const deltaScale = readSE(); // se(v) — was misread as ue(v)
          nextScale = (lastScale + deltaScale + 256) % 256;
        }
        lastScale = nextScale === 0 ? lastScale : nextScale;
      }
    };

    readUE(); // seq_parameter_set_id
    if ([100, 110, 122, 244, 44, 83, 86, 118, 128].includes(profile)) {
      const chromaFormat = readUE();
      if (chromaFormat === 3) getBit(); // separate_colour_plane_flag
      readUE(); // bit_depth_luma_minus8
      readUE(); // bit_depth_chroma_minus8
      getBit(); // qpprime_y_zero_transform_bypass_flag
      if (getBit()) { // seq_scaling_matrix_present_flag
        const listCount = chromaFormat !== 3 ? 8 : 12;
        for (let i = 0; i < listCount; i++) {
          if (getBit()) skipScalingList(i < 6 ? 16 : 64);
        }
      }
    }
    readUE(); // log2_max_frame_num_minus4
    const pocType = readUE();
    if (pocType === 0) {
      readUE(); // log2_max_pic_order_cnt_lsb_minus4
    } else if (pocType === 1) {
      getBit(); readUE(); readUE(); // delta_pic_order_always + two offsets
      const numRefFrames = readUE();
      for (let i = 0; i < numRefFrames; i++) readUE();
    }
    readUE(); // max_num_ref_frames
    getBit(); // gaps_in_frame_num_value_allowed_flag

    const mbW = readUE() + 1;  // pic_width_in_mbs
    const mbH = readUE() + 1;  // pic_height_in_map_units
    const frameMbsOnly = getBit();
    if (!frameMbsOnly) getBit(); // mb_adaptive_frame_field_flag
    getBit(); // direct_8x8_inference_flag

    let cropL = 0, cropR = 0, cropT = 0, cropB = 0;
    if (getBit()) { // frame_cropping_flag
      cropL = readUE(); cropR = readUE(); cropT = readUE(); cropB = readUE();
    }

    width = mbW * 16 - (cropL + cropR) * 2;
    height = (2 - frameMbsOnly) * mbH * 16 - (cropT + cropB) * (frameMbsOnly ? 2 : 4);
  } catch (e) { /* malformed SPS — keep 1920x1080 defaults */ }

  return { width, height };
}
|
|
363
|
+
|
|
364
|
+
/**
 * Codec string suffix from SPS bytes 1-3 (profile_idc, constraint
 * flags, level_idc) as six lowercase hex digits, e.g. "640028".
 * @param {Uint8Array} sps
 * @returns {string}
 */
function _avcProfileString(sps) {
  let out = '';
  for (let i = 1; i <= 3; i++) {
    out += sps[i].toString(16).padStart(2, '0');
  }
  return out;
}
|
|
367
|
+
|
|
368
|
+
/**
 * Build an AVCDecoderConfigurationRecord (avcC box payload) carrying
 * exactly one SPS and one PPS, for use as VideoDecoderConfig.description.
 * @param {Uint8Array} sps
 * @param {Uint8Array} pps
 * @returns {Uint8Array}
 */
function _buildAvcCDescription(sps, pps) {
  const out = new Uint8Array(11 + sps.length + pps.length);
  const dv = new DataView(out.buffer);

  // version, profile/compat/level (mirrored from the SPS),
  // 0xFF = 4-byte NAL lengths, 0xE1 = one SPS follows
  out.set([1, sps[1], sps[2], sps[3], 0xFF, 0xE1], 0);

  // SPS, 16-bit length-prefixed
  dv.setUint16(6, sps.length);
  out.set(sps, 8);

  // one PPS, 16-bit length-prefixed
  const p = 8 + sps.length;
  out[p] = 1;
  dv.setUint16(p + 1, pps.length);
  out.set(pps, p + 3);

  return out;
}
|
|
379
|
+
|
|
380
|
+
/**
 * Pack slice NAL units into AVCC framing (4-byte big-endian length
 * before each NAL). Only coded slices (types 1 and 5) are kept —
 * SPS/PPS/AUD/SEI are dropped because the decoder configuration
 * already carries the parameter sets.
 * @param {Uint8Array[]} nalUnits
 * @returns {Uint8Array}
 */
function _nalUnitsToAVCC(nalUnits) {
  const slices = [];
  let size = 0;
  for (const nal of nalUnits) {
    const type = nal[0] & 0x1F;
    if (type !== 1 && type !== 5) continue; // keep only slice NALs
    slices.push(nal);
    size += 4 + nal.length;
  }

  const out = new Uint8Array(size);
  const dv = new DataView(out.buffer);
  let pos = 0;
  for (const nal of slices) {
    dv.setUint32(pos, nal.length);
    out.set(nal, pos + 4);
    pos += 4 + nal.length;
  }
  return out;
}
|
|
399
|
+
|
|
400
|
+
/**
 * Split an encoded H.264 payload into raw NAL units.
 *
 * Handles Annex B framing (00 00 01 / 00 00 00 01 start codes) and
 * AVCC framing (4-byte big-endian length prefixes).
 *
 * NOTE(review): the format sniff treats any payload beginning 00 00 as
 * Annex B. An AVCC payload whose first NAL is shorter than 2^16 bytes
 * also begins 00 00, so only Annex B input (what the encoder here is
 * configured to emit) is detected reliably — confirm callers never pass
 * length-prefixed data.
 *
 * @param {Uint8Array} data
 * @returns {Uint8Array[]}
 */
function _avccToNALUnits(data) {
  const nals = [];

  const looksAnnexB = data.length >= 4 && data[0] === 0 && data[1] === 0;
  if (looksAnnexB) {
    let pos = 0;
    while (pos < data.length - 3) {
      // Measure the start code at pos (0 when there is none)
      let startCodeLen = 0;
      if (data[pos] === 0 && data[pos + 1] === 0 && data[pos + 2] === 1) {
        startCodeLen = 3;
      } else if (data[pos] === 0 && data[pos + 1] === 0 && data[pos + 2] === 0 && data[pos + 3] === 1) {
        startCodeLen = 4;
      }

      if (startCodeLen === 0) {
        pos++;
        continue;
      }

      const begin = pos + startCodeLen;
      // The NAL runs until the next start code (or end of payload)
      let end = data.length;
      for (let j = begin + 1; j < data.length - 2; j++) {
        const next3 = data[j] === 0 && data[j + 1] === 0 && data[j + 2] === 1;
        const next4 = data[j] === 0 && data[j + 1] === 0 && data[j + 2] === 0 &&
          j + 3 < data.length && data[j + 3] === 1;
        if (next3 || next4) { end = j; break; }
      }
      if (end > begin) nals.push(data.slice(begin, end));
      pos = end;
    }
  } else {
    // AVCC: each NAL is preceded by a 4-byte big-endian length
    const dv = new DataView(data.buffer, data.byteOffset, data.byteLength);
    let pos = 0;
    while (pos + 4 < data.length) {
      const nalLen = dv.getUint32(pos);
      if (nalLen > 0 && pos + 4 + nalLen <= data.length) {
        nals.push(data.slice(pos + 4, pos + 4 + nalLen));
      }
      pos += 4 + nalLen;
    }
  }

  return nals;
}
|
|
446
|
+
|
|
169
447
|
export default smartRender;
|