@invintusmedia/tomp4 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,268 @@
1
+ /**
2
+ * H.264 Integer Transforms and Quantization
3
+ *
4
+ * Forward and inverse 4x4/8x8 integer DCT transforms,
5
+ * quantization, and dequantization as specified in H.264.
6
+ *
7
+ * Reference: ITU-T H.264, Section 8.5
8
+ *
9
+ * @module codecs/h264-transform
10
+ */
11
+
12
+ import { levelScale4x4, quantMF4x4, scanOrder4x4 } from './h264-tables.js';
13
+
14
+ // ══════════════════════════════════════════════════════════
15
+ // 4x4 Inverse Integer Transform (Section 8.5.12.1)
16
+ // ══════════════════════════════════════════════════════════
17
+
18
/**
 * Inverse 4x4 integer DCT (ITU-T H.264, Section 8.5.12.1).
 *
 * Applies the row butterfly, then the column butterfly, and rounds the
 * final values with (x + 32) >> 6 as the spec requires.
 *
 * @param {ArrayLike<number>} coeffs - 16 dequantized coefficients, raster order
 * @returns {Int32Array} 16 residual samples in raster order
 */
export function inverseDCT4x4(coeffs) {
  // Truncate inputs to int32 exactly once before any arithmetic.
  const d = Int32Array.from({ length: 16 }, (_, k) => coeffs[k]);
  const tmp = new Int32Array(16);
  const res = new Int32Array(16);

  // Row (horizontal) pass.
  for (let row = 0; row < 4; row++) {
    const b = 4 * row;
    const s0 = d[b] + d[b + 2];
    const s1 = d[b] - d[b + 2];
    const s2 = (d[b + 1] >> 1) - d[b + 3];
    const s3 = d[b + 1] + (d[b + 3] >> 1);
    tmp[b] = s0 + s3;
    tmp[b + 1] = s1 + s2;
    tmp[b + 2] = s1 - s2;
    tmp[b + 3] = s0 - s3;
  }

  // Column (vertical) pass with the final rounding shift.
  for (let col = 0; col < 4; col++) {
    const s0 = tmp[col] + tmp[col + 8];
    const s1 = tmp[col] - tmp[col + 8];
    const s2 = (tmp[col + 4] >> 1) - tmp[col + 12];
    const s3 = tmp[col + 4] + (tmp[col + 12] >> 1);
    res[col] = (s0 + s3 + 32) >> 6;
    res[col + 4] = (s1 + s2 + 32) >> 6;
    res[col + 8] = (s1 - s2 + 32) >> 6;
    res[col + 12] = (s0 - s3 + 32) >> 6;
  }

  return res;
}
60
+
61
+ // ══════════════════════════════════════════════════════════
62
+ // 4x4 Forward Integer Transform (Section 8.5 inverse)
63
+ // ══════════════════════════════════════════════════════════
64
+
65
/**
 * Forward 4x4 integer DCT (encoder side), computing Cf * X * Cf^T with the
 * H.264 integer core matrix [[1,1,1,1],[2,1,-1,-2],[1,-1,-1,1],[1,-2,2,-1]].
 *
 * @param {ArrayLike<number>} residual - 16 residual samples, raster order
 * @returns {Int32Array} 16 transform coefficients in raster order
 */
export function forwardDCT4x4(residual) {
  // Truncate inputs to int32 exactly once before any arithmetic.
  const x = Int32Array.from({ length: 16 }, (_, k) => residual[k]);
  const mid = new Int32Array(16);
  const out = new Int32Array(16);

  // Rows: left-multiply by Cf.
  for (let row = 0; row < 4; row++) {
    const b = 4 * row;
    const s03 = x[b] + x[b + 3];
    const s12 = x[b + 1] + x[b + 2];
    const d12 = x[b + 1] - x[b + 2];
    const d03 = x[b] - x[b + 3];
    mid[b] = s03 + s12;
    mid[b + 1] = (d03 << 1) + d12;
    mid[b + 2] = s03 - s12;
    mid[b + 3] = d03 - (d12 << 1);
  }

  // Columns: right-multiply by Cf^T.
  for (let col = 0; col < 4; col++) {
    const s03 = mid[col] + mid[col + 12];
    const s12 = mid[col + 4] + mid[col + 8];
    const d12 = mid[col + 4] - mid[col + 8];
    const d03 = mid[col] - mid[col + 12];
    out[col] = s03 + s12;
    out[col + 4] = (d03 << 1) + d12;
    out[col + 8] = s03 - s12;
    out[col + 12] = d03 - (d12 << 1);
  }

  return out;
}
106
+
107
+ // ══════════════════════════════════════════════════════════
108
+ // 4x4 Hadamard Transform (for DC coefficients of Intra16x16)
109
+ // ══════════════════════════════════════════════════════════
110
+
111
/**
 * Forward 4x4 Hadamard transform for Intra16x16 luma DC coefficients.
 *
 * Transforms the 16 per-block DC values of a 16x16 macroblock; the final
 * column pass halves each value (>> 1), the encoder-side scaling H.264 uses
 * for luma DC.
 *
 * @param {ArrayLike<number>} dc - 16 DC values (one per 4x4 block)
 * @returns {Int32Array} 16 transformed values
 */
export function forwardHadamard4x4(dc) {
  const t = new Int32Array(16);
  const out = new Int32Array(16);

  // Row pass.
  for (let b = 0; b < 16; b += 4) {
    const s03 = dc[b] + dc[b + 3];
    const s12 = dc[b + 1] + dc[b + 2];
    const d12 = dc[b + 1] - dc[b + 2];
    const d03 = dc[b] - dc[b + 3];
    t[b] = s03 + s12;
    t[b + 1] = d03 + d12;
    t[b + 2] = s03 - s12;
    t[b + 3] = d03 - d12;
  }

  // Column pass with >> 1 scaling.
  for (let col = 0; col < 4; col++) {
    const s03 = t[col] + t[col + 12];
    const s12 = t[col + 4] + t[col + 8];
    const d12 = t[col + 4] - t[col + 8];
    const d03 = t[col] - t[col + 12];
    out[col] = (s03 + s12) >> 1;
    out[col + 4] = (d03 + d12) >> 1;
    out[col + 8] = (s03 - s12) >> 1;
    out[col + 12] = (d03 - d12) >> 1;
  }

  return out;
}
147
+
148
/**
 * Inverse 4x4 Hadamard transform for Intra16x16 luma DC.
 *
 * Same butterfly as the forward transform (the Hadamard matrix is its own
 * inverse up to scaling), but without the forward pass's >> 1 — the scaling
 * is instead absorbed by dequantization.
 *
 * @param {ArrayLike<number>} dc - 16 transformed DC values
 * @returns {Int32Array} 16 reconstructed DC values
 */
export function inverseHadamard4x4(dc) {
  const t = new Int32Array(16);
  const out = new Int32Array(16);

  // Row pass.
  for (let b = 0; b < 16; b += 4) {
    const s03 = dc[b] + dc[b + 3];
    const s12 = dc[b + 1] + dc[b + 2];
    const d12 = dc[b + 1] - dc[b + 2];
    const d03 = dc[b] - dc[b + 3];
    t[b] = s03 + s12;
    t[b + 1] = d03 + d12;
    t[b + 2] = s03 - s12;
    t[b + 3] = d03 - d12;
  }

  // Column pass, no scaling.
  for (let col = 0; col < 4; col++) {
    const s03 = t[col] + t[col + 12];
    const s12 = t[col + 4] + t[col + 8];
    const d12 = t[col + 4] - t[col + 8];
    const d03 = t[col] - t[col + 12];
    out[col] = s03 + s12;
    out[col + 4] = d03 + d12;
    out[col + 8] = s03 - s12;
    out[col + 12] = d03 - d12;
  }

  return out;
}
181
+
182
+ // ══════════════════════════════════════════════════════════
183
+ // 2x2 Hadamard Transform (for chroma DC)
184
+ // ══════════════════════════════════════════════════════════
185
+
186
/**
 * Forward 2x2 Hadamard transform for chroma DC coefficients.
 *
 * @param {ArrayLike<number>} dc - 4 chroma DC values (raster order)
 * @returns {Int32Array} 4 transformed values
 */
export function forwardHadamard2x2(dc) {
  const [a, b, c, d] = dc;
  const out = new Int32Array(4);
  out[0] = a + b + c + d;
  out[1] = a - b + c - d;
  out[2] = a + b - c - d;
  out[3] = a - b - c + d;
  return out;
}
194
+
195
/**
 * Inverse 2x2 Hadamard transform for chroma DC.
 *
 * The 2x2 Hadamard matrix is its own inverse (no extra scaling is needed
 * at this size), so this is the same butterfly as the forward transform.
 *
 * @param {ArrayLike<number>} dc - 4 transformed chroma DC values
 * @returns {Int32Array} 4 reconstructed DC values
 */
export function inverseHadamard2x2(dc) {
  const sumAB = dc[0] + dc[1];
  const difAB = dc[0] - dc[1];
  const sumCD = dc[2] + dc[3];
  const difCD = dc[2] - dc[3];
  return new Int32Array([sumAB + sumCD, difAB + difCD, sumAB - sumCD, difAB - difCD]);
}
199
+
200
+ // ══════════════════════════════════════════════════════════
201
+ // Inverse Quantization (Dequantization)
202
+ // Section 8.5.12.1
203
+ // ══════════════════════════════════════════════════════════
204
+
205
/**
 * Dequantize a 4x4 block of transform coefficients.
 *
 * Scales each level by levelScale4x4[qp % 6], then applies the qp/6-dependent
 * shift: a left shift for qp >= 24 (qpDiv6 >= 4), otherwise a rounded right
 * shift. Results are written in raster order via the scan table.
 *
 * @param {Int32Array} coeffs - 16 coefficients in scan order
 * @param {number} qp - Quantization parameter (0-51)
 * @param {boolean} isIntra - Whether the macroblock is intra
 *   (currently unused; kept for API compatibility)
 * @returns {Int32Array} Dequantized coefficients in raster order
 */
export function dequantize4x4(coeffs, qp, isIntra) {
  const rem = qp % 6;
  const shift = Math.floor(qp / 6);
  const scale = levelScale4x4[rem];
  const out = new Int32Array(16);

  for (let i = 0; i < 16; i++) {
    // NOTE(review): scale is indexed by scan position i while the result goes
    // to raster position — this assumes levelScale4x4 rows are stored in scan
    // order; confirm against h264-tables.js.
    const raster = scanOrder4x4[i];
    const scaled = coeffs[i] * scale[i];
    out[raster] =
      shift >= 4
        ? scaled << (shift - 4)
        : (scaled + (1 << (3 - shift))) >> (4 - shift);
  }

  return out;
}
229
+
230
+ // ══════════════════════════════════════════════════════════
231
+ // Forward Quantization (for encoder)
232
+ // ══════════════════════════════════════════════════════════
233
+
234
/**
 * Quantize a 4x4 block of transform coefficients.
 *
 * Applies the H.264 forward quantizer |level| = (|coeff| * MF + f) >> qBits
 * with the dead-zone offset f = 2^qBits / 3 for intra blocks and
 * 2^qBits / 6 for inter blocks (JM reference-encoder convention).
 *
 * @param {Int32Array} coeffs - 16 coefficients in raster order
 * @param {number} qp - Quantization parameter (0-51)
 * @param {boolean} [isIntra=true] - Intra blocks use the larger 1/3 dead-zone
 *   offset; pass false for inter blocks (1/6). Defaults to true, preserving
 *   the previous behavior for existing callers.
 * @returns {Int32Array} Quantized coefficients in scan order
 */
export function quantize4x4(coeffs, qp, isIntra = true) {
  const qpMod6 = qp % 6;
  const qpDiv6 = Math.floor(qp / 6);
  const mf = quantMF4x4[qpMod6];
  const qBits = 15 + qpDiv6;
  // Math.floor keeps the offset integral. The previous code used the
  // fractional (1 << qBits) / 3 directly and silently relied on ToInt32
  // truncation inside the >> expression below; the result is identical,
  // but the intent is now explicit.
  const offset = Math.floor((1 << qBits) / (isIntra ? 3 : 6));
  const out = new Int32Array(16);

  for (let i = 0; i < 16; i++) {
    // NOTE(review): mf is indexed by scan position i while the coefficient is
    // read from raster position — assumes quantMF4x4 rows are laid out in scan
    // order; confirm against h264-tables.js.
    const pos = scanOrder4x4[i];
    const sign = coeffs[pos] < 0 ? -1 : 1;
    const absVal = Math.abs(coeffs[pos]);
    out[i] = sign * ((absVal * mf[i] + offset) >> qBits);
  }

  return out;
}
257
+
258
+ // ══════════════════════════════════════════════════════════
259
+ // Clipping utility
260
+ // ══════════════════════════════════════════════════════════
261
+
262
/**
 * Clamp a value to the inclusive range [min, max].
 *
 * @param {number} val - Value to clamp
 * @param {number} min - Lower bound
 * @param {number} max - Upper bound
 * @returns {number} The clamped value
 */
export function clip(val, min, max) {
  if (val < min) return min;
  if (val > max) return max;
  return val;
}
265
+
266
/**
 * Clamp a value to the 8-bit pixel range [0, 255].
 *
 * @param {number} val - Value to clamp
 * @returns {number} The clamped value
 */
export function clip255(val) {
  if (val < 0) return 0;
  if (val > 255) return 255;
  return val;
}
@@ -0,0 +1,169 @@
1
+ /**
2
+ * Smart Rendering
3
+ *
4
+ * Re-encodes the boundary GOP of an HLS segment to produce a
5
+ * frame-accurate cut point. Decodes preroll frames, re-encodes
6
+ * the target frame as a new keyframe, and re-encodes subsequent
7
+ * frames until the next original keyframe.
8
+ *
9
+ * @module codecs/smart-render
10
+ */
11
+
12
+ import { H264Decoder, YUVFrame } from './h264-decoder.js';
13
+ import { H264Encoder } from './h264-encoder.js';
14
+ import { TSParser, getCodecInfo } from '../parsers/mpegts.js';
15
+
16
+ /**
17
+ * Smart-render a TS segment to start at a precise frame.
18
+ *
19
+ * Takes a TS segment and a target start time (relative to segment start).
20
+ * Returns an array of NAL units where:
21
+ * - Frames before targetTime are removed
22
+ * - The frame at targetTime is re-encoded as an IDR keyframe
23
+ * - Frames between targetTime and next original keyframe are re-encoded as I-frames
24
+ * - Frames after the next original keyframe use original compressed data
25
+ *
26
+ * @param {TSParser} parser - Parsed TS segment
27
+ * @param {number} targetStartTime - Start time in seconds (relative to segment)
28
+ * @param {object} [options]
29
+ * @param {number} [options.endTime] - End time in seconds (relative to segment)
30
+ * @param {number} [options.qp=20] - Encoding quality (lower = better, 0-51)
31
+ * @returns {object} { videoAUs, audioAUs, actualStartTime }
32
+ */
33
+ export function smartRender(parser, targetStartTime, options = {}) {
34
+ const { endTime = Infinity, qp = 20 } = options;
35
+ const PTS = 90000;
36
+ const targetPts = targetStartTime * PTS;
37
+ const endPts = endTime * PTS;
38
+
39
+ const videoAUs = parser.videoAccessUnits;
40
+ const audioAUs = parser.audioAccessUnits;
41
+
42
+ if (videoAUs.length === 0) {
43
+ return { videoAUs: [], audioAUs: [], actualStartTime: targetStartTime };
44
+ }
45
+
46
+ // Find the keyframe at or before targetTime
47
+ let keyframeIdx = 0;
48
+ for (let i = 0; i < videoAUs.length; i++) {
49
+ if (videoAUs[i].pts > targetPts) break;
50
+ if (_isKeyframe(videoAUs[i])) keyframeIdx = i;
51
+ }
52
+
53
+ // Find the target frame (first frame at or after targetTime)
54
+ let targetIdx = keyframeIdx;
55
+ for (let i = keyframeIdx; i < videoAUs.length; i++) {
56
+ if (videoAUs[i].pts >= targetPts) { targetIdx = i; break; }
57
+ }
58
+
59
+ // Find the next keyframe after targetIdx
60
+ let nextKeyframeIdx = videoAUs.length;
61
+ for (let i = targetIdx + 1; i < videoAUs.length; i++) {
62
+ if (_isKeyframe(videoAUs[i])) { nextKeyframeIdx = i; break; }
63
+ }
64
+
65
+ // Find end frame
66
+ let endIdx = videoAUs.length;
67
+ for (let i = 0; i < videoAUs.length; i++) {
68
+ if (videoAUs[i].pts >= endPts) { endIdx = i; break; }
69
+ }
70
+
71
+ // If target is already a keyframe, no smart rendering needed
72
+ if (targetIdx === keyframeIdx) {
73
+ const clippedVideo = videoAUs.slice(targetIdx, endIdx);
74
+ const startPts = clippedVideo.length > 0 ? clippedVideo[0].pts : 0;
75
+ const clippedAudio = audioAUs.filter(au => au.pts >= startPts && au.pts < (endIdx < videoAUs.length ? videoAUs[endIdx].pts : Infinity));
76
+ return {
77
+ videoAUs: clippedVideo,
78
+ audioAUs: clippedAudio,
79
+ actualStartTime: startPts / PTS,
80
+ };
81
+ }
82
+
83
+ // ── Smart rendering: decode preroll, re-encode boundary ──
84
+
85
+ // Step 1: Decode preroll frames to get pixel data at targetIdx
86
+ const decoder = new H264Decoder();
87
+ let targetFrame = null;
88
+
89
+ for (let i = keyframeIdx; i <= targetIdx; i++) {
90
+ const frame = decoder.decodeAccessUnit(videoAUs[i].nalUnits);
91
+ if (frame && i === targetIdx) targetFrame = frame;
92
+ }
93
+
94
+ if (!targetFrame) {
95
+ // Fallback: couldn't decode, start at keyframe instead
96
+ const clippedVideo = videoAUs.slice(keyframeIdx, endIdx);
97
+ const startPts = clippedVideo[0].pts;
98
+ return {
99
+ videoAUs: clippedVideo,
100
+ audioAUs: audioAUs.filter(au => au.pts >= startPts),
101
+ actualStartTime: startPts / PTS,
102
+ };
103
+ }
104
+
105
+ // Step 2: Re-encode target frame as IDR
106
+ const encoder = new H264Encoder();
107
+ const encodedNals = encoder.encode(
108
+ targetFrame.Y, targetFrame.U, targetFrame.V,
109
+ targetFrame.width, targetFrame.height, qp
110
+ );
111
+
112
+ // Step 3: Build output access units
113
+ const outputVideo = [];
114
+ const targetPtsActual = videoAUs[targetIdx].pts;
115
+ const targetDts = videoAUs[targetIdx].dts;
116
+
117
+ // First AU: the re-encoded IDR frame (with new SPS/PPS)
118
+ outputVideo.push({
119
+ nalUnits: encodedNals, // [SPS, PPS, IDR]
120
+ pts: targetPtsActual,
121
+ dts: targetDts,
122
+ _smartRendered: true,
123
+ });
124
+
125
+ // Step 4: Re-encode frames between target and next keyframe as I-frames
126
+ for (let i = targetIdx + 1; i < Math.min(nextKeyframeIdx, endIdx); i++) {
127
+ // Decode this frame
128
+ const frame = decoder.decodeAccessUnit(videoAUs[i].nalUnits);
129
+ if (frame) {
130
+ const frameNals = encoder.encode(frame.Y, frame.U, frame.V,
131
+ frame.width, frame.height, qp);
132
+ // Use only the IDR NAL (skip SPS/PPS for subsequent frames)
133
+ const idrOnly = frameNals.filter(n => (n[0] & 0x1F) === 5);
134
+ outputVideo.push({
135
+ nalUnits: idrOnly,
136
+ pts: videoAUs[i].pts,
137
+ dts: videoAUs[i].dts,
138
+ _smartRendered: true,
139
+ });
140
+ }
141
+ }
142
+
143
+ // Step 5: Original compressed data from next keyframe onward
144
+ for (let i = nextKeyframeIdx; i < endIdx; i++) {
145
+ outputVideo.push(videoAUs[i]);
146
+ }
147
+
148
+ // Clip audio to match video range
149
+ const audioStartPts = targetPtsActual;
150
+ const audioEndPts = endIdx < videoAUs.length ? videoAUs[endIdx - 1].pts + PTS : Infinity;
151
+ const outputAudio = audioAUs.filter(au => au.pts >= audioStartPts && au.pts < audioEndPts);
152
+
153
+ return {
154
+ videoAUs: outputVideo,
155
+ audioAUs: outputAudio,
156
+ actualStartTime: targetPtsActual / PTS,
157
+ smartRenderedFrames: Math.min(nextKeyframeIdx, endIdx) - targetIdx,
158
+ originalFrames: Math.max(0, endIdx - nextKeyframeIdx),
159
+ };
160
+ }
161
+
162
/**
 * Whether an access unit contains an IDR slice (NAL type 5).
 * nal[0] is the NAL header byte; its low 5 bits carry the unit type.
 */
function _isKeyframe(au) {
  return au.nalUnits.some((nal) => (nal[0] & 0x1F) === 5);
}
168
+
169
+ export default smartRender;
package/src/hls-clip.js CHANGED
@@ -23,6 +23,7 @@
23
23
  import { parseHls, isHlsUrl, parsePlaylistText, toAbsoluteUrl } from './hls.js';
24
24
  import { TSParser, getCodecInfo } from './parsers/mpegts.js';
25
25
  import { createInitSegment, createFragment } from './muxers/fmp4.js';
26
+ import { smartRender } from './codecs/smart-render.js';
26
27
 
27
28
  // ── constants ─────────────────────────────────────────────
28
29
 
@@ -78,57 +79,85 @@ function remuxToFragment(parser, sequenceNumber, videoBaseTime, audioBaseTime, a
78
79
  }
79
80
 
80
81
  /**
81
- * Clip a parsed TS segment at the start (frame-accurate with preroll)
82
- * and/or at the end. Returns clipped access units + timing metadata.
82
+ * Clip a parsed TS segment at the start and/or end.
83
+ *
84
+ * Uses smart rendering when clipping at the start: re-encodes the
85
+ * boundary GOP so the segment starts with a new keyframe at the
86
+ * exact requested time. No preroll, no edit list, frame-accurate.
87
+ *
88
+ * @param {TSParser} parser - Parsed TS segment
89
+ * @param {number} [startTime] - Start time in seconds (relative to segment)
90
+ * @param {number} [endTime] - End time in seconds (relative to segment)
91
+ * @param {object} [options]
92
+ * @param {number} [options.qp=20] - Encoding quality for smart-rendered frames
83
93
  */
84
- function clipSegment(parser, startTime, endTime) {
94
+ function clipSegment(parser, startTime, endTime, options = {}) {
95
+ const { qp = 20 } = options;
85
96
  const startPts = (startTime !== undefined ? startTime : 0) * PTS_PER_SECOND;
86
97
  const endPts = (endTime !== undefined ? endTime : Infinity) * PTS_PER_SECOND;
87
98
  const videoAUs = parser.videoAccessUnits;
88
99
  const audioAUs = parser.audioAccessUnits;
89
100
 
90
- // Find keyframe at or before startTime
101
+ if (videoAUs.length === 0) return null;
102
+
103
+ // Check if startTime falls between keyframes (needs smart rendering)
91
104
  let keyframeIdx = 0;
92
105
  for (let i = 0; i < videoAUs.length; i++) {
93
106
  if (videoAUs[i].pts > startPts) break;
94
107
  if (isKeyframe(videoAUs[i])) keyframeIdx = i;
95
108
  }
96
109
 
97
- // Find end index
98
- let endIdx = videoAUs.length;
110
+ let targetIdx = keyframeIdx;
99
111
  for (let i = keyframeIdx; i < videoAUs.length; i++) {
100
- if (videoAUs[i].pts >= endPts) { endIdx = i; break; }
112
+ if (videoAUs[i].pts >= startPts) { targetIdx = i; break; }
101
113
  }
102
114
 
103
- const clippedVideo = videoAUs.slice(keyframeIdx, endIdx);
104
- if (clippedVideo.length === 0) return null;
115
+ const needsSmartRender = startTime !== undefined && targetIdx > keyframeIdx;
116
+
117
+ let clippedVideo, clippedAudio, startOffset;
118
+
119
+ if (needsSmartRender) {
120
+ // Smart render: re-encode boundary GOP for frame-accurate start
121
+ const result = smartRender(parser, startTime, { endTime, qp });
122
+ clippedVideo = result.videoAUs;
123
+ startOffset = result.videoAUs.length > 0 ? result.videoAUs[0].pts : 0;
105
124
 
106
- const keyframePts = clippedVideo[0].pts;
107
- const prerollPts = Math.max(0, startPts - keyframePts);
125
+ // Clip audio to match smart-rendered video
126
+ const audioEnd = endPts < Infinity ? Math.min(endPts, videoAUs[videoAUs.length - 1].pts + PTS_PER_SECOND) : Infinity;
127
+ clippedAudio = audioAUs.filter(au => au.pts >= startOffset && au.pts < audioEnd);
128
+ } else {
129
+ // Start is at a keyframe — no smart rendering needed
130
+ let endIdx = videoAUs.length;
131
+ for (let i = keyframeIdx; i < videoAUs.length; i++) {
132
+ if (videoAUs[i].pts >= endPts) { endIdx = i; break; }
133
+ }
134
+
135
+ clippedVideo = videoAUs.slice(keyframeIdx, endIdx);
136
+ if (clippedVideo.length === 0) return null;
137
+ startOffset = clippedVideo[0].pts;
108
138
 
109
- // Clip audio from keyframe (for A/V sync, matching the fix in ts-to-mp4.js)
110
- const lastVideoPts = clippedVideo[clippedVideo.length - 1].pts;
111
- const audioEndPts = Math.min(endPts, lastVideoPts + PTS_PER_SECOND);
112
- const clippedAudio = audioAUs.filter(au => au.pts >= keyframePts && au.pts < audioEndPts);
139
+ const lastVideoPts = clippedVideo[clippedVideo.length - 1].pts;
140
+ const audioEndPts = Math.min(endPts, lastVideoPts + PTS_PER_SECOND);
141
+ clippedAudio = audioAUs.filter(au => au.pts >= startOffset && au.pts < audioEndPts);
142
+ }
143
+
144
+ if (clippedVideo.length === 0) return null;
113
145
 
114
146
  // Normalize timestamps to start at 0
115
- const offset = keyframePts;
116
- for (const au of clippedVideo) { au.pts -= offset; au.dts -= offset; }
117
- for (const au of clippedAudio) { au.pts -= offset; }
147
+ for (const au of clippedVideo) { au.pts -= startOffset; au.dts -= startOffset; }
148
+ for (const au of clippedAudio) { au.pts -= startOffset; }
118
149
 
119
- // Calculate durations
120
- const videoDuration = clippedVideo.length > 1
150
+ // Duration from actual content
151
+ const duration = clippedVideo.length > 1
121
152
  ? clippedVideo[clippedVideo.length - 1].dts - clippedVideo[0].dts +
122
- (clippedVideo[1].dts - clippedVideo[0].dts) // add one frame for last
153
+ (clippedVideo.length > 1 ? clippedVideo[1].dts - clippedVideo[0].dts : 3003)
123
154
  : 3003;
124
- const playbackDuration = (videoDuration - prerollPts) / PTS_PER_SECOND;
125
155
 
126
156
  return {
127
157
  videoSamples: clippedVideo,
128
158
  audioSamples: clippedAudio,
129
- prerollPts,
130
- playbackDuration: Math.max(0, playbackDuration),
131
- mediaDuration: videoDuration / PTS_PER_SECOND,
159
+ duration: duration / PTS_PER_SECOND,
160
+ smartRendered: needsSmartRender,
132
161
  };
133
162
  }
134
163
 
@@ -383,13 +412,13 @@ export async function clipHls(source, options = {}) {
383
412
  });
384
413
 
385
414
  clipSegments.push({
386
- duration: firstClipped.playbackDuration,
415
+ duration: firstClipped.duration,
387
416
  data: firstFragment, // pre-clipped, in memory
388
417
  originalUrl: null,
389
418
  timelineOffset: 0,
390
419
  isBoundary: true,
391
420
  });
392
- timelineOffset += firstClipped.mediaDuration;
421
+ timelineOffset += firstClipped.duration;
393
422
 
394
423
  // ── Middle segments (pass-through, remuxed on demand) ──
395
424
  for (let i = 1; i < overlapping.length - 1; i++) {
@@ -410,6 +439,7 @@ export async function clipHls(source, options = {}) {
410
439
  const lastRelEnd = endTime - lastSeg.startTime;
411
440
  const lastClipped = clipSegment(lastParser, undefined, lastRelEnd);
412
441
  if (lastClipped && lastClipped.videoSamples.length > 0) {
442
+ const lastDuration = lastClipped.duration;
413
443
  const lastSeqNum = overlapping.length;
414
444
  const lastVideoBaseTime = Math.round(timelineOffset * PTS_PER_SECOND);
415
445
  const lastAudioBaseTime = Math.round(timelineOffset * audioTimescale);
@@ -426,7 +456,7 @@ export async function clipHls(source, options = {}) {
426
456
  });
427
457
 
428
458
  clipSegments.push({
429
- duration: lastClipped.playbackDuration,
459
+ duration: lastClipped.duration,
430
460
  data: lastFragment,
431
461
  originalUrl: null,
432
462
  timelineOffset,
package/src/index.js CHANGED
@@ -342,7 +342,7 @@ toMp4.TSParser = TSParser;
342
342
  toMp4.RemoteMp4 = RemoteMp4;
343
343
 
344
344
  // Version (injected at build time for dist, read from package.json for ESM)
345
- toMp4.version = '1.3.0';
345
+ toMp4.version = '1.4.0';
346
346
 
347
347
  // Export
348
348
  export {