node-av 3.1.3 → 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +65 -52
- package/binding.gyp +4 -0
- package/dist/api/audio-frame-buffer.d.ts +201 -0
- package/dist/api/audio-frame-buffer.js +275 -0
- package/dist/api/audio-frame-buffer.js.map +1 -0
- package/dist/api/bitstream-filter.d.ts +319 -78
- package/dist/api/bitstream-filter.js +680 -151
- package/dist/api/bitstream-filter.js.map +1 -1
- package/dist/api/constants.d.ts +44 -0
- package/dist/api/constants.js +45 -0
- package/dist/api/constants.js.map +1 -0
- package/dist/api/data/test_av1.ivf +0 -0
- package/dist/api/data/test_mjpeg.mjpeg +0 -0
- package/dist/api/data/test_vp8.ivf +0 -0
- package/dist/api/data/test_vp9.ivf +0 -0
- package/dist/api/decoder.d.ts +279 -17
- package/dist/api/decoder.js +998 -209
- package/dist/api/decoder.js.map +1 -1
- package/dist/api/{media-input.d.ts → demuxer.d.ts} +294 -44
- package/dist/api/demuxer.js +1968 -0
- package/dist/api/demuxer.js.map +1 -0
- package/dist/api/encoder.d.ts +308 -50
- package/dist/api/encoder.js +1133 -111
- package/dist/api/encoder.js.map +1 -1
- package/dist/api/filter-presets.d.ts +12 -5
- package/dist/api/filter-presets.js +21 -7
- package/dist/api/filter-presets.js.map +1 -1
- package/dist/api/filter.d.ts +406 -40
- package/dist/api/filter.js +966 -139
- package/dist/api/filter.js.map +1 -1
- package/dist/api/{fmp4.d.ts → fmp4-stream.d.ts} +141 -140
- package/dist/api/fmp4-stream.js +539 -0
- package/dist/api/fmp4-stream.js.map +1 -0
- package/dist/api/hardware.d.ts +58 -6
- package/dist/api/hardware.js +127 -11
- package/dist/api/hardware.js.map +1 -1
- package/dist/api/index.d.ts +6 -4
- package/dist/api/index.js +14 -8
- package/dist/api/index.js.map +1 -1
- package/dist/api/io-stream.d.ts +3 -3
- package/dist/api/io-stream.js +5 -4
- package/dist/api/io-stream.js.map +1 -1
- package/dist/api/{media-output.d.ts → muxer.d.ts} +274 -60
- package/dist/api/muxer.js +1934 -0
- package/dist/api/muxer.js.map +1 -0
- package/dist/api/pipeline.d.ts +77 -29
- package/dist/api/pipeline.js +435 -425
- package/dist/api/pipeline.js.map +1 -1
- package/dist/api/rtp-stream.d.ts +312 -0
- package/dist/api/rtp-stream.js +630 -0
- package/dist/api/rtp-stream.js.map +1 -0
- package/dist/api/types.d.ts +476 -55
- package/dist/api/utilities/async-queue.d.ts +91 -0
- package/dist/api/utilities/async-queue.js +162 -0
- package/dist/api/utilities/async-queue.js.map +1 -0
- package/dist/api/utilities/audio-sample.d.ts +1 -1
- package/dist/api/utilities/image.d.ts +1 -1
- package/dist/api/utilities/index.d.ts +2 -0
- package/dist/api/utilities/index.js +4 -0
- package/dist/api/utilities/index.js.map +1 -1
- package/dist/api/utilities/media-type.d.ts +1 -1
- package/dist/api/utilities/pixel-format.d.ts +1 -1
- package/dist/api/utilities/sample-format.d.ts +1 -1
- package/dist/api/utilities/scheduler.d.ts +169 -0
- package/dist/api/utilities/scheduler.js +136 -0
- package/dist/api/utilities/scheduler.js.map +1 -0
- package/dist/api/utilities/streaming.d.ts +74 -15
- package/dist/api/utilities/streaming.js +170 -12
- package/dist/api/utilities/streaming.js.map +1 -1
- package/dist/api/utilities/timestamp.d.ts +1 -1
- package/dist/api/webrtc-stream.d.ts +288 -0
- package/dist/api/webrtc-stream.js +440 -0
- package/dist/api/webrtc-stream.js.map +1 -0
- package/dist/constants/constants.d.ts +51 -1
- package/dist/constants/constants.js +47 -1
- package/dist/constants/constants.js.map +1 -1
- package/dist/constants/encoders.d.ts +2 -1
- package/dist/constants/encoders.js +4 -3
- package/dist/constants/encoders.js.map +1 -1
- package/dist/constants/hardware.d.ts +26 -0
- package/dist/constants/hardware.js +27 -0
- package/dist/constants/hardware.js.map +1 -0
- package/dist/constants/index.d.ts +1 -0
- package/dist/constants/index.js +1 -0
- package/dist/constants/index.js.map +1 -1
- package/dist/lib/binding.d.ts +19 -8
- package/dist/lib/binding.js.map +1 -1
- package/dist/lib/codec-context.d.ts +87 -0
- package/dist/lib/codec-context.js +125 -4
- package/dist/lib/codec-context.js.map +1 -1
- package/dist/lib/codec-parameters.d.ts +183 -1
- package/dist/lib/codec-parameters.js +209 -0
- package/dist/lib/codec-parameters.js.map +1 -1
- package/dist/lib/codec-parser.d.ts +23 -0
- package/dist/lib/codec-parser.js +25 -0
- package/dist/lib/codec-parser.js.map +1 -1
- package/dist/lib/codec.d.ts +26 -4
- package/dist/lib/codec.js +35 -0
- package/dist/lib/codec.js.map +1 -1
- package/dist/lib/dictionary.js +1 -0
- package/dist/lib/dictionary.js.map +1 -1
- package/dist/lib/error.js +1 -1
- package/dist/lib/error.js.map +1 -1
- package/dist/lib/filter-context.d.ts +52 -11
- package/dist/lib/filter-context.js +56 -12
- package/dist/lib/filter-context.js.map +1 -1
- package/dist/lib/filter-graph.d.ts +9 -0
- package/dist/lib/filter-graph.js +13 -0
- package/dist/lib/filter-graph.js.map +1 -1
- package/dist/lib/filter.d.ts +21 -0
- package/dist/lib/filter.js +28 -0
- package/dist/lib/filter.js.map +1 -1
- package/dist/lib/format-context.d.ts +48 -14
- package/dist/lib/format-context.js +76 -7
- package/dist/lib/format-context.js.map +1 -1
- package/dist/lib/frame.d.ts +168 -0
- package/dist/lib/frame.js +212 -0
- package/dist/lib/frame.js.map +1 -1
- package/dist/lib/hardware-device-context.d.ts +3 -2
- package/dist/lib/hardware-device-context.js.map +1 -1
- package/dist/lib/index.d.ts +1 -0
- package/dist/lib/index.js +2 -0
- package/dist/lib/index.js.map +1 -1
- package/dist/lib/input-format.d.ts +21 -0
- package/dist/lib/input-format.js +42 -2
- package/dist/lib/input-format.js.map +1 -1
- package/dist/lib/native-types.d.ts +48 -26
- package/dist/lib/option.d.ts +25 -13
- package/dist/lib/option.js +28 -0
- package/dist/lib/option.js.map +1 -1
- package/dist/lib/output-format.d.ts +22 -1
- package/dist/lib/output-format.js +28 -0
- package/dist/lib/output-format.js.map +1 -1
- package/dist/lib/packet.d.ts +35 -0
- package/dist/lib/packet.js +52 -2
- package/dist/lib/packet.js.map +1 -1
- package/dist/lib/stream.d.ts +126 -0
- package/dist/lib/stream.js +188 -5
- package/dist/lib/stream.js.map +1 -1
- package/dist/lib/sync-queue.d.ts +179 -0
- package/dist/lib/sync-queue.js +197 -0
- package/dist/lib/sync-queue.js.map +1 -0
- package/dist/lib/types.d.ts +27 -1
- package/dist/lib/utilities.d.ts +281 -53
- package/dist/lib/utilities.js +298 -55
- package/dist/lib/utilities.js.map +1 -1
- package/package.json +20 -19
- package/dist/api/fmp4.js +0 -710
- package/dist/api/fmp4.js.map +0 -1
- package/dist/api/media-input.js +0 -1075
- package/dist/api/media-input.js.map +0 -1
- package/dist/api/media-output.js +0 -1040
- package/dist/api/media-output.js.map +0 -1
- package/dist/api/webrtc.d.ts +0 -664
- package/dist/api/webrtc.js +0 -1132
- package/dist/api/webrtc.js.map +0 -1
|
@@ -0,0 +1,1968 @@
|
|
|
1
|
+
// TypeScript emit helper for `using`/`await using`: registers `value` on the
// disposal stack carried in `env` and returns the value unchanged.
var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
  if (value === null || value === void 0) {
    // Nothing to dispose. An `await using` still records a placeholder entry
    // so disposal keeps an async tick at this stack position.
    if (async) env.stack.push({ async: true });
    return value;
  }
  if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
  var disposeFn;
  var syncFallback;
  if (async) {
    if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
    disposeFn = value[Symbol.asyncDispose];
  }
  if (disposeFn === void 0) {
    if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
    disposeFn = value[Symbol.dispose];
    // An async `using` that falls back to the sync disposer must still surface
    // disposer exceptions as promise rejections, so remember the raw function.
    if (async) syncFallback = disposeFn;
  }
  if (typeof disposeFn !== "function") throw new TypeError("Object not disposable.");
  if (syncFallback) disposeFn = function () { try { syncFallback.call(this); } catch (e) { return Promise.reject(e); } };
  env.stack.push({ value: value, dispose: disposeFn, async: async });
  return value;
};
|
|
23
|
+
// TypeScript emit helper for `using`/`await using`: unwinds `env.stack`,
// calling each recorded disposer in reverse registration order. Errors raised
// while disposing are chained via SuppressedError (polyfilled below when the
// host lacks one) and rethrown — or surfaced as a rejection — once the stack
// is drained.
var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
  return function (env) {
    // Record `e`, wrapping any previously recorded error as "suppressed".
    function fail(e) {
      env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
      env.hasError = true;
    }
    // `s` is a small bit-state: bit 1 (s |= 1) set when a dispose-less async
    // placeholder entry was seen (forces async completion); bit 2 (s |= 2) set
    // once any async disposer actually ran.
    var r, s = 0;
    function next() {
      while (r = env.stack.pop()) {
        try {
          // A sync entry reached while only a placeholder was seen (s === 1):
          // push it back and resume on a microtask so disposal stays async.
          if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
          if (r.dispose) {
            var result = r.dispose.call(r.value);
            // Async disposer: wait for it, then continue; failures are
            // recorded via fail() and unwinding continues.
            if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
          }
          else s |= 1; // placeholder from `await using null/undefined`
        }
        catch (e) {
          fail(e);
        }
      }
      // Only placeholders were seen: finish asynchronously, rejecting if any
      // disposer failed along the way.
      if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
      if (env.hasError) throw env.error;
    }
    return next();
  };
})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
  // Minimal SuppressedError polyfill matching the ES2023 shape.
  var e = new Error(message);
  return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
});
|
|
53
|
+
import { createSocket } from 'dgram';
|
|
54
|
+
import { closeSync, openSync, readSync } from 'fs';
|
|
55
|
+
import { open } from 'fs/promises';
|
|
56
|
+
import { resolve } from 'path';
|
|
57
|
+
import { RtpPacket } from 'werift';
|
|
58
|
+
import { AV_NOPTS_VALUE, AV_PIX_FMT_NONE, AV_ROUND_NEAR_INF, AV_ROUND_PASS_MINMAX, AV_TIME_BASE, AV_TIME_BASE_Q, AVFLAG_NONE, AVFMT_FLAG_CUSTOM_IO, AVFMT_FLAG_NONBLOCK, AVFMT_TS_DISCONT, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO, AVSEEK_CUR, AVSEEK_END, AVSEEK_SET, } from '../constants/constants.js';
|
|
59
|
+
import { Dictionary } from '../lib/dictionary.js';
|
|
60
|
+
import { FFmpegError } from '../lib/error.js';
|
|
61
|
+
import { FormatContext } from '../lib/format-context.js';
|
|
62
|
+
import { InputFormat } from '../lib/input-format.js';
|
|
63
|
+
import { IOContext } from '../lib/io-context.js';
|
|
64
|
+
import { Packet } from '../lib/packet.js';
|
|
65
|
+
import { Rational } from '../lib/rational.js';
|
|
66
|
+
import { avGetPixFmtName, avGetSampleFmtName, avInvQ, avMulQ, avRescaleQ, avRescaleQRnd } from '../lib/utilities.js';
|
|
67
|
+
import { DELTA_THRESHOLD, DTS_ERROR_THRESHOLD, IO_BUFFER_SIZE, MAX_INPUT_QUEUE_SIZE } from './constants.js';
|
|
68
|
+
import { IOStream } from './io-stream.js';
|
|
69
|
+
import { StreamingUtils } from './utilities/streaming.js';
|
|
70
|
+
/**
|
|
71
|
+
* High-level demuxer for reading and demuxing media files.
|
|
72
|
+
*
|
|
73
|
+
* Provides simplified access to media streams, packets, and metadata.
|
|
74
|
+
* Handles file opening, format detection, and stream information extraction.
|
|
75
|
+
* Supports files, URLs, buffers, and raw data input with automatic cleanup.
|
|
76
|
+
* Essential component for media processing pipelines and transcoding.
|
|
77
|
+
*
|
|
78
|
+
* @example
|
|
79
|
+
* ```typescript
|
|
80
|
+
* import { Demuxer } from 'node-av/api';
|
|
81
|
+
*
|
|
82
|
+
* // Open media file
|
|
83
|
+
* await using input = await Demuxer.open('video.mp4');
|
|
84
|
+
* console.log(`Format: ${input.formatName}`);
|
|
85
|
+
* console.log(`Duration: ${input.duration}s`);
|
|
86
|
+
*
|
|
87
|
+
* // Process packets
|
|
88
|
+
* for await (const packet of input.packets()) {
|
|
89
|
+
* console.log(`Packet from stream ${packet.streamIndex}`);
|
|
90
|
+
* packet.free();
|
|
91
|
+
* }
|
|
92
|
+
* ```
|
|
93
|
+
*
|
|
94
|
+
* @example
|
|
95
|
+
* ```typescript
|
|
96
|
+
* // From buffer
|
|
97
|
+
* const buffer = await fs.readFile('video.mp4');
|
|
98
|
+
* await using input = await Demuxer.open(buffer);
|
|
99
|
+
*
|
|
100
|
+
* // Access streams
|
|
101
|
+
* const videoStream = input.video();
|
|
102
|
+
* const audioStream = input.audio();
|
|
103
|
+
* ```
|
|
104
|
+
*
|
|
105
|
+
* @see {@link Muxer} For writing media files
|
|
106
|
+
* @see {@link Decoder} For decoding packets to frames
|
|
107
|
+
* @see {@link FormatContext} For low-level API
|
|
108
|
+
*/
|
|
109
|
+
export class Demuxer {
  // Low-level FormatContext wrapped by this demuxer (opened by the static factories).
  formatContext;
  // Snapshot of the context's streams taken at construction time.
  _streams = [];
  // Optional IOContext backing buffer/custom I/O; undefined for plain file/URL input.
  ioContext;
  // Set once the demuxer has been closed (close logic not shown in this chunk —
  // presumably guards against double-free).
  isClosed = false;
  // Fully-defaulted open options (see open()/openSync()).
  options;
  // Timestamp processing state (per-stream)
  streamStates = new Map();
  // Timestamp discontinuity tracking (global): BigInt offset applied after a
  // discontinuity — consumers of this field are outside this chunk.
  tsOffsetDiscont = 0n;
  // Last timestamp observed; AV_NOPTS_VALUE until the first packet is seen.
  lastTs = AV_NOPTS_VALUE;
  // Demux manager state for handling multiple parallel packet generators
  activeGenerators = 0;
  demuxThread = null;
  // streamIndex or 'all' -> queue
  packetQueues = new Map();
  // Promise resolvers for waiting consumers
  queueResolvers = new Map();
  demuxThreadActive = false;
  demuxEof = false;
|
|
127
|
+
/**
|
|
128
|
+
* @param formatContext - Opened format context
|
|
129
|
+
*
|
|
130
|
+
* @param options - Media input options
|
|
131
|
+
*
|
|
132
|
+
* @param ioContext - Optional IO context for custom I/O (e.g., from Buffer)
|
|
133
|
+
*
|
|
134
|
+
* @internal
|
|
135
|
+
*/
|
|
136
|
+
constructor(formatContext, options, ioContext) {
|
|
137
|
+
this.formatContext = formatContext;
|
|
138
|
+
this.ioContext = ioContext;
|
|
139
|
+
this._streams = formatContext.streams ?? [];
|
|
140
|
+
this.options = options;
|
|
141
|
+
}
|
|
142
|
+
/**
|
|
143
|
+
* Probe media format without fully opening the file.
|
|
144
|
+
*
|
|
145
|
+
* Detects format by analyzing file headers and content.
|
|
146
|
+
* Useful for format validation before processing.
|
|
147
|
+
*
|
|
148
|
+
* Direct mapping to av_probe_input_format().
|
|
149
|
+
*
|
|
150
|
+
* @param input - File path or buffer to probe
|
|
151
|
+
*
|
|
152
|
+
* @returns Format information or null if unrecognized
|
|
153
|
+
*
|
|
154
|
+
* @example
|
|
155
|
+
* ```typescript
|
|
156
|
+
* const info = await Demuxer.probeFormat('video.mp4');
|
|
157
|
+
* if (info) {
|
|
158
|
+
* console.log(`Format: ${info.format}`);
|
|
159
|
+
* console.log(`Confidence: ${info.confidence}%`);
|
|
160
|
+
* }
|
|
161
|
+
* ```
|
|
162
|
+
*
|
|
163
|
+
* @example
|
|
164
|
+
* ```typescript
|
|
165
|
+
* // Probe from buffer
|
|
166
|
+
* const buffer = await fs.readFile('video.webm');
|
|
167
|
+
* const info = await Demuxer.probeFormat(buffer);
|
|
168
|
+
* console.log(`MIME type: ${info?.mimeType}`);
|
|
169
|
+
* ```
|
|
170
|
+
*
|
|
171
|
+
* @see {@link InputFormat.probe} For low-level probing
|
|
172
|
+
*/
|
|
173
|
+
static async probeFormat(input) {
|
|
174
|
+
try {
|
|
175
|
+
if (Buffer.isBuffer(input)) {
|
|
176
|
+
// Probe from buffer
|
|
177
|
+
const format = InputFormat.probe(input);
|
|
178
|
+
if (!format) {
|
|
179
|
+
return null;
|
|
180
|
+
}
|
|
181
|
+
return {
|
|
182
|
+
format: format.name ?? 'unknown',
|
|
183
|
+
longName: format.longName ?? undefined,
|
|
184
|
+
extensions: format.extensions ?? undefined,
|
|
185
|
+
mimeType: format.mimeType ?? undefined,
|
|
186
|
+
confidence: 100, // Direct probe always has high confidence
|
|
187
|
+
};
|
|
188
|
+
}
|
|
189
|
+
else {
|
|
190
|
+
// For files, read first part and probe
|
|
191
|
+
let fileHandle;
|
|
192
|
+
try {
|
|
193
|
+
fileHandle = await open(input, 'r');
|
|
194
|
+
// Read first 64KB for probing
|
|
195
|
+
const buffer = Buffer.alloc(65536);
|
|
196
|
+
const { bytesRead } = await fileHandle.read(buffer, 0, 65536, 0);
|
|
197
|
+
const probeBuffer = buffer.subarray(0, bytesRead);
|
|
198
|
+
const format = InputFormat.probe(probeBuffer, input);
|
|
199
|
+
if (!format) {
|
|
200
|
+
return null;
|
|
201
|
+
}
|
|
202
|
+
return {
|
|
203
|
+
format: format.name ?? 'unknown',
|
|
204
|
+
longName: format.longName ?? undefined,
|
|
205
|
+
extensions: format.extensions ?? undefined,
|
|
206
|
+
mimeType: format.mimeType ?? undefined,
|
|
207
|
+
confidence: 90, // File-based probe with filename hint
|
|
208
|
+
};
|
|
209
|
+
}
|
|
210
|
+
catch {
|
|
211
|
+
// If file reading fails, return null
|
|
212
|
+
return null;
|
|
213
|
+
}
|
|
214
|
+
finally {
|
|
215
|
+
await fileHandle?.close();
|
|
216
|
+
}
|
|
217
|
+
}
|
|
218
|
+
}
|
|
219
|
+
catch {
|
|
220
|
+
return null;
|
|
221
|
+
}
|
|
222
|
+
}
|
|
223
|
+
/**
|
|
224
|
+
* Probe media format without fully opening the file synchronously.
|
|
225
|
+
* Synchronous version of probeFormat.
|
|
226
|
+
*
|
|
227
|
+
* Detects format by analyzing file headers and content.
|
|
228
|
+
* Useful for format validation before processing.
|
|
229
|
+
*
|
|
230
|
+
* Direct mapping to av_probe_input_format().
|
|
231
|
+
*
|
|
232
|
+
* @param input - File path or buffer to probe
|
|
233
|
+
*
|
|
234
|
+
* @returns Format information or null if unrecognized
|
|
235
|
+
*
|
|
236
|
+
* @example
|
|
237
|
+
* ```typescript
|
|
238
|
+
* const info = Demuxer.probeFormatSync('video.mp4');
|
|
239
|
+
* if (info) {
|
|
240
|
+
* console.log(`Format: ${info.format}`);
|
|
241
|
+
* console.log(`Confidence: ${info.confidence}%`);
|
|
242
|
+
* }
|
|
243
|
+
* ```
|
|
244
|
+
*
|
|
245
|
+
* @example
|
|
246
|
+
* ```typescript
|
|
247
|
+
* // Probe from buffer
|
|
248
|
+
* const buffer = fs.readFileSync('video.webm');
|
|
249
|
+
* const info = Demuxer.probeFormatSync(buffer);
|
|
250
|
+
* console.log(`MIME type: ${info?.mimeType}`);
|
|
251
|
+
* ```
|
|
252
|
+
*
|
|
253
|
+
* @see {@link probeFormat} For async version
|
|
254
|
+
*/
|
|
255
|
+
static probeFormatSync(input) {
|
|
256
|
+
try {
|
|
257
|
+
if (Buffer.isBuffer(input)) {
|
|
258
|
+
// Probe from buffer
|
|
259
|
+
const format = InputFormat.probe(input);
|
|
260
|
+
if (!format) {
|
|
261
|
+
return null;
|
|
262
|
+
}
|
|
263
|
+
return {
|
|
264
|
+
format: format.name ?? 'unknown',
|
|
265
|
+
longName: format.longName ?? undefined,
|
|
266
|
+
extensions: format.extensions ?? undefined,
|
|
267
|
+
mimeType: format.mimeType ?? undefined,
|
|
268
|
+
confidence: 100, // Direct probe always has high confidence
|
|
269
|
+
};
|
|
270
|
+
}
|
|
271
|
+
else {
|
|
272
|
+
// For files, read first part and probe
|
|
273
|
+
let fd;
|
|
274
|
+
try {
|
|
275
|
+
fd = openSync(input, 'r');
|
|
276
|
+
// Read first 64KB for probing
|
|
277
|
+
const buffer = Buffer.alloc(65536);
|
|
278
|
+
const bytesRead = readSync(fd, buffer, 0, 65536, 0);
|
|
279
|
+
const probeBuffer = buffer.subarray(0, bytesRead);
|
|
280
|
+
const format = InputFormat.probe(probeBuffer, input);
|
|
281
|
+
if (!format) {
|
|
282
|
+
return null;
|
|
283
|
+
}
|
|
284
|
+
return {
|
|
285
|
+
format: format.name ?? 'unknown',
|
|
286
|
+
longName: format.longName ?? undefined,
|
|
287
|
+
extensions: format.extensions ?? undefined,
|
|
288
|
+
mimeType: format.mimeType ?? undefined,
|
|
289
|
+
confidence: 90, // File-based probe with filename hint
|
|
290
|
+
};
|
|
291
|
+
}
|
|
292
|
+
catch {
|
|
293
|
+
// If file reading fails, return null
|
|
294
|
+
return null;
|
|
295
|
+
}
|
|
296
|
+
finally {
|
|
297
|
+
if (fd !== undefined)
|
|
298
|
+
closeSync(fd);
|
|
299
|
+
}
|
|
300
|
+
}
|
|
301
|
+
}
|
|
302
|
+
catch {
|
|
303
|
+
return null;
|
|
304
|
+
}
|
|
305
|
+
}
|
|
306
|
+
/**
 * Open media input for demuxing.
 *
 * Accepts a file path or URL, a Buffer, custom I/O callbacks, or a raw
 * video/audio descriptor ({ input, type, width/sampleRate, ... }); detects
 * the container format (or uses `options.format`) and reads stream
 * information unless `options.skipStreamInfo` is set.
 *
 * @param input - File path/URL, Buffer, IOInputCallbacks, or raw-data descriptor
 * @param options - Open options (format, bufferSize, skipStreamInfo, ...)
 *
 * @returns Demuxer ready to read packets
 *
 * @throws {Error} If the format is unknown, the buffer is empty, or custom I/O lacks a format
 * @throws {TypeError} If the input type is not supported
 * @throws {FFmpegError} If FFmpeg fails to open the input or find stream info
 */
static async open(input, options = {}) {
  // Check if input is raw data: rewrite it into a (format, options) pair for
  // the matching raw demuxer, then fall through to the generic path below.
  if (typeof input === 'object' && 'type' in input && ('width' in input || 'sampleRate' in input)) {
    // Build options for raw data
    const rawOptions = {
      bufferSize: options.bufferSize,
      format: options.format ?? (input.type === 'video' ? 'rawvideo' : 's16le'),
      options: {
        ...options.options,
      },
    };
    if (input.type === 'video') {
      rawOptions.options = {
        ...rawOptions.options,
        video_size: `${input.width}x${input.height}`,
        pixel_format: avGetPixFmtName(input.pixelFormat) ?? 'yuv420p',
        framerate: new Rational(input.frameRate.num, input.frameRate.den).toString(),
      };
    }
    else {
      rawOptions.options = {
        ...rawOptions.options,
        sample_rate: input.sampleRate,
        channels: input.channels,
        // NOTE(review): the 's16le' fallback looks like a *container* format
        // name, not a sample_fmt value ('s16' would be expected) — confirm
        // against FFmpeg's pcm demuxer option names.
        sample_fmt: avGetSampleFmtName(input.sampleFormat) ?? 's16le',
      };
    }
    input = input.input;
    options = rawOptions;
  }
  // Original implementation for non-raw data
  const formatContext = new FormatContext();
  let ioContext;
  let optionsDict = null;
  let inputFormat = null;
  try {
    // Create options dictionary if options are provided
    if (options.options) {
      optionsDict = Dictionary.fromObject(options.options);
    }
    // Find input format if specified
    if (options.format) {
      inputFormat = InputFormat.findInputFormat(options.format);
      if (!inputFormat) {
        throw new Error(`Input format '${options.format}' not found`);
      }
    }
    if (typeof input === 'string') {
      // File path or URL - resolve relative paths to absolute
      // Check if it's a URL (starts with protocol://) or a file path
      const isUrl = /^[a-zA-Z][a-zA-Z0-9+.-]*:\/\//.test(input);
      const resolvedInput = isUrl ? input : resolve(input);
      const ret = await formatContext.openInput(resolvedInput, inputFormat, optionsDict);
      FFmpegError.throwIfError(ret, 'Failed to open input');
      // Non-blocking reads for file/URL inputs.
      formatContext.setFlags(AVFMT_FLAG_NONBLOCK);
    }
    else if (Buffer.isBuffer(input)) {
      // Validate buffer is not empty
      if (input.length === 0) {
        throw new Error('Cannot open media from empty buffer');
      }
      // From buffer - allocate context first for custom I/O
      formatContext.allocContext();
      ioContext = IOStream.create(input, { bufferSize: options.bufferSize });
      formatContext.pb = ioContext;
      const ret = await formatContext.openInput('', inputFormat, optionsDict);
      FFmpegError.throwIfError(ret, 'Failed to open input from buffer');
    }
    else if (typeof input === 'object' && 'read' in input) {
      // Custom I/O with callbacks - format is required
      if (!options.format) {
        throw new Error('Format must be specified for custom I/O');
      }
      // Allocate context first for custom I/O
      formatContext.allocContext();
      // Setup custom I/O with callbacks
      ioContext = new IOContext();
      ioContext.allocContextWithCallbacks(options.bufferSize ?? IO_BUFFER_SIZE, 0, input.read, null, input.seek);
      formatContext.pb = ioContext;
      formatContext.setFlags(AVFMT_FLAG_CUSTOM_IO);
      const ret = await formatContext.openInput('', inputFormat, optionsDict);
      FFmpegError.throwIfError(ret, 'Failed to open input from custom I/O');
    }
    else {
      throw new TypeError('Invalid input type. Expected file path, URL, Buffer, or IOInputCallbacks');
    }
    // Find stream information
    if (!options.skipStreamInfo) {
      const ret = await formatContext.findStreamInfo(null);
      FFmpegError.throwIfError(ret, 'Failed to find stream info');
      // Try to parse extradata for video streams with missing dimensions,
      // pixel format, or frame rate (findStreamInfo may leave these unset).
      for (const stream of formatContext.streams ?? []) {
        if (stream.codecpar.codecType === AVMEDIA_TYPE_VIDEO) {
          const dimensionsMissing = stream.codecpar.width === 0 || stream.codecpar.height === 0;
          const invalidFormat = stream.codecpar.format === AV_PIX_FMT_NONE;
          const invalidRate = stream.codecpar.frameRate.num === 0 || stream.codecpar.frameRate.den === 0;
          const needsParsing = dimensionsMissing || invalidFormat || invalidRate;
          if (needsParsing && stream.codecpar.extradataSize > 0) {
            stream.codecpar.parseExtradata();
          }
        }
      }
    }
    // Determine buffer size
    let bufferSize = options.bufferSize ?? IO_BUFFER_SIZE;
    if (!ioContext && formatContext.iformat && formatContext.pb) {
      // Check if this is a streaming input (like RTSP, HTTP, etc.) —
      // non-seekable pb means streaming.
      const isStreaming = formatContext.pb.seekable === 0;
      if (isStreaming) {
        bufferSize *= 2; // double buffer size for streaming inputs
      }
    }
    // Apply defaults to options
    const fullOptions = {
      bufferSize,
      format: options.format ?? '',
      skipStreamInfo: options.skipStreamInfo ?? false,
      startWithKeyframe: options.startWithKeyframe ?? false,
      dtsDeltaThreshold: options.dtsDeltaThreshold ?? DELTA_THRESHOLD,
      dtsErrorThreshold: options.dtsErrorThreshold ?? DTS_ERROR_THRESHOLD,
      copyTs: options.copyTs ?? false,
      options: options.options ?? {},
    };
    const mediaInput = new Demuxer(formatContext, fullOptions, ioContext);
    return mediaInput;
  }
  catch (error) {
    // Clean up only on error
    if (ioContext) {
      // Clear the pb reference first (it points at the IOContext we free next)
      formatContext.pb = null;
      // Free the IOContext (for both custom I/O and buffer-based I/O)
      ioContext.freeContext();
    }
    // Clean up FormatContext
    await formatContext.closeInput();
    throw error;
  }
  finally {
    // Clean up options dictionary (only a carrier for open flags)
    if (optionsDict) {
      optionsDict.free();
    }
  }
}
|
|
451
|
+
static openSync(input, options = {}) {
|
|
452
|
+
// Check if input is raw data
|
|
453
|
+
if (typeof input === 'object' && 'type' in input && ('width' in input || 'sampleRate' in input)) {
|
|
454
|
+
// Build options for raw data
|
|
455
|
+
const rawOptions = {
|
|
456
|
+
bufferSize: options.bufferSize,
|
|
457
|
+
format: options.format ?? (input.type === 'video' ? 'rawvideo' : 's16le'),
|
|
458
|
+
options: {
|
|
459
|
+
...options.options,
|
|
460
|
+
},
|
|
461
|
+
};
|
|
462
|
+
if (input.type === 'video') {
|
|
463
|
+
rawOptions.options = {
|
|
464
|
+
...rawOptions.options,
|
|
465
|
+
video_size: `${input.width}x${input.height}`,
|
|
466
|
+
pixel_format: avGetPixFmtName(input.pixelFormat) ?? 'yuv420p',
|
|
467
|
+
framerate: new Rational(input.frameRate.num, input.frameRate.den).toString(),
|
|
468
|
+
};
|
|
469
|
+
}
|
|
470
|
+
else {
|
|
471
|
+
rawOptions.options = {
|
|
472
|
+
...rawOptions.options,
|
|
473
|
+
sample_rate: input.sampleRate,
|
|
474
|
+
channels: input.channels,
|
|
475
|
+
sample_fmt: avGetSampleFmtName(input.sampleFormat) ?? 's16le',
|
|
476
|
+
};
|
|
477
|
+
}
|
|
478
|
+
input = input.input;
|
|
479
|
+
options = rawOptions;
|
|
480
|
+
}
|
|
481
|
+
// Original implementation for non-raw data
|
|
482
|
+
const formatContext = new FormatContext();
|
|
483
|
+
let ioContext;
|
|
484
|
+
let optionsDict = null;
|
|
485
|
+
let inputFormat = null;
|
|
486
|
+
try {
|
|
487
|
+
// Create options dictionary if options are provided
|
|
488
|
+
if (options.options) {
|
|
489
|
+
optionsDict = Dictionary.fromObject(options.options);
|
|
490
|
+
}
|
|
491
|
+
// Find input format if specified
|
|
492
|
+
if (options.format) {
|
|
493
|
+
inputFormat = InputFormat.findInputFormat(options.format);
|
|
494
|
+
if (!inputFormat) {
|
|
495
|
+
throw new Error(`Input format '${options.format}' not found`);
|
|
496
|
+
}
|
|
497
|
+
}
|
|
498
|
+
if (typeof input === 'string') {
|
|
499
|
+
// File path or URL - resolve relative paths to absolute
|
|
500
|
+
// Check if it's a URL (starts with protocol://) or a file path
|
|
501
|
+
const isUrl = /^[a-zA-Z][a-zA-Z0-9+.-]*:\/\//.test(input);
|
|
502
|
+
const resolvedInput = isUrl ? input : resolve(input);
|
|
503
|
+
const ret = formatContext.openInputSync(resolvedInput, inputFormat, optionsDict);
|
|
504
|
+
FFmpegError.throwIfError(ret, 'Failed to open input');
|
|
505
|
+
formatContext.setFlags(AVFMT_FLAG_NONBLOCK);
|
|
506
|
+
}
|
|
507
|
+
else if (Buffer.isBuffer(input)) {
|
|
508
|
+
// Validate buffer is not empty
|
|
509
|
+
if (input.length === 0) {
|
|
510
|
+
throw new Error('Cannot open media from empty buffer');
|
|
511
|
+
}
|
|
512
|
+
// From buffer - allocate context first for custom I/O
|
|
513
|
+
formatContext.allocContext();
|
|
514
|
+
ioContext = IOStream.create(input, { bufferSize: options.bufferSize });
|
|
515
|
+
formatContext.pb = ioContext;
|
|
516
|
+
const ret = formatContext.openInputSync('', inputFormat, optionsDict);
|
|
517
|
+
FFmpegError.throwIfError(ret, 'Failed to open input from buffer');
|
|
518
|
+
}
|
|
519
|
+
else if (typeof input === 'object' && 'read' in input) {
|
|
520
|
+
// Custom I/O with callbacks - format is required
|
|
521
|
+
if (!options.format) {
|
|
522
|
+
throw new Error('Format must be specified for custom I/O');
|
|
523
|
+
}
|
|
524
|
+
// Allocate context first for custom I/O
|
|
525
|
+
formatContext.allocContext();
|
|
526
|
+
// Setup custom I/O with callbacks
|
|
527
|
+
ioContext = new IOContext();
|
|
528
|
+
ioContext.allocContextWithCallbacks(options.bufferSize ?? IO_BUFFER_SIZE, 0, input.read, null, input.seek);
|
|
529
|
+
formatContext.pb = ioContext;
|
|
530
|
+
formatContext.setFlags(AVFMT_FLAG_CUSTOM_IO);
|
|
531
|
+
const ret = formatContext.openInputSync('', inputFormat, optionsDict);
|
|
532
|
+
FFmpegError.throwIfError(ret, 'Failed to open input from custom I/O');
|
|
533
|
+
}
|
|
534
|
+
else {
|
|
535
|
+
throw new TypeError('Invalid input type. Expected file path, URL, Buffer, or IOInputCallbacks');
|
|
536
|
+
}
|
|
537
|
+
// Find stream information
|
|
538
|
+
if (!options.skipStreamInfo) {
|
|
539
|
+
const ret = formatContext.findStreamInfoSync(null);
|
|
540
|
+
FFmpegError.throwIfError(ret, 'Failed to find stream info');
|
|
541
|
+
}
|
|
542
|
+
// Determine buffer size
|
|
543
|
+
let bufferSize = options.bufferSize ?? IO_BUFFER_SIZE;
|
|
544
|
+
if (!ioContext && formatContext.iformat && formatContext.pb) {
|
|
545
|
+
// Check if this is a streaming input (like RTSP, HTTP, etc.)
|
|
546
|
+
const isStreaming = formatContext.pb.seekable === 0;
|
|
547
|
+
if (isStreaming) {
|
|
548
|
+
bufferSize *= 2; // double buffer size for streaming inputs
|
|
549
|
+
}
|
|
550
|
+
}
|
|
551
|
+
// Apply defaults to options
|
|
552
|
+
const fullOptions = {
|
|
553
|
+
bufferSize,
|
|
554
|
+
format: options.format ?? '',
|
|
555
|
+
skipStreamInfo: options.skipStreamInfo ?? false,
|
|
556
|
+
startWithKeyframe: options.startWithKeyframe ?? false,
|
|
557
|
+
dtsDeltaThreshold: options.dtsDeltaThreshold ?? DELTA_THRESHOLD,
|
|
558
|
+
dtsErrorThreshold: options.dtsErrorThreshold ?? DTS_ERROR_THRESHOLD,
|
|
559
|
+
copyTs: options.copyTs ?? false,
|
|
560
|
+
options: options.options ?? {},
|
|
561
|
+
};
|
|
562
|
+
const mediaInput = new Demuxer(formatContext, fullOptions, ioContext);
|
|
563
|
+
return mediaInput;
|
|
564
|
+
}
|
|
565
|
+
catch (error) {
|
|
566
|
+
// Clean up only on error
|
|
567
|
+
if (ioContext) {
|
|
568
|
+
// Clear the pb reference first
|
|
569
|
+
formatContext.pb = null;
|
|
570
|
+
// Free the IOContext (for both custom I/O and buffer-based I/O)
|
|
571
|
+
ioContext.freeContext();
|
|
572
|
+
}
|
|
573
|
+
// Clean up FormatContext
|
|
574
|
+
formatContext.closeInputSync();
|
|
575
|
+
throw error;
|
|
576
|
+
}
|
|
577
|
+
finally {
|
|
578
|
+
// Clean up options dictionary
|
|
579
|
+
if (optionsDict) {
|
|
580
|
+
optionsDict.free();
|
|
581
|
+
}
|
|
582
|
+
}
|
|
583
|
+
}
|
|
584
|
+
/**
|
|
585
|
+
* Open RTP/SRTP input stream via localhost UDP.
|
|
586
|
+
*
|
|
587
|
+
* Creates a Demuxer from SDP string received via UDP socket.
|
|
588
|
+
* Opens UDP socket and configures FFmpeg to receive and parse RTP packets.
|
|
589
|
+
*
|
|
590
|
+
* @param sdpContent - SDP content string describing the RTP stream
|
|
591
|
+
*
|
|
592
|
+
* @throws {Error} If SDP parsing or socket setup fails
|
|
593
|
+
*
|
|
594
|
+
* @throws {FFmpegError} If FFmpeg operations fail
|
|
595
|
+
*
|
|
596
|
+
* @returns Promise with Demuxer, sendPacket function and cleanup
|
|
597
|
+
*
|
|
598
|
+
* @example
|
|
599
|
+
* ```typescript
|
|
600
|
+
* import { Demuxer, StreamingUtils } from 'node-av/api';
|
|
601
|
+
* import { AV_CODEC_ID_OPUS } from 'node-av/constants';
|
|
602
|
+
*
|
|
603
|
+
* // Generate SDP for SRTP encrypted Opus
|
|
604
|
+
* const sdp = StreamingUtils.createRTPInputSDP([{
|
|
605
|
+
* port: 5004,
|
|
606
|
+
* codecId: AV_CODEC_ID_OPUS,
|
|
607
|
+
* payloadType: 111,
|
|
608
|
+
* clockRate: 16000,
|
|
609
|
+
* channels: 1,
|
|
610
|
+
* srtp: { key: srtpKey, salt: srtpSalt }
|
|
611
|
+
* }]);
|
|
612
|
+
*
|
|
613
|
+
* // Open RTP input
|
|
614
|
+
* const { input, sendPacket, close } = await Demuxer.openSDP(sdp);
|
|
615
|
+
*
|
|
616
|
+
* // Route encrypted RTP packets from network
|
|
617
|
+
* socket.on('message', (msg) => sendPacket(msg));
|
|
618
|
+
*
|
|
619
|
+
* // Decode audio
|
|
620
|
+
* const decoder = await Decoder.create(input.audio()!);
|
|
621
|
+
* for await (const packet of input.packets()) {
|
|
622
|
+
* const frame = await decoder.decode(packet);
|
|
623
|
+
* // Process frame...
|
|
624
|
+
* }
|
|
625
|
+
*
|
|
626
|
+
* // Cleanup
|
|
627
|
+
* await close();
|
|
628
|
+
* ```
|
|
629
|
+
*
|
|
630
|
+
* @see {@link StreamingUtils.createInputSDP} to generate SDP content.
|
|
631
|
+
*/
|
|
632
|
+
static async openSDP(sdpContent) {
|
|
633
|
+
// Extract all ports from SDP (supports multi-stream: video + audio)
|
|
634
|
+
const ports = StreamingUtils.extractPortsFromSDP(sdpContent);
|
|
635
|
+
if (ports.length === 0) {
|
|
636
|
+
throw new Error('Failed to extract any ports from SDP content');
|
|
637
|
+
}
|
|
638
|
+
// Convert SDP to buffer for custom I/O
|
|
639
|
+
const sdpBuffer = Buffer.from(sdpContent);
|
|
640
|
+
let position = 0;
|
|
641
|
+
// Create custom I/O callbacks for SDP content
|
|
642
|
+
const callbacks = {
|
|
643
|
+
read: (size) => {
|
|
644
|
+
if (position >= sdpBuffer.length) {
|
|
645
|
+
return null; // EOF
|
|
646
|
+
}
|
|
647
|
+
const chunk = sdpBuffer.subarray(position, Math.min(position + size, sdpBuffer.length));
|
|
648
|
+
position += chunk.length;
|
|
649
|
+
return chunk;
|
|
650
|
+
},
|
|
651
|
+
seek: (offset, whence) => {
|
|
652
|
+
const offsetNum = Number(offset);
|
|
653
|
+
if (whence === AVSEEK_SET) {
|
|
654
|
+
position = offsetNum;
|
|
655
|
+
}
|
|
656
|
+
else if (whence === AVSEEK_CUR) {
|
|
657
|
+
position += offsetNum;
|
|
658
|
+
}
|
|
659
|
+
else if (whence === AVSEEK_END) {
|
|
660
|
+
position = sdpBuffer.length + offsetNum;
|
|
661
|
+
}
|
|
662
|
+
return position;
|
|
663
|
+
},
|
|
664
|
+
};
|
|
665
|
+
// Create UDP socket for sending packets to FFmpeg
|
|
666
|
+
const udpSocket = createSocket('udp4');
|
|
667
|
+
try {
|
|
668
|
+
// Open Demuxer with SDP format using custom I/O
|
|
669
|
+
const input = await Demuxer.open(callbacks, {
|
|
670
|
+
format: 'sdp',
|
|
671
|
+
skipStreamInfo: true,
|
|
672
|
+
options: {
|
|
673
|
+
protocol_whitelist: 'pipe,udp,rtp,file,crypto',
|
|
674
|
+
listen_timeout: -1,
|
|
675
|
+
},
|
|
676
|
+
});
|
|
677
|
+
const sendPacket = (rtpPacket, streamIndex = 0) => {
|
|
678
|
+
const port = ports[streamIndex];
|
|
679
|
+
if (!port) {
|
|
680
|
+
throw new Error(`No port found for stream index ${streamIndex}. Available streams: ${ports.length}`);
|
|
681
|
+
}
|
|
682
|
+
const data = rtpPacket instanceof RtpPacket ? rtpPacket.serialize() : rtpPacket;
|
|
683
|
+
udpSocket.send(data, port, '127.0.0.1');
|
|
684
|
+
};
|
|
685
|
+
const close = async () => {
|
|
686
|
+
await input.close();
|
|
687
|
+
udpSocket.close();
|
|
688
|
+
};
|
|
689
|
+
const closeSync = () => {
|
|
690
|
+
input.closeSync();
|
|
691
|
+
udpSocket.close();
|
|
692
|
+
};
|
|
693
|
+
return { input, sendPacket, close, closeSync };
|
|
694
|
+
}
|
|
695
|
+
catch (error) {
|
|
696
|
+
// Cleanup on error
|
|
697
|
+
udpSocket.close();
|
|
698
|
+
throw error;
|
|
699
|
+
}
|
|
700
|
+
}
|
|
701
|
+
/**
|
|
702
|
+
* Open RTP/SRTP input stream via localhost UDP synchronously.
|
|
703
|
+
* Synchronous version of openSDP.
|
|
704
|
+
*
|
|
705
|
+
* Creates a Demuxer from SDP string received via UDP socket.
|
|
706
|
+
* Opens UDP socket and configures FFmpeg to receive and parse RTP packets.
|
|
707
|
+
*
|
|
708
|
+
* @param sdpContent - SDP content string describing the RTP stream
|
|
709
|
+
*
|
|
710
|
+
* @throws {Error} If SDP parsing or socket setup fails
|
|
711
|
+
*
|
|
712
|
+
* @throws {FFmpegError} If FFmpeg operations fail
|
|
713
|
+
*
|
|
714
|
+
* @returns Object with Demuxer, sendPacket function and cleanup
|
|
715
|
+
*
|
|
716
|
+
* @example
|
|
717
|
+
* ```typescript
|
|
718
|
+
* import { Demuxer, StreamingUtils } from 'node-av/api';
|
|
719
|
+
* import { AV_CODEC_ID_OPUS } from 'node-av/constants';
|
|
720
|
+
*
|
|
721
|
+
* // Generate SDP for SRTP encrypted Opus
|
|
722
|
+
* const sdp = StreamingUtils.createRTPInputSDP([{
|
|
723
|
+
* port: 5004,
|
|
724
|
+
* codecId: AV_CODEC_ID_OPUS,
|
|
725
|
+
* payloadType: 111,
|
|
726
|
+
* clockRate: 16000,
|
|
727
|
+
* channels: 1,
|
|
728
|
+
* srtp: { key: srtpKey, salt: srtpSalt }
|
|
729
|
+
* }]);
|
|
730
|
+
*
|
|
731
|
+
* // Open RTP input
|
|
732
|
+
* const { input, sendPacket, closeSync } = Demuxer.openSDPSync(sdp);
|
|
733
|
+
*
|
|
734
|
+
* // Route encrypted RTP packets from network
|
|
735
|
+
* socket.on('message', (msg) => sendPacket(msg));
|
|
736
|
+
*
|
|
737
|
+
* // Decode audio
|
|
738
|
+
* const decoder = await Decoder.create(input.audio()!);
|
|
739
|
+
* for await (const packet of input.packets()) {
|
|
740
|
+
* const frame = await decoder.decode(packet);
|
|
741
|
+
* // Process frame...
|
|
742
|
+
* }
|
|
743
|
+
*
|
|
744
|
+
* // Cleanup synchronously
|
|
745
|
+
* closeSync();
|
|
746
|
+
* ```
|
|
747
|
+
*
|
|
748
|
+
* @see {@link StreamingUtils.createInputSDP} to generate SDP content.
|
|
749
|
+
* @see {@link openSDP} For async version
|
|
750
|
+
*/
|
|
751
|
+
static openSDPSync(sdpContent) {
|
|
752
|
+
// Extract all ports from SDP (supports multi-stream: video + audio)
|
|
753
|
+
const ports = StreamingUtils.extractPortsFromSDP(sdpContent);
|
|
754
|
+
if (ports.length === 0) {
|
|
755
|
+
throw new Error('Failed to extract any ports from SDP content');
|
|
756
|
+
}
|
|
757
|
+
// Convert SDP to buffer for custom I/O
|
|
758
|
+
const sdpBuffer = Buffer.from(sdpContent);
|
|
759
|
+
let position = 0;
|
|
760
|
+
// Create custom I/O callbacks for SDP content
|
|
761
|
+
const callbacks = {
|
|
762
|
+
read: (size) => {
|
|
763
|
+
if (position >= sdpBuffer.length) {
|
|
764
|
+
return null; // EOF
|
|
765
|
+
}
|
|
766
|
+
const chunk = sdpBuffer.subarray(position, Math.min(position + size, sdpBuffer.length));
|
|
767
|
+
position += chunk.length;
|
|
768
|
+
return chunk;
|
|
769
|
+
},
|
|
770
|
+
seek: (offset, whence) => {
|
|
771
|
+
const offsetNum = Number(offset);
|
|
772
|
+
if (whence === AVSEEK_SET) {
|
|
773
|
+
position = offsetNum;
|
|
774
|
+
}
|
|
775
|
+
else if (whence === AVSEEK_CUR) {
|
|
776
|
+
position += offsetNum;
|
|
777
|
+
}
|
|
778
|
+
else if (whence === AVSEEK_END) {
|
|
779
|
+
position = sdpBuffer.length + offsetNum;
|
|
780
|
+
}
|
|
781
|
+
return position;
|
|
782
|
+
},
|
|
783
|
+
};
|
|
784
|
+
// Create UDP socket for sending packets to FFmpeg
|
|
785
|
+
const udpSocket = createSocket('udp4');
|
|
786
|
+
try {
|
|
787
|
+
// Open Demuxer with SDP format using custom I/O
|
|
788
|
+
const input = Demuxer.openSync(callbacks, {
|
|
789
|
+
format: 'sdp',
|
|
790
|
+
skipStreamInfo: true,
|
|
791
|
+
options: {
|
|
792
|
+
protocol_whitelist: 'pipe,udp,rtp,file,crypto',
|
|
793
|
+
listen_timeout: -1,
|
|
794
|
+
},
|
|
795
|
+
});
|
|
796
|
+
const sendPacket = (rtpPacket, streamIndex = 0) => {
|
|
797
|
+
const port = ports[streamIndex];
|
|
798
|
+
if (!port) {
|
|
799
|
+
throw new Error(`No port found for stream index ${streamIndex}. Available streams: ${ports.length}`);
|
|
800
|
+
}
|
|
801
|
+
const data = rtpPacket instanceof RtpPacket ? rtpPacket.serialize() : rtpPacket;
|
|
802
|
+
udpSocket.send(data, port, '127.0.0.1');
|
|
803
|
+
};
|
|
804
|
+
const close = async () => {
|
|
805
|
+
await input.close();
|
|
806
|
+
udpSocket.close();
|
|
807
|
+
};
|
|
808
|
+
const closeSync = () => {
|
|
809
|
+
input.closeSync();
|
|
810
|
+
udpSocket.close();
|
|
811
|
+
};
|
|
812
|
+
return { input, sendPacket, close, closeSync };
|
|
813
|
+
}
|
|
814
|
+
catch (error) {
|
|
815
|
+
// Cleanup on error
|
|
816
|
+
udpSocket.close();
|
|
817
|
+
throw error;
|
|
818
|
+
}
|
|
819
|
+
}
|
|
820
|
+
/**
|
|
821
|
+
* Check if input is open.
|
|
822
|
+
*
|
|
823
|
+
* @example
|
|
824
|
+
* ```typescript
|
|
825
|
+
* if (!input.isInputOpen) {
|
|
826
|
+
* console.log('Input is not open');
|
|
827
|
+
* }
|
|
828
|
+
* ```
|
|
829
|
+
*/
|
|
830
|
+
get isInputOpen() {
|
|
831
|
+
return !this.isClosed;
|
|
832
|
+
}
|
|
833
|
+
/**
|
|
834
|
+
* Get all streams in the media.
|
|
835
|
+
*
|
|
836
|
+
* @example
|
|
837
|
+
* ```typescript
|
|
838
|
+
* for (const stream of input.streams) {
|
|
839
|
+
* console.log(`Stream ${stream.index}: ${stream.codecpar.codecType}`);
|
|
840
|
+
* }
|
|
841
|
+
* ```
|
|
842
|
+
*/
|
|
843
|
+
get streams() {
    // Returns the internal stream array directly (no defensive copy);
    // callers should treat it as read-only.
    return this._streams;
}
|
|
846
|
+
/**
|
|
847
|
+
* Get media duration in seconds.
|
|
848
|
+
*
|
|
849
|
+
* Returns 0 if duration is unknown or not available or input is closed.
|
|
850
|
+
*
|
|
851
|
+
* @example
|
|
852
|
+
* ```typescript
|
|
853
|
+
* console.log(`Duration: ${input.duration} seconds`);
|
|
854
|
+
* ```
|
|
855
|
+
*/
|
|
856
|
+
get duration() {
|
|
857
|
+
if (this.isClosed) {
|
|
858
|
+
return 0;
|
|
859
|
+
}
|
|
860
|
+
const duration = this.formatContext.duration;
|
|
861
|
+
if (!duration || duration <= 0) {
|
|
862
|
+
return 0;
|
|
863
|
+
}
|
|
864
|
+
// Convert from AV_TIME_BASE (microseconds) to seconds
|
|
865
|
+
return Number(duration) / 1000000;
|
|
866
|
+
}
|
|
867
|
+
/**
|
|
868
|
+
* Get media bitrate in kilobits per second.
|
|
869
|
+
*
|
|
870
|
+
* Returns 0 if bitrate is unknown or not available or input is closed.
|
|
871
|
+
*
|
|
872
|
+
* @example
|
|
873
|
+
* ```typescript
|
|
874
|
+
* console.log(`Bitrate: ${input.bitRate} kbps`);
|
|
875
|
+
* ```
|
|
876
|
+
*/
|
|
877
|
+
get bitRate() {
|
|
878
|
+
if (this.isClosed) {
|
|
879
|
+
return 0;
|
|
880
|
+
}
|
|
881
|
+
const bitrate = this.formatContext.bitRate;
|
|
882
|
+
if (!bitrate || bitrate <= 0) {
|
|
883
|
+
return 0;
|
|
884
|
+
}
|
|
885
|
+
// Convert from bits per second to kilobits per second
|
|
886
|
+
return Number(bitrate) / 1000;
|
|
887
|
+
}
|
|
888
|
+
/**
|
|
889
|
+
* Get media metadata.
|
|
890
|
+
*
|
|
891
|
+
* Returns all metadata tags as key-value pairs.
|
|
892
|
+
*
|
|
893
|
+
* @example
|
|
894
|
+
* ```typescript
|
|
895
|
+
* const metadata = input.metadata;
|
|
896
|
+
* console.log(`Title: ${metadata.title}`);
|
|
897
|
+
* console.log(`Artist: ${metadata.artist}`);
|
|
898
|
+
* ```
|
|
899
|
+
*/
|
|
900
|
+
get metadata() {
|
|
901
|
+
if (this.isClosed) {
|
|
902
|
+
return {};
|
|
903
|
+
}
|
|
904
|
+
return this.formatContext.metadata?.getAll() ?? {};
|
|
905
|
+
}
|
|
906
|
+
/**
|
|
907
|
+
* Get format name.
|
|
908
|
+
*
|
|
909
|
+
* Returns 'unknown' if input is closed or format is not available.
|
|
910
|
+
*
|
|
911
|
+
* @example
|
|
912
|
+
* ```typescript
|
|
913
|
+
* console.log(`Format: ${input.formatName}`); // "mov,mp4,m4a,3gp,3g2,mj2"
|
|
914
|
+
* ```
|
|
915
|
+
*/
|
|
916
|
+
get formatName() {
|
|
917
|
+
if (this.isClosed) {
|
|
918
|
+
return 'unknown';
|
|
919
|
+
}
|
|
920
|
+
return this.formatContext.iformat?.name ?? 'unknown';
|
|
921
|
+
}
|
|
922
|
+
/**
|
|
923
|
+
* Get format long name.
|
|
924
|
+
*
|
|
925
|
+
* Returns 'Unknown Format' if input is closed or format is not available.
|
|
926
|
+
*
|
|
927
|
+
* @example
|
|
928
|
+
* ```typescript
|
|
929
|
+
* console.log(`Format: ${input.formatLongName}`); // "QuickTime / MOV"
|
|
930
|
+
* ```
|
|
931
|
+
*/
|
|
932
|
+
get formatLongName() {
|
|
933
|
+
if (this.isClosed) {
|
|
934
|
+
return 'Unknown Format';
|
|
935
|
+
}
|
|
936
|
+
return this.formatContext.iformat?.longName ?? 'Unknown Format';
|
|
937
|
+
}
|
|
938
|
+
/**
|
|
939
|
+
* Get MIME type of the input format.
|
|
940
|
+
*
|
|
941
|
+
* Returns null if input is closed or format is not available.
|
|
942
|
+
*
|
|
943
|
+
* @example
|
|
944
|
+
* ```typescript
|
|
945
|
+
* console.log(`MIME Type: ${input.mimeType}`); // "video/mp4"
|
|
946
|
+
* ```
|
|
947
|
+
*/
|
|
948
|
+
get mimeType() {
|
|
949
|
+
if (this.isClosed) {
|
|
950
|
+
return null;
|
|
951
|
+
}
|
|
952
|
+
return this.formatContext.iformat?.mimeType ?? null;
|
|
953
|
+
}
|
|
954
|
+
/**
|
|
955
|
+
* Get input stream by index.
|
|
956
|
+
*
|
|
957
|
+
* Returns the stream at the specified index.
|
|
958
|
+
*
|
|
959
|
+
* @param index - Stream index
|
|
960
|
+
*
|
|
961
|
+
* @returns Stream or undefined if index is invalid
|
|
962
|
+
*
|
|
963
|
+
* @example
|
|
964
|
+
* ```typescript
|
|
965
|
+
* const input = await Demuxer.open('input.mp4');
|
|
966
|
+
*
|
|
967
|
+
* // Get the input stream to inspect codec parameters
|
|
968
|
+
* const stream = input.getStream(1); // Get stream at index 1
|
|
969
|
+
* if (stream) {
|
|
970
|
+
* console.log(`Input codec: ${stream.codecpar.codecId}`);
|
|
971
|
+
* }
|
|
972
|
+
* ```
|
|
973
|
+
*
|
|
974
|
+
* @see {@link video} For getting video streams
|
|
975
|
+
* @see {@link audio} For getting audio streams
|
|
976
|
+
*/
|
|
977
|
+
getStream(index) {
|
|
978
|
+
const streams = this.formatContext.streams;
|
|
979
|
+
if (!streams || index < 0 || index >= streams.length) {
|
|
980
|
+
return undefined;
|
|
981
|
+
}
|
|
982
|
+
return streams[index];
|
|
983
|
+
}
|
|
984
|
+
/**
|
|
985
|
+
* Get video stream by index.
|
|
986
|
+
*
|
|
987
|
+
* Returns the nth video stream (0-based index).
|
|
988
|
+
* Returns undefined if stream doesn't exist.
|
|
989
|
+
*
|
|
990
|
+
* @param index - Video stream index (default: 0)
|
|
991
|
+
*
|
|
992
|
+
* @returns Video stream or undefined
|
|
993
|
+
*
|
|
994
|
+
* @example
|
|
995
|
+
* ```typescript
|
|
996
|
+
* const videoStream = input.video();
|
|
997
|
+
* if (videoStream) {
|
|
998
|
+
* console.log(`Video: ${videoStream.codecpar.width}x${videoStream.codecpar.height}`);
|
|
999
|
+
* }
|
|
1000
|
+
* ```
|
|
1001
|
+
*
|
|
1002
|
+
* @example
|
|
1003
|
+
* ```typescript
|
|
1004
|
+
* // Get second video stream
|
|
1005
|
+
* const secondVideo = input.video(1);
|
|
1006
|
+
* ```
|
|
1007
|
+
*
|
|
1008
|
+
* @see {@link audio} For audio streams
|
|
1009
|
+
* @see {@link findBestStream} For automatic selection
|
|
1010
|
+
*/
|
|
1011
|
+
video(index = 0) {
|
|
1012
|
+
const streams = this._streams.filter((s) => s.codecpar.codecType === AVMEDIA_TYPE_VIDEO);
|
|
1013
|
+
return streams[index];
|
|
1014
|
+
}
|
|
1015
|
+
/**
|
|
1016
|
+
* Get audio stream by index.
|
|
1017
|
+
*
|
|
1018
|
+
* Returns the nth audio stream (0-based index).
|
|
1019
|
+
* Returns undefined if stream doesn't exist.
|
|
1020
|
+
*
|
|
1021
|
+
* @param index - Audio stream index (default: 0)
|
|
1022
|
+
*
|
|
1023
|
+
* @returns Audio stream or undefined
|
|
1024
|
+
*
|
|
1025
|
+
* @example
|
|
1026
|
+
* ```typescript
|
|
1027
|
+
* const audioStream = input.audio();
|
|
1028
|
+
* if (audioStream) {
|
|
1029
|
+
* console.log(`Audio: ${audioStream.codecpar.sampleRate}Hz`);
|
|
1030
|
+
* }
|
|
1031
|
+
* ```
|
|
1032
|
+
*
|
|
1033
|
+
* @example
|
|
1034
|
+
* ```typescript
|
|
1035
|
+
* // Get second audio stream
|
|
1036
|
+
* const secondAudio = input.audio(1);
|
|
1037
|
+
* ```
|
|
1038
|
+
*
|
|
1039
|
+
* @see {@link video} For video streams
|
|
1040
|
+
* @see {@link findBestStream} For automatic selection
|
|
1041
|
+
*/
|
|
1042
|
+
audio(index = 0) {
|
|
1043
|
+
const streams = this._streams.filter((s) => s.codecpar.codecType === AVMEDIA_TYPE_AUDIO);
|
|
1044
|
+
return streams[index];
|
|
1045
|
+
}
|
|
1046
|
+
/**
|
|
1047
|
+
* Get input format details.
|
|
1048
|
+
*
|
|
1049
|
+
* Returns null if input is closed or format is not available.
|
|
1050
|
+
*
|
|
1051
|
+
* @returns Input format or null
|
|
1052
|
+
*
|
|
1053
|
+
* @example
|
|
1054
|
+
* ```typescript
|
|
1055
|
+
* const inputFormat = input.inputFormat;
|
|
1056
|
+
* if (inputFormat) {
|
|
1057
|
+
* console.log(`Input Format: ${inputFormat.name}`);
|
|
1058
|
+
* }
|
|
1059
|
+
* ```
|
|
1060
|
+
*/
|
|
1061
|
+
inputFormat() {
|
|
1062
|
+
return this.formatContext.iformat;
|
|
1063
|
+
}
|
|
1064
|
+
/**
|
|
1065
|
+
* Find the best stream of a given type.
|
|
1066
|
+
*
|
|
1067
|
+
* Uses FFmpeg's stream selection algorithm.
|
|
1068
|
+
* Considers codec support, default flags, and quality.
|
|
1069
|
+
*
|
|
1070
|
+
* Direct mapping to av_find_best_stream().
|
|
1071
|
+
*
|
|
1072
|
+
* @param type - Media type to find
|
|
1073
|
+
*
|
|
1074
|
+
* @returns Best stream or undefined if not found or input is closed
|
|
1075
|
+
*
|
|
1076
|
+
* @example
|
|
1077
|
+
* ```typescript
|
|
1078
|
+
* import { AVMEDIA_TYPE_VIDEO } from 'node-av/constants';
|
|
1079
|
+
*
|
|
1080
|
+
* const bestVideo = input.findBestStream(AVMEDIA_TYPE_VIDEO);
|
|
1081
|
+
* if (bestVideo) {
|
|
1082
|
+
* const decoder = await Decoder.create(bestVideo);
|
|
1083
|
+
* }
|
|
1084
|
+
* ```
|
|
1085
|
+
*
|
|
1086
|
+
* @see {@link video} For direct video stream access
|
|
1087
|
+
* @see {@link audio} For direct audio stream access
|
|
1088
|
+
*/
|
|
1089
|
+
findBestStream(type) {
|
|
1090
|
+
if (this.isClosed) {
|
|
1091
|
+
return undefined;
|
|
1092
|
+
}
|
|
1093
|
+
const bestStreamIndex = this.formatContext.findBestStream(type);
|
|
1094
|
+
return this._streams.find((s) => s.index === bestStreamIndex);
|
|
1095
|
+
}
|
|
1096
|
+
/**
|
|
1097
|
+
* Read packets from media as async generator.
|
|
1098
|
+
*
|
|
1099
|
+
* Yields demuxed packets for processing.
|
|
1100
|
+
* Automatically handles packet memory management.
|
|
1101
|
+
* Optionally filters packets by stream index.
|
|
1102
|
+
*
|
|
1103
|
+
* **Supports parallel generators**: Multiple `packets()` iterators can run concurrently.
|
|
1104
|
+
* When multiple generators are active, an internal demux thread automatically handles
|
|
1105
|
+
* packet distribution to avoid race conditions.
|
|
1106
|
+
*
|
|
1107
|
+
* Direct mapping to av_read_frame().
|
|
1108
|
+
*
|
|
1109
|
+
* @param index - Optional stream index to filter
|
|
1110
|
+
*
|
|
1111
|
+
* @yields {Packet} Demuxed packets (must be freed by caller)
|
|
1112
|
+
*
|
|
1113
|
+
* @throws {Error} If packet cloning fails
|
|
1114
|
+
*
|
|
1115
|
+
* @example
|
|
1116
|
+
* ```typescript
|
|
1117
|
+
* // Read all packets
|
|
1118
|
+
* for await (const packet of input.packets()) {
|
|
1119
|
+
* console.log(`Packet: stream=${packet.streamIndex}, pts=${packet.pts}`);
|
|
1120
|
+
* packet.free();
|
|
1121
|
+
* }
|
|
1122
|
+
* ```
|
|
1123
|
+
*
|
|
1124
|
+
* @example
|
|
1125
|
+
* ```typescript
|
|
1126
|
+
* // Read only video packets
|
|
1127
|
+
* const videoStream = input.video();
|
|
1128
|
+
* for await (const packet of input.packets(videoStream.index)) {
|
|
1129
|
+
* // Process video packet
|
|
1130
|
+
* packet.free();
|
|
1131
|
+
* }
|
|
1132
|
+
* ```
|
|
1133
|
+
*
|
|
1134
|
+
* @example
|
|
1135
|
+
* ```typescript
|
|
1136
|
+
* // Parallel processing of video and audio streams
|
|
1137
|
+
* const videoGen = input.packets(videoStream.index);
|
|
1138
|
+
* const audioGen = input.packets(audioStream.index);
|
|
1139
|
+
*
|
|
1140
|
+
* await Promise.all([
|
|
1141
|
+
* (async () => {
|
|
1142
|
+
* for await (const packet of videoGen) {
|
|
1143
|
+
* // Process video
|
|
1144
|
+
* packet.free();
|
|
1145
|
+
* }
|
|
1146
|
+
* })(),
|
|
1147
|
+
* (async () => {
|
|
1148
|
+
* for await (const packet of audioGen) {
|
|
1149
|
+
* // Process audio
|
|
1150
|
+
* packet.free();
|
|
1151
|
+
* }
|
|
1152
|
+
* })()
|
|
1153
|
+
* ]);
|
|
1154
|
+
* ```
|
|
1155
|
+
*
|
|
1156
|
+
* @see {@link Decoder.frames} For decoding packets
|
|
1157
|
+
*/
|
|
1158
|
+
async *packets(index) {
    // Register this generator so the demux thread knows a consumer exists.
    this.activeGenerators++;
    // Each generator reads from its own queue: one shared 'all' queue for
    // unfiltered consumers, or a per-stream-index queue. Note index 0 is a
    // valid key because ?? only falls back on null/undefined.
    const queueKey = index ?? 'all';
    // Initialize queue for this generator (demux thread distributes into it)
    if (!this.packetQueues.has(queueKey)) {
        this.packetQueues.set(queueKey, []);
    }
    // Always start demux thread (handles single and multiple generators);
    // it is a no-op when already running.
    this.startDemuxThread();
    try {
        // When startWithKeyframe is off, treat the keyframe gate as already
        // satisfied so every packet passes through.
        let hasSeenKeyframe = !this.options.startWithKeyframe;
        // Read from queue (demux thread is handling av_read_frame)
        const queue = this.packetQueues.get(queueKey);
        while (!this.isClosed) {
            // Try to get packet from queue
            let packet = queue.shift();
            // If queue is empty, wait for next packet
            if (!packet) {
                // Check for EOF first
                if (this.demuxEof) {
                    break; // End of stream
                }
                // Create promise and register resolver; the demux thread
                // resolves it when a packet lands in (or EOF reaches) this queue.
                const { promise, resolve } = Promise.withResolvers();
                this.queueResolvers.set(queueKey, resolve);
                // Wait for demux thread to add packet
                await promise;
                // Check again after wakeup — the wakeup may have been EOF.
                if (this.demuxEof) {
                    break;
                }
                packet = queue.shift();
                if (!packet) {
                    // Spurious wakeup / another path drained the queue; retry.
                    continue;
                }
            }
            // Apply keyframe filtering if needed: drop video packets until the
            // first keyframe; non-video packets always pass through.
            if (!hasSeenKeyframe) {
                const stream = this._streams[packet.streamIndex];
                const isVideoStream = stream?.codecpar.codecType === AVMEDIA_TYPE_VIDEO;
                if (isVideoStream && packet.isKeyframe) {
                    hasSeenKeyframe = true;
                }
                else if (isVideoStream && !packet.isKeyframe) {
                    // Dropped packets are freed here since they never reach the caller.
                    packet.free();
                    continue;
                }
            }
            // Caller takes ownership and must free() the packet.
            yield packet;
        }
        // Signal EOF with an explicit null sentinel.
        yield null;
    }
    finally {
        // Unregister this generator (also runs on early break/return/throw).
        this.activeGenerators--;
        // Stop demux thread if no more generators
        if (this.activeGenerators === 0) {
            await this.stopDemuxThread();
        }
    }
}
|
|
1221
|
+
/**
|
|
1222
|
+
* Read packets from media as generator synchronously.
|
|
1223
|
+
* Synchronous version of packets.
|
|
1224
|
+
*
|
|
1225
|
+
* Yields demuxed packets for processing.
|
|
1226
|
+
* Automatically handles packet memory management.
|
|
1227
|
+
* Optionally filters packets by stream index.
|
|
1228
|
+
*
|
|
1229
|
+
* Direct mapping to av_read_frame().
|
|
1230
|
+
*
|
|
1231
|
+
* @param index - Optional stream index to filter
|
|
1232
|
+
*
|
|
1233
|
+
* @yields {Packet} Demuxed packets (must be freed by caller)
|
|
1234
|
+
*
|
|
1235
|
+
* @throws {Error} If packet cloning fails
|
|
1236
|
+
*
|
|
1237
|
+
* @example
|
|
1238
|
+
* ```typescript
|
|
1239
|
+
* // Read all packets
|
|
1240
|
+
* for (const packet of input.packetsSync()) {
|
|
1241
|
+
* console.log(`Packet: stream=${packet.streamIndex}, pts=${packet.pts}`);
|
|
1242
|
+
* packet.free();
|
|
1243
|
+
* }
|
|
1244
|
+
* ```
|
|
1245
|
+
*
|
|
1246
|
+
* @example
|
|
1247
|
+
* ```typescript
|
|
1248
|
+
* // Read only video packets
|
|
1249
|
+
* const videoStream = input.video();
|
|
1250
|
+
* for (const packet of input.packetsSync(videoStream.index)) {
|
|
1251
|
+
* // Process video packet
|
|
1252
|
+
* packet.free();
|
|
1253
|
+
* }
|
|
1254
|
+
* ```
|
|
1255
|
+
*
|
|
1256
|
+
* @see {@link packets} For async version
|
|
1257
|
+
*/
|
|
1258
|
+
*packetsSync(index) {
    // env_1 is the compiler-emitted state for explicit resource management
    // (presumably TypeScript `using` lowered to __addDisposableResource /
    // __disposeResources — confirm against the build setup).
    const env_1 = { stack: [], error: void 0, hasError: false };
    try {
        // One reusable native packet; its data buffer is unref'd after each
        // iteration and the object disposed when the generator finishes.
        const packet = __addDisposableResource(env_1, new Packet(), false);
        packet.alloc();
        // When startWithKeyframe is off, the keyframe gate is pre-satisfied.
        let hasSeenKeyframe = !this.options.startWithKeyframe;
        while (!this.isClosed) {
            const ret = this.formatContext.readFrameSync(packet);
            if (ret < 0) {
                // Negative return = EOF or read error; stop iterating.
                break;
            }
            // Get stream for timestamp processing
            const stream = this._streams[packet.streamIndex];
            if (stream) {
                // Set packet timebase to stream timebase
                // This must be done BEFORE any timestamp processing
                packet.timeBase = stream.timeBase;
                // Apply timestamp processing
                // 1. PTS wrap-around correction
                this.ptsWrapAroundCorrection(packet, stream);
                // 2. Timestamp discontinuity processing
                this.timestampDiscontinuityProcess(packet, stream);
                // 3. DTS prediction/update
                this.dtsPredict(packet, stream);
            }
            // Optional stream-index filter: undefined means "all streams".
            if (index === undefined || packet.streamIndex === index) {
                // If startWithKeyframe is enabled, skip packets until we see a keyframe
                // Only apply to video streams - audio packets should always pass through
                if (!hasSeenKeyframe) {
                    const stream = this._streams[packet.streamIndex];
                    const isVideoStream = stream?.codecpar.codecType === AVMEDIA_TYPE_VIDEO;
                    if (isVideoStream && packet.isKeyframe) {
                        hasSeenKeyframe = true;
                    }
                    else if (isVideoStream && !packet.isKeyframe) {
                        // Skip video P-frames until first keyframe
                        packet.unref();
                        continue;
                    }
                    // Non-video streams (audio, etc.) always pass through
                }
                // Clone the packet for the user
                // This creates a new Packet object that shares the same data buffer
                // through reference counting. The data won't be freed until both
                // the original and the clone are unreferenced.
                const cloned = packet.clone();
                if (!cloned) {
                    throw new Error('Failed to clone packet (out of memory)');
                }
                // Caller takes ownership of the clone and must free() it.
                yield cloned;
            }
            // Unreference the original packet's data buffer
            // This allows us to reuse the packet object for the next readFrame()
            // The data itself is still alive because the clone has a reference
            packet.unref();
        }
        // Signal EOF with an explicit null sentinel.
        yield null;
    }
    catch (e_1) {
        // Record the error for __disposeResources, which rethrows after cleanup.
        env_1.error = e_1;
        env_1.hasError = true;
    }
    finally {
        // Dispose the tracked Packet (and rethrow any recorded error).
        __disposeResources(env_1);
    }
}
|
|
1325
|
+
/**
|
|
1326
|
+
* Seek to timestamp in media.
|
|
1327
|
+
*
|
|
1328
|
+
* Seeks to the specified position in seconds.
|
|
1329
|
+
* Can seek in specific stream or globally.
|
|
1330
|
+
*
|
|
1331
|
+
* Direct mapping to av_seek_frame().
|
|
1332
|
+
*
|
|
1333
|
+
* @param timestamp - Target position in seconds
|
|
1334
|
+
*
|
|
1335
|
+
* @param streamIndex - Stream index or -1 for global (default: -1)
|
|
1336
|
+
*
|
|
1337
|
+
* @param flags - Seek flags (default: AVFLAG_NONE)
|
|
1338
|
+
*
|
|
1339
|
+
* @returns 0 on success, negative on error
|
|
1340
|
+
*
|
|
1341
|
+
* @throws {Error} If input is closed
|
|
1342
|
+
*
|
|
1343
|
+
* @example
|
|
1344
|
+
* ```typescript
|
|
1345
|
+
* // Seek to 30 seconds
|
|
1346
|
+
* const ret = await input.seek(30);
|
|
1347
|
+
* FFmpegError.throwIfError(ret, 'seek failed');
|
|
1348
|
+
* ```
|
|
1349
|
+
*
|
|
1350
|
+
* @example
|
|
1351
|
+
* ```typescript
|
|
1352
|
+
* import { AVSEEK_FLAG_BACKWARD } from 'node-av/constants';
|
|
1353
|
+
*
|
|
1354
|
+
* // Seek to keyframe before 60 seconds
|
|
1355
|
+
* await input.seek(60, -1, AVSEEK_FLAG_BACKWARD);
|
|
1356
|
+
* ```
|
|
1357
|
+
*
|
|
1358
|
+
* @see {@link AVSeekFlag} For seek flags
|
|
1359
|
+
*/
|
|
1360
|
+
async seek(timestamp, streamIndex = -1, flags = AVFLAG_NONE) {
|
|
1361
|
+
if (this.isClosed) {
|
|
1362
|
+
throw new Error('Cannot seek on closed input');
|
|
1363
|
+
}
|
|
1364
|
+
// Convert seconds to AV_TIME_BASE
|
|
1365
|
+
const ts = BigInt(Math.floor(timestamp * 1000000));
|
|
1366
|
+
return this.formatContext.seekFrame(streamIndex, ts, flags);
|
|
1367
|
+
}
|
|
1368
|
+
/**
|
|
1369
|
+
* Seek to timestamp in media synchronously.
|
|
1370
|
+
* Synchronous version of seek.
|
|
1371
|
+
*
|
|
1372
|
+
* Seeks to the specified position in seconds.
|
|
1373
|
+
* Can seek in specific stream or globally.
|
|
1374
|
+
*
|
|
1375
|
+
* Direct mapping to av_seek_frame().
|
|
1376
|
+
*
|
|
1377
|
+
* @param timestamp - Target position in seconds
|
|
1378
|
+
*
|
|
1379
|
+
* @param streamIndex - Stream index or -1 for global (default: -1)
|
|
1380
|
+
*
|
|
1381
|
+
* @param flags - Seek flags (default: AVFLAG_NONE)
|
|
1382
|
+
*
|
|
1383
|
+
* @returns 0 on success, negative on error
|
|
1384
|
+
*
|
|
1385
|
+
* @throws {Error} If input is closed
|
|
1386
|
+
*
|
|
1387
|
+
* @example
|
|
1388
|
+
* ```typescript
|
|
1389
|
+
* // Seek to 30 seconds
|
|
1390
|
+
* const ret = input.seekSync(30);
|
|
1391
|
+
* FFmpegError.throwIfError(ret, 'seek failed');
|
|
1392
|
+
* ```
|
|
1393
|
+
*
|
|
1394
|
+
* @example
|
|
1395
|
+
* ```typescript
|
|
1396
|
+
* import { AVSEEK_FLAG_BACKWARD } from 'node-av/constants';
|
|
1397
|
+
*
|
|
1398
|
+
* // Seek to keyframe before 60 seconds
|
|
1399
|
+
* input.seekSync(60, -1, AVSEEK_FLAG_BACKWARD);
|
|
1400
|
+
* ```
|
|
1401
|
+
*
|
|
1402
|
+
* @see {@link seek} For async version
|
|
1403
|
+
*/
|
|
1404
|
+
seekSync(timestamp, streamIndex = -1, flags = AVFLAG_NONE) {
|
|
1405
|
+
if (this.isClosed) {
|
|
1406
|
+
throw new Error('Cannot seek on closed input');
|
|
1407
|
+
}
|
|
1408
|
+
// Convert seconds to AV_TIME_BASE
|
|
1409
|
+
const ts = BigInt(Math.floor(timestamp * 1000000));
|
|
1410
|
+
return this.formatContext.seekFrameSync(streamIndex, ts, flags);
|
|
1411
|
+
}
|
|
1412
|
+
/**
 * Start the internal demux "thread" (really a long-running async task) that
 * reads packets from the format context and fans them out to the per-consumer
 * packet queues in `this.packetQueues`.
 *
 * Idempotent: a second call while the loop is running (or its promise is
 * still held) is a no-op. The loop exits when `demuxThreadActive` is cleared,
 * the demuxer is closed, or `readFrame` reports end-of-stream/error.
 *
 * @internal
 */
startDemuxThread() {
    if (this.demuxThreadActive || this.demuxThread) {
        return; // Already running
    }
    this.demuxThreadActive = true;
    this.demuxThread = (async () => {
        // env_2 / __addDisposableResource / __disposeResources are the
        // TypeScript-emitted `using` helpers: the Packet below is disposed
        // in the finally block no matter how the loop exits.
        const env_2 = { stack: [], error: void 0, hasError: false };
        try {
            const packet = __addDisposableResource(env_2, new Packet(), false);
            packet.alloc();
            while (this.demuxThreadActive && !this.isClosed) {
                // Backpressure: if every registered queue is at capacity,
                // yield to the event loop and re-check instead of reading.
                let allQueuesFull = true;
                for (const queue of this.packetQueues.values()) {
                    if (queue.length < MAX_INPUT_QUEUE_SIZE) {
                        allQueuesFull = false;
                        break;
                    }
                }
                if (allQueuesFull) {
                    await new Promise(setImmediate);
                    continue;
                }
                // Read next packet from the container.
                const ret = await this.formatContext.readFrame(packet);
                if (ret < 0) {
                    // EOF (or read error): flag it and wake every consumer
                    // currently parked on an empty queue so they can observe it.
                    this.demuxEof = true;
                    for (const resolve of this.queueResolvers.values()) {
                        resolve();
                    }
                    this.queueResolvers.clear();
                    break;
                }
                // Timestamp post-processing (wrap-around, discontinuity,
                // DTS prediction) needs the owning stream's timebase.
                const stream = this._streams[packet.streamIndex];
                if (stream) {
                    packet.timeBase = stream.timeBase;
                    this.ptsWrapAroundCorrection(packet, stream);
                    this.timestampDiscontinuityProcess(packet, stream);
                    this.dtsPredict(packet, stream);
                }
                // Route the packet: the 'all' queue gets every packet, a
                // per-stream queue gets only its own stream's packets.
                const allQueue = this.packetQueues.get('all');
                const streamQueue = this.packetQueues.get(packet.streamIndex);
                const targetQueues = [];
                if (allQueue && allQueue.length < MAX_INPUT_QUEUE_SIZE) {
                    targetQueues.push({ queue: allQueue, event: 'packet-all' });
                }
                // Only add stream queue if it's different from 'all' queue
                if (streamQueue && streamQueue !== allQueue && streamQueue.length < MAX_INPUT_QUEUE_SIZE) {
                    targetQueues.push({ queue: streamQueue, event: `packet-${packet.streamIndex}` });
                }
                if (targetQueues.length === 0) {
                    // No consumer wants this packet; drop its payload and move on.
                    packet.unref();
                    continue;
                }
                // Clone once for the first queue; further queues clone the
                // clone (presumably sharing the data buffer via FFmpeg
                // reference counting — see Packet.clone()).
                const firstClone = packet.clone();
                if (!firstClone) {
                    throw new Error('Failed to clone packet in demux thread (out of memory)');
                }
                // Recover the resolver key ('all' or the numeric stream index)
                // from the event name chosen above.
                const firstKey = targetQueues[0].event.replace('packet-', '') === 'all' ? 'all' : packet.streamIndex;
                targetQueues[0].queue.push(firstClone);
                const firstResolver = this.queueResolvers.get(firstKey);
                if (firstResolver) {
                    // Wake the consumer awaiting this queue; one-shot, so delete.
                    firstResolver();
                    this.queueResolvers.delete(firstKey);
                }
                // Remaining queues each get their own clone and wake-up.
                for (let i = 1; i < targetQueues.length; i++) {
                    const additionalClone = firstClone.clone();
                    if (!additionalClone) {
                        throw new Error('Failed to clone packet for additional queue (out of memory)');
                    }
                    const queueKey = targetQueues[i].event.replace('packet-', '') === 'all' ? 'all' : packet.streamIndex;
                    targetQueues[i].queue.push(additionalClone);
                    const resolver = this.queueResolvers.get(queueKey);
                    if (resolver) {
                        resolver();
                        this.queueResolvers.delete(queueKey);
                    }
                }
                // Release the read packet's payload before the next readFrame.
                packet.unref();
            }
            this.demuxThreadActive = false;
        }
        catch (e_2) {
            // Captured for __disposeResources, which rethrows after disposal.
            env_2.error = e_2;
            env_2.hasError = true;
        }
        finally {
            __disposeResources(env_2);
        }
    })();
}
|
|
1516
|
+
/**
|
|
1517
|
+
* Stop the internal demux thread.
|
|
1518
|
+
*
|
|
1519
|
+
* @internal
|
|
1520
|
+
*/
|
|
1521
|
+
async stopDemuxThread() {
|
|
1522
|
+
if (!this.demuxThreadActive) {
|
|
1523
|
+
return;
|
|
1524
|
+
}
|
|
1525
|
+
this.demuxThreadActive = false;
|
|
1526
|
+
if (this.demuxThread) {
|
|
1527
|
+
await this.demuxThread;
|
|
1528
|
+
this.demuxThread = null;
|
|
1529
|
+
}
|
|
1530
|
+
// Clear all queues and resolvers
|
|
1531
|
+
for (const queue of this.packetQueues.values()) {
|
|
1532
|
+
for (const packet of queue) {
|
|
1533
|
+
packet.free();
|
|
1534
|
+
}
|
|
1535
|
+
queue.length = 0;
|
|
1536
|
+
}
|
|
1537
|
+
this.packetQueues.clear();
|
|
1538
|
+
this.queueResolvers.clear();
|
|
1539
|
+
this.demuxEof = false;
|
|
1540
|
+
}
|
|
1541
|
+
/**
|
|
1542
|
+
* Get or create stream state for timestamp processing.
|
|
1543
|
+
*
|
|
1544
|
+
* @param streamIndex - Stream index
|
|
1545
|
+
*
|
|
1546
|
+
* @returns Stream state
|
|
1547
|
+
*
|
|
1548
|
+
* @internal
|
|
1549
|
+
*/
|
|
1550
|
+
getStreamState(streamIndex) {
|
|
1551
|
+
let state = this.streamStates.get(streamIndex);
|
|
1552
|
+
if (!state) {
|
|
1553
|
+
state = {
|
|
1554
|
+
wrapCorrectionDone: false,
|
|
1555
|
+
sawFirstTs: false,
|
|
1556
|
+
firstDts: AV_NOPTS_VALUE,
|
|
1557
|
+
nextDts: AV_NOPTS_VALUE,
|
|
1558
|
+
dts: AV_NOPTS_VALUE,
|
|
1559
|
+
};
|
|
1560
|
+
this.streamStates.set(streamIndex, state);
|
|
1561
|
+
}
|
|
1562
|
+
return state;
|
|
1563
|
+
}
|
|
1564
|
+
/**
 * PTS Wrap-Around Correction.
 *
 * Based on FFmpeg's ts_fixup().
 *
 * Corrects timestamp wrap-around for streams with limited timestamp bits
 * (e.g. DVB/MPEG-TS 33-bit PCR-derived timestamps). Without correction,
 * post-wrap timestamps appear far smaller than the stream start time,
 * breaking downstream timing.
 *
 * A timestamp is considered wrapped when it exceeds
 * start_time + 2^(pts_wrap_bits-1), i.e. it sits in the upper half of the
 * wrap range; the full wrap period 2^pts_wrap_bits is then subtracted.
 * PTS and DTS are corrected independently. The done-flag is cleared again
 * whenever a correction was applied, so the stream can wrap more than once.
 *
 * @param packet - Packet to correct (timeBase must already be set to the
 *   owning stream's timebase — done by the demux loop before calling)
 *
 * @param stream - Stream metadata (supplies ptsWrapBits)
 *
 * @internal
 */
ptsWrapAroundCorrection(packet, stream) {
    const state = this.getStreamState(packet.streamIndex);
    // Already corrected for this stream, or wrap cannot occur (>= 64 bits).
    if (state.wrapCorrectionDone || stream.ptsWrapBits >= 64) {
        return;
    }
    // Without a known container start time there is no reference point.
    const startTime = this.formatContext.startTime;
    if (startTime === AV_NOPTS_VALUE) {
        return;
    }
    const ptsWrapBits = stream.ptsWrapBits;
    // Rescale start_time (AV_TIME_BASE units) into the packet's timebase.
    // Note: packet.timeBase was set to stream.timeBase by the caller.
    const stime = avRescaleQ(startTime, AV_TIME_BASE_Q, packet.timeBase);
    // stime2 = start + full wrap period; the `stime2 > stime` guards below
    // reject the (overflow) case where adding the period did not increase it.
    const stime2 = stime + (1n << BigInt(ptsWrapBits));
    // Optimistically mark done; cleared again below if a correction fired.
    state.wrapCorrectionDone = true;
    // Half-period threshold: timestamps beyond it are treated as pre-wrap.
    const wrapThreshold = stime + (1n << BigInt(ptsWrapBits - 1));
    // Check DTS for wrap-around
    if (stime2 > stime && packet.dts !== AV_NOPTS_VALUE && packet.dts > wrapThreshold) {
        packet.dts -= 1n << BigInt(ptsWrapBits);
        state.wrapCorrectionDone = false; // May wrap again
    }
    // Check PTS for wrap-around
    if (stime2 > stime && packet.pts !== AV_NOPTS_VALUE && packet.pts > wrapThreshold) {
        packet.pts -= 1n << BigInt(ptsWrapBits);
        state.wrapCorrectionDone = false; // May wrap again
    }
}
|
|
1612
|
+
/**
 * DTS Prediction and Update.
 *
 * Based on FFmpeg's ist_dts_update().
 *
 * Maintains a running DTS estimate (`state.dts`, in AV_TIME_BASE units) and
 * a prediction of the next packet's DTS (`state.nextDts`), used by the
 * discontinuity detector to judge whether an incoming DTS is plausible.
 *
 * Prediction is codec-specific:
 * - Audio: advance by frame_size / sample_rate when both are known,
 *   otherwise by the packet's own duration.
 * - Video: advance by the packet duration when present, otherwise derive a
 *   per-field duration from the codec framerate (doubled to field rate),
 *   scaled by the field count (1 + parser.repeatPict when a parser exists).
 *
 * @param packet - Packet to process (timeBase already set by the demux loop)
 *
 * @param stream - Stream metadata (codecpar, avgFrameRate, parser)
 *
 * @internal
 */
dtsPredict(packet, stream) {
    const state = this.getStreamState(packet.streamIndex);
    const par = stream.codecpar;
    // First timestamp seen for this stream: seed firstDts/dts.
    if (!state.sawFirstTs) {
        // Video with a known average framerate: back-date the start by
        // videoDelay frames (B-frame reorder delay), as FFmpeg does.
        const avgFrameRate = stream.avgFrameRate;
        if (avgFrameRate && avgFrameRate.num > 0) {
            const frameRateD = Number(avgFrameRate.num) / Number(avgFrameRate.den);
            state.firstDts = state.dts = BigInt(Math.floor((-par.videoDelay * Number(AV_TIME_BASE)) / frameRateD));
        }
        else {
            state.firstDts = state.dts = 0n;
        }
        // Anchor to the first observed PTS, rescaled to AV_TIME_BASE.
        if (packet.pts !== AV_NOPTS_VALUE) {
            const ptsDts = avRescaleQ(packet.pts, packet.timeBase, AV_TIME_BASE_Q);
            state.firstDts += ptsDts;
            state.dts += ptsDts;
        }
        state.sawFirstTs = true;
    }
    // Initialize the prediction from the current estimate if unset.
    if (state.nextDts === AV_NOPTS_VALUE) {
        state.nextDts = state.dts;
    }
    // A real packet DTS overrides both estimate and prediction base.
    if (packet.dts !== AV_NOPTS_VALUE) {
        state.nextDts = state.dts = avRescaleQ(packet.dts, packet.timeBase, AV_TIME_BASE_Q);
    }
    state.dts = state.nextDts;
    // Advance the prediction to where the *next* packet should start.
    switch (par.codecType) {
        case AVMEDIA_TYPE_AUDIO:
            // Audio: fixed frame duration from sample_rate/frame_size if known.
            if (par.sampleRate > 0 && par.frameSize > 0) {
                state.nextDts += (BigInt(AV_TIME_BASE) * BigInt(par.frameSize)) / BigInt(par.sampleRate);
            }
            else {
                state.nextDts += avRescaleQ(packet.duration, packet.timeBase, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO: {
            // Note: FFmpeg also honors ist->framerate (forced with -r); that
            // option is not supported here.
            if (packet.duration > 0n) {
                // Use packet duration when the container provides one.
                state.nextDts += avRescaleQ(packet.duration, packet.timeBase, AV_TIME_BASE_Q);
            }
            else if (par.frameRate && par.frameRate.num > 0) {
                // Derive duration from codec framerate, in field units to
                // handle interlaced/repeated fields (telecine).
                const fieldRate = avMulQ(par.frameRate, { num: 2, den: 1 });
                let fields = 2; // Default: 2 fields (progressive or standard interlaced)
                // A parser, when present, reports extra repeated fields.
                const parser = stream.parser;
                if (parser) {
                    // Get repeat_pict from parser for accurate field count
                    fields = 1 + parser.repeatPict;
                }
                const invFieldRate = avInvQ(fieldRate);
                state.nextDts += avRescaleQ(BigInt(fields), invFieldRate, AV_TIME_BASE_Q);
            }
            break;
        }
    }
}
|
|
1698
|
+
/**
 * Timestamp Discontinuity Detection.
 *
 * Based on FFmpeg's ts_discontinuity_detect().
 *
 * Compares the packet's DTS (rescaled to AV_TIME_BASE) against the predicted
 * next DTS for its stream and reacts when they diverge beyond a threshold:
 * - Discontinuous formats (AVFMT_TS_DISCONT, e.g. MPEG-TS): accumulate an
 *   offset in `this.tsOffsetDiscont` and shift this packet's PTS/DTS so the
 *   timeline stays continuous.
 * - Continuous formats (e.g. MP4): an out-of-range timestamp is treated as
 *   corrupt and replaced with AV_NOPTS_VALUE instead of being corrected.
 *
 * Also handles:
 * - copyTs mode: correction is normally disabled, except when the delta is
 *   explained by a pts_wrap_bits wrap (wrapped DTS lands much closer to the
 *   prediction than the raw DTS).
 * - Inter-stream discontinuities: when this stream has no prediction yet,
 *   compare against `this.lastTs`, the last DTS seen on any stream.
 *
 * Caller guarantees packet.dts !== AV_NOPTS_VALUE (see
 * timestampDiscontinuityProcess).
 *
 * @param packet - Packet to check for discontinuities
 *
 * @param stream - Stream metadata
 *
 * @internal
 */
timestampDiscontinuityDetect(packet, stream) {
    const state = this.getStreamState(packet.streamIndex);
    const inputFormat = this.formatContext.iformat;
    // Check if format declares timestamp discontinuities
    const fmtIsDiscont = !!(inputFormat && inputFormat.flags & AVFMT_TS_DISCONT);
    // copyTs requests untouched timestamps, so correction starts disabled.
    let disableDiscontinuityCorrection = this.options.copyTs;
    // Rescale packet DTS to AV_TIME_BASE for comparison; PASS_MINMAX keeps
    // AV_NOPTS_VALUE/INT64 extremes intact through the rescale.
    const pktDts = avRescaleQRnd(packet.dts, packet.timeBase, AV_TIME_BASE_Q, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    // PTS wrap-around detection:
    // even under copyTs, a wrap in a limited-bits stream should be corrected.
    if (this.options.copyTs && state.nextDts !== AV_NOPTS_VALUE && fmtIsDiscont && stream.ptsWrapBits < 60) {
        // Candidate DTS assuming one full wrap period has elapsed.
        const wrapDts = avRescaleQRnd(packet.dts + (1n << BigInt(stream.ptsWrapBits)), packet.timeBase, AV_TIME_BASE_Q, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        // Re-enable correction if the wrapped DTS fits the prediction at
        // least 10x better than the raw DTS does.
        const wrapDelta = wrapDts > state.nextDts ? wrapDts - state.nextDts : state.nextDts - wrapDts;
        const normalDelta = pktDts > state.nextDts ? pktDts - state.nextDts : state.nextDts - pktDts;
        if (wrapDelta < normalDelta / 10n) {
            disableDiscontinuityCorrection = false;
        }
    }
    // Intra-stream discontinuity detection
    if (state.nextDts !== AV_NOPTS_VALUE && !disableDiscontinuityCorrection) {
        const delta = pktDts - state.nextDts;
        if (fmtIsDiscont) {
            // Discontinuous format (e.g., MPEG-TS) - apply offset correction.
            // Also fires when DTS steps backwards past the current estimate
            // by more than AV_TIME_BASE/10.
            const threshold = BigInt(this.options.dtsDeltaThreshold) * BigInt(AV_TIME_BASE);
            if (delta > threshold || delta < -threshold || pktDts + BigInt(AV_TIME_BASE) / 10n < state.dts) {
                // Accumulate for future packets of every stream...
                this.tsOffsetDiscont -= delta;
                // ...and correct this packet in its own timebase now.
                const deltaInPktTb = avRescaleQ(delta, AV_TIME_BASE_Q, packet.timeBase);
                packet.dts -= deltaInPktTb;
                if (packet.pts !== AV_NOPTS_VALUE) {
                    packet.pts -= deltaInPktTb;
                }
            }
        }
        else {
            // Continuous format (e.g., MP4) - mark invalid timestamps rather
            // than shifting the timeline.
            const threshold = BigInt(this.options.dtsErrorThreshold) * BigInt(AV_TIME_BASE);
            // Check DTS
            if (delta > threshold || delta < -threshold) {
                packet.dts = AV_NOPTS_VALUE;
            }
            // Check PTS
            if (packet.pts !== AV_NOPTS_VALUE) {
                const pktPts = avRescaleQ(packet.pts, packet.timeBase, AV_TIME_BASE_Q);
                const ptsDelta = pktPts - state.nextDts;
                if (ptsDelta > threshold || ptsDelta < -threshold) {
                    packet.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }
    else if (state.nextDts === AV_NOPTS_VALUE && !this.options.copyTs && fmtIsDiscont && this.lastTs !== AV_NOPTS_VALUE) {
        // Inter-stream discontinuity detection: no prediction for this stream
        // yet, so compare against the last DTS seen on any stream.
        const delta = pktDts - this.lastTs;
        const threshold = BigInt(this.options.dtsDeltaThreshold) * BigInt(AV_TIME_BASE);
        if (delta > threshold || delta < -threshold) {
            this.tsOffsetDiscont -= delta;
            // Apply correction to packet
            const deltaInPktTb = avRescaleQ(delta, AV_TIME_BASE_Q, packet.timeBase);
            packet.dts -= deltaInPktTb;
            if (packet.pts !== AV_NOPTS_VALUE) {
                packet.pts -= deltaInPktTb;
            }
        }
    }
    // Remember the (possibly corrected) DTS for inter-stream comparison.
    this.lastTs = avRescaleQ(packet.dts, packet.timeBase, AV_TIME_BASE_Q);
}
|
|
1793
|
+
/**
|
|
1794
|
+
* Timestamp Discontinuity Processing - main entry point.
|
|
1795
|
+
*
|
|
1796
|
+
* Based on FFmpeg's ts_discontinuity_process().
|
|
1797
|
+
*
|
|
1798
|
+
* Applies accumulated discontinuity offset and detects new discontinuities.
|
|
1799
|
+
* Must be called for every packet before other timestamp processing.
|
|
1800
|
+
*
|
|
1801
|
+
* Handles:
|
|
1802
|
+
* - Applying previously-detected offset to all streams
|
|
1803
|
+
* - Detecting new discontinuities for audio/video streams
|
|
1804
|
+
*
|
|
1805
|
+
* @param packet - Packet to process
|
|
1806
|
+
*
|
|
1807
|
+
* @param stream - Stream metadata
|
|
1808
|
+
*
|
|
1809
|
+
* @internal
|
|
1810
|
+
*/
|
|
1811
|
+
timestampDiscontinuityProcess(packet, stream) {
|
|
1812
|
+
// Apply previously-detected discontinuity offset
|
|
1813
|
+
// This applies to ALL streams, not just audio/video
|
|
1814
|
+
const offset = avRescaleQ(this.tsOffsetDiscont, AV_TIME_BASE_Q, packet.timeBase);
|
|
1815
|
+
if (packet.dts !== AV_NOPTS_VALUE) {
|
|
1816
|
+
packet.dts += offset;
|
|
1817
|
+
}
|
|
1818
|
+
if (packet.pts !== AV_NOPTS_VALUE) {
|
|
1819
|
+
packet.pts += offset;
|
|
1820
|
+
}
|
|
1821
|
+
// Detect new timestamp discontinuities for audio/video
|
|
1822
|
+
const par = stream.codecpar;
|
|
1823
|
+
if ((par.codecType === AVMEDIA_TYPE_VIDEO || par.codecType === AVMEDIA_TYPE_AUDIO) && packet.dts !== AV_NOPTS_VALUE) {
|
|
1824
|
+
this.timestampDiscontinuityDetect(packet, stream);
|
|
1825
|
+
}
|
|
1826
|
+
}
|
|
1827
|
+
/**
 * Close demuxer and free resources.
 *
 * Releases format context and I/O context.
 * Safe to call multiple times (guarded by isClosed).
 * Automatically called by Symbol.asyncDispose.
 *
 * Teardown order is deliberate and must not be changed:
 * 1. detach pb, 2. close format context (interrupts blocking reads),
 * 3. stop demux thread, 4. free the I/O context.
 *
 * Direct mapping to avformat_close_input().
 *
 * @example
 * ```typescript
 * const input = await Demuxer.open('video.mp4');
 * try {
 *   // Use input
 * } finally {
 *   await input.close();
 * }
 * ```
 *
 * @see {@link Symbol.asyncDispose} For automatic cleanup
 */
async close() {
    if (this.isClosed) {
        return;
    }
    this.isClosed = true;
    // Clear pb reference FIRST to prevent use-after-free: the format
    // context must not touch the custom I/O context during/after close.
    if (this.ioContext) {
        this.formatContext.pb = null;
    }
    // IMPORTANT: Close FormatContext BEFORE stopping demux thread.
    // This interrupts any blocking read() calls in the demux loop, which
    // would otherwise keep stopDemuxThread() awaiting forever.
    await this.formatContext.closeInput();
    // Safely stop the demux thread (awaits the loop and drains queues).
    await this.stopDemuxThread();
    // NOW we can safely free the IOContext: nothing references it anymore.
    if (this.ioContext) {
        this.ioContext.freeContext();
        this.ioContext = undefined;
    }
}
|
|
1868
|
+
/**
|
|
1869
|
+
* Close demuxer and free resources synchronously.
|
|
1870
|
+
* Synchronous version of close.
|
|
1871
|
+
*
|
|
1872
|
+
* Releases format context and I/O context.
|
|
1873
|
+
* Safe to call multiple times.
|
|
1874
|
+
* Automatically called by Symbol.dispose.
|
|
1875
|
+
*
|
|
1876
|
+
* Direct mapping to avformat_close_input().
|
|
1877
|
+
*
|
|
1878
|
+
* @example
|
|
1879
|
+
* ```typescript
|
|
1880
|
+
* const input = Demuxer.openSync('video.mp4');
|
|
1881
|
+
* try {
|
|
1882
|
+
* // Use input
|
|
1883
|
+
* } finally {
|
|
1884
|
+
* input.closeSync();
|
|
1885
|
+
* }
|
|
1886
|
+
* ```
|
|
1887
|
+
*
|
|
1888
|
+
* @see {@link close} For async version
|
|
1889
|
+
*/
|
|
1890
|
+
closeSync() {
|
|
1891
|
+
if (this.isClosed) {
|
|
1892
|
+
return;
|
|
1893
|
+
}
|
|
1894
|
+
this.isClosed = true;
|
|
1895
|
+
// IMPORTANT: Clear pb reference FIRST to prevent use-after-free
|
|
1896
|
+
if (this.ioContext) {
|
|
1897
|
+
this.formatContext.pb = null;
|
|
1898
|
+
}
|
|
1899
|
+
// Close FormatContext
|
|
1900
|
+
this.formatContext.closeInputSync();
|
|
1901
|
+
this.demuxThreadActive = false;
|
|
1902
|
+
for (const queue of this.packetQueues.values()) {
|
|
1903
|
+
for (const packet of queue) {
|
|
1904
|
+
packet.free();
|
|
1905
|
+
}
|
|
1906
|
+
queue.length = 0;
|
|
1907
|
+
}
|
|
1908
|
+
this.packetQueues.clear();
|
|
1909
|
+
this.queueResolvers.clear();
|
|
1910
|
+
this.demuxEof = false;
|
|
1911
|
+
// NOW we can safely free the IOContext
|
|
1912
|
+
if (this.ioContext) {
|
|
1913
|
+
this.ioContext.freeContext();
|
|
1914
|
+
this.ioContext = undefined;
|
|
1915
|
+
}
|
|
1916
|
+
}
|
|
1917
|
+
/**
 * Get underlying format context.
 *
 * Returns the internal format context for advanced operations that the
 * Demuxer wrapper does not expose directly. The caller must not close or
 * free it — the Demuxer owns its lifecycle (see close/closeSync).
 *
 * @returns Format context
 *
 * @internal
 */
getFormatContext() {
    return this.formatContext;
}
|
|
1929
|
+
/**
 * Dispose of demuxer.
 *
 * Implements AsyncDisposable interface for automatic cleanup with
 * `await using`. Equivalent to calling close(); safe if already closed.
 *
 * @example
 * ```typescript
 * {
 *   await using input = await Demuxer.open('video.mp4');
 *   // Process media...
 * } // Automatically closed
 * ```
 *
 * @see {@link close} For manual cleanup
 */
async [Symbol.asyncDispose]() {
    await this.close();
}
|
|
1948
|
+
/**
 * Dispose of demuxer synchronously.
 *
 * Implements Disposable interface for automatic cleanup with `using`.
 * Equivalent to calling closeSync(); safe if already closed.
 *
 * @example
 * ```typescript
 * {
 *   using input = Demuxer.openSync('video.mp4');
 *   // Process media...
 * } // Automatically closed
 * ```
 *
 * @see {@link closeSync} For manual cleanup
 */
[Symbol.dispose]() {
    this.closeSync();
}
|
|
1967
|
+
}
|
|
1968
|
+
//# sourceMappingURL=demuxer.js.map
|