node-av 3.1.3 → 5.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (192)
  1. package/README.md +88 -52
  2. package/binding.gyp +23 -11
  3. package/dist/api/audio-frame-buffer.d.ts +201 -0
  4. package/dist/api/audio-frame-buffer.js +275 -0
  5. package/dist/api/audio-frame-buffer.js.map +1 -0
  6. package/dist/api/bitstream-filter.d.ts +320 -78
  7. package/dist/api/bitstream-filter.js +684 -151
  8. package/dist/api/bitstream-filter.js.map +1 -1
  9. package/dist/api/constants.d.ts +44 -0
  10. package/dist/api/constants.js +45 -0
  11. package/dist/api/constants.js.map +1 -0
  12. package/dist/api/data/test_av1.ivf +0 -0
  13. package/dist/api/data/test_mjpeg.mjpeg +0 -0
  14. package/dist/api/data/test_vp8.ivf +0 -0
  15. package/dist/api/data/test_vp9.ivf +0 -0
  16. package/dist/api/decoder.d.ts +454 -77
  17. package/dist/api/decoder.js +1081 -271
  18. package/dist/api/decoder.js.map +1 -1
  19. package/dist/api/{media-input.d.ts → demuxer.d.ts} +295 -45
  20. package/dist/api/demuxer.js +1965 -0
  21. package/dist/api/demuxer.js.map +1 -0
  22. package/dist/api/encoder.d.ts +423 -132
  23. package/dist/api/encoder.js +1089 -240
  24. package/dist/api/encoder.js.map +1 -1
  25. package/dist/api/filter-complex.d.ts +769 -0
  26. package/dist/api/filter-complex.js +1596 -0
  27. package/dist/api/filter-complex.js.map +1 -0
  28. package/dist/api/filter-presets.d.ts +80 -5
  29. package/dist/api/filter-presets.js +117 -7
  30. package/dist/api/filter-presets.js.map +1 -1
  31. package/dist/api/filter.d.ts +561 -125
  32. package/dist/api/filter.js +1083 -274
  33. package/dist/api/filter.js.map +1 -1
  34. package/dist/api/{fmp4.d.ts → fmp4-stream.d.ts} +141 -140
  35. package/dist/api/fmp4-stream.js +539 -0
  36. package/dist/api/fmp4-stream.js.map +1 -0
  37. package/dist/api/hardware.d.ts +58 -6
  38. package/dist/api/hardware.js +127 -11
  39. package/dist/api/hardware.js.map +1 -1
  40. package/dist/api/index.d.ts +8 -4
  41. package/dist/api/index.js +17 -8
  42. package/dist/api/index.js.map +1 -1
  43. package/dist/api/io-stream.d.ts +6 -6
  44. package/dist/api/io-stream.js +5 -4
  45. package/dist/api/io-stream.js.map +1 -1
  46. package/dist/api/{media-output.d.ts → muxer.d.ts} +280 -66
  47. package/dist/api/muxer.js +1934 -0
  48. package/dist/api/muxer.js.map +1 -0
  49. package/dist/api/pipeline.d.ts +77 -29
  50. package/dist/api/pipeline.js +449 -439
  51. package/dist/api/pipeline.js.map +1 -1
  52. package/dist/api/rtp-stream.d.ts +312 -0
  53. package/dist/api/rtp-stream.js +630 -0
  54. package/dist/api/rtp-stream.js.map +1 -0
  55. package/dist/api/types.d.ts +533 -56
  56. package/dist/api/utilities/async-queue.d.ts +91 -0
  57. package/dist/api/utilities/async-queue.js +162 -0
  58. package/dist/api/utilities/async-queue.js.map +1 -0
  59. package/dist/api/utilities/audio-sample.d.ts +11 -1
  60. package/dist/api/utilities/audio-sample.js +10 -0
  61. package/dist/api/utilities/audio-sample.js.map +1 -1
  62. package/dist/api/utilities/channel-layout.d.ts +1 -0
  63. package/dist/api/utilities/channel-layout.js +1 -0
  64. package/dist/api/utilities/channel-layout.js.map +1 -1
  65. package/dist/api/utilities/image.d.ts +39 -1
  66. package/dist/api/utilities/image.js +38 -0
  67. package/dist/api/utilities/image.js.map +1 -1
  68. package/dist/api/utilities/index.d.ts +3 -0
  69. package/dist/api/utilities/index.js +6 -0
  70. package/dist/api/utilities/index.js.map +1 -1
  71. package/dist/api/utilities/media-type.d.ts +2 -1
  72. package/dist/api/utilities/media-type.js +1 -0
  73. package/dist/api/utilities/media-type.js.map +1 -1
  74. package/dist/api/utilities/pixel-format.d.ts +4 -1
  75. package/dist/api/utilities/pixel-format.js +3 -0
  76. package/dist/api/utilities/pixel-format.js.map +1 -1
  77. package/dist/api/utilities/sample-format.d.ts +6 -1
  78. package/dist/api/utilities/sample-format.js +5 -0
  79. package/dist/api/utilities/sample-format.js.map +1 -1
  80. package/dist/api/utilities/scheduler.d.ts +138 -0
  81. package/dist/api/utilities/scheduler.js +98 -0
  82. package/dist/api/utilities/scheduler.js.map +1 -0
  83. package/dist/api/utilities/streaming.d.ts +105 -15
  84. package/dist/api/utilities/streaming.js +201 -12
  85. package/dist/api/utilities/streaming.js.map +1 -1
  86. package/dist/api/utilities/timestamp.d.ts +15 -1
  87. package/dist/api/utilities/timestamp.js +14 -0
  88. package/dist/api/utilities/timestamp.js.map +1 -1
  89. package/dist/api/utilities/whisper-model.d.ts +310 -0
  90. package/dist/api/utilities/whisper-model.js +528 -0
  91. package/dist/api/utilities/whisper-model.js.map +1 -0
  92. package/dist/api/webrtc-stream.d.ts +288 -0
  93. package/dist/api/webrtc-stream.js +440 -0
  94. package/dist/api/webrtc-stream.js.map +1 -0
  95. package/dist/api/whisper.d.ts +324 -0
  96. package/dist/api/whisper.js +362 -0
  97. package/dist/api/whisper.js.map +1 -0
  98. package/dist/constants/constants.d.ts +54 -2
  99. package/dist/constants/constants.js +48 -1
  100. package/dist/constants/constants.js.map +1 -1
  101. package/dist/constants/encoders.d.ts +2 -1
  102. package/dist/constants/encoders.js +4 -3
  103. package/dist/constants/encoders.js.map +1 -1
  104. package/dist/constants/hardware.d.ts +26 -0
  105. package/dist/constants/hardware.js +27 -0
  106. package/dist/constants/hardware.js.map +1 -0
  107. package/dist/constants/index.d.ts +1 -0
  108. package/dist/constants/index.js +1 -0
  109. package/dist/constants/index.js.map +1 -1
  110. package/dist/ffmpeg/index.d.ts +3 -3
  111. package/dist/ffmpeg/index.js +3 -3
  112. package/dist/ffmpeg/utils.d.ts +27 -0
  113. package/dist/ffmpeg/utils.js +28 -16
  114. package/dist/ffmpeg/utils.js.map +1 -1
  115. package/dist/lib/binding.d.ts +22 -11
  116. package/dist/lib/binding.js.map +1 -1
  117. package/dist/lib/codec-context.d.ts +87 -0
  118. package/dist/lib/codec-context.js +125 -4
  119. package/dist/lib/codec-context.js.map +1 -1
  120. package/dist/lib/codec-parameters.d.ts +229 -1
  121. package/dist/lib/codec-parameters.js +264 -0
  122. package/dist/lib/codec-parameters.js.map +1 -1
  123. package/dist/lib/codec-parser.d.ts +23 -0
  124. package/dist/lib/codec-parser.js +25 -0
  125. package/dist/lib/codec-parser.js.map +1 -1
  126. package/dist/lib/codec.d.ts +26 -4
  127. package/dist/lib/codec.js +35 -0
  128. package/dist/lib/codec.js.map +1 -1
  129. package/dist/lib/dictionary.js +1 -0
  130. package/dist/lib/dictionary.js.map +1 -1
  131. package/dist/lib/error.js +1 -1
  132. package/dist/lib/error.js.map +1 -1
  133. package/dist/lib/fifo.d.ts +416 -0
  134. package/dist/lib/fifo.js +453 -0
  135. package/dist/lib/fifo.js.map +1 -0
  136. package/dist/lib/filter-context.d.ts +52 -11
  137. package/dist/lib/filter-context.js +56 -12
  138. package/dist/lib/filter-context.js.map +1 -1
  139. package/dist/lib/filter-graph.d.ts +9 -0
  140. package/dist/lib/filter-graph.js +13 -0
  141. package/dist/lib/filter-graph.js.map +1 -1
  142. package/dist/lib/filter.d.ts +21 -0
  143. package/dist/lib/filter.js +28 -0
  144. package/dist/lib/filter.js.map +1 -1
  145. package/dist/lib/format-context.d.ts +48 -14
  146. package/dist/lib/format-context.js +76 -7
  147. package/dist/lib/format-context.js.map +1 -1
  148. package/dist/lib/frame.d.ts +264 -1
  149. package/dist/lib/frame.js +351 -1
  150. package/dist/lib/frame.js.map +1 -1
  151. package/dist/lib/hardware-device-context.d.ts +3 -2
  152. package/dist/lib/hardware-device-context.js.map +1 -1
  153. package/dist/lib/index.d.ts +2 -0
  154. package/dist/lib/index.js +4 -0
  155. package/dist/lib/index.js.map +1 -1
  156. package/dist/lib/input-format.d.ts +21 -0
  157. package/dist/lib/input-format.js +42 -2
  158. package/dist/lib/input-format.js.map +1 -1
  159. package/dist/lib/native-types.d.ts +76 -27
  160. package/dist/lib/option.d.ts +25 -13
  161. package/dist/lib/option.js +28 -0
  162. package/dist/lib/option.js.map +1 -1
  163. package/dist/lib/output-format.d.ts +22 -1
  164. package/dist/lib/output-format.js +28 -0
  165. package/dist/lib/output-format.js.map +1 -1
  166. package/dist/lib/packet.d.ts +35 -0
  167. package/dist/lib/packet.js +52 -2
  168. package/dist/lib/packet.js.map +1 -1
  169. package/dist/lib/rational.d.ts +18 -0
  170. package/dist/lib/rational.js +19 -0
  171. package/dist/lib/rational.js.map +1 -1
  172. package/dist/lib/stream.d.ts +126 -0
  173. package/dist/lib/stream.js +188 -5
  174. package/dist/lib/stream.js.map +1 -1
  175. package/dist/lib/sync-queue.d.ts +179 -0
  176. package/dist/lib/sync-queue.js +197 -0
  177. package/dist/lib/sync-queue.js.map +1 -0
  178. package/dist/lib/types.d.ts +49 -1
  179. package/dist/lib/utilities.d.ts +281 -53
  180. package/dist/lib/utilities.js +298 -55
  181. package/dist/lib/utilities.js.map +1 -1
  182. package/install/check.js +2 -2
  183. package/package.json +37 -26
  184. package/dist/api/fmp4.js +0 -710
  185. package/dist/api/fmp4.js.map +0 -1
  186. package/dist/api/media-input.js +0 -1075
  187. package/dist/api/media-input.js.map +0 -1
  188. package/dist/api/media-output.js +0 -1040
  189. package/dist/api/media-output.js.map +0 -1
  190. package/dist/api/webrtc.d.ts +0 -664
  191. package/dist/api/webrtc.js +0 -1132
  192. package/dist/api/webrtc.js.map +0 -1
@@ -0,0 +1,1965 @@
1
/**
 * TypeScript `using` / `await using` emit helper.
 * Records `value` on the disposal environment's stack (together with the
 * disposer to invoke later) and returns `value` unchanged.
 *
 * - `null`/`undefined` resources are skipped, except that `await using`
 *   still pushes an `{ async: true }` marker so disposal ordering keeps
 *   its await point.
 * - For async registration, `Symbol.asyncDispose` is preferred; a plain
 *   `Symbol.dispose` disposer is wrapped so a synchronous throw surfaces
 *   as a rejected promise instead of escaping synchronously.
 *
 * @param {{stack: Array}} env - Disposal environment shared with __disposeResources
 * @param {*} value - The resource being bound
 * @param {boolean} async - True for `await using`
 * @returns {*} The original `value`
 * @throws {TypeError} If value is not an object/function or is not disposable
 */
var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
    if (value === null || value === void 0) {
        // Nothing to dispose; async bindings still leave an ordering marker.
        if (async) {
            env.stack.push({ async: true });
        }
        return value;
    }
    if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
    var dispose;
    var inner;
    if (async) {
        if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
        dispose = value[Symbol.asyncDispose];
    }
    if (dispose === void 0) {
        // Fall back to the synchronous disposer.
        if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
        dispose = value[Symbol.dispose];
        if (async) inner = dispose;
    }
    if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
    if (inner) {
        // Sync disposer used in an async context: convert throws to rejections.
        dispose = function () {
            try {
                inner.call(this);
            } catch (e) {
                return Promise.reject(e);
            }
        };
    }
    env.stack.push({ value: value, dispose: dispose, async: async });
    return value;
};
23
/**
 * TypeScript `using` / `await using` emit helper.
 * Pops and disposes every resource recorded on `env.stack` in reverse
 * (LIFO) order. Returns a Promise once any async disposal is involved,
 * otherwise completes (or throws) synchronously.
 *
 * Errors thrown by later disposers do not hide earlier ones: each new
 * failure is chained onto the previous via SuppressedError.
 */
var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
    return function (env) {
        // Record a disposal failure, chaining it onto any earlier error.
        function fail(e) {
            env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
            env.hasError = true;
        }
        // s is a small state bitmask. From the transitions below: bit 0 is set when a
        // disposer-less entry (the `{ async: true }` null-resource marker) is seen,
        // bit 1 when an async disposal has occurred. NOTE(review): interpretation
        // inferred from the tsc emit — confirm against the TypeScript helpers source.
        var r, s = 0;
        function next() {
            while (r = env.stack.pop()) {
                try {
                    // A pending async marker (s === 1) before a sync entry: re-push the
                    // entry and defer to a microtask so the await point is honored.
                    if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
                    if (r.dispose) {
                        var result = r.dispose.call(r.value);
                        // Async disposal: continue the loop after it settles; failures
                        // are folded in via fail() and disposal keeps going.
                        if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
                    }
                    // Marker entry with no disposer: note it in the state bits.
                    else s |= 1;
                }
                catch (e) {
                    fail(e);
                }
            }
            // Stack drained. If only markers were seen (s === 1), settle as a promise;
            // otherwise propagate any recorded error synchronously.
            if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
            if (env.hasError) throw env.error;
        }
        return next();
    };
// Prefer the runtime's native SuppressedError; otherwise use a minimal shim
// that mimics its { error, suppressed, message } shape.
})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
    var e = new Error(message);
    return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
});
53
+ import { createSocket } from 'dgram';
54
+ import { closeSync, openSync, readSync } from 'fs';
55
+ import { open } from 'fs/promises';
56
+ import { resolve } from 'path';
57
+ import { RtpPacket } from 'werift';
58
+ import { AV_CODEC_PROP_FIELDS, AV_NOPTS_VALUE, AV_PIX_FMT_NONE, AV_ROUND_NEAR_INF, AV_ROUND_PASS_MINMAX, AV_TIME_BASE, AV_TIME_BASE_Q, AVFLAG_NONE, AVFMT_FLAG_CUSTOM_IO, AVFMT_FLAG_NONBLOCK, AVFMT_TS_DISCONT, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO, AVSEEK_CUR, AVSEEK_END, AVSEEK_SET, } from '../constants/constants.js';
59
+ import { Dictionary } from '../lib/dictionary.js';
60
+ import { FFmpegError } from '../lib/error.js';
61
+ import { FormatContext } from '../lib/format-context.js';
62
+ import { InputFormat } from '../lib/input-format.js';
63
+ import { IOContext } from '../lib/io-context.js';
64
+ import { Packet } from '../lib/packet.js';
65
+ import { Rational } from '../lib/rational.js';
66
+ import { avGetPixFmtName, avGetSampleFmtName, avInvQ, avMulQ, avRescaleQ, avRescaleQRnd } from '../lib/utilities.js';
67
+ import { DELTA_THRESHOLD, DTS_ERROR_THRESHOLD, IO_BUFFER_SIZE, MAX_INPUT_QUEUE_SIZE } from './constants.js';
68
+ import { IOStream } from './io-stream.js';
69
+ import { StreamingUtils } from './utilities/streaming.js';
70
+ /**
71
+ * High-level demuxer for reading and demuxing media files.
72
+ *
73
+ * Provides simplified access to media streams, packets, and metadata.
74
+ * Handles file opening, format detection, and stream information extraction.
75
+ * Supports files, URLs, buffers, and raw data input with automatic cleanup.
76
+ * Essential component for media processing pipelines and transcoding.
77
+ *
78
+ * @example
79
+ * ```typescript
80
+ * import { Demuxer } from 'node-av/api';
81
+ *
82
+ * // Open media file
83
+ * await using input = await Demuxer.open('video.mp4');
84
+ * console.log(`Format: ${input.formatName}`);
85
+ * console.log(`Duration: ${input.duration}s`);
86
+ *
87
+ * // Process packets
88
+ * for await (const packet of input.packets()) {
89
+ * console.log(`Packet from stream ${packet.streamIndex}`);
90
+ * packet.free();
91
+ * }
92
+ * ```
93
+ *
94
+ * @example
95
+ * ```typescript
96
+ * // From buffer
97
+ * const buffer = await fs.readFile('video.mp4');
98
+ * await using input = await Demuxer.open(buffer);
99
+ *
100
+ * // Access streams
101
+ * const videoStream = input.video();
102
+ * const audioStream = input.audio();
103
+ * ```
104
+ *
105
+ * @see {@link Muxer} For writing media files
106
+ * @see {@link Decoder} For decoding packets to frames
107
+ * @see {@link FormatContext} For low-level API
108
+ */
109
export class Demuxer {
    // Underlying FormatContext; all demuxing goes through it.
    formatContext;
    // Stream list snapshot taken from the format context at construction.
    _streams = [];
    // Custom IOContext when input came from a Buffer or I/O callbacks
    // (undefined for plain file/URL inputs).
    ioContext;
    // Guards against double-close / use-after-close. NOTE(review): the
    // close() logic is outside this chunk — confirm usage there.
    isClosed = false;
    // Fully-defaulted options object built by open()/openSync().
    options;
    // Timestamp processing state (per-stream)
    streamStates = new Map();
    // Timestamp discontinuity tracking (global)
    tsOffsetDiscont = 0n;
    // Last observed timestamp; AV_NOPTS_VALUE until the first packet is seen.
    lastTs = AV_NOPTS_VALUE;
    // Demux manager for handling multiple parallel packet generators
    activeGenerators = 0;
    demuxThread = null;
    packetQueues = new Map(); // streamIndex or 'all' -> queue
    queueResolvers = new Map(); // Promise resolvers for waiting consumers
    demuxThreadActive = false;
    // True once the shared demux loop has reached end-of-input.
    demuxEof = false;
127
    /**
     * Wires an already-opened format context into a Demuxer instance.
     * Instances are created via the static open()/openSync() factories.
     *
     * @param formatContext - Opened format context
     *
     * @param options - Media input options (already defaulted by the factory)
     *
     * @param ioContext - Optional IO context for custom I/O (e.g., from Buffer)
     *
     * @internal
     */
    constructor(formatContext, options, ioContext) {
        this.formatContext = formatContext;
        this.ioContext = ioContext;
        // Snapshot streams; a context without stream info yields [].
        this._streams = formatContext.streams ?? [];
        this.options = options;
    }
142
+ /**
143
+ * Probe media format without fully opening the file.
144
+ *
145
+ * Detects format by analyzing file headers and content.
146
+ * Useful for format validation before processing.
147
+ *
148
+ * Direct mapping to av_probe_input_format().
149
+ *
150
+ * @param input - File path or buffer to probe
151
+ *
152
+ * @returns Format information or null if unrecognized
153
+ *
154
+ * @example
155
+ * ```typescript
156
+ * const info = await Demuxer.probeFormat('video.mp4');
157
+ * if (info) {
158
+ * console.log(`Format: ${info.format}`);
159
+ * console.log(`Confidence: ${info.confidence}%`);
160
+ * }
161
+ * ```
162
+ *
163
+ * @example
164
+ * ```typescript
165
+ * // Probe from buffer
166
+ * const buffer = await fs.readFile('video.webm');
167
+ * const info = await Demuxer.probeFormat(buffer);
168
+ * console.log(`MIME type: ${info?.mimeType}`);
169
+ * ```
170
+ *
171
+ * @see {@link InputFormat.probe} For low-level probing
172
+ */
173
+ static async probeFormat(input) {
174
+ try {
175
+ if (Buffer.isBuffer(input)) {
176
+ // Probe from buffer
177
+ const format = InputFormat.probe(input);
178
+ if (!format) {
179
+ return null;
180
+ }
181
+ return {
182
+ format: format.name ?? 'unknown',
183
+ longName: format.longName ?? undefined,
184
+ extensions: format.extensions ?? undefined,
185
+ mimeType: format.mimeType ?? undefined,
186
+ confidence: 100, // Direct probe always has high confidence
187
+ };
188
+ }
189
+ else {
190
+ // For files, read first part and probe
191
+ let fileHandle;
192
+ try {
193
+ fileHandle = await open(input, 'r');
194
+ // Read first 64KB for probing
195
+ const buffer = Buffer.alloc(65536);
196
+ const { bytesRead } = await fileHandle.read(buffer, 0, 65536, 0);
197
+ const probeBuffer = buffer.subarray(0, bytesRead);
198
+ const format = InputFormat.probe(probeBuffer, input);
199
+ if (!format) {
200
+ return null;
201
+ }
202
+ return {
203
+ format: format.name ?? 'unknown',
204
+ longName: format.longName ?? undefined,
205
+ extensions: format.extensions ?? undefined,
206
+ mimeType: format.mimeType ?? undefined,
207
+ confidence: 90, // File-based probe with filename hint
208
+ };
209
+ }
210
+ catch {
211
+ // If file reading fails, return null
212
+ return null;
213
+ }
214
+ finally {
215
+ await fileHandle?.close();
216
+ }
217
+ }
218
+ }
219
+ catch {
220
+ return null;
221
+ }
222
+ }
223
+ /**
224
+ * Probe media format without fully opening the file synchronously.
225
+ * Synchronous version of probeFormat.
226
+ *
227
+ * Detects format by analyzing file headers and content.
228
+ * Useful for format validation before processing.
229
+ *
230
+ * Direct mapping to av_probe_input_format().
231
+ *
232
+ * @param input - File path or buffer to probe
233
+ *
234
+ * @returns Format information or null if unrecognized
235
+ *
236
+ * @example
237
+ * ```typescript
238
+ * const info = Demuxer.probeFormatSync('video.mp4');
239
+ * if (info) {
240
+ * console.log(`Format: ${info.format}`);
241
+ * console.log(`Confidence: ${info.confidence}%`);
242
+ * }
243
+ * ```
244
+ *
245
+ * @example
246
+ * ```typescript
247
+ * // Probe from buffer
248
+ * const buffer = fs.readFileSync('video.webm');
249
+ * const info = Demuxer.probeFormatSync(buffer);
250
+ * console.log(`MIME type: ${info?.mimeType}`);
251
+ * ```
252
+ *
253
+ * @see {@link probeFormat} For async version
254
+ */
255
+ static probeFormatSync(input) {
256
+ try {
257
+ if (Buffer.isBuffer(input)) {
258
+ // Probe from buffer
259
+ const format = InputFormat.probe(input);
260
+ if (!format) {
261
+ return null;
262
+ }
263
+ return {
264
+ format: format.name ?? 'unknown',
265
+ longName: format.longName ?? undefined,
266
+ extensions: format.extensions ?? undefined,
267
+ mimeType: format.mimeType ?? undefined,
268
+ confidence: 100, // Direct probe always has high confidence
269
+ };
270
+ }
271
+ else {
272
+ // For files, read first part and probe
273
+ let fd;
274
+ try {
275
+ fd = openSync(input, 'r');
276
+ // Read first 64KB for probing
277
+ const buffer = Buffer.alloc(65536);
278
+ const bytesRead = readSync(fd, buffer, 0, 65536, 0);
279
+ const probeBuffer = buffer.subarray(0, bytesRead);
280
+ const format = InputFormat.probe(probeBuffer, input);
281
+ if (!format) {
282
+ return null;
283
+ }
284
+ return {
285
+ format: format.name ?? 'unknown',
286
+ longName: format.longName ?? undefined,
287
+ extensions: format.extensions ?? undefined,
288
+ mimeType: format.mimeType ?? undefined,
289
+ confidence: 90, // File-based probe with filename hint
290
+ };
291
+ }
292
+ catch {
293
+ // If file reading fails, return null
294
+ return null;
295
+ }
296
+ finally {
297
+ if (fd !== undefined)
298
+ closeSync(fd);
299
+ }
300
+ }
301
+ }
302
+ catch {
303
+ return null;
304
+ }
305
+ }
306
    /**
     * Open a media input and create a Demuxer.
     *
     * Accepts a file path or URL, a Buffer, custom I/O callbacks
     * (`{ read, seek? }`), or a raw video/audio descriptor
     * (`{ type, input, ... }`) whose `input` field is then opened with a
     * raw format ('rawvideo' / 's16le' by default).
     *
     * @param input - File path, URL, Buffer, IOInputCallbacks, or raw-data descriptor
     *
     * @param options - Open options (format, bufferSize, skipStreamInfo, ...)
     *
     * @returns Promise resolving to a ready Demuxer
     *
     * @throws {Error} If the input type is invalid, the named format is unknown,
     * a Buffer is empty, or custom I/O is used without an explicit format
     *
     * @throws {FFmpegError} If FFmpeg fails to open the input or read stream info
     *
     * @see {@link openSync} For the synchronous version
     */
    static async open(input, options = {}) {
        // Check if input is raw data
        if (typeof input === 'object' && 'type' in input && ('width' in input || 'sampleRate' in input)) {
            // Build options for raw data: the descriptor's parameters become
            // demuxer options and its `input` field becomes the actual input.
            const rawOptions = {
                bufferSize: options.bufferSize,
                format: options.format ?? (input.type === 'video' ? 'rawvideo' : 's16le'),
                options: {
                    ...options.options,
                },
            };
            if (input.type === 'video') {
                rawOptions.options = {
                    ...rawOptions.options,
                    video_size: `${input.width}x${input.height}`,
                    pixel_format: avGetPixFmtName(input.pixelFormat) ?? 'yuv420p',
                    framerate: new Rational(input.frameRate.num, input.frameRate.den).toString(),
                };
            }
            else {
                rawOptions.options = {
                    ...rawOptions.options,
                    sample_rate: input.sampleRate,
                    channels: input.channels,
                    sample_fmt: avGetSampleFmtName(input.sampleFormat) ?? 's16le',
                };
            }
            input = input.input;
            options = rawOptions;
        }
        // Original implementation for non-raw data
        const formatContext = new FormatContext();
        let ioContext;
        let optionsDict = null;
        let inputFormat = null;
        try {
            // Create options dictionary if options are provided
            if (options.options) {
                optionsDict = Dictionary.fromObject(options.options);
            }
            // Find input format if specified
            if (options.format) {
                inputFormat = InputFormat.findInputFormat(options.format);
                if (!inputFormat) {
                    throw new Error(`Input format '${options.format}' not found`);
                }
            }
            if (typeof input === 'string') {
                // File path or URL - resolve relative paths to absolute
                // Check if it's a URL (starts with protocol://) or a file path
                const isUrl = /^[a-zA-Z][a-zA-Z0-9+.-]*:\/\//.test(input);
                const resolvedInput = isUrl ? input : resolve(input);
                const ret = await formatContext.openInput(resolvedInput, inputFormat, optionsDict);
                FFmpegError.throwIfError(ret, 'Failed to open input');
                // Non-blocking reads so packet pulls don't stall the event loop.
                formatContext.setFlags(AVFMT_FLAG_NONBLOCK);
            }
            else if (Buffer.isBuffer(input)) {
                // Validate buffer is not empty
                if (input.length === 0) {
                    throw new Error('Cannot open media from empty buffer');
                }
                // From buffer - allocate context first for custom I/O
                formatContext.allocContext();
                ioContext = IOStream.create(input, { bufferSize: options.bufferSize });
                formatContext.pb = ioContext;
                const ret = await formatContext.openInput('', inputFormat, optionsDict);
                FFmpegError.throwIfError(ret, 'Failed to open input from buffer');
            }
            else if (typeof input === 'object' && 'read' in input) {
                // Custom I/O with callbacks - format is required
                // (no filename/content available for probing)
                if (!options.format) {
                    throw new Error('Format must be specified for custom I/O');
                }
                // Allocate context first for custom I/O
                formatContext.allocContext();
                // Setup custom I/O with callbacks
                ioContext = new IOContext();
                ioContext.allocContextWithCallbacks(options.bufferSize ?? IO_BUFFER_SIZE, 0, input.read, null, input.seek);
                formatContext.pb = ioContext;
                formatContext.setFlags(AVFMT_FLAG_CUSTOM_IO);
                const ret = await formatContext.openInput('', inputFormat, optionsDict);
                FFmpegError.throwIfError(ret, 'Failed to open input from custom I/O');
            }
            else {
                throw new TypeError('Invalid input type. Expected file path, URL, Buffer, or IOInputCallbacks');
            }
            // Find stream information
            if (!options.skipStreamInfo) {
                const ret = await formatContext.findStreamInfo(null);
                FFmpegError.throwIfError(ret, 'Failed to find stream info');
                // Try to parse extradata for video streams with missing dimensions
                for (const stream of formatContext.streams ?? []) {
                    if (stream.codecpar.codecType === AVMEDIA_TYPE_VIDEO) {
                        const dimensionsMissing = stream.codecpar.width === 0 || stream.codecpar.height === 0;
                        const invalidFormat = stream.codecpar.format === AV_PIX_FMT_NONE;
                        const invalidRate = stream.codecpar.frameRate.num === 0 || stream.codecpar.frameRate.den === 0;
                        const needsParsing = dimensionsMissing || invalidFormat || invalidRate;
                        if (needsParsing && stream.codecpar.extradataSize > 0) {
                            stream.codecpar.parseExtradata();
                        }
                    }
                }
            }
            // Determine buffer size
            let bufferSize = options.bufferSize ?? IO_BUFFER_SIZE;
            if (!ioContext && formatContext.iformat && formatContext.pb) {
                // Check if this is a streaming input (like RTSP, HTTP, etc.)
                const isStreaming = formatContext.pb.seekable === 0;
                if (isStreaming) {
                    bufferSize *= 2; // double buffer size for streaming inputs
                }
            }
            // Apply defaults to options
            const fullOptions = {
                bufferSize,
                format: options.format ?? '',
                skipStreamInfo: options.skipStreamInfo ?? false,
                startWithKeyframe: options.startWithKeyframe ?? false,
                dtsDeltaThreshold: options.dtsDeltaThreshold ?? DELTA_THRESHOLD,
                dtsErrorThreshold: options.dtsErrorThreshold ?? DTS_ERROR_THRESHOLD,
                copyTs: options.copyTs ?? false,
                options: options.options ?? {},
            };
            return new Demuxer(formatContext, fullOptions, ioContext);
        }
        catch (error) {
            // Clean up only on error
            if (ioContext) {
                // Clear the pb reference first
                formatContext.pb = null;
                // Free the IOContext (for both custom I/O and buffer-based I/O)
                ioContext.freeContext();
            }
            // Clean up FormatContext
            await formatContext.closeInput();
            throw error;
        }
        finally {
            // Clean up options dictionary
            if (optionsDict) {
                optionsDict.free();
            }
        }
    }
450
+ static openSync(input, options = {}) {
451
+ // Check if input is raw data
452
+ if (typeof input === 'object' && 'type' in input && ('width' in input || 'sampleRate' in input)) {
453
+ // Build options for raw data
454
+ const rawOptions = {
455
+ bufferSize: options.bufferSize,
456
+ format: options.format ?? (input.type === 'video' ? 'rawvideo' : 's16le'),
457
+ options: {
458
+ ...options.options,
459
+ },
460
+ };
461
+ if (input.type === 'video') {
462
+ rawOptions.options = {
463
+ ...rawOptions.options,
464
+ video_size: `${input.width}x${input.height}`,
465
+ pixel_format: avGetPixFmtName(input.pixelFormat) ?? 'yuv420p',
466
+ framerate: new Rational(input.frameRate.num, input.frameRate.den).toString(),
467
+ };
468
+ }
469
+ else {
470
+ rawOptions.options = {
471
+ ...rawOptions.options,
472
+ sample_rate: input.sampleRate,
473
+ channels: input.channels,
474
+ sample_fmt: avGetSampleFmtName(input.sampleFormat) ?? 's16le',
475
+ };
476
+ }
477
+ input = input.input;
478
+ options = rawOptions;
479
+ }
480
+ // Original implementation for non-raw data
481
+ const formatContext = new FormatContext();
482
+ let ioContext;
483
+ let optionsDict = null;
484
+ let inputFormat = null;
485
+ try {
486
+ // Create options dictionary if options are provided
487
+ if (options.options) {
488
+ optionsDict = Dictionary.fromObject(options.options);
489
+ }
490
+ // Find input format if specified
491
+ if (options.format) {
492
+ inputFormat = InputFormat.findInputFormat(options.format);
493
+ if (!inputFormat) {
494
+ throw new Error(`Input format '${options.format}' not found`);
495
+ }
496
+ }
497
+ if (typeof input === 'string') {
498
+ // File path or URL - resolve relative paths to absolute
499
+ // Check if it's a URL (starts with protocol://) or a file path
500
+ const isUrl = /^[a-zA-Z][a-zA-Z0-9+.-]*:\/\//.test(input);
501
+ const resolvedInput = isUrl ? input : resolve(input);
502
+ const ret = formatContext.openInputSync(resolvedInput, inputFormat, optionsDict);
503
+ FFmpegError.throwIfError(ret, 'Failed to open input');
504
+ formatContext.setFlags(AVFMT_FLAG_NONBLOCK);
505
+ }
506
+ else if (Buffer.isBuffer(input)) {
507
+ // Validate buffer is not empty
508
+ if (input.length === 0) {
509
+ throw new Error('Cannot open media from empty buffer');
510
+ }
511
+ // From buffer - allocate context first for custom I/O
512
+ formatContext.allocContext();
513
+ ioContext = IOStream.create(input, { bufferSize: options.bufferSize });
514
+ formatContext.pb = ioContext;
515
+ const ret = formatContext.openInputSync('', inputFormat, optionsDict);
516
+ FFmpegError.throwIfError(ret, 'Failed to open input from buffer');
517
+ }
518
+ else if (typeof input === 'object' && 'read' in input) {
519
+ // Custom I/O with callbacks - format is required
520
+ if (!options.format) {
521
+ throw new Error('Format must be specified for custom I/O');
522
+ }
523
+ // Allocate context first for custom I/O
524
+ formatContext.allocContext();
525
+ // Setup custom I/O with callbacks
526
+ ioContext = new IOContext();
527
+ ioContext.allocContextWithCallbacks(options.bufferSize ?? IO_BUFFER_SIZE, 0, input.read, null, input.seek);
528
+ formatContext.pb = ioContext;
529
+ formatContext.setFlags(AVFMT_FLAG_CUSTOM_IO);
530
+ const ret = formatContext.openInputSync('', inputFormat, optionsDict);
531
+ FFmpegError.throwIfError(ret, 'Failed to open input from custom I/O');
532
+ }
533
+ else {
534
+ throw new TypeError('Invalid input type. Expected file path, URL, Buffer, or IOInputCallbacks');
535
+ }
536
+ // Find stream information
537
+ if (!options.skipStreamInfo) {
538
+ const ret = formatContext.findStreamInfoSync(null);
539
+ FFmpegError.throwIfError(ret, 'Failed to find stream info');
540
+ }
541
+ // Determine buffer size
542
+ let bufferSize = options.bufferSize ?? IO_BUFFER_SIZE;
543
+ if (!ioContext && formatContext.iformat && formatContext.pb) {
544
+ // Check if this is a streaming input (like RTSP, HTTP, etc.)
545
+ const isStreaming = formatContext.pb.seekable === 0;
546
+ if (isStreaming) {
547
+ bufferSize *= 2; // double buffer size for streaming inputs
548
+ }
549
+ }
550
+ // Apply defaults to options
551
+ const fullOptions = {
552
+ bufferSize,
553
+ format: options.format ?? '',
554
+ skipStreamInfo: options.skipStreamInfo ?? false,
555
+ startWithKeyframe: options.startWithKeyframe ?? false,
556
+ dtsDeltaThreshold: options.dtsDeltaThreshold ?? DELTA_THRESHOLD,
557
+ dtsErrorThreshold: options.dtsErrorThreshold ?? DTS_ERROR_THRESHOLD,
558
+ copyTs: options.copyTs ?? false,
559
+ options: options.options ?? {},
560
+ };
561
+ return new Demuxer(formatContext, fullOptions, ioContext);
562
+ }
563
+ catch (error) {
564
+ // Clean up only on error
565
+ if (ioContext) {
566
+ // Clear the pb reference first
567
+ formatContext.pb = null;
568
+ // Free the IOContext (for both custom I/O and buffer-based I/O)
569
+ ioContext.freeContext();
570
+ }
571
+ // Clean up FormatContext
572
+ formatContext.closeInputSync();
573
+ throw error;
574
+ }
575
+ finally {
576
+ // Clean up options dictionary
577
+ if (optionsDict) {
578
+ optionsDict.free();
579
+ }
580
+ }
581
+ }
582
+ /**
583
+ * Open RTP/SRTP input stream via localhost UDP.
584
+ *
585
+ * Creates a Demuxer from SDP string received via UDP socket.
586
+ * Opens UDP socket and configures FFmpeg to receive and parse RTP packets.
587
+ *
588
+ * @param sdpContent - SDP content string describing the RTP stream
589
+ *
590
+ * @throws {Error} If SDP parsing or socket setup fails
591
+ *
592
+ * @throws {FFmpegError} If FFmpeg operations fail
593
+ *
594
+ * @returns Promise with Demuxer, sendPacket function and cleanup
595
+ *
596
+ * @example
597
+ * ```typescript
598
+ * import { Demuxer, StreamingUtils } from 'node-av/api';
599
+ * import { AV_CODEC_ID_OPUS } from 'node-av/constants';
600
+ *
601
+ * // Generate SDP for SRTP encrypted Opus
602
+ * const sdp = StreamingUtils.createRTPInputSDP([{
603
+ * port: 5004,
604
+ * codecId: AV_CODEC_ID_OPUS,
605
+ * payloadType: 111,
606
+ * clockRate: 16000,
607
+ * channels: 1,
608
+ * srtp: { key: srtpKey, salt: srtpSalt }
609
+ * }]);
610
+ *
611
+ * // Open RTP input
612
+ * const { input, sendPacket, close } = await Demuxer.openSDP(sdp);
613
+ *
614
+ * // Route encrypted RTP packets from network
615
+ * socket.on('message', (msg) => sendPacket(msg));
616
+ *
617
+ * // Decode audio
618
+ * const decoder = await Decoder.create(input.audio()!);
619
+ * for await (const packet of input.packets()) {
620
+ * const frame = await decoder.decode(packet);
621
+ * // Process frame...
622
+ * }
623
+ *
624
+ * // Cleanup
625
+ * await close();
626
+ * ```
627
+ *
628
+ * @see {@link StreamingUtils.createInputSDP} to generate SDP content.
629
+ */
630
+ static async openSDP(sdpContent) {
631
+ // Extract all ports from SDP (supports multi-stream: video + audio)
632
+ const ports = StreamingUtils.extractPortsFromSDP(sdpContent);
633
+ if (ports.length === 0) {
634
+ throw new Error('Failed to extract any ports from SDP content');
635
+ }
636
+ // Convert SDP to buffer for custom I/O
637
+ const sdpBuffer = Buffer.from(sdpContent);
638
+ let position = 0;
639
+ // Create custom I/O callbacks for SDP content
640
+ const callbacks = {
641
+ read: (size) => {
642
+ if (position >= sdpBuffer.length) {
643
+ return null; // EOF
644
+ }
645
+ const chunk = sdpBuffer.subarray(position, Math.min(position + size, sdpBuffer.length));
646
+ position += chunk.length;
647
+ return chunk;
648
+ },
649
+ seek: (offset, whence) => {
650
+ const offsetNum = Number(offset);
651
+ if (whence === AVSEEK_SET) {
652
+ position = offsetNum;
653
+ }
654
+ else if (whence === AVSEEK_CUR) {
655
+ position += offsetNum;
656
+ }
657
+ else if (whence === AVSEEK_END) {
658
+ position = sdpBuffer.length + offsetNum;
659
+ }
660
+ return position;
661
+ },
662
+ };
663
+ // Create UDP socket for sending packets to FFmpeg
664
+ const udpSocket = createSocket('udp4');
665
+ try {
666
+ // Open Demuxer with SDP format using custom I/O
667
+ const input = await Demuxer.open(callbacks, {
668
+ format: 'sdp',
669
+ skipStreamInfo: true,
670
+ options: {
671
+ protocol_whitelist: 'pipe,udp,rtp,file,crypto',
672
+ listen_timeout: -1,
673
+ },
674
+ });
675
+ const sendPacket = (rtpPacket, streamIndex = 0) => {
676
+ const port = ports[streamIndex];
677
+ if (!port) {
678
+ throw new Error(`No port found for stream index ${streamIndex}. Available streams: ${ports.length}`);
679
+ }
680
+ const data = rtpPacket instanceof RtpPacket ? rtpPacket.serialize() : rtpPacket;
681
+ udpSocket.send(data, port, '127.0.0.1');
682
+ };
683
+ const close = async () => {
684
+ await input.close();
685
+ udpSocket.close();
686
+ };
687
+ const closeSync = () => {
688
+ input.closeSync();
689
+ udpSocket.close();
690
+ };
691
+ return { input, sendPacket, close, closeSync };
692
+ }
693
+ catch (error) {
694
+ // Cleanup on error
695
+ udpSocket.close();
696
+ throw error;
697
+ }
698
+ }
699
+ /**
700
+ * Open RTP/SRTP input stream via localhost UDP synchronously.
701
+ * Synchronous version of openSDP.
702
+ *
703
+ * Creates a Demuxer from SDP string received via UDP socket.
704
+ * Opens UDP socket and configures FFmpeg to receive and parse RTP packets.
705
+ *
706
+ * @param sdpContent - SDP content string describing the RTP stream
707
+ *
708
+ * @throws {Error} If SDP parsing or socket setup fails
709
+ *
710
+ * @throws {FFmpegError} If FFmpeg operations fail
711
+ *
712
+ * @returns Object with Demuxer, sendPacket function and cleanup
713
+ *
714
+ * @example
715
+ * ```typescript
716
+ * import { Demuxer, StreamingUtils } from 'node-av/api';
717
+ * import { AV_CODEC_ID_OPUS } from 'node-av/constants';
718
+ *
719
+ * // Generate SDP for SRTP encrypted Opus
720
+ * const sdp = StreamingUtils.createRTPInputSDP([{
721
+ * port: 5004,
722
+ * codecId: AV_CODEC_ID_OPUS,
723
+ * payloadType: 111,
724
+ * clockRate: 16000,
725
+ * channels: 1,
726
+ * srtp: { key: srtpKey, salt: srtpSalt }
727
+ * }]);
728
+ *
729
+ * // Open RTP input
730
+ * const { input, sendPacket, closeSync } = Demuxer.openSDPSync(sdp);
731
+ *
732
+ * // Route encrypted RTP packets from network
733
+ * socket.on('message', (msg) => sendPacket(msg));
734
+ *
735
+ * // Decode audio
736
+ * const decoder = await Decoder.create(input.audio()!);
737
+ * for await (const packet of input.packets()) {
738
+ * const frame = await decoder.decode(packet);
739
+ * // Process frame...
740
+ * }
741
+ *
742
+ * // Cleanup synchronously
743
+ * closeSync();
744
+ * ```
745
+ *
746
+ * @see {@link StreamingUtils.createInputSDP} to generate SDP content.
747
+ * @see {@link openSDP} For async version
748
+ */
749
+ static openSDPSync(sdpContent) {
750
+ // Extract all ports from SDP (supports multi-stream: video + audio)
751
+ const ports = StreamingUtils.extractPortsFromSDP(sdpContent);
752
+ if (ports.length === 0) {
753
+ throw new Error('Failed to extract any ports from SDP content');
754
+ }
755
+ // Convert SDP to buffer for custom I/O
756
+ const sdpBuffer = Buffer.from(sdpContent);
757
+ let position = 0;
758
+ // Create custom I/O callbacks for SDP content
759
+ const callbacks = {
760
+ read: (size) => {
761
+ if (position >= sdpBuffer.length) {
762
+ return null; // EOF
763
+ }
764
+ const chunk = sdpBuffer.subarray(position, Math.min(position + size, sdpBuffer.length));
765
+ position += chunk.length;
766
+ return chunk;
767
+ },
768
+ seek: (offset, whence) => {
769
+ const offsetNum = Number(offset);
770
+ if (whence === AVSEEK_SET) {
771
+ position = offsetNum;
772
+ }
773
+ else if (whence === AVSEEK_CUR) {
774
+ position += offsetNum;
775
+ }
776
+ else if (whence === AVSEEK_END) {
777
+ position = sdpBuffer.length + offsetNum;
778
+ }
779
+ return position;
780
+ },
781
+ };
782
+ // Create UDP socket for sending packets to FFmpeg
783
+ const udpSocket = createSocket('udp4');
784
+ try {
785
+ // Open Demuxer with SDP format using custom I/O
786
+ const input = Demuxer.openSync(callbacks, {
787
+ format: 'sdp',
788
+ skipStreamInfo: true,
789
+ options: {
790
+ protocol_whitelist: 'pipe,udp,rtp,file,crypto',
791
+ listen_timeout: -1,
792
+ },
793
+ });
794
+ const sendPacket = (rtpPacket, streamIndex = 0) => {
795
+ const port = ports[streamIndex];
796
+ if (!port) {
797
+ throw new Error(`No port found for stream index ${streamIndex}. Available streams: ${ports.length}`);
798
+ }
799
+ const data = rtpPacket instanceof RtpPacket ? rtpPacket.serialize() : rtpPacket;
800
+ udpSocket.send(data, port, '127.0.0.1');
801
+ };
802
+ const close = async () => {
803
+ await input.close();
804
+ udpSocket.close();
805
+ };
806
+ const closeSync = () => {
807
+ input.closeSync();
808
+ udpSocket.close();
809
+ };
810
+ return { input, sendPacket, close, closeSync };
811
+ }
812
+ catch (error) {
813
+ // Cleanup on error
814
+ udpSocket.close();
815
+ throw error;
816
+ }
817
+ }
818
+ /**
819
+ * Check if input is open.
820
+ *
821
+ * @example
822
+ * ```typescript
823
+ * if (!input.isInputOpen) {
824
+ * console.log('Input is not open');
825
+ * }
826
+ * ```
827
+ */
828
    get isInputOpen() {
        // Open is simply the inverse of the internal closed flag.
        return !this.isClosed;
    }
831
+ /**
832
+ * Get all streams in the media.
833
+ *
834
+ * @example
835
+ * ```typescript
836
+ * for (const stream of input.streams) {
837
+ * console.log(`Stream ${stream.index}: ${stream.codecpar.codecType}`);
838
+ * }
839
+ * ```
840
+ */
841
    get streams() {
        // Backing array of Stream wrappers for this input.
        return this._streams;
    }
844
+ /**
845
+ * Get media duration in seconds.
846
+ *
847
+ * Returns 0 if duration is unknown or not available or input is closed.
848
+ *
849
+ * @example
850
+ * ```typescript
851
+ * console.log(`Duration: ${input.duration} seconds`);
852
+ * ```
853
+ */
854
+ get duration() {
855
+ if (this.isClosed) {
856
+ return 0;
857
+ }
858
+ const duration = this.formatContext.duration;
859
+ if (!duration || duration <= 0) {
860
+ return 0;
861
+ }
862
+ // Convert from AV_TIME_BASE (microseconds) to seconds
863
+ return Number(duration) / 1000000;
864
+ }
865
+ /**
866
+ * Get media bitrate in kilobits per second.
867
+ *
868
+ * Returns 0 if bitrate is unknown or not available or input is closed.
869
+ *
870
+ * @example
871
+ * ```typescript
872
+ * console.log(`Bitrate: ${input.bitRate} kbps`);
873
+ * ```
874
+ */
875
+ get bitRate() {
876
+ if (this.isClosed) {
877
+ return 0;
878
+ }
879
+ const bitrate = this.formatContext.bitRate;
880
+ if (!bitrate || bitrate <= 0) {
881
+ return 0;
882
+ }
883
+ // Convert from bits per second to kilobits per second
884
+ return Number(bitrate) / 1000;
885
+ }
886
+ /**
887
+ * Get media metadata.
888
+ *
889
+ * Returns all metadata tags as key-value pairs.
890
+ *
891
+ * @example
892
+ * ```typescript
893
+ * const metadata = input.metadata;
894
+ * console.log(`Title: ${metadata.title}`);
895
+ * console.log(`Artist: ${metadata.artist}`);
896
+ * ```
897
+ */
898
+ get metadata() {
899
+ if (this.isClosed) {
900
+ return {};
901
+ }
902
+ return this.formatContext.metadata?.getAll() ?? {};
903
+ }
904
+ /**
905
+ * Get format name.
906
+ *
907
+ * Returns 'unknown' if input is closed or format is not available.
908
+ *
909
+ * @example
910
+ * ```typescript
911
+ * console.log(`Format: ${input.formatName}`); // "mov,mp4,m4a,3gp,3g2,mj2"
912
+ * ```
913
+ */
914
+ get formatName() {
915
+ if (this.isClosed) {
916
+ return 'unknown';
917
+ }
918
+ return this.formatContext.iformat?.name ?? 'unknown';
919
+ }
920
+ /**
921
+ * Get format long name.
922
+ *
923
+ * Returns 'Unknown Format' if input is closed or format is not available.
924
+ *
925
+ * @example
926
+ * ```typescript
927
+ * console.log(`Format: ${input.formatLongName}`); // "QuickTime / MOV"
928
+ * ```
929
+ */
930
+ get formatLongName() {
931
+ if (this.isClosed) {
932
+ return 'Unknown Format';
933
+ }
934
+ return this.formatContext.iformat?.longName ?? 'Unknown Format';
935
+ }
936
+ /**
937
+ * Get MIME type of the input format.
938
+ *
939
+ * Returns null if input is closed or format is not available.
940
+ *
941
+ * @example
942
+ * ```typescript
943
+ * console.log(`MIME Type: ${input.mimeType}`); // "video/mp4"
944
+ * ```
945
+ */
946
+ get mimeType() {
947
+ if (this.isClosed) {
948
+ return null;
949
+ }
950
+ return this.formatContext.iformat?.mimeType ?? null;
951
+ }
952
+ /**
953
+ * Get input stream by index.
954
+ *
955
+ * Returns the stream at the specified index.
956
+ *
957
+ * @param index - Stream index
958
+ *
959
+ * @returns Stream or undefined if index is invalid
960
+ *
961
+ * @example
962
+ * ```typescript
963
+ * const input = await Demuxer.open('input.mp4');
964
+ *
965
+ * // Get the input stream to inspect codec parameters
966
+ * const stream = input.getStream(1); // Get stream at index 1
967
+ * if (stream) {
968
+ * console.log(`Input codec: ${stream.codecpar.codecId}`);
969
+ * }
970
+ * ```
971
+ *
972
+ * @see {@link video} For getting video streams
973
+ * @see {@link audio} For getting audio streams
974
+ */
975
+ getStream(index) {
976
+ const streams = this.formatContext.streams;
977
+ if (!streams || index < 0 || index >= streams.length) {
978
+ return undefined;
979
+ }
980
+ return streams[index];
981
+ }
982
+ /**
983
+ * Get video stream by index.
984
+ *
985
+ * Returns the nth video stream (0-based index).
986
+ * Returns undefined if stream doesn't exist.
987
+ *
988
+ * @param index - Video stream index (default: 0)
989
+ *
990
+ * @returns Video stream or undefined
991
+ *
992
+ * @example
993
+ * ```typescript
994
+ * const videoStream = input.video();
995
+ * if (videoStream) {
996
+ * console.log(`Video: ${videoStream.codecpar.width}x${videoStream.codecpar.height}`);
997
+ * }
998
+ * ```
999
+ *
1000
+ * @example
1001
+ * ```typescript
1002
+ * // Get second video stream
1003
+ * const secondVideo = input.video(1);
1004
+ * ```
1005
+ *
1006
+ * @see {@link audio} For audio streams
1007
+ * @see {@link findBestStream} For automatic selection
1008
+ */
1009
+ video(index = 0) {
1010
+ const streams = this._streams.filter((s) => s.codecpar.codecType === AVMEDIA_TYPE_VIDEO);
1011
+ return streams[index];
1012
+ }
1013
+ /**
1014
+ * Get audio stream by index.
1015
+ *
1016
+ * Returns the nth audio stream (0-based index).
1017
+ * Returns undefined if stream doesn't exist.
1018
+ *
1019
+ * @param index - Audio stream index (default: 0)
1020
+ *
1021
+ * @returns Audio stream or undefined
1022
+ *
1023
+ * @example
1024
+ * ```typescript
1025
+ * const audioStream = input.audio();
1026
+ * if (audioStream) {
1027
+ * console.log(`Audio: ${audioStream.codecpar.sampleRate}Hz`);
1028
+ * }
1029
+ * ```
1030
+ *
1031
+ * @example
1032
+ * ```typescript
1033
+ * // Get second audio stream
1034
+ * const secondAudio = input.audio(1);
1035
+ * ```
1036
+ *
1037
+ * @see {@link video} For video streams
1038
+ * @see {@link findBestStream} For automatic selection
1039
+ */
1040
+ audio(index = 0) {
1041
+ const streams = this._streams.filter((s) => s.codecpar.codecType === AVMEDIA_TYPE_AUDIO);
1042
+ return streams[index];
1043
+ }
1044
+ /**
1045
+ * Get input format details.
1046
+ *
1047
+ * Returns null if input is closed or format is not available.
1048
+ *
1049
+ * @returns Input format or null
1050
+ *
1051
+ * @example
1052
+ * ```typescript
1053
+ * const inputFormat = input.inputFormat;
1054
+ * if (inputFormat) {
1055
+ * console.log(`Input Format: ${inputFormat.name}`);
1056
+ * }
1057
+ * ```
1058
+ */
1059
+ inputFormat() {
1060
+ return this.formatContext.iformat;
1061
+ }
1062
+ /**
1063
+ * Find the best stream of a given type.
1064
+ *
1065
+ * Uses FFmpeg's stream selection algorithm.
1066
+ * Considers codec support, default flags, and quality.
1067
+ *
1068
+ * Direct mapping to av_find_best_stream().
1069
+ *
1070
+ * @param type - Media type to find
1071
+ *
1072
+ * @returns Best stream or undefined if not found or input is closed
1073
+ *
1074
+ * @example
1075
+ * ```typescript
1076
+ * import { AVMEDIA_TYPE_VIDEO } from 'node-av/constants';
1077
+ *
1078
+ * const bestVideo = input.findBestStream(AVMEDIA_TYPE_VIDEO);
1079
+ * if (bestVideo) {
1080
+ * const decoder = await Decoder.create(bestVideo);
1081
+ * }
1082
+ * ```
1083
+ *
1084
+ * @see {@link video} For direct video stream access
1085
+ * @see {@link audio} For direct audio stream access
1086
+ */
1087
+ findBestStream(type) {
1088
+ if (this.isClosed) {
1089
+ return undefined;
1090
+ }
1091
+ const bestStreamIndex = this.formatContext.findBestStream(type);
1092
+ return this._streams.find((s) => s.index === bestStreamIndex);
1093
+ }
1094
+ /**
1095
+ * Read packets from media as async generator.
1096
+ *
1097
+ * Yields demuxed packets for processing.
1098
+ * Automatically handles packet memory management.
1099
+ * Optionally filters packets by stream index.
1100
+ *
1101
+ * **Supports parallel generators**: Multiple `packets()` iterators can run concurrently.
1102
+ * When multiple generators are active, an internal demux thread automatically handles
1103
+ * packet distribution to avoid race conditions.
1104
+ *
1105
+ * Direct mapping to av_read_frame().
1106
+ *
1107
+ * @param index - Optional stream index to filter
1108
+ *
1109
+ * @yields {Packet} Demuxed packets (must be freed by caller)
1110
+ *
1111
+ * @throws {Error} If packet cloning fails
1112
+ *
1113
+ * @example
1114
+ * ```typescript
1115
+ * // Read all packets
1116
+ * for await (const packet of input.packets()) {
1117
+ * console.log(`Packet: stream=${packet.streamIndex}, pts=${packet.pts}`);
1118
+ * packet.free();
1119
+ * }
1120
+ * ```
1121
+ *
1122
+ * @example
1123
+ * ```typescript
1124
+ * // Read only video packets
1125
+ * const videoStream = input.video();
1126
+ * for await (const packet of input.packets(videoStream.index)) {
1127
+ * // Process video packet
1128
+ * packet.free();
1129
+ * }
1130
+ * ```
1131
+ *
1132
+ * @example
1133
+ * ```typescript
1134
+ * // Parallel processing of video and audio streams
1135
+ * const videoGen = input.packets(videoStream.index);
1136
+ * const audioGen = input.packets(audioStream.index);
1137
+ *
1138
+ * await Promise.all([
1139
+ * (async () => {
1140
+ * for await (const packet of videoGen) {
1141
+ * // Process video
1142
+ * packet.free();
1143
+ * }
1144
+ * })(),
1145
+ * (async () => {
1146
+ * for await (const packet of audioGen) {
1147
+ * // Process audio
1148
+ * packet.free();
1149
+ * }
1150
+ * })()
1151
+ * ]);
1152
+ * ```
1153
+ *
1154
+ * @see {@link Decoder.frames} For decoding packets
1155
+ */
1156
    async *packets(index) {
        // Register this generator so the demux thread knows a consumer exists.
        this.activeGenerators++;
        // Queue key: a specific stream index, or 'all' for unfiltered reads.
        const queueKey = index ?? 'all';
        // Initialize queue for this generator (shared with the demux thread).
        if (!this.packetQueues.has(queueKey)) {
            this.packetQueues.set(queueKey, []);
        }
        // Always start demux thread (handles single and multiple generators);
        // it is a no-op if the thread is already running.
        this.startDemuxThread();
        try {
            // When startWithKeyframe is set, video packets are dropped until
            // the first keyframe arrives; otherwise everything passes through.
            let hasSeenKeyframe = !this.options.startWithKeyframe;
            // Read from queue (demux thread is handling av_read_frame)
            const queue = this.packetQueues.get(queueKey);
            while (!this.isClosed) {
                // Try to get packet from queue
                let packet = queue.shift();
                // If queue is empty, wait for next packet
                if (!packet) {
                    // Check for EOF first
                    if (this.demuxEof) {
                        break; // End of stream
                    }
                    // Park this consumer: the demux thread wakes us via the
                    // resolver registered under our queue key.
                    const { promise, resolve } = Promise.withResolvers();
                    this.queueResolvers.set(queueKey, resolve);
                    // Wait for demux thread to add packet
                    await promise;
                    // Check again after wakeup (the wakeup may signal EOF).
                    if (this.demuxEof) {
                        break;
                    }
                    packet = queue.shift();
                    if (!packet) {
                        // Spurious wakeup — loop and wait again.
                        continue;
                    }
                }
                // Apply keyframe filtering if needed (video streams only;
                // non-video packets always pass through).
                if (!hasSeenKeyframe) {
                    const stream = this._streams[packet.streamIndex];
                    const isVideoStream = stream?.codecpar.codecType === AVMEDIA_TYPE_VIDEO;
                    if (isVideoStream && packet.isKeyframe) {
                        hasSeenKeyframe = true;
                    }
                    else if (isVideoStream && !packet.isKeyframe) {
                        // Dropped packets are freed here since the caller
                        // never sees them.
                        packet.free();
                        continue;
                    }
                }
                // Ownership of the packet transfers to the caller (who must
                // free it).
                yield packet;
            }
        }
        finally {
            // Unregister this generator
            this.activeGenerators--;
            // Stop demux thread if no more generators
            if (this.activeGenerators === 0) {
                await this.stopDemuxThread();
            }
            // NOTE(review): this trailing null also fires when the consumer
            // closes the generator early (break / .return()), not only at
            // natural EOF — confirm downstream consumers expect a null
            // sentinel in that case.
            yield null; // Signal EOF
        }
    }
1218
+ /**
1219
+ * Read packets from media as generator synchronously.
1220
+ * Synchronous version of packets.
1221
+ *
1222
+ * Yields demuxed packets for processing.
1223
+ * Automatically handles packet memory management.
1224
+ * Optionally filters packets by stream index.
1225
+ *
1226
+ * Direct mapping to av_read_frame().
1227
+ *
1228
+ * @param index - Optional stream index to filter
1229
+ *
1230
+ * @yields {Packet} Demuxed packets (must be freed by caller)
1231
+ *
1232
+ * @throws {Error} If packet cloning fails
1233
+ *
1234
+ * @example
1235
+ * ```typescript
1236
+ * // Read all packets
1237
+ * for (const packet of input.packetsSync()) {
1238
+ * console.log(`Packet: stream=${packet.streamIndex}, pts=${packet.pts}`);
1239
+ * packet.free();
1240
+ * }
1241
+ * ```
1242
+ *
1243
+ * @example
1244
+ * ```typescript
1245
+ * // Read only video packets
1246
+ * const videoStream = input.video();
1247
+ * for (const packet of input.packetsSync(videoStream.index)) {
1248
+ * // Process video packet
1249
+ * packet.free();
1250
+ * }
1251
+ * ```
1252
+ *
1253
+ * @see {@link packets} For async version
1254
+ */
1255
    *packetsSync(index) {
        // TypeScript `using` lowering: env_1 tracks disposable resources so
        // the reusable packet is released even if the generator throws.
        const env_1 = { stack: [], error: void 0, hasError: false };
        try {
            // One reusable Packet object; its data buffer is re-acquired by
            // each readFrameSync and released via unref() after cloning.
            const packet = __addDisposableResource(env_1, new Packet(), false);
            packet.alloc();
            // When startWithKeyframe is set, video packets are dropped until
            // the first keyframe arrives; otherwise everything passes through.
            let hasSeenKeyframe = !this.options.startWithKeyframe;
            while (!this.isClosed) {
                const ret = this.formatContext.readFrameSync(packet);
                if (ret < 0) {
                    // Negative return: EOF or read error — stop demuxing.
                    break;
                }
                // Get stream for timestamp processing
                const stream = this._streams[packet.streamIndex];
                if (stream) {
                    // Set packet timebase to stream timebase
                    // This must be done BEFORE any timestamp processing
                    packet.timeBase = stream.timeBase;
                    // Apply timestamp processing
                    // 1. PTS wrap-around correction
                    this.ptsWrapAroundCorrection(packet, stream);
                    // 2. Timestamp discontinuity processing
                    this.timestampDiscontinuityProcess(packet, stream);
                    // 3. DTS prediction/update
                    this.dtsPredict(packet, stream);
                }
                if (index === undefined || packet.streamIndex === index) {
                    // If startWithKeyframe is enabled, skip packets until we see a keyframe
                    // Only apply to video streams - audio packets should always pass through
                    if (!hasSeenKeyframe) {
                        const stream = this._streams[packet.streamIndex];
                        const isVideoStream = stream?.codecpar.codecType === AVMEDIA_TYPE_VIDEO;
                        if (isVideoStream && packet.isKeyframe) {
                            hasSeenKeyframe = true;
                        }
                        else if (isVideoStream && !packet.isKeyframe) {
                            // Skip video P-frames until first keyframe
                            packet.unref();
                            continue;
                        }
                        // Non-video streams (audio, etc.) always pass through
                    }
                    // Clone the packet for the user
                    // This creates a new Packet object that shares the same data buffer
                    // through reference counting. The data won't be freed until both
                    // the original and the clone are unreferenced.
                    const cloned = packet.clone();
                    if (!cloned) {
                        throw new Error('Failed to clone packet (out of memory)');
                    }
                    yield cloned;
                }
                // Unreference the original packet's data buffer
                // This allows us to reuse the packet object for the next readFrame()
                // The data itself is still alive because the clone has a reference
                packet.unref();
            }
            // Signal EOF
            yield null;
        }
        catch (e_1) {
            // Record the error; __disposeResources rethrows it after cleanup.
            env_1.error = e_1;
            env_1.hasError = true;
        }
        finally {
            __disposeResources(env_1);
        }
    }
1322
+ /**
1323
+ * Seek to timestamp in media.
1324
+ *
1325
+ * Seeks to the specified position in seconds.
1326
+ * Can seek in specific stream or globally.
1327
+ *
1328
+ * Direct mapping to av_seek_frame().
1329
+ *
1330
+ * @param timestamp - Target position in seconds
1331
+ *
1332
+ * @param streamIndex - Stream index or -1 for global (default: -1)
1333
+ *
1334
+ * @param flags - Seek flags (default: AVFLAG_NONE)
1335
+ *
1336
+ * @returns 0 on success, negative on error
1337
+ *
1338
+ * @throws {Error} If input is closed
1339
+ *
1340
+ * @example
1341
+ * ```typescript
1342
+ * // Seek to 30 seconds
1343
+ * const ret = await input.seek(30);
1344
+ * FFmpegError.throwIfError(ret, 'seek failed');
1345
+ * ```
1346
+ *
1347
+ * @example
1348
+ * ```typescript
1349
+ * import { AVSEEK_FLAG_BACKWARD } from 'node-av/constants';
1350
+ *
1351
+ * // Seek to keyframe before 60 seconds
1352
+ * await input.seek(60, -1, AVSEEK_FLAG_BACKWARD);
1353
+ * ```
1354
+ *
1355
+ * @see {@link AVSeekFlag} For seek flags
1356
+ */
1357
+ async seek(timestamp, streamIndex = -1, flags = AVFLAG_NONE) {
1358
+ if (this.isClosed) {
1359
+ throw new Error('Cannot seek on closed input');
1360
+ }
1361
+ // Convert seconds to AV_TIME_BASE
1362
+ const ts = BigInt(Math.floor(timestamp * 1000000));
1363
+ return this.formatContext.seekFrame(streamIndex, ts, flags);
1364
+ }
1365
+ /**
1366
+ * Seek to timestamp in media synchronously.
1367
+ * Synchronous version of seek.
1368
+ *
1369
+ * Seeks to the specified position in seconds.
1370
+ * Can seek in specific stream or globally.
1371
+ *
1372
+ * Direct mapping to av_seek_frame().
1373
+ *
1374
+ * @param timestamp - Target position in seconds
1375
+ *
1376
+ * @param streamIndex - Stream index or -1 for global (default: -1)
1377
+ *
1378
+ * @param flags - Seek flags (default: AVFLAG_NONE)
1379
+ *
1380
+ * @returns 0 on success, negative on error
1381
+ *
1382
+ * @throws {Error} If input is closed
1383
+ *
1384
+ * @example
1385
+ * ```typescript
1386
+ * // Seek to 30 seconds
1387
+ * const ret = input.seekSync(30);
1388
+ * FFmpegError.throwIfError(ret, 'seek failed');
1389
+ * ```
1390
+ *
1391
+ * @example
1392
+ * ```typescript
1393
+ * import { AVSEEK_FLAG_BACKWARD } from 'node-av/constants';
1394
+ *
1395
+ * // Seek to keyframe before 60 seconds
1396
+ * input.seekSync(60, -1, AVSEEK_FLAG_BACKWARD);
1397
+ * ```
1398
+ *
1399
+ * @see {@link seek} For async version
1400
+ */
1401
+ seekSync(timestamp, streamIndex = -1, flags = AVFLAG_NONE) {
1402
+ if (this.isClosed) {
1403
+ throw new Error('Cannot seek on closed input');
1404
+ }
1405
+ // Convert seconds to AV_TIME_BASE
1406
+ const ts = BigInt(Math.floor(timestamp * 1000000));
1407
+ return this.formatContext.seekFrameSync(streamIndex, ts, flags);
1408
+ }
1409
+ /**
1410
+ * Start the internal demux thread for handling multiple parallel packet generators.
1411
+ * This thread reads packets from the format context and distributes them to queues.
1412
+ *
1413
+ * @internal
1414
+ */
1415
    startDemuxThread() {
        if (this.demuxThreadActive || this.demuxThread) {
            return; // Already running
        }
        this.demuxThreadActive = true;
        // The "thread" is an async IIFE: it loops on readFrame and fans
        // packets out to per-consumer queues, waking parked consumers.
        this.demuxThread = (async () => {
            // TypeScript `using` lowering: env_2 guarantees the reusable
            // packet is disposed even if the loop throws.
            const env_2 = { stack: [], error: void 0, hasError: false };
            try {
                const packet = __addDisposableResource(env_2, new Packet(), false);
                packet.alloc();
                while (this.demuxThreadActive && !this.isClosed) {
                    // Check if all queues are full - if so, wait a bit
                    // (backpressure: don't read ahead of slow consumers).
                    let allQueuesFull = true;
                    for (const queue of this.packetQueues.values()) {
                        if (queue.length < MAX_INPUT_QUEUE_SIZE) {
                            allQueuesFull = false;
                            break;
                        }
                    }
                    if (allQueuesFull) {
                        await new Promise(setImmediate);
                        continue;
                    }
                    // Read next packet
                    const ret = await this.formatContext.readFrame(packet);
                    if (ret < 0) {
                        // End of stream - notify all waiting consumers
                        this.demuxEof = true;
                        for (const resolve of this.queueResolvers.values()) {
                            resolve();
                        }
                        this.queueResolvers.clear();
                        break;
                    }
                    // Get stream for timestamp processing; timeBase must be
                    // set before any of the fixups below.
                    const stream = this._streams[packet.streamIndex];
                    if (stream) {
                        packet.timeBase = stream.timeBase;
                        this.ptsWrapAroundCorrection(packet, stream);
                        this.timestampDiscontinuityProcess(packet, stream);
                        this.dtsPredict(packet, stream);
                    }
                    // Find which queues need this packet: the unfiltered
                    // 'all' queue and/or the per-stream queue.
                    const allQueue = this.packetQueues.get('all');
                    const streamQueue = this.packetQueues.get(packet.streamIndex);
                    const targetQueues = [];
                    if (allQueue && allQueue.length < MAX_INPUT_QUEUE_SIZE) {
                        targetQueues.push({ queue: allQueue, event: 'packet-all' });
                    }
                    // Only add stream queue if it's different from 'all' queue
                    if (streamQueue && streamQueue !== allQueue && streamQueue.length < MAX_INPUT_QUEUE_SIZE) {
                        targetQueues.push({ queue: streamQueue, event: `packet-${packet.streamIndex}` });
                    }
                    if (targetQueues.length === 0) {
                        // No queue needs this packet, skip it
                        packet.unref();
                        continue;
                    }
                    // Clone once, then share reference for additional queues
                    const firstClone = packet.clone();
                    if (!firstClone) {
                        throw new Error('Failed to clone packet in demux thread (out of memory)');
                    }
                    // Add to first queue and resolve waiting promise.
                    // The queue key is recovered from the event tag: 'all'
                    // for the unfiltered queue, else the stream index.
                    const firstKey = targetQueues[0].event.replace('packet-', '') === 'all' ? 'all' : packet.streamIndex;
                    targetQueues[0].queue.push(firstClone);
                    const firstResolver = this.queueResolvers.get(firstKey);
                    if (firstResolver) {
                        firstResolver();
                        this.queueResolvers.delete(firstKey);
                    }
                    // Additional queues get clones (shares data buffer via reference counting)
                    for (let i = 1; i < targetQueues.length; i++) {
                        const additionalClone = firstClone.clone();
                        if (!additionalClone) {
                            throw new Error('Failed to clone packet for additional queue (out of memory)');
                        }
                        const queueKey = targetQueues[i].event.replace('packet-', '') === 'all' ? 'all' : packet.streamIndex;
                        targetQueues[i].queue.push(additionalClone);
                        const resolver = this.queueResolvers.get(queueKey);
                        if (resolver) {
                            resolver();
                            this.queueResolvers.delete(queueKey);
                        }
                    }
                    // Release the read buffer; clones keep the data alive.
                    packet.unref();
                }
                // Mark the thread as finished so callers can observe it;
                // NOTE(review): stopDemuxThread must still run its cleanup
                // after this flag flips — confirm it does not early-return.
                this.demuxThreadActive = false;
            }
            catch (e_2) {
                // Record the error; __disposeResources rethrows after cleanup.
                env_2.error = e_2;
                env_2.hasError = true;
            }
            finally {
                __disposeResources(env_2);
            }
        })();
    }
1513
+ /**
1514
+ * Stop the internal demux thread.
1515
+ *
1516
+ * @internal
1517
+ */
1518
+ async stopDemuxThread() {
1519
+ if (!this.demuxThreadActive) {
1520
+ return;
1521
+ }
1522
+ this.demuxThreadActive = false;
1523
+ if (this.demuxThread) {
1524
+ await this.demuxThread;
1525
+ this.demuxThread = null;
1526
+ }
1527
+ // Clear all queues and resolvers
1528
+ for (const queue of this.packetQueues.values()) {
1529
+ for (const packet of queue) {
1530
+ packet.free();
1531
+ }
1532
+ queue.length = 0;
1533
+ }
1534
+ this.packetQueues.clear();
1535
+ this.queueResolvers.clear();
1536
+ this.demuxEof = false;
1537
+ }
1538
+ /**
1539
+ * Get or create stream state for timestamp processing.
1540
+ *
1541
+ * @param streamIndex - Stream index
1542
+ *
1543
+ * @returns Stream state
1544
+ *
1545
+ * @internal
1546
+ */
1547
+ getStreamState(streamIndex) {
1548
+ let state = this.streamStates.get(streamIndex);
1549
+ if (!state) {
1550
+ state = {
1551
+ wrapCorrectionDone: false,
1552
+ sawFirstTs: false,
1553
+ firstDts: AV_NOPTS_VALUE,
1554
+ nextDts: AV_NOPTS_VALUE,
1555
+ dts: AV_NOPTS_VALUE,
1556
+ };
1557
+ this.streamStates.set(streamIndex, state);
1558
+ }
1559
+ return state;
1560
+ }
1561
+ /**
1562
+ * PTS Wrap-Around Correction.
1563
+ *
1564
+ * Based on FFmpeg's ts_fixup().
1565
+ *
1566
+ * Corrects timestamp wrap-around for streams with limited timestamp bits.
1567
+ * DVB streams typically use 31-bit timestamps that wrap around.
1568
+ * Without correction, timestamps become negative causing playback errors.
1569
+ *
1570
+ * Handles:
1571
+ * - Detects wrap-around based on pts_wrap_bits from stream
1572
+ * - Applies correction once per stream
1573
+ * - Corrects both PTS and DTS
1574
+ *
1575
+ * @param packet - Packet to correct
1576
+ *
1577
+ * @param stream - Stream metadata
1578
+ *
1579
+ * @internal
1580
+ */
1581
    ptsWrapAroundCorrection(packet, stream) {
        const state = this.getStreamState(packet.streamIndex);
        // Already corrected or no wrap bits configured (>= 64 bits cannot
        // wrap within a BigInt timestamp).
        if (state.wrapCorrectionDone || stream.ptsWrapBits >= 64) {
            return;
        }
        const startTime = this.formatContext.startTime;
        if (startTime === AV_NOPTS_VALUE) {
            // Without a known start time there is no reference to wrap against.
            return;
        }
        const ptsWrapBits = stream.ptsWrapBits;
        // Rescale start_time to packet's timebase
        // Note: packet.timeBase was set to stream.timeBase in packets() generator
        const stime = avRescaleQ(startTime, AV_TIME_BASE_Q, packet.timeBase);
        // stime2 = stime + 2^ptsWrapBits: the same instant one wrap later.
        const stime2 = stime + (1n << BigInt(ptsWrapBits));
        state.wrapCorrectionDone = true;
        // Timestamps more than half a wrap period past start are treated as
        // pre-wrap values and shifted down by one full period.
        const wrapThreshold = stime + (1n << BigInt(ptsWrapBits - 1));
        // Check DTS for wrap-around (stime2 > stime guards against overflow
        // of the shifted value).
        if (stime2 > stime && packet.dts !== AV_NOPTS_VALUE && packet.dts > wrapThreshold) {
            packet.dts -= 1n << BigInt(ptsWrapBits);
            state.wrapCorrectionDone = false; // May wrap again
        }
        // Check PTS for wrap-around
        if (stime2 > stime && packet.pts !== AV_NOPTS_VALUE && packet.pts > wrapThreshold) {
            packet.pts -= 1n << BigInt(ptsWrapBits);
            state.wrapCorrectionDone = false; // May wrap again
        }
    }
1609
/**
 * DTS Prediction and Update.
 *
 * Based on FFmpeg's ist_dts_update().
 *
 * Predicts next expected DTS (in AV_TIME_BASE units) for frame ordering
 * validation and discontinuity detection. Uses codec-specific logic:
 * - Audio: Based on sample_rate and frame_size
 * - Video: Based on packet duration or codec framerate
 *
 * Handles:
 * - First timestamp initialization (including video_delay compensation)
 * - Codec-specific duration calculation
 * - DTS sequence tracking (state.dts / state.nextDts)
 *
 * @param packet - Packet to process
 *
 * @param stream - Stream metadata
 *
 * @internal
 */
dtsPredict(packet, stream) {
    const state = this.getStreamState(packet.streamIndex);
    const par = stream.codecpar;
    // First timestamp seen
    if (!state.sawFirstTs) {
        // For video with avg_frame_rate, account for video_delay:
        // start DTS is shifted back by videoDelay frame durations so that
        // reordered (B-frame) streams begin at a non-negative PTS.
        const avgFrameRate = stream.avgFrameRate;
        if (avgFrameRate && avgFrameRate.num > 0) {
            const frameRateD = Number(avgFrameRate.num) / Number(avgFrameRate.den);
            state.firstDts = state.dts = BigInt(Math.floor((-par.videoDelay * Number(AV_TIME_BASE)) / frameRateD));
        }
        else {
            state.firstDts = state.dts = 0n;
        }
        // Anchor the initial DTS on the first PTS when one is present.
        if (packet.pts !== AV_NOPTS_VALUE) {
            const ptsDts = avRescaleQ(packet.pts, packet.timeBase, AV_TIME_BASE_Q);
            state.firstDts += ptsDts;
            state.dts += ptsDts;
        }
        state.sawFirstTs = true;
    }
    // Initialize next_dts if not set
    if (state.nextDts === AV_NOPTS_VALUE) {
        state.nextDts = state.dts;
    }
    // Update from packet DTS if available (actual timestamps take
    // precedence over the running prediction)
    if (packet.dts !== AV_NOPTS_VALUE) {
        state.nextDts = state.dts = avRescaleQ(packet.dts, packet.timeBase, AV_TIME_BASE_Q);
    }
    state.dts = state.nextDts;
    // Predict next DTS based on codec type
    switch (par.codecType) {
        case AVMEDIA_TYPE_AUDIO:
            // Audio: duration from sample_rate or packet duration.
            // BigInt division truncates, matching FFmpeg's C integer math.
            // NOTE(review): if frameSize is 0 the prediction does not advance
            // — same as FFmpeg, but worth confirming for parsed-only streams.
            if (par.sampleRate > 0) {
                state.nextDts += (BigInt(AV_TIME_BASE) * BigInt(par.frameSize)) / BigInt(par.sampleRate);
            }
            else {
                state.nextDts += avRescaleQ(packet.duration, packet.timeBase, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO: {
            // Video: various methods depending on available metadata
            // Note: FFmpeg has ist->framerate (forced with -r), but we don't support that option
            if (packet.duration > 0n) {
                // Use packet duration
                state.nextDts += avRescaleQ(packet.duration, packet.timeBase, AV_TIME_BASE_Q);
            }
            else if (par.frameRate && par.frameRate.num > 0) {
                // Use codec framerate with field handling: advance by
                // `fields` field durations (field rate = 2 * frame rate)
                const fieldRate = avMulQ(par.frameRate, { num: 2, den: 1 });
                let fields = 2; // Default: 2 fields (progressive or standard interlaced)
                // Check if codec has fields property and parser is available
                const parser = stream.parser;
                if (par.hasProperties(AV_CODEC_PROP_FIELDS) && parser) {
                    // Get repeat_pict from parser for accurate field count
                    // (e.g. telecined content repeats fields)
                    fields = 1 + parser.repeatPict;
                }
                const invFieldRate = avInvQ(fieldRate);
                state.nextDts += avRescaleQ(BigInt(fields), invFieldRate, AV_TIME_BASE_Q);
            }
            break;
        }
    }
}
1695
/**
 * Timestamp Discontinuity Detection.
 *
 * Based on FFmpeg's ts_discontinuity_detect().
 *
 * Detects and corrects timestamp discontinuities in streams.
 * Handles two cases:
 * - Discontinuous formats (MPEG-TS): Apply offset correction
 * - Continuous formats (MP4): Mark timestamps as invalid
 *
 * Handles:
 * - Format-specific discontinuity handling (AVFMT_TS_DISCONT flag)
 * - PTS wrap-around detection for streams with limited timestamp bits
 * - Intra-stream discontinuity detection (against this stream's predicted DTS)
 * - Inter-stream discontinuity detection (against the last DTS of any stream)
 * - Offset accumulation (this.tsOffsetDiscont) and application
 * - copyTs mode with selective correction
 *
 * @param packet - Packet to check for discontinuities (caller guarantees
 *   packet.dts !== AV_NOPTS_VALUE — see timestampDiscontinuityProcess)
 *
 * @param stream - Stream metadata
 *
 * @internal
 */
timestampDiscontinuityDetect(packet, stream) {
    const state = this.getStreamState(packet.streamIndex);
    const inputFormat = this.formatContext.iformat;
    // Check if format declares timestamp discontinuities
    const fmtIsDiscont = !!(inputFormat && inputFormat.flags & AVFMT_TS_DISCONT);
    // Disable correction when copyTs is enabled
    let disableDiscontinuityCorrection = this.options.copyTs;
    // Rescale packet DTS to AV_TIME_BASE for comparison
    // (PASS_MINMAX preserves AV_NOPTS_VALUE/INT64_MAX sentinels)
    const pktDts = avRescaleQRnd(packet.dts, packet.timeBase, AV_TIME_BASE_Q, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    // PTS wrap-around detection
    // Only applies when copyTs is enabled and stream has limited timestamp bits
    if (this.options.copyTs && state.nextDts !== AV_NOPTS_VALUE && fmtIsDiscont && stream.ptsWrapBits < 60) {
        // Calculate wrapped DTS by adding 2^pts_wrap_bits to packet DTS
        const wrapDts = avRescaleQRnd(packet.dts + (1n << BigInt(stream.ptsWrapBits)), packet.timeBase, AV_TIME_BASE_Q, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        // If wrapped DTS is closer to predicted nextDts (by a factor of 10),
        // re-enable correction despite copyTs — the stream genuinely wrapped.
        const wrapDelta = wrapDts > state.nextDts ? wrapDts - state.nextDts : state.nextDts - wrapDts;
        const normalDelta = pktDts > state.nextDts ? pktDts - state.nextDts : state.nextDts - pktDts;
        if (wrapDelta < normalDelta / 10n) {
            disableDiscontinuityCorrection = false;
        }
    }
    // Intra-stream discontinuity detection
    if (state.nextDts !== AV_NOPTS_VALUE && !disableDiscontinuityCorrection) {
        // How far the packet deviates from this stream's predicted DTS
        const delta = pktDts - state.nextDts;
        if (fmtIsDiscont) {
            // Discontinuous format (e.g., MPEG-TS) - apply offset correction.
            // Also corrects when DTS goes backwards past the last DTS by more
            // than 0.1s (the `pktDts + AV_TIME_BASE/10 < state.dts` term).
            const threshold = BigInt(this.options.dtsDeltaThreshold) * BigInt(AV_TIME_BASE);
            if (delta > threshold || delta < -threshold || pktDts + BigInt(AV_TIME_BASE) / 10n < state.dts) {
                // Accumulate global offset so subsequent packets of ALL
                // streams are shifted consistently.
                this.tsOffsetDiscont -= delta;
                // Apply correction to packet
                const deltaInPktTb = avRescaleQ(delta, AV_TIME_BASE_Q, packet.timeBase);
                packet.dts -= deltaInPktTb;
                if (packet.pts !== AV_NOPTS_VALUE) {
                    packet.pts -= deltaInPktTb;
                }
            }
        }
        else {
            // Continuous format (e.g., MP4) - mark invalid timestamps
            // instead of shifting; downstream regenerates them.
            const threshold = BigInt(this.options.dtsErrorThreshold) * BigInt(AV_TIME_BASE);
            // Check DTS
            if (delta > threshold || delta < -threshold) {
                packet.dts = AV_NOPTS_VALUE;
            }
            // Check PTS
            if (packet.pts !== AV_NOPTS_VALUE) {
                const pktPts = avRescaleQ(packet.pts, packet.timeBase, AV_TIME_BASE_Q);
                const ptsDelta = pktPts - state.nextDts;
                if (ptsDelta > threshold || ptsDelta < -threshold) {
                    packet.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }
    else if (state.nextDts === AV_NOPTS_VALUE && !this.options.copyTs && fmtIsDiscont && this.lastTs !== AV_NOPTS_VALUE) {
        // Inter-stream discontinuity detection: no prediction for this stream
        // yet, so compare against the last DTS seen on any stream.
        const delta = pktDts - this.lastTs;
        const threshold = BigInt(this.options.dtsDeltaThreshold) * BigInt(AV_TIME_BASE);
        if (delta > threshold || delta < -threshold) {
            this.tsOffsetDiscont -= delta;
            // Apply correction to packet
            const deltaInPktTb = avRescaleQ(delta, AV_TIME_BASE_Q, packet.timeBase);
            packet.dts -= deltaInPktTb;
            if (packet.pts !== AV_NOPTS_VALUE) {
                packet.pts -= deltaInPktTb;
            }
        }
    }
    // Update last timestamp (uses the possibly-corrected DTS)
    this.lastTs = avRescaleQ(packet.dts, packet.timeBase, AV_TIME_BASE_Q);
}
1790
+ /**
1791
+ * Timestamp Discontinuity Processing - main entry point.
1792
+ *
1793
+ * Based on FFmpeg's ts_discontinuity_process().
1794
+ *
1795
+ * Applies accumulated discontinuity offset and detects new discontinuities.
1796
+ * Must be called for every packet before other timestamp processing.
1797
+ *
1798
+ * Handles:
1799
+ * - Applying previously-detected offset to all streams
1800
+ * - Detecting new discontinuities for audio/video streams
1801
+ *
1802
+ * @param packet - Packet to process
1803
+ *
1804
+ * @param stream - Stream metadata
1805
+ *
1806
+ * @internal
1807
+ */
1808
+ timestampDiscontinuityProcess(packet, stream) {
1809
+ // Apply previously-detected discontinuity offset
1810
+ // This applies to ALL streams, not just audio/video
1811
+ const offset = avRescaleQ(this.tsOffsetDiscont, AV_TIME_BASE_Q, packet.timeBase);
1812
+ if (packet.dts !== AV_NOPTS_VALUE) {
1813
+ packet.dts += offset;
1814
+ }
1815
+ if (packet.pts !== AV_NOPTS_VALUE) {
1816
+ packet.pts += offset;
1817
+ }
1818
+ // Detect new timestamp discontinuities for audio/video
1819
+ const par = stream.codecpar;
1820
+ if ((par.codecType === AVMEDIA_TYPE_VIDEO || par.codecType === AVMEDIA_TYPE_AUDIO) && packet.dts !== AV_NOPTS_VALUE) {
1821
+ this.timestampDiscontinuityDetect(packet, stream);
1822
+ }
1823
+ }
1824
/**
 * Close demuxer and free resources.
 *
 * Releases format context and I/O context.
 * Safe to call multiple times (subsequent calls are no-ops).
 * Automatically called by Symbol.asyncDispose.
 *
 * Direct mapping to avformat_close_input().
 *
 * The teardown order below is load-bearing — do not reorder:
 * 1. Detach pb so native code cannot touch the IOContext after free
 * 2. Close the FormatContext, which unblocks any pending read()
 * 3. Join the demux thread
 * 4. Only then free the IOContext
 *
 * @example
 * ```typescript
 * const input = await Demuxer.open('video.mp4');
 * try {
 *   // Use input
 * } finally {
 *   await input.close();
 * }
 * ```
 *
 * @see {@link Symbol.asyncDispose} For automatic cleanup
 */
async close() {
    if (this.isClosed) {
        return;
    }
    this.isClosed = true;
    // Clear pb reference FIRST to prevent use-after-free
    if (this.ioContext) {
        this.formatContext.pb = null;
    }
    // IMPORTANT: Close FormatContext BEFORE stopping demux thread
    // This interrupts any blocking read() calls in the demux loop
    await this.formatContext.closeInput();
    // Safely stop the demux thread
    await this.stopDemuxThread();
    // NOW we can safely free the IOContext
    if (this.ioContext) {
        this.ioContext.freeContext();
        this.ioContext = undefined;
    }
}
1865
+ /**
1866
+ * Close demuxer and free resources synchronously.
1867
+ * Synchronous version of close.
1868
+ *
1869
+ * Releases format context and I/O context.
1870
+ * Safe to call multiple times.
1871
+ * Automatically called by Symbol.dispose.
1872
+ *
1873
+ * Direct mapping to avformat_close_input().
1874
+ *
1875
+ * @example
1876
+ * ```typescript
1877
+ * const input = Demuxer.openSync('video.mp4');
1878
+ * try {
1879
+ * // Use input
1880
+ * } finally {
1881
+ * input.closeSync();
1882
+ * }
1883
+ * ```
1884
+ *
1885
+ * @see {@link close} For async version
1886
+ */
1887
+ closeSync() {
1888
+ if (this.isClosed) {
1889
+ return;
1890
+ }
1891
+ this.isClosed = true;
1892
+ // IMPORTANT: Clear pb reference FIRST to prevent use-after-free
1893
+ if (this.ioContext) {
1894
+ this.formatContext.pb = null;
1895
+ }
1896
+ // Close FormatContext
1897
+ this.formatContext.closeInputSync();
1898
+ this.demuxThreadActive = false;
1899
+ for (const queue of this.packetQueues.values()) {
1900
+ for (const packet of queue) {
1901
+ packet.free();
1902
+ }
1903
+ queue.length = 0;
1904
+ }
1905
+ this.packetQueues.clear();
1906
+ this.queueResolvers.clear();
1907
+ this.demuxEof = false;
1908
+ // NOW we can safely free the IOContext
1909
+ if (this.ioContext) {
1910
+ this.ioContext.freeContext();
1911
+ this.ioContext = undefined;
1912
+ }
1913
+ }
1914
+ /**
1915
+ * Get underlying format context.
1916
+ *
1917
+ * Returns the internal format context for advanced operations.
1918
+ *
1919
+ * @returns Format context
1920
+ *
1921
+ * @internal
1922
+ */
1923
+ getFormatContext() {
1924
+ return this.formatContext;
1925
+ }
1926
+ /**
1927
+ * Dispose of demuxer.
1928
+ *
1929
+ * Implements AsyncDisposable interface for automatic cleanup.
1930
+ * Equivalent to calling close().
1931
+ *
1932
+ * @example
1933
+ * ```typescript
1934
+ * {
1935
+ * await using input = await Demuxer.open('video.mp4');
1936
+ * // Process media...
1937
+ * } // Automatically closed
1938
+ * ```
1939
+ *
1940
+ * @see {@link close} For manual cleanup
1941
+ */
1942
+ async [Symbol.asyncDispose]() {
1943
+ await this.close();
1944
+ }
1945
+ /**
1946
+ * Dispose of demuxer synchronously.
1947
+ *
1948
+ * Implements Disposable interface for automatic cleanup.
1949
+ * Equivalent to calling closeSync().
1950
+ *
1951
+ * @example
1952
+ * ```typescript
1953
+ * {
1954
+ * using input = Demuxer.openSync('video.mp4');
1955
+ * // Process media...
1956
+ * } // Automatically closed
1957
+ * ```
1958
+ *
1959
+ * @see {@link closeSync} For manual cleanup
1960
+ */
1961
+ [Symbol.dispose]() {
1962
+ this.closeSync();
1963
+ }
1964
+ }
1965
+ //# sourceMappingURL=demuxer.js.map