node-av 3.1.3 → 5.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (192)
  1. package/README.md +88 -52
  2. package/binding.gyp +23 -11
  3. package/dist/api/audio-frame-buffer.d.ts +201 -0
  4. package/dist/api/audio-frame-buffer.js +275 -0
  5. package/dist/api/audio-frame-buffer.js.map +1 -0
  6. package/dist/api/bitstream-filter.d.ts +320 -78
  7. package/dist/api/bitstream-filter.js +684 -151
  8. package/dist/api/bitstream-filter.js.map +1 -1
  9. package/dist/api/constants.d.ts +44 -0
  10. package/dist/api/constants.js +45 -0
  11. package/dist/api/constants.js.map +1 -0
  12. package/dist/api/data/test_av1.ivf +0 -0
  13. package/dist/api/data/test_mjpeg.mjpeg +0 -0
  14. package/dist/api/data/test_vp8.ivf +0 -0
  15. package/dist/api/data/test_vp9.ivf +0 -0
  16. package/dist/api/decoder.d.ts +454 -77
  17. package/dist/api/decoder.js +1081 -271
  18. package/dist/api/decoder.js.map +1 -1
  19. package/dist/api/{media-input.d.ts → demuxer.d.ts} +295 -45
  20. package/dist/api/demuxer.js +1965 -0
  21. package/dist/api/demuxer.js.map +1 -0
  22. package/dist/api/encoder.d.ts +423 -132
  23. package/dist/api/encoder.js +1089 -240
  24. package/dist/api/encoder.js.map +1 -1
  25. package/dist/api/filter-complex.d.ts +769 -0
  26. package/dist/api/filter-complex.js +1596 -0
  27. package/dist/api/filter-complex.js.map +1 -0
  28. package/dist/api/filter-presets.d.ts +80 -5
  29. package/dist/api/filter-presets.js +117 -7
  30. package/dist/api/filter-presets.js.map +1 -1
  31. package/dist/api/filter.d.ts +561 -125
  32. package/dist/api/filter.js +1083 -274
  33. package/dist/api/filter.js.map +1 -1
  34. package/dist/api/{fmp4.d.ts → fmp4-stream.d.ts} +141 -140
  35. package/dist/api/fmp4-stream.js +539 -0
  36. package/dist/api/fmp4-stream.js.map +1 -0
  37. package/dist/api/hardware.d.ts +58 -6
  38. package/dist/api/hardware.js +127 -11
  39. package/dist/api/hardware.js.map +1 -1
  40. package/dist/api/index.d.ts +8 -4
  41. package/dist/api/index.js +17 -8
  42. package/dist/api/index.js.map +1 -1
  43. package/dist/api/io-stream.d.ts +6 -6
  44. package/dist/api/io-stream.js +5 -4
  45. package/dist/api/io-stream.js.map +1 -1
  46. package/dist/api/{media-output.d.ts → muxer.d.ts} +280 -66
  47. package/dist/api/muxer.js +1934 -0
  48. package/dist/api/muxer.js.map +1 -0
  49. package/dist/api/pipeline.d.ts +77 -29
  50. package/dist/api/pipeline.js +449 -439
  51. package/dist/api/pipeline.js.map +1 -1
  52. package/dist/api/rtp-stream.d.ts +312 -0
  53. package/dist/api/rtp-stream.js +630 -0
  54. package/dist/api/rtp-stream.js.map +1 -0
  55. package/dist/api/types.d.ts +533 -56
  56. package/dist/api/utilities/async-queue.d.ts +91 -0
  57. package/dist/api/utilities/async-queue.js +162 -0
  58. package/dist/api/utilities/async-queue.js.map +1 -0
  59. package/dist/api/utilities/audio-sample.d.ts +11 -1
  60. package/dist/api/utilities/audio-sample.js +10 -0
  61. package/dist/api/utilities/audio-sample.js.map +1 -1
  62. package/dist/api/utilities/channel-layout.d.ts +1 -0
  63. package/dist/api/utilities/channel-layout.js +1 -0
  64. package/dist/api/utilities/channel-layout.js.map +1 -1
  65. package/dist/api/utilities/image.d.ts +39 -1
  66. package/dist/api/utilities/image.js +38 -0
  67. package/dist/api/utilities/image.js.map +1 -1
  68. package/dist/api/utilities/index.d.ts +3 -0
  69. package/dist/api/utilities/index.js +6 -0
  70. package/dist/api/utilities/index.js.map +1 -1
  71. package/dist/api/utilities/media-type.d.ts +2 -1
  72. package/dist/api/utilities/media-type.js +1 -0
  73. package/dist/api/utilities/media-type.js.map +1 -1
  74. package/dist/api/utilities/pixel-format.d.ts +4 -1
  75. package/dist/api/utilities/pixel-format.js +3 -0
  76. package/dist/api/utilities/pixel-format.js.map +1 -1
  77. package/dist/api/utilities/sample-format.d.ts +6 -1
  78. package/dist/api/utilities/sample-format.js +5 -0
  79. package/dist/api/utilities/sample-format.js.map +1 -1
  80. package/dist/api/utilities/scheduler.d.ts +138 -0
  81. package/dist/api/utilities/scheduler.js +98 -0
  82. package/dist/api/utilities/scheduler.js.map +1 -0
  83. package/dist/api/utilities/streaming.d.ts +105 -15
  84. package/dist/api/utilities/streaming.js +201 -12
  85. package/dist/api/utilities/streaming.js.map +1 -1
  86. package/dist/api/utilities/timestamp.d.ts +15 -1
  87. package/dist/api/utilities/timestamp.js +14 -0
  88. package/dist/api/utilities/timestamp.js.map +1 -1
  89. package/dist/api/utilities/whisper-model.d.ts +310 -0
  90. package/dist/api/utilities/whisper-model.js +528 -0
  91. package/dist/api/utilities/whisper-model.js.map +1 -0
  92. package/dist/api/webrtc-stream.d.ts +288 -0
  93. package/dist/api/webrtc-stream.js +440 -0
  94. package/dist/api/webrtc-stream.js.map +1 -0
  95. package/dist/api/whisper.d.ts +324 -0
  96. package/dist/api/whisper.js +362 -0
  97. package/dist/api/whisper.js.map +1 -0
  98. package/dist/constants/constants.d.ts +54 -2
  99. package/dist/constants/constants.js +48 -1
  100. package/dist/constants/constants.js.map +1 -1
  101. package/dist/constants/encoders.d.ts +2 -1
  102. package/dist/constants/encoders.js +4 -3
  103. package/dist/constants/encoders.js.map +1 -1
  104. package/dist/constants/hardware.d.ts +26 -0
  105. package/dist/constants/hardware.js +27 -0
  106. package/dist/constants/hardware.js.map +1 -0
  107. package/dist/constants/index.d.ts +1 -0
  108. package/dist/constants/index.js +1 -0
  109. package/dist/constants/index.js.map +1 -1
  110. package/dist/ffmpeg/index.d.ts +3 -3
  111. package/dist/ffmpeg/index.js +3 -3
  112. package/dist/ffmpeg/utils.d.ts +27 -0
  113. package/dist/ffmpeg/utils.js +28 -16
  114. package/dist/ffmpeg/utils.js.map +1 -1
  115. package/dist/lib/binding.d.ts +22 -11
  116. package/dist/lib/binding.js.map +1 -1
  117. package/dist/lib/codec-context.d.ts +87 -0
  118. package/dist/lib/codec-context.js +125 -4
  119. package/dist/lib/codec-context.js.map +1 -1
  120. package/dist/lib/codec-parameters.d.ts +229 -1
  121. package/dist/lib/codec-parameters.js +264 -0
  122. package/dist/lib/codec-parameters.js.map +1 -1
  123. package/dist/lib/codec-parser.d.ts +23 -0
  124. package/dist/lib/codec-parser.js +25 -0
  125. package/dist/lib/codec-parser.js.map +1 -1
  126. package/dist/lib/codec.d.ts +26 -4
  127. package/dist/lib/codec.js +35 -0
  128. package/dist/lib/codec.js.map +1 -1
  129. package/dist/lib/dictionary.js +1 -0
  130. package/dist/lib/dictionary.js.map +1 -1
  131. package/dist/lib/error.js +1 -1
  132. package/dist/lib/error.js.map +1 -1
  133. package/dist/lib/fifo.d.ts +416 -0
  134. package/dist/lib/fifo.js +453 -0
  135. package/dist/lib/fifo.js.map +1 -0
  136. package/dist/lib/filter-context.d.ts +52 -11
  137. package/dist/lib/filter-context.js +56 -12
  138. package/dist/lib/filter-context.js.map +1 -1
  139. package/dist/lib/filter-graph.d.ts +9 -0
  140. package/dist/lib/filter-graph.js +13 -0
  141. package/dist/lib/filter-graph.js.map +1 -1
  142. package/dist/lib/filter.d.ts +21 -0
  143. package/dist/lib/filter.js +28 -0
  144. package/dist/lib/filter.js.map +1 -1
  145. package/dist/lib/format-context.d.ts +48 -14
  146. package/dist/lib/format-context.js +76 -7
  147. package/dist/lib/format-context.js.map +1 -1
  148. package/dist/lib/frame.d.ts +264 -1
  149. package/dist/lib/frame.js +351 -1
  150. package/dist/lib/frame.js.map +1 -1
  151. package/dist/lib/hardware-device-context.d.ts +3 -2
  152. package/dist/lib/hardware-device-context.js.map +1 -1
  153. package/dist/lib/index.d.ts +2 -0
  154. package/dist/lib/index.js +4 -0
  155. package/dist/lib/index.js.map +1 -1
  156. package/dist/lib/input-format.d.ts +21 -0
  157. package/dist/lib/input-format.js +42 -2
  158. package/dist/lib/input-format.js.map +1 -1
  159. package/dist/lib/native-types.d.ts +76 -27
  160. package/dist/lib/option.d.ts +25 -13
  161. package/dist/lib/option.js +28 -0
  162. package/dist/lib/option.js.map +1 -1
  163. package/dist/lib/output-format.d.ts +22 -1
  164. package/dist/lib/output-format.js +28 -0
  165. package/dist/lib/output-format.js.map +1 -1
  166. package/dist/lib/packet.d.ts +35 -0
  167. package/dist/lib/packet.js +52 -2
  168. package/dist/lib/packet.js.map +1 -1
  169. package/dist/lib/rational.d.ts +18 -0
  170. package/dist/lib/rational.js +19 -0
  171. package/dist/lib/rational.js.map +1 -1
  172. package/dist/lib/stream.d.ts +126 -0
  173. package/dist/lib/stream.js +188 -5
  174. package/dist/lib/stream.js.map +1 -1
  175. package/dist/lib/sync-queue.d.ts +179 -0
  176. package/dist/lib/sync-queue.js +197 -0
  177. package/dist/lib/sync-queue.js.map +1 -0
  178. package/dist/lib/types.d.ts +49 -1
  179. package/dist/lib/utilities.d.ts +281 -53
  180. package/dist/lib/utilities.js +298 -55
  181. package/dist/lib/utilities.js.map +1 -1
  182. package/install/check.js +2 -2
  183. package/package.json +37 -26
  184. package/dist/api/fmp4.js +0 -710
  185. package/dist/api/fmp4.js.map +0 -1
  186. package/dist/api/media-input.js +0 -1075
  187. package/dist/api/media-input.js.map +0 -1
  188. package/dist/api/media-output.js +0 -1040
  189. package/dist/api/media-output.js.map +0 -1
  190. package/dist/api/webrtc.d.ts +0 -664
  191. package/dist/api/webrtc.js +0 -1132
  192. package/dist/api/webrtc.js.map +0 -1
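The diff below covers package/dist/api/decoder.js, the largest of the rewritten API modules. As the file list above shows, media-input/media-output were renamed to demuxer/muxer, fmp4 became fmp4-stream, and the old webrtc module was replaced by webrtc-stream. A minimal migration sketch for the rename that the decoder documentation relies on (the Demuxer calls are taken from the updated doc comments further down; Muxer is assumed to follow the same pattern, based only on the file rename):

```typescript
// node-av 5.x: MediaInput -> Demuxer (MediaOutput -> Muxer per the file rename above)
import { Demuxer, Decoder } from 'node-av/api';

await using input = await Demuxer.open('video.mp4'); // was: MediaInput.open('video.mp4')
using decoder = await Decoder.create(input.video());
```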
@@ -1,5 +1,67 @@
- import { AVERROR_EAGAIN, AVERROR_EOF } from '../constants/constants.js';
- import { Codec, CodecContext, Dictionary, FFmpegError, Frame } from '../lib/index.js';
+ var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
+ if (value !== null && value !== void 0) {
+ if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
+ var dispose, inner;
+ if (async) {
+ if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
+ dispose = value[Symbol.asyncDispose];
+ }
+ if (dispose === void 0) {
+ if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
+ dispose = value[Symbol.dispose];
+ if (async) inner = dispose;
+ }
+ if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
+ if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
+ env.stack.push({ value: value, dispose: dispose, async: async });
+ }
+ else if (async) {
+ env.stack.push({ async: true });
+ }
+ return value;
+ };
+ var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
+ return function (env) {
+ function fail(e) {
+ env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
+ env.hasError = true;
+ }
+ var r, s = 0;
+ function next() {
+ while (r = env.stack.pop()) {
+ try {
+ if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
+ if (r.dispose) {
+ var result = r.dispose.call(r.value);
+ if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
+ }
+ else s |= 1;
+ }
+ catch (e) {
+ fail(e);
+ }
+ }
+ if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
+ if (env.hasError) throw env.error;
+ }
+ return next();
+ };
+ })(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+ var e = new Error(message);
+ return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+ });
+ import { AV_CODEC_FLAG_COPY_OPAQUE, AV_FRAME_FLAG_CORRUPT, AV_NOPTS_VALUE, AV_ROUND_UP, AVERROR_EAGAIN, AVERROR_EOF, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO, EOF, INT_MAX, } from '../constants/constants.js';
+ import { CodecContext } from '../lib/codec-context.js';
+ import { Codec } from '../lib/codec.js';
+ import { Dictionary } from '../lib/dictionary.js';
+ import { FFmpegError } from '../lib/error.js';
+ import { Frame } from '../lib/frame.js';
+ import { Packet } from '../lib/packet.js';
+ import { Rational } from '../lib/rational.js';
+ import { avGcd, avInvQ, avMulQ, avRescaleDelta, avRescaleQ, avRescaleQRnd } from '../lib/utilities.js';
+ import { FRAME_THREAD_QUEUE_SIZE, PACKET_THREAD_QUEUE_SIZE } from './constants.js';
+ import { AsyncQueue } from './utilities/async-queue.js';
+ import { Scheduler } from './utilities/scheduler.js';
  /**
  * High-level decoder for audio and video streams.
  *
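The new import list pulls in the EOF sentinel plus Packet, Rational, and the av* rescaling helpers used by the reworked timestamp handling further down. For orientation before the hunks that document it, a minimal sketch of the 5.x send/receive contract (decode() only sends; receive() yields a frame, null when more input is needed, or EOF once drained), assuming the EOF sentinel is re-exported on the package's public constants entry point as it is imported here:

```typescript
import { EOF } from 'node-av/constants'; // assumption: public re-export of the sentinel used below

// decoder and packet come from the surrounding examples (Decoder.create / Demuxer.packets)
await decoder.decode(packet);            // send only - no frame is returned in 5.x
while (true) {
  const frame = await decoder.receive(); // Frame | null (need more data) | EOF (fully drained)
  if (!frame || frame === EOF) break;
  console.log(`pts=${frame.pts}`);
  frame.free();
}
```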
@@ -10,10 +72,10 @@ import { Codec, CodecContext, Dictionary, FFmpegError, Frame } from '../lib/inde
  *
  * @example
  * ```typescript
- * import { MediaInput, Decoder } from 'node-av/api';
+ * import { Demuxer, Decoder } from 'node-av/api';
  *
  * // Open media and create decoder
- * await using input = await MediaInput.open('video.mp4');
+ * await using input = await Demuxer.open('video.mp4');
  * using decoder = await Decoder.create(input.video());
  *
  * // Decode frames
@@ -39,7 +101,7 @@ import { Codec, CodecContext, Dictionary, FFmpegError, Frame } from '../lib/inde
  * ```
  *
  * @see {@link Encoder} For encoding frames to packets
- * @see {@link MediaInput} For reading media files
+ * @see {@link Demuxer} For reading media files
  * @see {@link HardwareContext} For GPU acceleration
  */
  export class Decoder {
@@ -50,6 +112,19 @@ export class Decoder {
  initialized = true;
  isClosed = false;
  options;
+ // Frame tracking for PTS/duration estimation
+ lastFramePts = AV_NOPTS_VALUE;
+ lastFrameDurationEst = 0n;
+ lastFrameTb;
+ // Audio-specific frame tracking
+ lastFrameSampleRate = 0;
+ lastFilterInRescaleDelta = AV_NOPTS_VALUE;
+ // Worker pattern for push-based processing
+ inputQueue;
+ outputQueue;
+ workerPromise = null;
+ nextComponent = null;
+ pipeToPromise = null;
  /**
  * @param codecContext - Configured codec context
  *
@@ -70,71 +145,67 @@ export class Decoder {
  this.options = options;
  this.frame = new Frame();
  this.frame.alloc();
+ this.lastFrameTb = new Rational(0, 1);
+ this.inputQueue = new AsyncQueue(PACKET_THREAD_QUEUE_SIZE);
+ this.outputQueue = new AsyncQueue(FRAME_THREAD_QUEUE_SIZE);
  }
- /**
- * Create a decoder for a media stream.
- *
- * Initializes a decoder with the appropriate codec and configuration.
- * Automatically detects and configures hardware acceleration if provided.
- * Applies custom codec options and threading configuration.
- *
- * @param stream - Media stream to decode
- *
- * @param options - Decoder configuration options
- *
- * @returns Configured decoder instance
- *
- * @throws {Error} If decoder not found for codec
- *
- * @throws {FFmpegError} If codec initialization fails
- *
- * @example
- * ```typescript
- * import { MediaInput, Decoder } from 'node-av/api';
- *
- * await using input = await MediaInput.open('video.mp4');
- * using decoder = await Decoder.create(input.video());
- * ```
- *
- * @example
- * ```typescript
- * using decoder = await Decoder.create(stream, {
- * threads: 4,
- * options: {
- * 'refcounted_frames': '1',
- * 'skip_frame': 'nonkey' // Only decode keyframes
- * }
- * });
- * ```
- *
- * @example
- * ```typescript
- * const hw = HardwareContext.auto();
- * using decoder = await Decoder.create(stream, {
- * hardware: hw,
- * threads: 0 // Auto-detect thread count
- * exitOnError: false // Continue on decode errors (default: true)
- * });
- * ```
- *
- * @see {@link HardwareContext} For GPU acceleration setup
- * @see {@link DecoderOptions} For configuration options
- */
- static async create(stream, options = {}) {
+ static async create(stream, optionsOrCodec, maybeOptions) {
+ // Parse arguments
+ let options = {};
+ let explicitCodec;
+ if (optionsOrCodec !== undefined) {
+ // Check if first argument is a codec or options
+ if (typeof optionsOrCodec === 'string' || // FFDecoderCodec
+ typeof optionsOrCodec === 'number' || // AVCodecID
+ optionsOrCodec instanceof Codec // Codec instance
+ ) {
+ // First argument is a codec
+ explicitCodec = optionsOrCodec;
+ options = maybeOptions ?? {};
+ }
+ else {
+ // First argument is options
+ options = optionsOrCodec;
+ }
+ }
  let codec = null;
- // If hardware acceleration requested, try to find hardware decoder first
- if (options.hardware) {
- codec = options.hardware.getDecoderCodec(stream.codecpar.codecId);
- if (!codec) {
- // No hardware decoder available, fall back to software
- options.hardware = undefined;
+ // If explicit codec provided, use it
+ if (explicitCodec !== undefined) {
+ if (typeof explicitCodec === 'object' && 'id' in explicitCodec) {
+ // Already a Codec instance
+ codec = explicitCodec;
+ }
+ else if (typeof explicitCodec === 'string') {
+ // FFDecoderCodec string
+ codec = Codec.findDecoderByName(explicitCodec);
+ if (!codec) {
+ throw new Error(`Decoder '${explicitCodec}' not found`);
+ }
+ }
+ else {
+ // AVCodecID number
+ codec = Codec.findDecoder(explicitCodec);
+ if (!codec) {
+ throw new Error(`Decoder not found for codec ID ${explicitCodec}`);
+ }
  }
  }
- // If no hardware decoder or no hardware requested, use software decoder
- if (!codec) {
- codec = Codec.findDecoder(stream.codecpar.codecId);
+ else {
+ // No explicit codec - use auto-detection logic
+ // If hardware acceleration requested, try to find hardware decoder first
+ if (options.hardware) {
+ codec = options.hardware.getDecoderCodec(stream.codecpar.codecId);
+ if (!codec) {
+ // No hardware decoder available, fall back to software
+ options.hardware = undefined;
+ }
+ }
+ // If no hardware decoder or no hardware requested, use software decoder
  if (!codec) {
- throw new Error(`Decoder not found for codec ${stream.codecpar.codecId}`);
+ codec = Codec.findDecoder(stream.codecpar.codecId);
+ if (!codec) {
+ throw new Error(`Decoder not found for codec ${stream.codecpar.codecId}`);
+ }
  }
  }
  // Allocate and configure codec context
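The rewritten create() above adds an overload: the second argument may now be an explicit codec (an FFDecoderCodec name string, an AVCodecID number, or a Codec instance), with the options object moving to the third position. A hedged sketch of the new call shapes, using only names visible in the code above ('h264' and 'hevc' are assumed to be valid decoder names in the linked FFmpeg build, and Codec is assumed to be re-exported from the package root):

```typescript
import { Codec } from 'node-av';                 // assumption: Codec re-exported at the root
import { Demuxer, Decoder } from 'node-av/api';

await using input = await Demuxer.open('video.mp4');

// 1. Auto-detect from the stream's codec parameters (unchanged from 3.x)
using auto = await Decoder.create(input.video());

// 2. Pin a decoder by FFmpeg name; options move to the third argument
using byName = await Decoder.create(input.video(), 'h264', { exitOnError: false });

// 3. Pass a pre-resolved Codec instance (create() uses Codec.findDecoderByName internally for strings)
const hevc = Codec.findDecoderByName('hevc');
if (hevc) {
  using byCodec = await Decoder.create(input.video(), hevc);
}
```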
@@ -148,16 +219,14 @@ export class Decoder {
148
219
  }
149
220
  // Set packet time base
150
221
  codecContext.pktTimebase = stream.timeBase;
151
- // Apply options
152
- if (options.threads !== undefined) {
153
- codecContext.threadCount = options.threads;
154
- }
155
222
  // Check if this decoder supports hardware acceleration
156
223
  // Only apply hardware acceleration if the decoder supports it
157
224
  // Silently ignore hardware for software decoders
158
225
  const isHWDecoder = codec.isHardwareAcceleratedDecoder();
159
226
  if (isHWDecoder && options.hardware) {
160
227
  codecContext.hwDeviceCtx = options.hardware.deviceContext;
228
+ // Set hardware pixel format
229
+ codecContext.setHardwarePixelFormat(options.hardware.devicePixelFormat);
161
230
  // Set extra_hw_frames if specified
162
231
  if (options.extraHWFrames !== undefined && options.extraHWFrames > 0) {
163
232
  codecContext.extraHWFrames = options.extraHWFrames;
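This hunk drops the old threads handling and, for hardware decoders, now forwards the device's pixel format via setHardwarePixelFormat() alongside hwDeviceCtx. A short sketch of requesting hardware decoding with an enlarged frame pool, mirroring the HardwareContext.auto() example from the removed 3.x doc comment (the hardware and extraHWFrames option names come from the code above; HardwareContext is assumed to be exported from node-av/api):

```typescript
import { Demuxer, Decoder, HardwareContext } from 'node-av/api'; // assumption: HardwareContext exported here

await using input = await Demuxer.open('video.mp4');
const hw = HardwareContext.auto();              // pick an available HW device, as in the 3.x docs
using decoder = await Decoder.create(input.video(), {
  hardware: hw,                                 // silently ignored for software-only decoders
  extraHWFrames: 8,                             // extra frames reserved in the hardware pool
});
```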
@@ -167,6 +236,8 @@ export class Decoder {
167
236
  options.hardware = undefined;
168
237
  }
169
238
  options.exitOnError = options.exitOnError ?? true;
239
+ // Enable COPY_OPAQUE flag to copy packet.opaque to frame.opaque
240
+ codecContext.setFlags(AV_CODEC_FLAG_COPY_OPAQUE);
170
241
  const opts = options.options ? Dictionary.fromObject(options.options) : undefined;
171
242
  // Open codec
172
243
  const openRet = await codecContext.open2(codec, opts);
@@ -174,71 +245,76 @@ export class Decoder {
174
245
  codecContext.freeContext();
175
246
  FFmpegError.throwIfError(openRet, 'Failed to open codec');
176
247
  }
248
+ // Adjust extra_hw_frames for queuing
249
+ // This is done AFTER open2 because the decoder validates extra_hw_frames during open
250
+ if (isHWDecoder && options.hardware) {
251
+ const currentExtraFrames = codecContext.extraHWFrames;
252
+ if (currentExtraFrames >= 0) {
253
+ codecContext.extraHWFrames = currentExtraFrames + FRAME_THREAD_QUEUE_SIZE;
254
+ }
255
+ else {
256
+ codecContext.extraHWFrames = 1;
257
+ }
258
+ }
177
259
  return new Decoder(codecContext, codec, stream, options);
178
260
  }
179
- /**
180
- * Create a decoder for a media stream synchronously.
181
- * Synchronous version of create.
182
- *
183
- * Initializes a decoder with the appropriate codec and configuration.
184
- * Automatically detects and configures hardware acceleration if provided.
185
- * Applies custom codec options and threading configuration.
186
- *
187
- * @param stream - Media stream to decode
188
- *
189
- * @param options - Decoder configuration options
190
- *
191
- * @returns Configured decoder instance
192
- *
193
- * @throws {Error} If decoder not found for codec
194
- *
195
- * @throws {FFmpegError} If codec initialization fails
196
- *
197
- * @example
198
- * ```typescript
199
- * import { MediaInput, Decoder } from 'node-av/api';
200
- *
201
- * await using input = await MediaInput.open('video.mp4');
202
- * using decoder = await Decoder.create(input.video());
203
- * ```
204
- *
205
- * @example
206
- * ```typescript
207
- * using decoder = await Decoder.create(stream, {
208
- * threads: 4,
209
- * options: {
210
- * 'refcounted_frames': '1',
211
- * 'skip_frame': 'nonkey' // Only decode keyframes
212
- * }
213
- * });
214
- * ```
215
- *
216
- * @example
217
- * ```typescript
218
- * const hw = HardwareContext.auto();
219
- * using decoder = await Decoder.create(stream, {
220
- * hardware: hw,
221
- * threads: 0 // Auto-detect thread count
222
- * });
223
- * ```
224
- *
225
- * @see {@link create} For async version
226
- */
227
- static createSync(stream, options = {}) {
261
+ static createSync(stream, optionsOrCodec, maybeOptions) {
262
+ // Parse arguments
263
+ let options = {};
264
+ let explicitCodec;
265
+ if (optionsOrCodec !== undefined) {
266
+ // Check if first argument is a codec or options
267
+ if (typeof optionsOrCodec === 'string' || // FFDecoderCodec
268
+ typeof optionsOrCodec === 'number' || // AVCodecID
269
+ optionsOrCodec instanceof Codec // Codec instance
270
+ ) {
271
+ // First argument is a codec
272
+ explicitCodec = optionsOrCodec;
273
+ options = maybeOptions ?? {};
274
+ }
275
+ else {
276
+ // First argument is options
277
+ options = optionsOrCodec;
278
+ }
279
+ }
228
280
  let codec = null;
229
- // If hardware acceleration requested, try to find hardware decoder first
230
- if (options.hardware) {
231
- codec = options.hardware.getDecoderCodec(stream.codecpar.codecId);
232
- if (!codec) {
233
- // No hardware decoder available, fall back to software
234
- options.hardware = undefined;
281
+ // If explicit codec provided, use it
282
+ if (explicitCodec !== undefined) {
283
+ if (typeof explicitCodec === 'object' && 'id' in explicitCodec) {
284
+ // Already a Codec instance
285
+ codec = explicitCodec;
286
+ }
287
+ else if (typeof explicitCodec === 'string') {
288
+ // FFDecoderCodec string
289
+ codec = Codec.findDecoderByName(explicitCodec);
290
+ if (!codec) {
291
+ throw new Error(`Decoder '${explicitCodec}' not found`);
292
+ }
293
+ }
294
+ else {
295
+ // AVCodecID number
296
+ codec = Codec.findDecoder(explicitCodec);
297
+ if (!codec) {
298
+ throw new Error(`Decoder not found for codec ID ${explicitCodec}`);
299
+ }
235
300
  }
236
301
  }
237
- // If no hardware decoder or no hardware requested, use software decoder
238
- if (!codec) {
239
- codec = Codec.findDecoder(stream.codecpar.codecId);
302
+ else {
303
+ // No explicit codec - use auto-detection logic
304
+ // If hardware acceleration requested, try to find hardware decoder first
305
+ if (options.hardware) {
306
+ codec = options.hardware.getDecoderCodec(stream.codecpar.codecId);
307
+ if (!codec) {
308
+ // No hardware decoder available, fall back to software
309
+ options.hardware = undefined;
310
+ }
311
+ }
312
+ // If no hardware decoder or no hardware requested, use software decoder
240
313
  if (!codec) {
241
- throw new Error(`Decoder not found for codec ${stream.codecpar.codecId}`);
314
+ codec = Codec.findDecoder(stream.codecpar.codecId);
315
+ if (!codec) {
316
+ throw new Error(`Decoder not found for codec ${stream.codecpar.codecId}`);
317
+ }
242
318
  }
243
319
  }
244
320
  // Allocate and configure codec context
@@ -252,16 +328,14 @@ export class Decoder {
252
328
  }
253
329
  // Set packet time base
254
330
  codecContext.pktTimebase = stream.timeBase;
255
- // Apply options
256
- if (options.threads !== undefined) {
257
- codecContext.threadCount = options.threads;
258
- }
259
331
  // Check if this decoder supports hardware acceleration
260
332
  // Only apply hardware acceleration if the decoder supports it
261
333
  // Silently ignore hardware for software decoders
262
334
  const isHWDecoder = codec.isHardwareAcceleratedDecoder();
263
335
  if (isHWDecoder && options.hardware) {
264
336
  codecContext.hwDeviceCtx = options.hardware.deviceContext;
337
+ // Set hardware pixel format and get_format callback
338
+ codecContext.setHardwarePixelFormat(options.hardware.devicePixelFormat);
265
339
  // Set extra_hw_frames if specified
266
340
  if (options.extraHWFrames !== undefined && options.extraHWFrames > 0) {
267
341
  codecContext.extraHWFrames = options.extraHWFrames;
@@ -270,6 +344,9 @@ export class Decoder {
270
344
  else {
271
345
  options.hardware = undefined;
272
346
  }
347
+ options.exitOnError = options.exitOnError ?? true;
348
+ // Enable COPY_OPAQUE flag to copy packet.opaque to frame.opaque
349
+ // codecContext.setFlags(AV_CODEC_FLAG_COPY_OPAQUE);
273
350
  const opts = options.options ? Dictionary.fromObject(options.options) : undefined;
274
351
  // Open codec synchronously
275
352
  const openRet = codecContext.open2Sync(codec, opts);
@@ -277,6 +354,17 @@ export class Decoder {
277
354
  codecContext.freeContext();
278
355
  FFmpegError.throwIfError(openRet, 'Failed to open codec');
279
356
  }
357
+ // Adjust extra_hw_frames for queuing
358
+ // This is done AFTER open2 because the decoder validates extra_hw_frames during open
359
+ if (isHWDecoder && options.hardware) {
360
+ const currentExtraFrames = codecContext.extraHWFrames;
361
+ if (currentExtraFrames >= 0) {
362
+ codecContext.extraHWFrames = currentExtraFrames + FRAME_THREAD_QUEUE_SIZE;
363
+ }
364
+ else {
365
+ codecContext.extraHWFrames = 1;
366
+ }
367
+ }
280
368
  return new Decoder(codecContext, codec, stream, options);
281
369
  }
282
370
  /**
@@ -345,24 +433,31 @@ export class Decoder {
345
433
  return this.initialized && !this.isClosed;
346
434
  }
347
435
  /**
348
- * Decode a packet to a frame.
436
+ * Send a packet to the decoder.
349
437
  *
350
- * Sends a packet to the decoder and attempts to receive a decoded frame.
351
- * Handles internal buffering - may return null if more packets needed.
438
+ * Sends a compressed packet to the decoder for decoding.
439
+ * Does not return decoded frames - use {@link receive} to retrieve frames.
440
+ * A single packet can produce zero, one, or multiple frames depending on codec buffering.
352
441
  * Automatically manages decoder state and error recovery.
353
442
  *
354
- * Direct mapping to avcodec_send_packet() and avcodec_receive_frame().
443
+ * **Important**: This method only SENDS the packet to the decoder.
444
+ * You must call {@link receive} separately (potentially multiple times) to get decoded frames.
355
445
  *
356
- * @param packet - Compressed packet to decode
446
+ * Direct mapping to avcodec_send_packet().
357
447
  *
358
- * @returns Decoded frame or null if more data needed or decoder is closed
448
+ * @param packet - Compressed packet to send to decoder
359
449
  *
360
- * @throws {FFmpegError} If decoding fails
450
+ * @throws {FFmpegError} If sending packet fails
361
451
  *
362
452
  * @example
363
453
  * ```typescript
364
- * const frame = await decoder.decode(packet);
365
- * if (frame) {
454
+ * // Send packet and receive frames
455
+ * await decoder.decode(packet);
456
+ *
457
+ * // Receive all available frames
458
+ * while (true) {
459
+ * const frame = await decoder.receive();
460
+ * if (!frame) break;
366
461
  * console.log(`Decoded frame with PTS: ${frame.pts}`);
367
462
  * frame.free();
368
463
  * }
@@ -372,8 +467,12 @@ export class Decoder {
372
467
  * ```typescript
373
468
  * for await (const packet of input.packets()) {
374
469
  * if (packet.streamIndex === decoder.getStream().index) {
375
- * const frame = await decoder.decode(packet);
376
- * if (frame) {
470
+ * // Send packet
471
+ * await decoder.decode(packet);
472
+ *
473
+ * // Receive available frames
474
+ * let frame;
475
+ * while ((frame = await decoder.receive())) {
377
476
  * await processFrame(frame);
378
477
  * frame.free();
379
478
  * }
@@ -382,86 +481,250 @@ export class Decoder {
382
481
  * }
383
482
  * ```
384
483
  *
484
+ * @see {@link receive} For receiving decoded frames
485
+ * @see {@link decodeAll} For combined send+receive operation
385
486
  * @see {@link frames} For automatic packet iteration
386
487
  * @see {@link flush} For end-of-stream handling
488
+ * @see {@link decodeSync} For synchronous version
387
489
  */
388
490
  async decode(packet) {
389
491
  if (this.isClosed) {
390
- return null;
492
+ return;
493
+ }
494
+ if (packet.streamIndex !== this.stream.index) {
495
+ return;
496
+ }
497
+ // Skip 0-sized packets
498
+ if (packet.size === 0) {
499
+ return;
391
500
  }
392
501
  // Send packet to decoder
393
502
  const sendRet = await this.codecContext.sendPacket(packet);
503
+ // EAGAIN during send_packet is a decoder bug (FFmpeg treats this as AVERROR_BUG)
504
+ // We read all decoded frames with receive() until done, so decoder should never be full
505
+ if (sendRet === AVERROR_EAGAIN) {
506
+ throw new Error('Decoder returned EAGAIN on send - this is a decoder bug');
507
+ }
508
+ // Handle send errors
394
509
  if (sendRet < 0 && sendRet !== AVERROR_EOF) {
395
- // Decoder might be full, try to receive first
396
- const frame = await this.receive();
397
- if (frame) {
398
- return frame;
399
- }
400
- // If still failing, it's an error
401
- if (sendRet !== AVERROR_EAGAIN && this.options.exitOnError) {
402
- FFmpegError.throwIfError(sendRet, 'Failed to send packet');
510
+ if (this.options.exitOnError) {
511
+ FFmpegError.throwIfError(sendRet, 'Failed to send packet to decoder');
403
512
  }
513
+ // exitOnError=false: Continue to receive loop to drain any buffered frames
404
514
  }
405
- // Try to receive frame
406
- const frame = await this.receive();
407
- return frame;
408
515
  }
409
516
  /**
410
- * Decode a packet to frame synchronously.
517
+ * Send a packet to the decoder synchronously.
411
518
  * Synchronous version of decode.
412
519
  *
413
- * Send packet to decoder and attempt to receive frame.
414
- * Handles decoder buffering and error conditions.
415
- * May return null if decoder needs more data.
520
+ * Sends a compressed packet to the decoder for decoding.
521
+ * Does not return decoded frames - use {@link receiveSync} to retrieve frames.
522
+ * A single packet can produce zero, one, or multiple frames depending on codec buffering.
523
+ * Automatically manages decoder state and error recovery.
416
524
  *
417
- * @param packet - Compressed packet to decode
525
+ * **Important**: This method only SENDS the packet to the decoder.
526
+ * You must call {@link receiveSync} separately (potentially multiple times) to get decoded frames.
418
527
  *
419
- * @returns Decoded frame or null if more data needed or decoder is closed
528
+ * Direct mapping to avcodec_send_packet().
420
529
  *
421
- * @throws {FFmpegError} If decoding fails
530
+ * @param packet - Compressed packet to send to decoder
531
+ *
532
+ * @throws {FFmpegError} If sending packet fails
422
533
  *
423
534
  * @example
424
535
  * ```typescript
425
- * const frame = decoder.decodeSync(packet);
426
- * if (frame) {
427
- * console.log(`Decoded: ${frame.width}x${frame.height}`);
536
+ * // Send packet and receive frames
537
+ * await decoder.decode(packet);
538
+ *
539
+ * // Receive all available frames
540
+ * while (true) {
541
+ * const frame = await decoder.receive();
542
+ * if (!frame) break;
543
+ * console.log(`Decoded frame with PTS: ${frame.pts}`);
544
+ * frame.free();
545
+ * }
546
+ * ```
547
+ *
548
+ * @example
549
+ * ```typescript
550
+ * for await (const packet of input.packets()) {
551
+ * if (packet.streamIndex === decoder.getStream().index) {
552
+ * // Send packet
553
+ * await decoder.decode(packet);
554
+ *
555
+ * // Receive available frames
556
+ * let frame;
557
+ * while ((frame = await decoder.receive())) {
558
+ * await processFrame(frame);
559
+ * frame.free();
560
+ * }
561
+ * }
562
+ * packet.free();
428
563
  * }
429
564
  * ```
430
565
  *
566
+ * @see {@link receiveSync} For receiving decoded frames
567
+ * @see {@link decodeAllSync} For combined send+receive operation
568
+ * @see {@link framesSync} For automatic packet iteration
569
+ * @see {@link flushSync} For end-of-stream handling
431
570
  * @see {@link decode} For async version
432
571
  */
433
572
  decodeSync(packet) {
434
573
  if (this.isClosed) {
435
- return null;
574
+ return;
575
+ }
576
+ if (packet.streamIndex !== this.stream.index) {
577
+ return;
578
+ }
579
+ // Skip 0-sized packets
580
+ if (packet.size === 0) {
581
+ return;
436
582
  }
437
583
  // Send packet to decoder
438
584
  const sendRet = this.codecContext.sendPacketSync(packet);
585
+ // EAGAIN during send_packet is a decoder bug (FFmpeg treats this as AVERROR_BUG)
586
+ // We read all decoded frames with receive() until done, so decoder should never be full
587
+ if (sendRet === AVERROR_EAGAIN) {
588
+ throw new Error('Decoder returned EAGAIN on send - this is a decoder bug');
589
+ }
590
+ // Handle send errors
439
591
  if (sendRet < 0 && sendRet !== AVERROR_EOF) {
440
- // Decoder might be full, try to receive first
441
- const frame = this.receiveSync();
442
- if (frame) {
443
- return frame;
444
- }
445
- // If still failing, it's an error
446
- if (sendRet !== AVERROR_EAGAIN) {
447
- FFmpegError.throwIfError(sendRet, 'Failed to send packet');
592
+ if (this.options.exitOnError) {
593
+ FFmpegError.throwIfError(sendRet, 'Failed to send packet to decoder');
448
594
  }
595
+ // exitOnError=false: Continue to receive loop to drain any buffered frames
596
+ }
597
+ }
598
+ /**
599
+ * Decode a packet to frames.
600
+ *
601
+ * Sends a packet to the decoder and receives all available decoded frames.
602
+ * Returns array of frames - may be empty if decoder needs more data.
603
+ * One packet can produce zero, one, or multiple frames depending on codec.
604
+ * Automatically manages decoder state and error recovery.
605
+ *
606
+ * Direct mapping to avcodec_send_packet() and avcodec_receive_frame().
607
+ *
608
+ * @param packet - Compressed packet to decode
609
+ *
610
+ * @returns Array of decoded frames (empty if more data needed or decoder is closed)
611
+ *
612
+ * @throws {FFmpegError} If decoding fails
613
+ *
614
+ * @example
615
+ * ```typescript
616
+ * const frames = await decoder.decodeAll(packet);
617
+ * for (const frame of frames) {
618
+ * console.log(`Decoded frame with PTS: ${frame.pts}`);
619
+ * frame.free();
620
+ * }
621
+ * ```
622
+ *
623
+ * @example
624
+ * ```typescript
625
+ * for await (const packet of input.packets()) {
626
+ * const frames = await decoder.decodeAll(packet);
627
+ * for (const frame of frames) {
628
+ * await processFrame(frame);
629
+ * frame.free();
630
+ * }
631
+ * packet.free();
632
+ * }
633
+ * ```
634
+ *
635
+ * @see {@link decode} For single packet decoding
636
+ * @see {@link frames} For automatic packet iteration
637
+ * @see {@link flush} For end-of-stream handling
638
+ * @see {@link decodeAllSync} For synchronous version
639
+ */
640
+ async decodeAll(packet) {
641
+ const frames = [];
642
+ if (packet) {
643
+ await this.decode(packet);
644
+ }
645
+ else {
646
+ await this.flush();
647
+ }
648
+ // Receive all available frames
649
+ while (true) {
650
+ const remaining = await this.receive();
651
+ if (!remaining)
652
+ break;
653
+ frames.push(remaining);
449
654
  }
450
- // Try to receive frame
451
- const frame = this.receiveSync();
452
- return frame;
655
+ return frames;
656
+ }
657
+ /**
658
+ * Decode a packet to frames synchronously.
659
+ * Synchronous version of decodeAll.
660
+ *
661
+ * Sends packet to decoder and receives all available decoded frames.
662
+ * Returns array of frames - may be empty if decoder needs more data.
663
+ * One packet can produce zero, one, or multiple frames depending on codec.
664
+ *
665
+ * @param packet - Compressed packet to decode
666
+ *
667
+ * @returns Array of decoded frames (empty if more data needed or decoder is closed)
668
+ *
669
+ * @throws {FFmpegError} If decoding fails
670
+ *
671
+ * @example
672
+ * ```typescript
673
+ * const frames = decoder.decodeAllSync(packet);
674
+ * for (const frame of frames) {
675
+ * console.log(`Decoded: ${frame.width}x${frame.height}`);
676
+ * frame.free();
677
+ * }
678
+ *
679
+ * @example
680
+ * ```typescript
681
+ * for (const packet of input.packetsSync()) {
682
+ * const frames = await decoder.decodeAllSync(packet);
683
+ * for (const frame of frames) {
684
+ * processFrame(frame);
685
+ * frame.free();
686
+ * }
687
+ * packet.free();
688
+ * }
689
+ * ```
690
+ *
691
+ * @see {@link decodeSync} For single packet decoding
692
+ * @see {@link framesSync} For automatic packet iteration
693
+ * @see {@link flushSync} For end-of-stream handling
694
+ * @see {@link decodeAll} For async version
695
+ */
696
+ decodeAllSync(packet) {
697
+ const frames = [];
698
+ if (packet) {
699
+ this.decodeSync(packet);
700
+ }
701
+ else {
702
+ this.flushSync();
703
+ }
704
+ // Receive all available frames
705
+ while (true) {
706
+ const remaining = this.receiveSync();
707
+ if (!remaining)
708
+ break;
709
+ frames.push(remaining);
710
+ }
711
+ return frames;
453
712
  }
454
713
  /**
455
714
  * Decode packet stream to frame stream.
456
715
  *
457
716
  * High-level async generator for complete decoding pipeline.
458
- * Automatically filters packets for this stream, manages memory,
459
- * and flushes buffered frames at end.
717
+ * Decoder is only flushed when EOF (null) signal is explicitly received.
460
718
  * Primary interface for stream-based decoding.
461
719
  *
462
- * @param packets - Async iterable of packets
720
+ * **EOF Handling:**
721
+ * - Send null to flush decoder and get remaining buffered frames
722
+ * - Generator yields null after flushing when null is received
723
+ * - No automatic flushing - decoder stays open until EOF or close()
463
724
  *
464
- * @yields {Frame} Decoded frames
725
+ * @param packets - Async iterable of packets, single packet, or null to flush
726
+ *
727
+ * @yields {Frame | null} Decoded frames, followed by null when explicitly flushed
465
728
  *
466
729
  * @throws {Error} If decoder is closed
467
730
  *
@@ -469,10 +732,15 @@ export class Decoder {
469
732
  *
470
733
  * @example
471
734
  * ```typescript
472
- * await using input = await MediaInput.open('video.mp4');
735
+ * // Stream of packets with automatic EOF propagation
736
+ * await using input = await Demuxer.open('video.mp4');
473
737
  * using decoder = await Decoder.create(input.video());
474
738
  *
475
739
  * for await (const frame of decoder.frames(input.packets())) {
740
+ * if (frame === null) {
741
+ * console.log('Decoding complete');
742
+ * break;
743
+ * }
476
744
  * console.log(`Frame: ${frame.width}x${frame.height}`);
477
745
  * frame.free();
478
746
  * }
@@ -480,68 +748,96 @@ export class Decoder {
480
748
  *
481
749
  * @example
482
750
  * ```typescript
483
- * for await (const frame of decoder.frames(input.packets())) {
484
- * // Process frame
485
- * await filter.process(frame);
486
- *
487
- * // Frame automatically freed
751
+ * // Single packet (no automatic flush)
752
+ * for await (const frame of decoder.frames(singlePacket)) {
753
+ * await encoder.encode(frame);
754
+ * frame.free();
755
+ * }
756
+ * // Decoder still has buffered frames - send null to flush
757
+ * for await (const frame of decoder.frames(null)) {
758
+ * if (frame === null) break;
759
+ * await encoder.encode(frame);
488
760
  * frame.free();
489
761
  * }
490
762
  * ```
491
763
  *
492
764
  * @example
493
765
  * ```typescript
494
- * import { pipeline } from 'node-av/api';
495
- *
496
- * const control = pipeline(
497
- * input,
498
- * decoder,
499
- * encoder,
500
- * output
501
- * );
502
- * await control.completion;
766
+ * // Explicit flush with EOF
767
+ * for await (const frame of decoder.frames(null)) {
768
+ * if (frame === null) {
769
+ * console.log('All buffered frames flushed');
770
+ * break;
771
+ * }
772
+ * console.log('Buffered frame:', frame.pts);
773
+ * frame.free();
774
+ * }
503
775
  * ```
504
776
  *
505
777
  * @see {@link decode} For single packet decoding
506
- * @see {@link MediaInput.packets} For packet source
778
+ * @see {@link Demuxer.packets} For packet source
779
+ * @see {@link framesSync} For sync version
507
780
  */
508
781
  async *frames(packets) {
509
- // Process packets
510
- for await (const packet of packets) {
782
+ const self = this;
783
+ const processPacket = async function* (packet) {
784
+ await self.decode(packet);
785
+ while (true) {
786
+ const frame = await self.receive();
787
+ if (!frame)
788
+ break;
789
+ yield frame;
790
+ }
791
+ }.bind(this);
792
+ const finalize = async function* () {
793
+ for await (const remaining of self.flushFrames()) {
794
+ yield remaining;
795
+ }
796
+ yield null;
797
+ }.bind(this);
798
+ if (packets === null) {
799
+ yield* finalize();
800
+ return;
801
+ }
802
+ if (packets instanceof Packet) {
803
+ yield* processPacket(packets);
804
+ return;
805
+ }
806
+ for await (const packet_1 of packets) {
807
+ const env_1 = { stack: [], error: void 0, hasError: false };
511
808
  try {
512
- // Only process packets for our stream
513
- if (packet.streamIndex === this.stream.index) {
514
- const frame = await this.decode(packet);
515
- if (frame) {
516
- yield frame;
517
- }
809
+ const packet = __addDisposableResource(env_1, packet_1, false);
810
+ if (packet === null) {
811
+ yield* finalize();
812
+ return;
518
813
  }
814
+ yield* processPacket(packet);
815
+ }
816
+ catch (e_1) {
817
+ env_1.error = e_1;
818
+ env_1.hasError = true;
519
819
  }
520
820
  finally {
521
- // Free the input packet after processing
522
- packet.free();
821
+ __disposeResources(env_1);
523
822
  }
524
823
  }
525
- // Flush decoder after all packets
526
- await this.flush();
527
- while (!this.isClosed) {
528
- const remaining = await this.receive();
529
- if (!remaining)
530
- break;
531
- yield remaining;
532
- }
533
824
  }
534
825
  /**
535
826
  * Decode packet stream to frame stream synchronously.
536
827
  * Synchronous version of frames.
537
828
  *
538
- * High-level sync generator for complete decoding pipeline.
539
- * Automatically filters packets for this stream, manages memory,
540
- * and flushes buffered frames at end.
829
+ * High-level async generator for complete decoding pipeline.
830
+ * Decoder is only flushed when EOF (null) signal is explicitly received.
831
+ * Primary interface for stream-based decoding.
832
+ *
833
+ * **EOF Handling:**
834
+ * - Send null to flush decoder and get remaining buffered frames
835
+ * - Generator yields null after flushing when null is received
836
+ * - No automatic flushing - decoder stays open until EOF or close()
541
837
  *
542
- * @param packets - Iterable of packets
838
+ * @param packets - Iterable of packets, single packet, or null to flush
543
839
  *
544
- * @yields {Frame} Decoded frames
840
+ * @yields {Frame | null} Decoded frames, followed by null when explicitly flushed
545
841
  *
546
842
  * @throws {Error} If decoder is closed
547
843
  *
@@ -549,39 +845,91 @@ export class Decoder {
549
845
  *
550
846
  * @example
551
847
  * ```typescript
552
- * for (const frame of decoder.framesSync(packets)) {
848
+ * // Stream of packets with automatic EOF propagation
849
+ * await using input = await Demuxer.open('video.mp4');
850
+ * using decoder = await Decoder.create(input.video());
851
+ *
852
+ * for (const frame of decoder.framesSync(input.packetsSync())) {
853
+ * if (frame === null) {
854
+ * console.log('Decoding complete');
855
+ * break;
856
+ * }
553
857
  * console.log(`Frame: ${frame.width}x${frame.height}`);
554
- * // Process frame...
858
+ * frame.free();
555
859
  * }
556
860
  * ```
557
861
  *
558
- * @see {@link frames} For async version
862
+ * @example
863
+ * ```typescript
864
+ * // Single packet (no automatic flush)
865
+ * for (const frame of decoder.framesSync(singlePacket)) {
866
+ * encoder.encodeSync(frame);
867
+ * frame.free();
868
+ * }
869
+ * // Decoder still has buffered frames - send null to flush
870
+ * for (const frame of decoder.framesSync(null)) {
871
+ * if (frame === null) break;
872
+ * encoder.encodeSync(frame);
873
+ * frame.free();
874
+ * }
875
+ * ```
876
+ *
877
+ * @example
878
+ * ```typescript
879
+ * // Explicit flush with EOF
880
+ * for (const frame of decoder.framesSync(null)) {
881
+ * if (frame === null) {
882
+ * console.log('All buffered frames flushed');
883
+ * break;
884
+ * }
885
+ * console.log('Buffered frame:', frame.pts);
886
+ * frame.free();
887
+ * }
888
+ * ```
559
889
  */
560
890
  *framesSync(packets) {
561
- // Process packets
562
- for (const packet of packets) {
891
+ const self = this;
892
+ const processPacket = function* (packet) {
893
+ self.decodeSync(packet);
894
+ while (true) {
895
+ const frame = self.receiveSync();
896
+ if (!frame)
897
+ break;
898
+ yield frame;
899
+ }
900
+ }.bind(this);
901
+ const finalize = function* () {
902
+ for (const remaining of self.flushFramesSync()) {
903
+ yield remaining;
904
+ }
905
+ yield null;
906
+ }.bind(this);
907
+ if (packets === null) {
908
+ yield* finalize();
909
+ return;
910
+ }
911
+ if (packets instanceof Packet) {
912
+ yield* processPacket(packets);
913
+ return;
914
+ }
915
+ for (const packet_2 of packets) {
916
+ const env_2 = { stack: [], error: void 0, hasError: false };
563
917
  try {
564
- // Only process packets for our stream
565
- if (packet.streamIndex === this.stream.index) {
566
- const frame = this.decodeSync(packet);
567
- if (frame) {
568
- yield frame;
569
- }
918
+ const packet = __addDisposableResource(env_2, packet_2, false);
919
+ if (packet === null) {
920
+ yield* finalize();
921
+ return;
570
922
  }
923
+ yield* processPacket(packet);
924
+ }
925
+ catch (e_2) {
926
+ env_2.error = e_2;
927
+ env_2.hasError = true;
571
928
  }
572
929
  finally {
573
- // Free the input packet after processing
574
- packet.free();
930
+ __disposeResources(env_2);
575
931
  }
576
932
  }
577
- // Flush decoder after all packets
578
- this.flushSync();
579
- while (!this.isClosed) {
580
- const remaining = this.receiveSync();
581
- if (!remaining)
582
- break;
583
- yield remaining;
584
- }
585
933
  }
586
934
  /**
587
935
  * Flush decoder and signal end-of-stream.
@@ -609,6 +957,7 @@ export class Decoder {
609
957
  *
610
958
  * @see {@link flushFrames} For convenient async iteration
611
959
  * @see {@link receive} For getting buffered frames
960
+ * @see {@link flushSync} For synchronous version
612
961
  */
613
962
  async flush() {
614
963
  if (this.isClosed) {
@@ -642,6 +991,8 @@ export class Decoder {
642
991
  * }
643
992
  * ```
644
993
  *
994
+ * @see {@link flushFramesSync} For convenient sync iteration
995
+ * @see {@link receiveSync} For getting buffered frames
645
996
  * @see {@link flush} For async version
646
997
  */
647
998
  flushSync() {
@@ -675,15 +1026,18 @@ export class Decoder {
675
1026
  * }
676
1027
  * ```
677
1028
  *
1029
+ * @see {@link decode} For sending packets and receiving frames
678
1030
  * @see {@link flush} For signaling end-of-stream
679
- * @see {@link frames} For complete pipeline
1031
+ * @see {@link flushFramesSync} For synchronous version
680
1032
  */
681
1033
  async *flushFrames() {
682
1034
  // Send flush signal
683
1035
  await this.flush();
684
- let frame;
685
- while ((frame = await this.receive()) !== null) {
686
- yield frame;
1036
+ while (true) {
1037
+ const remaining = await this.receive();
1038
+ if (!remaining)
1039
+ break;
1040
+ yield remaining;
687
1041
  }
688
1042
  }
689
1043
  /**
@@ -706,14 +1060,18 @@ export class Decoder {
706
1060
  * }
707
1061
  * ```
708
1062
  *
1063
+ * @see {@link decodeSync} For sending packets and receiving frames
1064
+ * @see {@link flushSync} For signaling end-of-stream
709
1065
  * @see {@link flushFrames} For async version
710
1066
  */
711
1067
  *flushFramesSync() {
712
1068
  // Send flush signal
713
1069
  this.flushSync();
714
- let frame;
715
- while ((frame = this.receiveSync()) !== null) {
716
- yield frame;
1070
+ while (true) {
1071
+ const remaining = this.receiveSync();
1072
+ if (!remaining)
1073
+ break;
1074
+ yield remaining;
717
1075
  }
718
1076
  }
719
1077
  /**
@@ -722,56 +1080,90 @@ export class Decoder {
722
1080
  * Gets decoded frames from the codec's internal buffer.
723
1081
  * Handles frame cloning and error checking.
724
1082
  * Hardware frames include hw_frames_ctx reference.
725
- * Call repeatedly until null to drain all buffered frames.
1083
+ * Call repeatedly to drain all buffered frames.
1084
+ *
1085
+ * **Return Values:**
1086
+ * - `Frame` - Successfully decoded frame
1087
+ * - `null` - No frame available (AVERROR_EAGAIN), send more packets
1088
+ * - `undefined` - End of stream reached (AVERROR_EOF), decoder flushed
726
1089
  *
727
1090
  * Direct mapping to avcodec_receive_frame().
728
1091
  *
729
- * @returns Cloned frame or null if no frames available
1092
+ * @returns Decoded frame, null (need more data), or undefined (end of stream)
730
1093
  *
731
1094
  * @throws {FFmpegError} If receive fails with error other than AVERROR_EAGAIN or AVERROR_EOF
732
1095
  *
733
1096
  * @example
734
1097
  * ```typescript
735
1098
  * const frame = await decoder.receive();
736
- * if (frame) {
1099
+ * if (frame === EOF) {
1100
+ * console.log('Decoder flushed, no more frames');
1101
+ * } else if (frame) {
737
1102
  * console.log('Got decoded frame');
738
1103
  * frame.free();
1104
+ * } else {
1105
+ * console.log('Need more packets');
739
1106
  * }
740
1107
  * ```
741
1108
  *
742
1109
  * @example
743
1110
  * ```typescript
744
- * // Drain all buffered frames
1111
+ * // Drain all buffered frames (stop on null or EOF)
745
1112
  * let frame;
746
- * while ((frame = await decoder.receive()) !== null) {
1113
+ * while ((frame = await decoder.receive()) && frame !== EOF) {
747
1114
  * console.log(`Frame PTS: ${frame.pts}`);
748
1115
  * frame.free();
749
1116
  * }
750
1117
  * ```
751
1118
  *
752
- * @see {@link decode} For sending packets and receiving frames
1119
+ * @see {@link decode} For sending packets
753
1120
  * @see {@link flush} For signaling end-of-stream
1121
+ * @see {@link receiveSync} For synchronous version
1122
+ * @see {@link EOF} For end-of-stream signal
754
1123
  */
755
1124
  async receive() {
756
- // Clear previous frame data
757
- this.frame.unref();
758
1125
  if (this.isClosed) {
759
- return null;
1126
+ return EOF;
760
1127
  }
1128
+ // Clear previous frame data
1129
+ this.frame.unref();
761
1130
  const ret = await this.codecContext.receiveFrame(this.frame);
762
1131
  if (ret === 0) {
1132
+ // Set frame time_base to decoder's packet timebase
1133
+ this.frame.timeBase = this.codecContext.pktTimebase;
1134
+ // Check for corrupt frame
1135
+ if (this.frame.decodeErrorFlags || this.frame.hasFlags(AV_FRAME_FLAG_CORRUPT)) {
1136
+ if (this.options.exitOnError) {
1137
+ throw new Error('Corrupt decoded frame detected');
1138
+ }
1139
+ // exitOnError=false: skip corrupt frame
1140
+ return null;
1141
+ }
1142
+ // Handles PTS assignment, duration estimation, and frame tracking
1143
+ if (this.codecContext.codecType === AVMEDIA_TYPE_VIDEO) {
1144
+ this.processVideoFrame(this.frame);
1145
+ }
1146
+ // Handles timestamp extrapolation, sample rate changes, and duration calculation
1147
+ if (this.codecContext.codecType === AVMEDIA_TYPE_AUDIO) {
1148
+ this.processAudioFrame(this.frame);
1149
+ }
763
1150
  // Got a frame, clone it for the user
764
1151
  return this.frame.clone();
765
1152
  }
766
- else if (ret === AVERROR_EAGAIN || ret === AVERROR_EOF) {
767
- // Need more data or end of stream
1153
+ else if (ret === AVERROR_EAGAIN) {
1154
+ // Need more data
768
1155
  return null;
769
1156
  }
1157
+ else if (ret === AVERROR_EOF) {
1158
+ // End of stream
1159
+ return EOF;
1160
+ }
770
1161
  else {
771
- // Error
1162
+ // Error during receive
772
1163
  if (this.options.exitOnError) {
773
1164
  FFmpegError.throwIfError(ret, 'Failed to receive frame');
774
1165
  }
1166
+ // exitOnError=false: return null, caller can retry if desired
775
1167
  return null;
776
1168
  }
777
1169
  }
@@ -782,58 +1174,113 @@ export class Decoder {
782
1174
  * Gets decoded frames from the codec's internal buffer.
783
1175
  * Handles frame cloning and error checking.
784
1176
  * Hardware frames include hw_frames_ctx reference.
785
- * Call repeatedly until null to drain all buffered frames.
1177
+ * Call repeatedly to drain all buffered frames.
1178
+ *
1179
+ * **Return Values:**
1180
+ * - `Frame` - Successfully decoded frame
1181
+ * - `null` - No frame available (AVERROR_EAGAIN), send more packets
1182
+ * - `undefined` - End of stream reached (AVERROR_EOF), decoder flushed
786
1183
  *
787
1184
  * Direct mapping to avcodec_receive_frame().
788
1185
  *
789
- * @returns Cloned frame or null if no frames available
1186
+ * @returns Decoded frame, null (need more data), or undefined (end of stream)
790
1187
  *
791
1188
  * @throws {FFmpegError} If receive fails with error other than AVERROR_EAGAIN or AVERROR_EOF
792
1189
  *
793
1190
  * @example
794
1191
  * ```typescript
795
1192
  * const frame = decoder.receiveSync();
796
- * if (frame) {
1193
+ * if (frame === EOF) {
1194
+ * console.log('Decoder flushed, no more frames');
1195
+ * } else if (frame) {
797
1196
  * console.log('Got decoded frame');
798
1197
  * frame.free();
1198
+ * } else {
1199
+ * console.log('Need more packets');
799
1200
  * }
800
1201
  * ```
801
1202
  *
802
1203
  * @example
803
1204
  * ```typescript
804
- * // Drain all buffered frames
1205
+ * // Drain all buffered frames (stop on null or EOF)
805
1206
  * let frame;
806
- * while ((frame = decoder.receiveSync()) !== null) {
1207
+ * while ((frame = decoder.receiveSync()) && frame !== EOF) {
807
1208
  * console.log(`Frame PTS: ${frame.pts}`);
808
1209
  * frame.free();
809
1210
  * }
810
1211
  * ```
811
1212
  *
1213
+ * @see {@link decodeSync} For sending packets
1214
+ * @see {@link flushSync} For signaling end-of-stream
812
1215
  * @see {@link receive} For async version
1216
+ * @see {@link EOF} For end-of-stream signal
813
1217
  */
814
1218
  receiveSync() {
815
- // Clear previous frame data
816
- this.frame.unref();
817
1219
  if (this.isClosed) {
818
- return null;
1220
+ return EOF;
819
1221
  }
1222
+ // Clear previous frame data
1223
+ this.frame.unref();
820
1224
  const ret = this.codecContext.receiveFrameSync(this.frame);
821
1225
  if (ret === 0) {
1226
+ // Set frame time_base to decoder's packet timebase
1227
+ this.frame.timeBase = this.codecContext.pktTimebase;
1228
+ // Check for corrupt frame
1229
+ if (this.frame.decodeErrorFlags || this.frame.hasFlags(AV_FRAME_FLAG_CORRUPT)) {
1230
+ if (this.options.exitOnError) {
1231
+ throw new Error('Corrupt decoded frame detected');
1232
+ }
1233
+ // exitOnError=false: skip corrupt frame
1234
+ return null;
1235
+ }
1236
+ // Process video frame
1237
+ // Handles PTS assignment, duration estimation, and frame tracking
1238
+ if (this.codecContext.codecType === AVMEDIA_TYPE_VIDEO) {
1239
+ this.processVideoFrame(this.frame);
1240
+ }
1241
+ // Process audio frame
1242
+ // Handles timestamp extrapolation, sample rate changes, and duration calculation
1243
+ if (this.codecContext.codecType === AVMEDIA_TYPE_AUDIO) {
1244
+ this.processAudioFrame(this.frame);
1245
+ }
822
1246
  // Got a frame, clone it for the user
823
1247
  return this.frame.clone();
824
1248
  }
825
- else if (ret === AVERROR_EAGAIN || ret === AVERROR_EOF) {
826
- // Need more data or end of stream
1249
+ else if (ret === AVERROR_EAGAIN) {
1250
+ // Need more data
827
1251
  return null;
828
1252
  }
1253
+ else if (ret === AVERROR_EOF) {
1254
+ // End of stream
1255
+ return EOF;
1256
+ }
829
1257
  else {
830
- // Error
1258
+ // Error during receive
831
1259
  if (this.options.exitOnError) {
832
1260
  FFmpegError.throwIfError(ret, 'Failed to receive frame');
833
1261
  }
1262
+ // exitOnError=false: return null, caller can retry if desired
834
1263
  return null;
835
1264
  }
836
1265
  }
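
A minimal sketch of driving this synchronous API end to end, under a few stated assumptions: that `decodeSync()` only submits the packet (mirroring how the worker loop below uses `decode()` and then drains via `receive()`), that `packets` is an iterable of demuxed packets for this decoder's stream, and that the import path matches the package's public entry point.

```typescript
import { Decoder, EOF } from 'node-av'; // exact entry point is an assumption

declare const decoder: Decoder;        // an opened decoder (assumption)
declare const packets: Iterable<any>;  // demuxed packets for its stream (assumption)

// Drain every frame currently buffered in the codec.
// Stops on null (EAGAIN: send more packets) or EOF (decoder fully flushed).
const drain = (): void => {
  let frame;
  while ((frame = decoder.receiveSync()) && frame !== EOF) {
    console.log(`Frame PTS: ${frame.pts}`);
    frame.free();
  }
};

for (const packet of packets) {
  decoder.decodeSync(packet); // submit one packet (sync analog of decode())
  drain();                    // pull all frames it produced
}
decoder.flushSync();          // signal end-of-stream
drain();                      // collect frames still buffered inside the codec
```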
1266
+ pipeTo(target) {
1267
+ const t = target;
1268
+ // Store reference to next component for flush propagation
1269
+ this.nextComponent = t;
1270
+ // Start worker if not already running
1271
+ this.workerPromise ??= this.runWorker();
1272
+ // Start pipe task: decoder.outputQueue -> target.inputQueue (via target.send)
1273
+ this.pipeToPromise = (async () => {
1274
+ while (true) {
1275
+ const frame = await this.receiveFromQueue();
1276
+ if (!frame)
1277
+ break;
1278
+ await t.sendToQueue(frame);
1279
+ }
1280
+ })();
1281
+ // Return scheduler for chaining (target is now the last component)
1282
+ return new Scheduler(this, t);
1283
+ }
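
`pipeTo()` ships without a doc comment in this build. From the body above: it stores the target for flush propagation, lazily starts the worker loop, forwards every decoded frame from the output queue into `target.sendToQueue()`, and returns a `Scheduler` wrapping both ends for further chaining. A minimal sketch of the shape the target appears to need; the real types are not visible in this hunk, so this interface is an assumption:

```typescript
// Sketch of the contract pipeTo() appears to rely on, inferred from the calls
// above; this interface is an assumption, not a type exported by node-av.
interface PushSink<T> {
  // Accept one item for processing; null triggers the flush sequence.
  sendToQueue(item: T | null): Promise<void>;
}
```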
837
1284
  /**
838
1285
  * Close decoder and free resources.
839
1286
  *
@@ -858,6 +1305,8 @@ export class Decoder {
858
1305
  return;
859
1306
  }
860
1307
  this.isClosed = true;
1308
+ this.inputQueue?.close();
1309
+ this.outputQueue?.close();
861
1310
  this.frame.free();
862
1311
  this.codecContext.freeContext();
863
1312
  this.initialized = false;
@@ -908,6 +1357,367 @@ export class Decoder {
908
1357
  getCodecContext() {
909
1358
  return !this.isClosed && this.initialized ? this.codecContext : null;
910
1359
  }
1360
+ /**
1361
+ * Worker loop for push-based processing.
1362
+ *
1363
+ * @internal
1364
+ */
1365
+ async runWorker() {
1366
+ try {
1367
+ // Outer loop - receive packets
1368
+ while (!this.inputQueue.isClosed) {
1369
+ const env_3 = { stack: [], error: void 0, hasError: false };
1370
+ try {
1371
+ const packet = __addDisposableResource(env_3, await this.inputQueue.receive(), false);
1372
+ if (!packet)
1373
+ break;
1374
+ // Skip packets for other streams
1375
+ if (packet.streamIndex !== this.stream.index) {
1376
+ continue;
1377
+ }
1378
+ if (packet.size === 0) {
1379
+ continue;
1380
+ }
1381
+ await this.decode(packet);
1382
+ // Receive ALL available frames immediately
1383
+ // This ensures frames reach the output queue as soon as they are available, without added latency
1384
+ while (!this.outputQueue.isClosed) {
1385
+ const frame = await this.receive();
1386
+ if (!frame)
1387
+ break; // EAGAIN or EOF
1388
+ await this.outputQueue.send(frame);
1389
+ }
1390
+ }
1391
+ catch (e_3) {
1392
+ env_3.error = e_3;
1393
+ env_3.hasError = true;
1394
+ }
1395
+ finally {
1396
+ __disposeResources(env_3);
1397
+ }
1398
+ }
1399
+ // Flush decoder at end
1400
+ await this.flush();
1401
+ while (!this.outputQueue.isClosed) {
1402
+ const frame = await this.receive();
1403
+ if (!frame)
1404
+ break;
1405
+ await this.outputQueue.send(frame);
1406
+ }
1407
+ }
1408
+ catch {
1409
+ // Swallow worker errors; the finally block below closes the output queue so downstream consumers still terminate
1410
+ }
1411
+ finally {
1412
+ // Close output queue when done
1413
+ this.outputQueue?.close();
1414
+ }
1415
+ }
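
The worker coordinates entirely through `inputQueue`/`outputQueue` from `utilities/async-queue`, whose implementation is not part of this hunk. As a rough mental model only, with assumed semantics (`send()` enqueues, `receive()` resolves with the next item or `null` once the queue is closed and drained, `close()` wakes pending receivers), such a queue could look like:

```typescript
// Minimal sketch of a closeable async queue (unbounded, no backpressure).
// This is an illustration of the assumed semantics, not the implementation
// shipped in utilities/async-queue.
class SimpleAsyncQueue<T> {
  private items: T[] = [];
  private waiters: ((value: T | null) => void)[] = [];
  private closed = false;

  get isClosed(): boolean {
    return this.closed;
  }

  async send(item: T): Promise<void> {
    if (this.closed) return;
    const waiter = this.waiters.shift();
    if (waiter) {
      waiter(item); // hand the item directly to a pending receive()
    } else {
      this.items.push(item);
    }
  }

  async receive(): Promise<T | null> {
    if (this.items.length > 0) {
      return this.items.shift()!;
    }
    if (this.closed) {
      return null; // closed and drained
    }
    return new Promise<T | null>((resolve) => this.waiters.push(resolve));
  }

  close(): void {
    this.closed = true;
    // Wake pending receivers with null so loops like the worker's can exit.
    for (const waiter of this.waiters.splice(0)) {
      waiter(null);
    }
  }
}
```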
1416
+ /**
1417
+ * Send packet to input queue or flush the pipeline.
1418
+ *
1419
+ * When packet is provided, queues it for processing.
1420
+ * When null is provided, triggers flush sequence:
1421
+ * - Closes input queue
1422
+ * - Waits for worker completion
1423
+ * - Flushes decoder and sends remaining frames to output queue
1424
+ * - Closes output queue
1425
+ * - Waits for pipeTo task completion
1426
+ * - Propagates flush to next component (if any)
1427
+ *
1428
+ * Used by scheduler system for pipeline control.
1429
+ *
1430
+ * @param packet - Packet to send, or null to flush
1431
+ *
1432
+ * @internal
1433
+ */
1434
+ async sendToQueue(packet) {
1435
+ if (packet) {
1436
+ await this.inputQueue.send(packet);
1437
+ }
1438
+ else {
1439
+ // Close input queue to signal end of stream to worker
1440
+ this.inputQueue.close();
1441
+ // Wait for worker to finish processing all packets (if exists)
1442
+ if (this.workerPromise) {
1443
+ await this.workerPromise;
1444
+ }
1445
+ // Flush decoder at end
1446
+ await this.flush();
1447
+ // Send all flushed frames to output queue
1448
+ while (true) {
1449
+ const frame = await this.receive();
1450
+ if (!frame)
1451
+ break;
1452
+ await this.outputQueue.send(frame);
1453
+ }
1454
+ // Close output queue to signal end of stream to pipeTo() task
1455
+ this.outputQueue.close();
1456
+ // Wait for pipeTo() task to finish processing all frames (if exists)
1457
+ if (this.pipeToPromise) {
1458
+ await this.pipeToPromise;
1459
+ }
1460
+ // Then propagate flush to next component
1461
+ if (this.nextComponent) {
1462
+ await this.nextComponent.sendToQueue(null);
1463
+ }
1464
+ }
1465
+ }
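
Taken together with the worker, this means a scheduler-style driver can run a chain by pushing packets and finishing with `null`. A small sketch under those assumptions, where `source` is a hypothetical async iterable of packets for this decoder's stream and the decoder is already wired into a chain via `pipeTo()`:

```typescript
// Sketch of a scheduler-style driver; `source` is hypothetical and the
// structural parameter type stands in for the decoder's internal interface.
async function drive(
  decoder: { sendToQueue(p: unknown): Promise<void> },
  source: AsyncIterable<unknown>,
): Promise<void> {
  for await (const packet of source) {
    await decoder.sendToQueue(packet); // enqueue for the worker loop
  }
  // Pushing null triggers the documented flush sequence: close the input
  // queue, await the worker, flush the codec, forward remaining frames,
  // close the output queue, await the pipeTo task, then propagate the
  // flush to the next component.
  await decoder.sendToQueue(null);
}
```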
1466
+ /**
1467
+ * Receive frame from output queue.
1468
+ *
1469
+ * @returns Frame from output queue or null if closed
1470
+ *
1471
+ * @internal
1472
+ */
1473
+ async receiveFromQueue() {
1474
+ return await this.outputQueue.receive();
1475
+ }
1476
+ /**
1477
+ * Estimate video frame duration.
1478
+ *
1479
+ * Implements FFmpeg CLI's video_duration_estimate() logic.
1480
+ * Uses multiple heuristics to determine frame duration when not explicitly available:
1481
+ * 1. Frame duration from container (if reliable)
1482
+ * 2. Duration from codec framerate
1483
+ * 3. PTS difference between frames
1484
+ * 4. Stream framerate
1485
+ * 5. Last frame's estimated duration
1486
+ *
1487
+ * @param frame - Frame to estimate duration for
1488
+ *
1489
+ * @returns Estimated duration in frame's timebase units
1490
+ *
1491
+ * @internal
1492
+ */
1493
+ estimateVideoDuration(frame) {
1494
+ // Difference between this and last frame's timestamps
1495
+ const tsDiff = frame.pts !== AV_NOPTS_VALUE && this.lastFramePts !== AV_NOPTS_VALUE ? frame.pts - this.lastFramePts : -1n;
1496
+ // Frame duration is unreliable (typically guessed by lavf) when it is equal
1497
+ // to 1 and the actual duration of the last frame is more than 2x larger
1498
+ const durationUnreliable = frame.duration === 1n && tsDiff > 2n * frame.duration;
1499
+ // Prefer frame duration for containers with timestamps
1500
+ if (frame.duration > 0n && !durationUnreliable) {
1501
+ return frame.duration;
1502
+ }
1503
+ // Calculate codec duration from framerate
1504
+ let codecDuration = 0n;
1505
+ const framerate = this.codecContext.framerate;
1506
+ if (framerate && framerate.den > 0 && framerate.num > 0) {
1507
+ const fields = (frame.repeatPict ?? 0) + 2;
1508
+ const fieldRate = avMulQ(framerate, { num: 2, den: 1 });
1509
+ codecDuration = avRescaleQ(fields, avInvQ(fieldRate), frame.timeBase);
1510
+ }
1511
+ // When timestamps are available, repeat last frame's actual duration
1512
+ if (tsDiff > 0n) {
1513
+ return tsDiff;
1514
+ }
1515
+ // Try frame/codec duration
1516
+ if (frame.duration > 0n) {
1517
+ return frame.duration;
1518
+ }
1519
+ if (codecDuration > 0n) {
1520
+ return codecDuration;
1521
+ }
1522
+ // Try stream framerate
1523
+ const streamFramerate = this.stream.avgFrameRate ?? this.stream.rFrameRate;
1524
+ if (streamFramerate && streamFramerate.num > 0 && streamFramerate.den > 0) {
1525
+ const d = avRescaleQ(1, avInvQ(streamFramerate), frame.timeBase);
1526
+ if (d > 0n) {
1527
+ return d;
1528
+ }
1529
+ }
1530
+ // Last resort is last frame's estimated duration, and 1
1531
+ return this.lastFrameDurationEst > 0n ? this.lastFrameDurationEst : 1n;
1532
+ }
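
A short worked example of the fallback chain, using illustrative values rather than anything taken from a real stream:

```typescript
// Illustrative values: a 25 fps progressive stream (repeatPict = 0) on a
// 90 kHz timebase, where the container supplies no frame duration and no
// previous frame has been seen yet (tsDiff = -1n).
const fields = 0 + 2;                        // repeatPict + 2
// fieldRate = 25/1 * 2 = 50/1, so one field lasts 1/50 s.
const codecDuration = (fields * 90000) / 50; // avRescaleQ(2, 1/50, 1/90000) = 3600
// With tsDiff <= 0 and frame.duration == 0n, the codec-derived value wins:
// each frame gets a duration of 3600 ticks, i.e. 40 ms on the 90 kHz clock.
```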
1533
+ /**
1534
+ * Process video frame after decoding.
1535
+ *
1536
+ * Implements FFmpeg CLI's video_frame_process() logic.
1537
+ * Handles:
1538
+ * - Hardware frame transfer to software format
1539
+ * - PTS assignment from best_effort_timestamp
1540
+ * - PTS extrapolation when missing
1541
+ * - Duration estimation
1542
+ * - Frame tracking for next frame
1543
+ *
1544
+ * @param frame - Decoded frame to process
1545
+ *
1546
+ * @internal
1547
+ */
1548
+ processVideoFrame(frame) {
1549
+ // Hardware acceleration retrieve
1550
+ // If hwaccel_output_format is set and frame is in hardware format, transfer to software format
1551
+ if (this.options.hwaccelOutputFormat !== undefined && frame.isHwFrame()) {
1552
+ const swFrame = new Frame();
1553
+ swFrame.alloc();
1554
+ swFrame.format = this.options.hwaccelOutputFormat;
1555
+ // Transfer data from hardware to software frame
1556
+ const ret = frame.hwframeTransferDataSync(swFrame, 0);
1557
+ if (ret < 0) {
1558
+ swFrame.free();
1559
+ if (this.options.exitOnError) {
1560
+ FFmpegError.throwIfError(ret, 'Failed to transfer hardware frame data');
1561
+ }
1562
+ return;
1563
+ }
1564
+ // Copy properties from hw frame to sw frame
1565
+ swFrame.copyProps(frame);
1566
+ // Replace frame with software version (unref old, move ref)
1567
+ frame.unref();
1568
+ const refRet = frame.ref(swFrame);
1569
+ swFrame.free();
1570
+ if (refRet < 0) {
1571
+ if (this.options.exitOnError) {
1572
+ FFmpegError.throwIfError(refRet, 'Failed to reference software frame');
1573
+ }
1574
+ return;
1575
+ }
1576
+ }
1577
+ // Set PTS from best_effort_timestamp
1578
+ frame.pts = frame.bestEffortTimestamp;
1579
+ // DECODER_FLAG_FRAMERATE_FORCED: Ignores all timestamps and generates constant framerate
1580
+ if (this.options.forcedFramerate) {
1581
+ frame.pts = AV_NOPTS_VALUE;
1582
+ frame.duration = 1n;
1583
+ const invFramerate = avInvQ(this.options.forcedFramerate);
1584
+ frame.timeBase = new Rational(invFramerate.num, invFramerate.den);
1585
+ }
1586
+ // No timestamp available - extrapolate from previous frame duration
1587
+ if (frame.pts === AV_NOPTS_VALUE) {
1588
+ frame.pts = this.lastFramePts === AV_NOPTS_VALUE ? 0n : this.lastFramePts + this.lastFrameDurationEst;
1589
+ }
1590
+ // Update timestamp history
1591
+ this.lastFrameDurationEst = this.estimateVideoDuration(frame);
1592
+ this.lastFramePts = frame.pts;
1593
+ this.lastFrameTb = new Rational(frame.timeBase.num, frame.timeBase.den);
1594
+ // SAR override
1595
+ if (this.options.sarOverride) {
1596
+ frame.sampleAspectRatio = new Rational(this.options.sarOverride.num, this.options.sarOverride.den);
1597
+ }
1598
+ // Apply cropping
1599
+ if (this.options.applyCropping) {
1600
+ const ret = frame.applyCropping(1); // AV_FRAME_CROP_UNALIGNED = 1
1601
+ if (ret < 0) {
1602
+ if (this.options.exitOnError) {
1603
+ FFmpegError.throwIfError(ret, 'Error applying decoder cropping');
1604
+ }
1605
+ }
1606
+ }
1607
+ }
1608
+ /**
1609
+ * Audio samplerate update - handles sample rate changes.
1610
+ *
1611
+ * Based on FFmpeg's audio_samplerate_update().
1612
+ *
1613
+ * On sample rate change, chooses a new internal timebase that can represent
1614
+ * timestamps from all sample rates seen so far. Uses GCD to find minimal
1615
+ * common timebase, with fallback to LCM of common sample rates (28224000).
1616
+ *
1617
+ * Handles:
1618
+ * - Sample rate change detection
1619
+ * - Timebase calculation via GCD
1620
+ * - Overflow detection and fallback
1621
+ * - Frame timebase optimization
1622
+ * - Rescaling existing timestamps
1623
+ *
1624
+ * @param frame - Audio frame to process
1625
+ *
1626
+ * @returns Timebase to use for this frame
1627
+ *
1628
+ * @internal
1629
+ */
1630
+ audioSamplerateUpdate(frame) {
1631
+ const prev = this.lastFrameTb.den;
1632
+ const sr = frame.sampleRate;
1633
+ // No change - return existing timebase
1634
+ if (frame.sampleRate === this.lastFrameSampleRate) {
1635
+ return this.lastFrameTb;
1636
+ }
1637
+ // Calculate GCD to find minimal common timebase
1638
+ const gcd = avGcd(prev, sr);
1639
+ let tbNew;
1640
+ // Check for overflow
1641
+ if (Number(prev) / Number(gcd) >= INT_MAX / sr) {
1642
+ // LCM of 192000, 44100 - represents all common sample rates
1643
+ tbNew = { num: 1, den: 28224000 };
1644
+ }
1645
+ else {
1646
+ // Normal case
1647
+ tbNew = { num: 1, den: (Number(prev) / Number(gcd)) * sr };
1648
+ }
1649
+ // Keep frame's timebase if strictly better
1650
+ // "Strictly better" means: num=1, den > tbNew.den, and tbNew.den divides den evenly
1651
+ if (frame.timeBase.num === 1 && frame.timeBase.den > tbNew.den && frame.timeBase.den % tbNew.den === 0) {
1652
+ tbNew = { num: frame.timeBase.num, den: frame.timeBase.den };
1653
+ }
1654
+ // Rescale existing timestamps to new timebase
1655
+ if (this.lastFramePts !== AV_NOPTS_VALUE) {
1656
+ this.lastFramePts = avRescaleQ(this.lastFramePts, this.lastFrameTb, tbNew);
1657
+ }
1658
+ this.lastFrameDurationEst = avRescaleQ(this.lastFrameDurationEst, this.lastFrameTb, tbNew);
1659
+ this.lastFrameTb = new Rational(tbNew.num, tbNew.den);
1660
+ this.lastFrameSampleRate = frame.sampleRate;
1661
+ return this.lastFrameTb;
1662
+ }
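
A worked example of the timebase update, again with illustrative values: a stream that switches from 48 kHz to 44.1 kHz while the internal timebase is 1/48000:

```typescript
// Illustrative values for the sample rate change path above.
const prevDen = 48000;
const newRate = 44100;
// gcd(48000, 44100) = 300, so prevDen / gcd = 160,
// well below INT_MAX / 44100 (~48695): no overflow fallback needed.
const newDen = (prevDen / 300) * newRate; // = 7_056_000
// 1/7_056_000 divides evenly into both rates (7_056_000 / 48000 = 147,
// 7_056_000 / 44100 = 160), so timestamps from either clock remain exact
// after being rescaled into the new timebase.
```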
1663
+ /**
1664
+ * Audio timestamp processing - handles audio frame timestamps.
1665
+ *
1666
+ * Based on FFmpeg's audio_ts_process().
1667
+ *
1668
+ * Processes audio frame timestamps with:
1669
+ * - Sample rate change handling via audioSamplerateUpdate()
1670
+ * - PTS extrapolation when missing (pts_pred)
1671
+ * - Gap detection (resets av_rescale_delta state)
1672
+ * - Smooth timestamp conversion via av_rescale_delta
1673
+ * - Duration calculation from nb_samples
1674
+ * - Conversion to filtering timebase {1, sample_rate}
1675
+ *
1676
+ * Handles:
1677
+ * - Dynamic sample rate changes
1678
+ * - Missing timestamps (AV_NOPTS_VALUE)
1679
+ * - Timestamp gaps/discontinuities
1680
+ * - Sample-accurate timestamp generation
1681
+ * - Frame duration calculation
1682
+ *
1683
+ * @param frame - Decoded audio frame to process
1684
+ *
1685
+ * @internal
1686
+ */
1687
+ processAudioFrame(frame) {
1688
+ // Filtering timebase is always {1, sample_rate} for audio
1689
+ const tbFilter = { num: 1, den: frame.sampleRate };
1690
+ // Handle sample rate change - updates internal timebase
1691
+ const tb = this.audioSamplerateUpdate(frame);
1692
+ // Predict next PTS based on last frame + duration
1693
+ const ptsPred = this.lastFramePts === AV_NOPTS_VALUE ? 0n : this.lastFramePts + this.lastFrameDurationEst;
1694
+ // No timestamp - use predicted value
1695
+ if (frame.pts === AV_NOPTS_VALUE) {
1696
+ frame.pts = ptsPred;
1697
+ frame.timeBase = new Rational(tb.num, tb.den);
1698
+ }
1699
+ else if (this.lastFramePts !== AV_NOPTS_VALUE) {
1700
+ // Detect timestamp gap - compare with predicted timestamp
1701
+ const ptsPredInFrameTb = avRescaleQRnd(ptsPred, tb, frame.timeBase, AV_ROUND_UP);
1702
+ if (frame.pts > ptsPredInFrameTb) {
1703
+ // Gap detected - reset rescale_delta state for smooth conversion
1704
+ this.lastFilterInRescaleDelta = AV_NOPTS_VALUE;
1705
+ }
1706
+ }
1707
+ // Smooth timestamp conversion with av_rescale_delta
1708
+ // This maintains fractional sample accuracy across timebase conversions
1709
+ // avRescaleDelta modifies lastRef in place (simulates C's &last_filter_in_rescale_delta)
1710
+ const lastRef = { value: this.lastFilterInRescaleDelta };
1711
+ frame.pts = avRescaleDelta(frame.timeBase, frame.pts, tb, frame.nbSamples, lastRef, tb);
1712
+ this.lastFilterInRescaleDelta = lastRef.value;
1713
+ // Update frame tracking
1714
+ this.lastFramePts = frame.pts;
1715
+ this.lastFrameDurationEst = avRescaleQ(BigInt(frame.nbSamples), tbFilter, tb);
1716
+ // Convert to filtering timebase
1717
+ frame.pts = avRescaleQ(frame.pts, tb, tbFilter);
1718
+ frame.duration = BigInt(frame.nbSamples);
1719
+ frame.timeBase = new Rational(tbFilter.num, tbFilter.den);
1720
+ }
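
As an illustration of the post-conditions, with values chosen for the example rather than taken from this diff: a 48 kHz stream decoded in 1024-sample frames with gapless, valid input timestamps ends up with:

```typescript
// Resulting frame fields under the assumptions above:
//   frame.timeBase -> 1/48000                  (the {1, sample_rate} filtering timebase)
//   frame.duration -> 1024n                    (BigInt(nbSamples))
//   frame.pts      -> 0n, 1024n, 2048n, ...    (a running sample count)
```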
911
1721
  /**
912
1722
  * Dispose of decoder.
913
1723
  *