node-av 3.1.3 → 5.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +88 -52
- package/binding.gyp +23 -11
- package/dist/api/audio-frame-buffer.d.ts +201 -0
- package/dist/api/audio-frame-buffer.js +275 -0
- package/dist/api/audio-frame-buffer.js.map +1 -0
- package/dist/api/bitstream-filter.d.ts +320 -78
- package/dist/api/bitstream-filter.js +684 -151
- package/dist/api/bitstream-filter.js.map +1 -1
- package/dist/api/constants.d.ts +44 -0
- package/dist/api/constants.js +45 -0
- package/dist/api/constants.js.map +1 -0
- package/dist/api/data/test_av1.ivf +0 -0
- package/dist/api/data/test_mjpeg.mjpeg +0 -0
- package/dist/api/data/test_vp8.ivf +0 -0
- package/dist/api/data/test_vp9.ivf +0 -0
- package/dist/api/decoder.d.ts +454 -77
- package/dist/api/decoder.js +1081 -271
- package/dist/api/decoder.js.map +1 -1
- package/dist/api/{media-input.d.ts → demuxer.d.ts} +295 -45
- package/dist/api/demuxer.js +1965 -0
- package/dist/api/demuxer.js.map +1 -0
- package/dist/api/encoder.d.ts +423 -132
- package/dist/api/encoder.js +1089 -240
- package/dist/api/encoder.js.map +1 -1
- package/dist/api/filter-complex.d.ts +769 -0
- package/dist/api/filter-complex.js +1596 -0
- package/dist/api/filter-complex.js.map +1 -0
- package/dist/api/filter-presets.d.ts +80 -5
- package/dist/api/filter-presets.js +117 -7
- package/dist/api/filter-presets.js.map +1 -1
- package/dist/api/filter.d.ts +561 -125
- package/dist/api/filter.js +1083 -274
- package/dist/api/filter.js.map +1 -1
- package/dist/api/{fmp4.d.ts → fmp4-stream.d.ts} +141 -140
- package/dist/api/fmp4-stream.js +539 -0
- package/dist/api/fmp4-stream.js.map +1 -0
- package/dist/api/hardware.d.ts +58 -6
- package/dist/api/hardware.js +127 -11
- package/dist/api/hardware.js.map +1 -1
- package/dist/api/index.d.ts +8 -4
- package/dist/api/index.js +17 -8
- package/dist/api/index.js.map +1 -1
- package/dist/api/io-stream.d.ts +6 -6
- package/dist/api/io-stream.js +5 -4
- package/dist/api/io-stream.js.map +1 -1
- package/dist/api/{media-output.d.ts → muxer.d.ts} +280 -66
- package/dist/api/muxer.js +1934 -0
- package/dist/api/muxer.js.map +1 -0
- package/dist/api/pipeline.d.ts +77 -29
- package/dist/api/pipeline.js +449 -439
- package/dist/api/pipeline.js.map +1 -1
- package/dist/api/rtp-stream.d.ts +312 -0
- package/dist/api/rtp-stream.js +630 -0
- package/dist/api/rtp-stream.js.map +1 -0
- package/dist/api/types.d.ts +533 -56
- package/dist/api/utilities/async-queue.d.ts +91 -0
- package/dist/api/utilities/async-queue.js +162 -0
- package/dist/api/utilities/async-queue.js.map +1 -0
- package/dist/api/utilities/audio-sample.d.ts +11 -1
- package/dist/api/utilities/audio-sample.js +10 -0
- package/dist/api/utilities/audio-sample.js.map +1 -1
- package/dist/api/utilities/channel-layout.d.ts +1 -0
- package/dist/api/utilities/channel-layout.js +1 -0
- package/dist/api/utilities/channel-layout.js.map +1 -1
- package/dist/api/utilities/image.d.ts +39 -1
- package/dist/api/utilities/image.js +38 -0
- package/dist/api/utilities/image.js.map +1 -1
- package/dist/api/utilities/index.d.ts +3 -0
- package/dist/api/utilities/index.js +6 -0
- package/dist/api/utilities/index.js.map +1 -1
- package/dist/api/utilities/media-type.d.ts +2 -1
- package/dist/api/utilities/media-type.js +1 -0
- package/dist/api/utilities/media-type.js.map +1 -1
- package/dist/api/utilities/pixel-format.d.ts +4 -1
- package/dist/api/utilities/pixel-format.js +3 -0
- package/dist/api/utilities/pixel-format.js.map +1 -1
- package/dist/api/utilities/sample-format.d.ts +6 -1
- package/dist/api/utilities/sample-format.js +5 -0
- package/dist/api/utilities/sample-format.js.map +1 -1
- package/dist/api/utilities/scheduler.d.ts +138 -0
- package/dist/api/utilities/scheduler.js +98 -0
- package/dist/api/utilities/scheduler.js.map +1 -0
- package/dist/api/utilities/streaming.d.ts +105 -15
- package/dist/api/utilities/streaming.js +201 -12
- package/dist/api/utilities/streaming.js.map +1 -1
- package/dist/api/utilities/timestamp.d.ts +15 -1
- package/dist/api/utilities/timestamp.js +14 -0
- package/dist/api/utilities/timestamp.js.map +1 -1
- package/dist/api/utilities/whisper-model.d.ts +310 -0
- package/dist/api/utilities/whisper-model.js +528 -0
- package/dist/api/utilities/whisper-model.js.map +1 -0
- package/dist/api/webrtc-stream.d.ts +288 -0
- package/dist/api/webrtc-stream.js +440 -0
- package/dist/api/webrtc-stream.js.map +1 -0
- package/dist/api/whisper.d.ts +324 -0
- package/dist/api/whisper.js +362 -0
- package/dist/api/whisper.js.map +1 -0
- package/dist/constants/constants.d.ts +54 -2
- package/dist/constants/constants.js +48 -1
- package/dist/constants/constants.js.map +1 -1
- package/dist/constants/encoders.d.ts +2 -1
- package/dist/constants/encoders.js +4 -3
- package/dist/constants/encoders.js.map +1 -1
- package/dist/constants/hardware.d.ts +26 -0
- package/dist/constants/hardware.js +27 -0
- package/dist/constants/hardware.js.map +1 -0
- package/dist/constants/index.d.ts +1 -0
- package/dist/constants/index.js +1 -0
- package/dist/constants/index.js.map +1 -1
- package/dist/ffmpeg/index.d.ts +3 -3
- package/dist/ffmpeg/index.js +3 -3
- package/dist/ffmpeg/utils.d.ts +27 -0
- package/dist/ffmpeg/utils.js +28 -16
- package/dist/ffmpeg/utils.js.map +1 -1
- package/dist/lib/binding.d.ts +22 -11
- package/dist/lib/binding.js.map +1 -1
- package/dist/lib/codec-context.d.ts +87 -0
- package/dist/lib/codec-context.js +125 -4
- package/dist/lib/codec-context.js.map +1 -1
- package/dist/lib/codec-parameters.d.ts +229 -1
- package/dist/lib/codec-parameters.js +264 -0
- package/dist/lib/codec-parameters.js.map +1 -1
- package/dist/lib/codec-parser.d.ts +23 -0
- package/dist/lib/codec-parser.js +25 -0
- package/dist/lib/codec-parser.js.map +1 -1
- package/dist/lib/codec.d.ts +26 -4
- package/dist/lib/codec.js +35 -0
- package/dist/lib/codec.js.map +1 -1
- package/dist/lib/dictionary.js +1 -0
- package/dist/lib/dictionary.js.map +1 -1
- package/dist/lib/error.js +1 -1
- package/dist/lib/error.js.map +1 -1
- package/dist/lib/fifo.d.ts +416 -0
- package/dist/lib/fifo.js +453 -0
- package/dist/lib/fifo.js.map +1 -0
- package/dist/lib/filter-context.d.ts +52 -11
- package/dist/lib/filter-context.js +56 -12
- package/dist/lib/filter-context.js.map +1 -1
- package/dist/lib/filter-graph.d.ts +9 -0
- package/dist/lib/filter-graph.js +13 -0
- package/dist/lib/filter-graph.js.map +1 -1
- package/dist/lib/filter.d.ts +21 -0
- package/dist/lib/filter.js +28 -0
- package/dist/lib/filter.js.map +1 -1
- package/dist/lib/format-context.d.ts +48 -14
- package/dist/lib/format-context.js +76 -7
- package/dist/lib/format-context.js.map +1 -1
- package/dist/lib/frame.d.ts +264 -1
- package/dist/lib/frame.js +351 -1
- package/dist/lib/frame.js.map +1 -1
- package/dist/lib/hardware-device-context.d.ts +3 -2
- package/dist/lib/hardware-device-context.js.map +1 -1
- package/dist/lib/index.d.ts +2 -0
- package/dist/lib/index.js +4 -0
- package/dist/lib/index.js.map +1 -1
- package/dist/lib/input-format.d.ts +21 -0
- package/dist/lib/input-format.js +42 -2
- package/dist/lib/input-format.js.map +1 -1
- package/dist/lib/native-types.d.ts +76 -27
- package/dist/lib/option.d.ts +25 -13
- package/dist/lib/option.js +28 -0
- package/dist/lib/option.js.map +1 -1
- package/dist/lib/output-format.d.ts +22 -1
- package/dist/lib/output-format.js +28 -0
- package/dist/lib/output-format.js.map +1 -1
- package/dist/lib/packet.d.ts +35 -0
- package/dist/lib/packet.js +52 -2
- package/dist/lib/packet.js.map +1 -1
- package/dist/lib/rational.d.ts +18 -0
- package/dist/lib/rational.js +19 -0
- package/dist/lib/rational.js.map +1 -1
- package/dist/lib/stream.d.ts +126 -0
- package/dist/lib/stream.js +188 -5
- package/dist/lib/stream.js.map +1 -1
- package/dist/lib/sync-queue.d.ts +179 -0
- package/dist/lib/sync-queue.js +197 -0
- package/dist/lib/sync-queue.js.map +1 -0
- package/dist/lib/types.d.ts +49 -1
- package/dist/lib/utilities.d.ts +281 -53
- package/dist/lib/utilities.js +298 -55
- package/dist/lib/utilities.js.map +1 -1
- package/install/check.js +2 -2
- package/package.json +37 -26
- package/dist/api/fmp4.js +0 -710
- package/dist/api/fmp4.js.map +0 -1
- package/dist/api/media-input.js +0 -1075
- package/dist/api/media-input.js.map +0 -1
- package/dist/api/media-output.js +0 -1040
- package/dist/api/media-output.js.map +0 -1
- package/dist/api/webrtc.d.ts +0 -664
- package/dist/api/webrtc.js +0 -1132
- package/dist/api/webrtc.js.map +0 -1
package/dist/api/encoder.js
CHANGED
|
@@ -1,5 +1,68 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
1
|
+
var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
|
|
2
|
+
if (value !== null && value !== void 0) {
|
|
3
|
+
if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
|
|
4
|
+
var dispose, inner;
|
|
5
|
+
if (async) {
|
|
6
|
+
if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
|
|
7
|
+
dispose = value[Symbol.asyncDispose];
|
|
8
|
+
}
|
|
9
|
+
if (dispose === void 0) {
|
|
10
|
+
if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
|
|
11
|
+
dispose = value[Symbol.dispose];
|
|
12
|
+
if (async) inner = dispose;
|
|
13
|
+
}
|
|
14
|
+
if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
|
|
15
|
+
if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
|
|
16
|
+
env.stack.push({ value: value, dispose: dispose, async: async });
|
|
17
|
+
}
|
|
18
|
+
else if (async) {
|
|
19
|
+
env.stack.push({ async: true });
|
|
20
|
+
}
|
|
21
|
+
return value;
|
|
22
|
+
};
|
|
23
|
+
var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
|
|
24
|
+
return function (env) {
|
|
25
|
+
function fail(e) {
|
|
26
|
+
env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
|
|
27
|
+
env.hasError = true;
|
|
28
|
+
}
|
|
29
|
+
var r, s = 0;
|
|
30
|
+
function next() {
|
|
31
|
+
while (r = env.stack.pop()) {
|
|
32
|
+
try {
|
|
33
|
+
if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
|
|
34
|
+
if (r.dispose) {
|
|
35
|
+
var result = r.dispose.call(r.value);
|
|
36
|
+
if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
|
|
37
|
+
}
|
|
38
|
+
else s |= 1;
|
|
39
|
+
}
|
|
40
|
+
catch (e) {
|
|
41
|
+
fail(e);
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
|
|
45
|
+
if (env.hasError) throw env.error;
|
|
46
|
+
}
|
|
47
|
+
return next();
|
|
48
|
+
};
|
|
49
|
+
})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
|
|
50
|
+
var e = new Error(message);
|
|
51
|
+
return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
|
|
52
|
+
});
|
|
53
|
+
import { AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, AV_CODEC_CAP_PARAM_CHANGE, AV_CODEC_FLAG_COPY_OPAQUE, AV_CODEC_FLAG_FRAME_DURATION, AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX, AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, AV_PICTURE_TYPE_NONE, AV_PIX_FMT_NONE, AV_PKT_FLAG_TRUSTED, AVCHROMA_LOC_UNSPECIFIED, AVERROR_EAGAIN, AVERROR_EOF, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO, EOF, } from '../constants/constants.js';
|
|
54
|
+
import { CodecContext } from '../lib/codec-context.js';
|
|
55
|
+
import { Codec } from '../lib/codec.js';
|
|
56
|
+
import { Dictionary } from '../lib/dictionary.js';
|
|
57
|
+
import { FFmpegError } from '../lib/error.js';
|
|
58
|
+
import { Frame } from '../lib/frame.js';
|
|
59
|
+
import { Packet } from '../lib/packet.js';
|
|
60
|
+
import { Rational } from '../lib/rational.js';
|
|
61
|
+
import { avRescaleQ } from '../lib/utilities.js';
|
|
62
|
+
import { AudioFrameBuffer } from './audio-frame-buffer.js';
|
|
63
|
+
import { FRAME_THREAD_QUEUE_SIZE, PACKET_THREAD_QUEUE_SIZE } from './constants.js';
|
|
64
|
+
import { AsyncQueue } from './utilities/async-queue.js';
|
|
65
|
+
import { SchedulerControl } from './utilities/scheduler.js';
|
|
3
66
|
import { parseBitrate } from './utils.js';
|
|
4
67
|
/**
|
|
5
68
|
* High-level encoder for audio and video streams.
|
|
@@ -56,17 +119,24 @@ import { parseBitrate } from './utils.js';
|
|
|
56
119
|
* ```
|
|
57
120
|
*
|
|
58
121
|
* @see {@link Decoder} For decoding packets to frames
|
|
59
|
-
* @see {@link
|
|
122
|
+
* @see {@link Muxer} For writing encoded packets
|
|
60
123
|
* @see {@link HardwareContext} For GPU acceleration
|
|
61
124
|
*/
|
|
62
125
|
export class Encoder {
|
|
63
126
|
codecContext;
|
|
64
127
|
packet;
|
|
65
128
|
codec;
|
|
129
|
+
initializePromise = null;
|
|
66
130
|
initialized = false;
|
|
67
131
|
isClosed = false;
|
|
68
132
|
opts;
|
|
69
133
|
options;
|
|
134
|
+
audioFrameBuffer;
|
|
135
|
+
// Worker pattern for push-based processing
|
|
136
|
+
inputQueue;
|
|
137
|
+
outputQueue;
|
|
138
|
+
workerPromise = null;
|
|
139
|
+
pipeToPromise = null;
|
|
70
140
|
/**
|
|
71
141
|
* @param codecContext - Configured codec context
|
|
72
142
|
*
|
|
@@ -85,6 +155,8 @@ export class Encoder {
|
|
|
85
155
|
this.opts = opts;
|
|
86
156
|
this.packet = new Packet();
|
|
87
157
|
this.packet.alloc();
|
|
158
|
+
this.inputQueue = new AsyncQueue(FRAME_THREAD_QUEUE_SIZE);
|
|
159
|
+
this.outputQueue = new AsyncQueue(PACKET_THREAD_QUEUE_SIZE);
|
|
88
160
|
}
|
|
89
161
|
/**
|
|
90
162
|
* Create an encoder with specified codec and options.
|
|
@@ -97,7 +169,7 @@ export class Encoder {
|
|
|
97
169
|
*
|
|
98
170
|
* @param encoderCodec - Codec name, ID, or instance to use for encoding
|
|
99
171
|
*
|
|
100
|
-
* @param options -
|
|
172
|
+
* @param options - Optional encoder configuration options including required timeBase
|
|
101
173
|
*
|
|
102
174
|
* @returns Configured encoder instance
|
|
103
175
|
*
|
|
@@ -138,8 +210,9 @@ export class Encoder {
|
|
|
138
210
|
* ```
|
|
139
211
|
*
|
|
140
212
|
* @see {@link EncoderOptions} For configuration options
|
|
213
|
+
* @see {@link createSync} For synchronous version
|
|
141
214
|
*/
|
|
142
|
-
static async create(encoderCodec, options) {
|
|
215
|
+
static async create(encoderCodec, options = {}) {
|
|
143
216
|
let codec = null;
|
|
144
217
|
let codecName = '';
|
|
145
218
|
if (encoderCodec instanceof Codec) {
|
|
@@ -187,14 +260,6 @@ export class Encoder {
|
|
|
187
260
|
const bufSize = typeof options.bufSize === 'string' ? parseBitrate(options.bufSize) : BigInt(options.bufSize);
|
|
188
261
|
codecContext.rcBufferSize = Number(bufSize);
|
|
189
262
|
}
|
|
190
|
-
if (options.threads !== undefined) {
|
|
191
|
-
codecContext.threadCount = options.threads;
|
|
192
|
-
}
|
|
193
|
-
codecContext.timeBase = new Rational(options.timeBase.num, options.timeBase.den);
|
|
194
|
-
codecContext.pktTimebase = new Rational(options.timeBase.num, options.timeBase.den);
|
|
195
|
-
if (options.frameRate) {
|
|
196
|
-
codecContext.framerate = new Rational(options.frameRate.num, options.frameRate.den);
|
|
197
|
-
}
|
|
198
263
|
const opts = options.options ? Dictionary.fromObject(options.options) : undefined;
|
|
199
264
|
return new Encoder(codecContext, codec, options, opts);
|
|
200
265
|
}
|
|
@@ -210,7 +275,7 @@ export class Encoder {
|
|
|
210
275
|
*
|
|
211
276
|
* @param encoderCodec - Codec name, ID, or instance to use for encoding
|
|
212
277
|
*
|
|
213
|
-
* @param options -
|
|
278
|
+
* @param options - Optional encoder configuration options including required timeBase
|
|
214
279
|
*
|
|
215
280
|
* @returns Configured encoder instance
|
|
216
281
|
*
|
|
@@ -252,9 +317,10 @@ export class Encoder {
|
|
|
252
317
|
* });
|
|
253
318
|
* ```
|
|
254
319
|
*
|
|
320
|
+
* @see {@link EncoderOptions} For configuration options
|
|
255
321
|
* @see {@link create} For async version
|
|
256
322
|
*/
|
|
257
|
-
static createSync(encoderCodec, options) {
|
|
323
|
+
static createSync(encoderCodec, options = {}) {
|
|
258
324
|
let codec = null;
|
|
259
325
|
let codecName = '';
|
|
260
326
|
if (encoderCodec instanceof Codec) {
|
|
@@ -301,14 +367,6 @@ export class Encoder {
|
|
|
301
367
|
const bufSize = typeof options.bufSize === 'string' ? parseBitrate(options.bufSize) : BigInt(options.bufSize);
|
|
302
368
|
codecContext.rcBufferSize = Number(bufSize);
|
|
303
369
|
}
|
|
304
|
-
if (options.threads !== undefined) {
|
|
305
|
-
codecContext.threadCount = options.threads;
|
|
306
|
-
}
|
|
307
|
-
if (options.frameRate) {
|
|
308
|
-
codecContext.framerate = new Rational(options.frameRate.num, options.frameRate.den);
|
|
309
|
-
}
|
|
310
|
-
codecContext.timeBase = new Rational(options.timeBase.num, options.timeBase.den);
|
|
311
|
-
codecContext.pktTimebase = new Rational(options.timeBase.num, options.timeBase.den);
|
|
312
370
|
const opts = options.options ? Dictionary.fromObject(options.options) : undefined;
|
|
313
371
|
return new Encoder(codecContext, codec, options, opts);
|
|
314
372
|
}
|
|
@@ -343,6 +401,115 @@ export class Encoder {
|
|
|
343
401
|
get isEncoderInitialized() {
|
|
344
402
|
return this.initialized;
|
|
345
403
|
}
|
|
404
|
+
/**
|
|
405
|
+
* Codec flags.
|
|
406
|
+
*
|
|
407
|
+
* @returns Current codec flags
|
|
408
|
+
*
|
|
409
|
+
* @throws {Error} If encoder is closed
|
|
410
|
+
*
|
|
411
|
+
* @example
|
|
412
|
+
* ```typescript
|
|
413
|
+
* const flags = encoder.codecFlags;
|
|
414
|
+
* console.log('Current flags:', flags);
|
|
415
|
+
* ```
|
|
416
|
+
*
|
|
417
|
+
* @see {@link setCodecFlags} To set flags
|
|
418
|
+
* @see {@link clearCodecFlags} To clear flags
|
|
419
|
+
* @see {@link hasCodecFlags} To check flags
|
|
420
|
+
*/
|
|
421
|
+
get codecFlags() {
|
|
422
|
+
if (this.isClosed) {
|
|
423
|
+
throw new Error('Cannot get flags on closed encoder');
|
|
424
|
+
}
|
|
425
|
+
return this.codecContext.flags;
|
|
426
|
+
}
|
|
427
|
+
/**
|
|
428
|
+
* Set codec flags.
|
|
429
|
+
*
|
|
430
|
+
* @param flags - One or more flag values to set
|
|
431
|
+
*
|
|
432
|
+
* @throws {Error} If encoder is already initialized or closed
|
|
433
|
+
*
|
|
434
|
+
* @example
|
|
435
|
+
* ```typescript
|
|
436
|
+
* import { AV_CODEC_FLAG_GLOBAL_HEADER, AV_CODEC_FLAG_QSCALE } from 'node-av/constants';
|
|
437
|
+
*
|
|
438
|
+
* // Set multiple flags before initialization
|
|
439
|
+
* encoder.setCodecFlags(AV_CODEC_FLAG_GLOBAL_HEADER, AV_CODEC_FLAG_QSCALE);
|
|
440
|
+
* ```
|
|
441
|
+
*
|
|
442
|
+
* @see {@link clearCodecFlags} To clear flags
|
|
443
|
+
* @see {@link hasCodecFlags} To check flags
|
|
444
|
+
* @see {@link codecFlags} For direct flag access
|
|
445
|
+
*/
|
|
446
|
+
setCodecFlags(...flags) {
|
|
447
|
+
if (this.isClosed) {
|
|
448
|
+
throw new Error('Cannot set flags on closed encoder');
|
|
449
|
+
}
|
|
450
|
+
if (this.initialized) {
|
|
451
|
+
throw new Error('Cannot set flags on already initialized encoder');
|
|
452
|
+
}
|
|
453
|
+
this.codecContext.setFlags(...flags);
|
|
454
|
+
}
|
|
455
|
+
/**
|
|
456
|
+
* Clear codec flags.
|
|
457
|
+
*
|
|
458
|
+
* @param flags - One or more flag values to clear
|
|
459
|
+
*
|
|
460
|
+
* @throws {Error} If encoder is already initialized or closed
|
|
461
|
+
*
|
|
462
|
+
* @example
|
|
463
|
+
* ```typescript
|
|
464
|
+
* import { AV_CODEC_FLAG_QSCALE } from 'node-av/constants';
|
|
465
|
+
*
|
|
466
|
+
* // Clear specific flag before initialization
|
|
467
|
+
* encoder.clearCodecFlags(AV_CODEC_FLAG_QSCALE);
|
|
468
|
+
* ```
|
|
469
|
+
*
|
|
470
|
+
* @see {@link setCodecFlags} To set flags
|
|
471
|
+
* @see {@link hasCodecFlags} To check flags
|
|
472
|
+
* @see {@link codecFlags} For direct flag access
|
|
473
|
+
*/
|
|
474
|
+
clearCodecFlags(...flags) {
|
|
475
|
+
if (this.isClosed) {
|
|
476
|
+
throw new Error('Cannot clear flags on closed encoder');
|
|
477
|
+
}
|
|
478
|
+
if (this.initialized) {
|
|
479
|
+
throw new Error('Cannot clear flags on already initialized encoder');
|
|
480
|
+
}
|
|
481
|
+
this.codecContext.clearFlags(...flags);
|
|
482
|
+
}
|
|
483
|
+
/**
|
|
484
|
+
* Check if codec has specific flags.
|
|
485
|
+
*
|
|
486
|
+
* Tests whether all specified codec flags are set using bitwise AND.
|
|
487
|
+
*
|
|
488
|
+
* @param flags - One or more flag values to check
|
|
489
|
+
*
|
|
490
|
+
* @returns true if all specified flags are set, false otherwise
|
|
491
|
+
*
|
|
492
|
+
* @throws {Error} If encoder is closed
|
|
493
|
+
*
|
|
494
|
+
* @example
|
|
495
|
+
* ```typescript
|
|
496
|
+
* import { AV_CODEC_FLAG_GLOBAL_HEADER } from 'node-av/constants';
|
|
497
|
+
*
|
|
498
|
+
* if (encoder.hasCodecFlags(AV_CODEC_FLAG_GLOBAL_HEADER)) {
|
|
499
|
+
* console.log('Global header flag is set');
|
|
500
|
+
* }
|
|
501
|
+
* ```
|
|
502
|
+
*
|
|
503
|
+
* @see {@link setCodecFlags} To set flags
|
|
504
|
+
* @see {@link clearCodecFlags} To clear flags
|
|
505
|
+
* @see {@link codecFlags} For direct flag access
|
|
506
|
+
*/
|
|
507
|
+
hasCodecFlags(...flags) {
|
|
508
|
+
if (this.isClosed) {
|
|
509
|
+
throw new Error('Cannot check flags on closed encoder');
|
|
510
|
+
}
|
|
511
|
+
return this.codecContext.hasFlags(...flags);
|
|
512
|
+
}
|
|
346
513
|
/**
|
|
347
514
|
* Check if encoder uses hardware acceleration.
|
|
348
515
|
*
|
|
@@ -376,24 +543,31 @@ export class Encoder {
|
|
|
376
543
|
return this.initialized && !this.isClosed;
|
|
377
544
|
}
|
|
378
545
|
/**
|
|
379
|
-
*
|
|
546
|
+
* Send a frame to the encoder.
|
|
380
547
|
*
|
|
381
|
-
* Sends a frame to the encoder
|
|
548
|
+
* Sends a raw frame to the encoder for encoding.
|
|
549
|
+
* Does not return encoded packets - use {@link receive} to retrieve packets.
|
|
382
550
|
* On first frame, automatically initializes encoder with frame properties.
|
|
383
|
-
*
|
|
551
|
+
* A single frame can produce zero, one, or multiple packets depending on codec buffering.
|
|
384
552
|
*
|
|
385
|
-
*
|
|
553
|
+
* **Important**: This method only SENDS the frame to the encoder.
|
|
554
|
+
* You must call {@link receive} separately (potentially multiple times) to get encoded packets.
|
|
386
555
|
*
|
|
387
|
-
*
|
|
556
|
+
* Direct mapping to avcodec_send_frame().
|
|
388
557
|
*
|
|
389
|
-
* @
|
|
558
|
+
* @param frame - Raw frame to send to encoder
|
|
390
559
|
*
|
|
391
|
-
* @throws {FFmpegError} If
|
|
560
|
+
* @throws {FFmpegError} If sending frame fails
|
|
392
561
|
*
|
|
393
562
|
* @example
|
|
394
563
|
* ```typescript
|
|
395
|
-
*
|
|
396
|
-
*
|
|
564
|
+
* // Send frame and receive packets
|
|
565
|
+
* await encoder.encode(frame);
|
|
566
|
+
*
|
|
567
|
+
* // Receive all available packets
|
|
568
|
+
* while (true) {
|
|
569
|
+
* const packet = await encoder.receive();
|
|
570
|
+
* if (!packet) break;
|
|
397
571
|
* console.log(`Encoded packet with PTS: ${packet.pts}`);
|
|
398
572
|
* await output.writePacket(packet);
|
|
399
573
|
* packet.free();
|
|
@@ -402,10 +576,13 @@ export class Encoder {
|
|
|
402
576
|
*
|
|
403
577
|
* @example
|
|
404
578
|
* ```typescript
|
|
405
|
-
* // Encode loop
|
|
406
579
|
* for await (const frame of decoder.frames(input.packets())) {
|
|
407
|
-
*
|
|
408
|
-
*
|
|
580
|
+
* // Send frame
|
|
581
|
+
* await encoder.encode(frame);
|
|
582
|
+
*
|
|
583
|
+
* // Receive available packets
|
|
584
|
+
* let packet;
|
|
585
|
+
* while ((packet = await encoder.receive())) {
|
|
409
586
|
* await output.writePacket(packet);
|
|
410
587
|
* packet.free();
|
|
411
588
|
* }
|
|
@@ -413,56 +590,181 @@ export class Encoder {
|
|
|
413
590
|
* }
|
|
414
591
|
* ```
|
|
415
592
|
*
|
|
593
|
+
* @see {@link receive} For receiving encoded packets
|
|
594
|
+
* @see {@link encodeAll} For combined send+receive operation
|
|
416
595
|
* @see {@link packets} For automatic frame iteration
|
|
417
596
|
* @see {@link flush} For end-of-stream handling
|
|
597
|
+
* @see {@link encodeSync} For synchronous version
|
|
418
598
|
*/
|
|
419
599
|
async encode(frame) {
|
|
420
600
|
if (this.isClosed) {
|
|
421
|
-
return
|
|
601
|
+
return;
|
|
602
|
+
}
|
|
603
|
+
// Open encoder if not already done
|
|
604
|
+
this.initializePromise ??= this.initialize(frame);
|
|
605
|
+
await this.initializePromise;
|
|
606
|
+
// Prepare frame for encoding (set quality, validate channel count)
|
|
607
|
+
this.prepareFrameForEncoding(frame);
|
|
608
|
+
const encode = async (newFrame) => {
|
|
609
|
+
const sendRet = await this.codecContext.sendFrame(newFrame);
|
|
610
|
+
if (sendRet < 0 && sendRet !== AVERROR_EOF) {
|
|
611
|
+
FFmpegError.throwIfError(sendRet, 'Failed to send frame to encoder');
|
|
612
|
+
return;
|
|
613
|
+
}
|
|
614
|
+
};
|
|
615
|
+
if (this.audioFrameBuffer) {
|
|
616
|
+
// Push frame into buffer - actual sending happens in receive()
|
|
617
|
+
await this.audioFrameBuffer.push(frame);
|
|
618
|
+
}
|
|
619
|
+
else {
|
|
620
|
+
await encode(frame);
|
|
621
|
+
}
|
|
622
|
+
}
|
|
623
|
+
/**
|
|
624
|
+
* Send a frame to the encoder synchronously.
|
|
625
|
+
* Synchronous version of encode.
|
|
626
|
+
*
|
|
627
|
+
* Sends a raw frame to the encoder for encoding.
|
|
628
|
+
* Does not return encoded packets - use {@link receiveSync} to retrieve packets.
|
|
629
|
+
* On first frame, automatically initializes encoder with frame properties.
|
|
630
|
+
* A single frame can produce zero, one, or multiple packets depending on codec buffering.
|
|
631
|
+
*
|
|
632
|
+
* **Important**: This method only SENDS the frame to the encoder.
|
|
633
|
+
* You must call {@link receiveSync} separately (potentially multiple times) to get encoded packets.
|
|
634
|
+
*
|
|
635
|
+
* Direct mapping to avcodec_send_frame().
|
|
636
|
+
*
|
|
637
|
+
* @param frame - Raw frame to send to encoder
|
|
638
|
+
*
|
|
639
|
+
* @throws {FFmpegError} If sending frame fails
|
|
640
|
+
*
|
|
641
|
+
* @example
|
|
642
|
+
* ```typescript
|
|
643
|
+
* // Send frame and receive packets
|
|
644
|
+
* encoder.encodeSync(frame);
|
|
645
|
+
*
|
|
646
|
+
* // Receive all available packets
|
|
647
|
+
* let packet;
|
|
648
|
+
* while ((packet = encoder.receiveSync())) {
|
|
649
|
+
* console.log(`Encoded packet with PTS: ${packet.pts}`);
|
|
650
|
+
* output.writePacketSync(packet);
|
|
651
|
+
* packet.free();
|
|
652
|
+
* }
|
|
653
|
+
* ```
|
|
654
|
+
*
|
|
655
|
+
* @see {@link receiveSync} For receiving encoded packets
|
|
656
|
+
* @see {@link encodeAllSync} For combined send+receive operation
|
|
657
|
+
* @see {@link packetsSync} For automatic frame iteration
|
|
658
|
+
* @see {@link flushSync} For end-of-stream handling
|
|
659
|
+
* @see {@link encode} For async version
|
|
660
|
+
*/
|
|
661
|
+
encodeSync(frame) {
|
|
662
|
+
if (this.isClosed) {
|
|
663
|
+
return;
|
|
422
664
|
}
|
|
423
665
|
// Open encoder if not already done
|
|
424
666
|
if (!this.initialized) {
|
|
425
|
-
|
|
426
|
-
|
|
667
|
+
this.initializeSync(frame);
|
|
668
|
+
}
|
|
669
|
+
// Prepare frame for encoding (set quality, validate channel count)
|
|
670
|
+
this.prepareFrameForEncoding(frame);
|
|
671
|
+
const encode = (newFrame) => {
|
|
672
|
+
const sendRet = this.codecContext.sendFrameSync(newFrame);
|
|
673
|
+
if (sendRet < 0 && sendRet !== AVERROR_EOF) {
|
|
674
|
+
FFmpegError.throwIfError(sendRet, 'Failed to send frame to encoder');
|
|
675
|
+
return;
|
|
427
676
|
}
|
|
428
|
-
|
|
677
|
+
};
|
|
678
|
+
if (this.audioFrameBuffer) {
|
|
679
|
+
// Push frame into buffer - actual sending happens in receiveSync()
|
|
680
|
+
this.audioFrameBuffer.pushSync(frame);
|
|
681
|
+
}
|
|
682
|
+
else {
|
|
683
|
+
encode(frame);
|
|
684
|
+
}
|
|
685
|
+
}
|
|
686
|
+
/**
|
|
687
|
+
* Encode a frame to packets.
|
|
688
|
+
*
|
|
689
|
+
* Sends a frame to the encoder and receives all available encoded packets.
|
|
690
|
+
* Returns array of packets - may be empty if encoder needs more data.
|
|
691
|
+
* On first frame, automatically initializes encoder with frame properties.
|
|
692
|
+
* One frame can produce zero, one, or multiple packets depending on codec.
|
|
693
|
+
*
|
|
694
|
+
* Direct mapping to avcodec_send_frame() and avcodec_receive_packet().
|
|
695
|
+
*
|
|
696
|
+
* @param frame - Raw frame to encode (or null to flush)
|
|
697
|
+
*
|
|
698
|
+
* @returns Array of encoded packets (empty if more data needed or encoder is closed)
|
|
699
|
+
*
|
|
700
|
+
* @throws {FFmpegError} If encoding fails
|
|
701
|
+
*
|
|
702
|
+
* @example
|
|
703
|
+
* ```typescript
|
|
704
|
+
* const packets = await encoder.encodeAll(frame);
|
|
705
|
+
* for (const packet of packets) {
|
|
706
|
+
* console.log(`Encoded packet with PTS: ${packet.pts}`);
|
|
707
|
+
* await output.writePacket(packet);
|
|
708
|
+
* packet.free();
|
|
709
|
+
* }
|
|
710
|
+
* ```
|
|
711
|
+
*
|
|
712
|
+
* @example
|
|
713
|
+
* ```typescript
|
|
714
|
+
* // Encode loop
|
|
715
|
+
* for await (const frame of decoder.frames(input.packets())) {
|
|
716
|
+
* const packets = await encoder.encodeAll(frame);
|
|
717
|
+
* for (const packet of packets) {
|
|
718
|
+
* await output.writePacket(packet);
|
|
719
|
+
* packet.free();
|
|
720
|
+
* }
|
|
721
|
+
* frame.free();
|
|
722
|
+
* }
|
|
723
|
+
* ```
|
|
724
|
+
*
|
|
725
|
+
* @see {@link encode} For single packet encoding
|
|
726
|
+
* @see {@link packets} For automatic frame iteration
|
|
727
|
+
* @see {@link flush} For end-of-stream handling
|
|
728
|
+
* @see {@link encodeAllSync} For synchronous version
|
|
729
|
+
*/
|
|
730
|
+
async encodeAll(frame) {
|
|
731
|
+
if (frame) {
|
|
732
|
+
await this.encode(frame);
|
|
429
733
|
}
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
734
|
+
else {
|
|
735
|
+
await this.flush();
|
|
736
|
+
}
|
|
737
|
+
// Receive all available packets
|
|
738
|
+
const packets = [];
|
|
739
|
+
while (true) {
|
|
434
740
|
const packet = await this.receive();
|
|
435
|
-
if (packet)
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
// If still failing, it's an error
|
|
439
|
-
if (sendRet !== AVERROR_EAGAIN) {
|
|
440
|
-
FFmpegError.throwIfError(sendRet, 'Failed to send frame');
|
|
441
|
-
}
|
|
741
|
+
if (!packet)
|
|
742
|
+
break; // Stop on EAGAIN or EOF
|
|
743
|
+
packets.push(packet); // Only push actual packets
|
|
442
744
|
}
|
|
443
|
-
|
|
444
|
-
return await this.receive();
|
|
745
|
+
return packets;
|
|
445
746
|
}
|
|
446
747
|
/**
|
|
447
|
-
* Encode a frame to
|
|
448
|
-
* Synchronous version of
|
|
748
|
+
* Encode a frame to packets synchronously.
|
|
749
|
+
* Synchronous version of encodeAll.
|
|
449
750
|
*
|
|
450
|
-
* Sends a frame to the encoder and
|
|
751
|
+
* Sends a frame to the encoder and receives all available encoded packets.
|
|
752
|
+
* Returns array of packets - may be empty if encoder needs more data.
|
|
451
753
|
* On first frame, automatically initializes encoder with frame properties.
|
|
452
|
-
*
|
|
754
|
+
* One frame can produce zero, one, or multiple packets depending on codec.
|
|
453
755
|
*
|
|
454
756
|
* Direct mapping to avcodec_send_frame() and avcodec_receive_packet().
|
|
455
757
|
*
|
|
456
758
|
* @param frame - Raw frame to encode (or null to flush)
|
|
457
759
|
*
|
|
458
|
-
* @returns
|
|
760
|
+
* @returns Array of encoded packets (empty if more data needed or encoder is closed)
|
|
459
761
|
*
|
|
460
762
|
* @throws {FFmpegError} If encoding fails
|
|
461
763
|
*
|
|
462
764
|
* @example
|
|
463
765
|
* ```typescript
|
|
464
|
-
* const
|
|
465
|
-
*
|
|
766
|
+
* const packets = encoder.encodeAllSync(frame);
|
|
767
|
+
* for (const packet of packets) {
|
|
466
768
|
* console.log(`Encoded packet with PTS: ${packet.pts}`);
|
|
467
769
|
* output.writePacketSync(packet);
|
|
468
770
|
* packet.free();
|
|
@@ -473,8 +775,8 @@ export class Encoder {
|
|
|
473
775
|
* ```typescript
|
|
474
776
|
* // Encode loop
|
|
475
777
|
* for (const frame of decoder.framesSync(packets)) {
|
|
476
|
-
* const
|
|
477
|
-
*
|
|
778
|
+
* const packets = encoder.encodeAllSync(frame);
|
|
779
|
+
* for (const packet of packets) {
|
|
478
780
|
* output.writePacketSync(packet);
|
|
479
781
|
* packet.free();
|
|
480
782
|
* }
|
|
@@ -482,52 +784,54 @@ export class Encoder {
|
|
|
482
784
|
* }
|
|
483
785
|
* ```
|
|
484
786
|
*
|
|
485
|
-
* @see {@link
|
|
787
|
+
* @see {@link encodeSync} For single packet encoding
|
|
788
|
+
* @see {@link packetsSync} For automatic frame iteration
|
|
789
|
+
* @see {@link flushSync} For end-of-stream handling
|
|
790
|
+
* @see {@link encodeAll} For async version
|
|
486
791
|
*/
|
|
487
|
-
|
|
488
|
-
if (
|
|
489
|
-
|
|
792
|
+
encodeAllSync(frame) {
|
|
793
|
+
if (frame) {
|
|
794
|
+
this.encodeSync(frame);
|
|
490
795
|
}
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
if (!frame) {
|
|
494
|
-
return null;
|
|
495
|
-
}
|
|
496
|
-
this.initializeSync(frame);
|
|
796
|
+
else {
|
|
797
|
+
this.flushSync();
|
|
497
798
|
}
|
|
498
|
-
//
|
|
499
|
-
const
|
|
500
|
-
|
|
501
|
-
// Encoder might be full, try to receive first
|
|
799
|
+
// Receive all available packets
|
|
800
|
+
const packets = [];
|
|
801
|
+
while (true) {
|
|
502
802
|
const packet = this.receiveSync();
|
|
503
|
-
if (packet)
|
|
504
|
-
|
|
505
|
-
//
|
|
506
|
-
if (sendRet !== AVERROR_EAGAIN) {
|
|
507
|
-
FFmpegError.throwIfError(sendRet, 'Failed to send frame');
|
|
508
|
-
}
|
|
803
|
+
if (!packet)
|
|
804
|
+
break; // Stop on EAGAIN or EOF
|
|
805
|
+
packets.push(packet); // Only push actual packets
|
|
509
806
|
}
|
|
510
|
-
|
|
511
|
-
return this.receiveSync();
|
|
807
|
+
return packets;
|
|
512
808
|
}
|
|
513
809
|
/**
|
|
514
810
|
* Encode frame stream to packet stream.
|
|
515
811
|
*
|
|
516
812
|
* High-level async generator for complete encoding pipeline.
|
|
517
|
-
*
|
|
518
|
-
* and flushes buffered packets at end.
|
|
813
|
+
* Encoder is only flushed when EOF (null) signal is explicitly received.
|
|
519
814
|
* Primary interface for stream-based encoding.
|
|
520
815
|
*
|
|
521
|
-
*
|
|
816
|
+
* **EOF Handling:**
|
|
817
|
+
* - Send null to flush encoder and get remaining buffered packets
|
|
818
|
+
* - Generator yields null after flushing when null is received
|
|
819
|
+
* - No automatic flushing - encoder stays open until EOF or close()
|
|
820
|
+
*
|
|
821
|
+
* @param frames - Async iterable of frames, single frame, or null to flush
|
|
522
822
|
*
|
|
523
|
-
* @yields {Packet} Encoded packets
|
|
823
|
+
* @yields {Packet | null} Encoded packets, followed by null when explicitly flushed
|
|
524
824
|
*
|
|
525
825
|
* @throws {FFmpegError} If encoding fails
|
|
526
826
|
*
|
|
527
827
|
* @example
|
|
528
828
|
* ```typescript
|
|
529
|
-
* //
|
|
829
|
+
* // Stream of frames with automatic EOF propagation
|
|
530
830
|
* for await (const packet of encoder.packets(decoder.frames(input.packets()))) {
|
|
831
|
+
* if (packet === null) {
|
|
832
|
+
* console.log('Encoder flushed');
|
|
833
|
+
* break;
|
|
834
|
+
* }
|
|
531
835
|
* await output.writePacket(packet);
|
|
532
836
|
* packet.free(); // Must free output packets
|
|
533
837
|
* }
|
|
@@ -535,82 +839,103 @@ export class Encoder {
|
|
|
535
839
|
*
|
|
536
840
|
* @example
|
|
537
841
|
* ```typescript
|
|
538
|
-
* //
|
|
539
|
-
*
|
|
540
|
-
* for await (const frame of decoder.frames(input.packets())) {
|
|
541
|
-
* const filtered = await filter.process(frame);
|
|
542
|
-
* if (filtered) {
|
|
543
|
-
* yield filtered;
|
|
544
|
-
* }
|
|
545
|
-
* frame.free();
|
|
546
|
-
* }
|
|
547
|
-
* }
|
|
548
|
-
*
|
|
549
|
-
* for await (const packet of encoder.packets(filteredFrames())) {
|
|
842
|
+
* // Single frame - no automatic flush
|
|
843
|
+
* for await (const packet of encoder.packets(singleFrame)) {
|
|
550
844
|
* await output.writePacket(packet);
|
|
551
845
|
* packet.free();
|
|
552
846
|
* }
|
|
847
|
+
* // Encoder remains open, buffered packets not flushed
|
|
553
848
|
* ```
|
|
554
849
|
*
|
|
555
850
|
* @example
|
|
556
851
|
* ```typescript
|
|
557
|
-
* //
|
|
558
|
-
*
|
|
559
|
-
*
|
|
560
|
-
*
|
|
561
|
-
*
|
|
562
|
-
*
|
|
563
|
-
*
|
|
564
|
-
* output
|
|
565
|
-
*
|
|
566
|
-
*
|
|
852
|
+
* // Explicit flush with EOF
|
|
853
|
+
* for await (const packet of encoder.packets(null)) {
|
|
854
|
+
* if (packet === null) {
|
|
855
|
+
* console.log('All buffered packets flushed');
|
|
856
|
+
* break;
|
|
857
|
+
* }
|
|
858
|
+
* console.log('Buffered packet:', packet.pts);
|
|
859
|
+
* await output.writePacket(packet);
|
|
860
|
+
* packet.free();
|
|
861
|
+
* }
|
|
567
862
|
* ```
|
|
568
863
|
*
|
|
569
864
|
* @see {@link encode} For single frame encoding
|
|
570
865
|
* @see {@link Decoder.frames} For frame source
|
|
866
|
+
* @see {@link packetsSync} For sync version
|
|
571
867
|
*/
|
|
572
868
|
async *packets(frames) {
|
|
573
|
-
|
|
574
|
-
|
|
869
|
+
const self = this;
|
|
870
|
+
const processFrame = async function* (frame) {
|
|
871
|
+
await self.encode(frame);
|
|
872
|
+
while (true) {
|
|
873
|
+
const packet = await self.receive();
|
|
874
|
+
if (!packet)
|
|
875
|
+
break;
|
|
876
|
+
yield packet;
|
|
877
|
+
}
|
|
878
|
+
}.bind(this);
|
|
879
|
+
const finalize = async function* () {
|
|
880
|
+
for await (const remaining of self.flushPackets()) {
|
|
881
|
+
yield remaining;
|
|
882
|
+
}
|
|
883
|
+
yield null;
|
|
884
|
+
}.bind(this);
|
|
885
|
+
if (frames === null) {
|
|
886
|
+
yield* finalize();
|
|
887
|
+
return;
|
|
888
|
+
}
|
|
889
|
+
if (frames instanceof Frame) {
|
|
890
|
+
yield* processFrame(frames);
|
|
891
|
+
return;
|
|
892
|
+
}
|
|
893
|
+
for await (const frame_1 of frames) {
|
|
894
|
+
const env_1 = { stack: [], error: void 0, hasError: false };
|
|
575
895
|
try {
|
|
576
|
-
const
|
|
577
|
-
if (
|
|
578
|
-
yield
|
|
896
|
+
const frame = __addDisposableResource(env_1, frame_1, false);
|
|
897
|
+
if (frame === null) {
|
|
898
|
+
yield* finalize();
|
|
899
|
+
return;
|
|
579
900
|
}
|
|
901
|
+
yield* processFrame(frame);
|
|
902
|
+
}
|
|
903
|
+
catch (e_1) {
|
|
904
|
+
env_1.error = e_1;
|
|
905
|
+
env_1.hasError = true;
|
|
580
906
|
}
|
|
581
907
|
finally {
|
|
582
|
-
|
|
583
|
-
frame.free();
|
|
908
|
+
__disposeResources(env_1);
|
|
584
909
|
}
|
|
585
910
|
}
|
|
586
|
-
// Flush encoder after all frames
|
|
587
|
-
await this.flush();
|
|
588
|
-
while (!this.isClosed) {
|
|
589
|
-
const remaining = await this.receive();
|
|
590
|
-
if (!remaining)
|
|
591
|
-
break;
|
|
592
|
-
yield remaining;
|
|
593
|
-
}
|
|
594
911
|
}
|
|
595
912
|
/**
|
|
596
913
|
* Encode frame stream to packet stream synchronously.
|
|
597
914
|
* Synchronous version of packets.
|
|
598
915
|
*
|
|
599
916
|
* High-level sync generator for complete encoding pipeline.
|
|
600
|
-
*
|
|
601
|
-
* and flushes buffered packets at end.
|
|
917
|
+
* Encoder is only flushed when EOF (null) signal is explicitly received.
|
|
602
918
|
* Primary interface for stream-based encoding.
|
|
603
919
|
*
|
|
604
|
-
*
|
|
920
|
+
* **EOF Handling:**
|
|
921
|
+
* - Send null to flush encoder and get remaining buffered packets
|
|
922
|
+
* - Generator yields null after flushing when null is received
|
|
923
|
+
* - No automatic flushing - encoder stays open until EOF or close()
|
|
924
|
+
*
|
|
925
|
+
* @param frames - Iterable of frames, single frame, or null to flush
|
|
605
926
|
*
|
|
606
|
-
* @yields {Packet} Encoded packets
|
|
927
|
+
* @yields {Packet | null} Encoded packets, followed by null when explicitly flushed
|
|
607
928
|
*
|
|
608
929
|
* @throws {FFmpegError} If encoding fails
|
|
609
930
|
*
|
|
610
931
|
* @example
|
|
611
932
|
* ```typescript
|
|
612
|
-
* //
|
|
933
|
+
* // Stream of frames with automatic EOF propagation
|
|
613
934
|
* for (const packet of encoder.packetsSync(decoder.framesSync(packets))) {
|
|
935
|
+
* if (packet === null) {
|
|
936
|
+
* console.log('Encoder flushed');
|
|
937
|
+
* break;
|
|
938
|
+
* }
|
|
614
939
|
* output.writePacketSync(packet);
|
|
615
940
|
* packet.free(); // Must free output packets
|
|
616
941
|
* }
|
|
@@ -618,47 +943,83 @@ export class Encoder {
|
|
|
618
943
|
*
|
|
619
944
|
* @example
|
|
620
945
|
* ```typescript
|
|
621
|
-
* //
|
|
622
|
-
*
|
|
623
|
-
*
|
|
624
|
-
*
|
|
625
|
-
* if (filtered) {
|
|
626
|
-
* yield filtered;
|
|
627
|
-
* }
|
|
628
|
-
* frame.free();
|
|
629
|
-
* }
|
|
946
|
+
* // Single frame - no automatic flush
|
|
947
|
+
* for (const packet of encoder.packetsSync(singleFrame)) {
|
|
948
|
+
* output.writePacketSync(packet);
|
|
949
|
+
* packet.free();
|
|
630
950
|
* }
|
|
951
|
+
* // Encoder remains open, buffered packets not flushed
|
|
952
|
+
* ```
|
|
631
953
|
*
|
|
632
|
-
*
|
|
954
|
+
* @example
|
|
955
|
+
* ```typescript
|
|
956
|
+
* // Explicit flush with EOF
|
|
957
|
+
* for (const packet of encoder.packetsSync(null)) {
|
|
958
|
+
* if (packet === null) {
|
|
959
|
+
* console.log('All buffered packets flushed');
|
|
960
|
+
* break;
|
|
961
|
+
* }
|
|
962
|
+
* console.log('Buffered packet:', packet.pts);
|
|
633
963
|
* output.writePacketSync(packet);
|
|
634
964
|
* packet.free();
|
|
635
965
|
* }
|
|
636
966
|
* ```
|
|
637
967
|
*
|
|
968
|
+
* @see {@link encodeSync} For single frame encoding
|
|
969
|
+
* @see {@link Decoder.framesSync} For frame source
|
|
638
970
|
* @see {@link packets} For async version
|
|
639
971
|
*/
|
|
640
972
|
*packetsSync(frames) {
|
|
641
|
-
|
|
642
|
-
|
|
973
|
+
const self = this;
|
|
974
|
+
// Helper: Encode frame and yield all available packets (filters out EAGAIN nulls and EOF)
|
|
975
|
+
const processFrame = function* (frame) {
|
|
976
|
+
self.encodeSync(frame);
|
|
977
|
+
// Receive ALL packets (filter out null/EAGAIN and EOF)
|
|
978
|
+
while (true) {
|
|
979
|
+
const packet = self.receiveSync();
|
|
980
|
+
if (!packet)
|
|
981
|
+
break; // EAGAIN or EOF - no more packets available
|
|
982
|
+
yield packet; // Only yield actual packets
|
|
983
|
+
}
|
|
984
|
+
}.bind(this);
|
|
985
|
+
// Helper: Flush encoder and signal EOF
|
|
986
|
+
const finalize = function* () {
|
|
987
|
+
for (const remaining of self.flushPacketsSync()) {
|
|
988
|
+
yield remaining; // Only yield actual packets
|
|
989
|
+
}
|
|
990
|
+
yield null; // Signal end-of-stream
|
|
991
|
+
}.bind(this);
|
|
992
|
+
// Case 1: EOF input -> flush only
|
|
993
|
+
if (frames === null) {
|
|
994
|
+
yield* finalize();
|
|
995
|
+
return;
|
|
996
|
+
}
|
|
997
|
+
// Case 2: Single frame - NO AUTOMATIC FLUSH
|
|
998
|
+
if (frames instanceof Frame) {
|
|
999
|
+
yield* processFrame(frames);
|
|
1000
|
+
return; // No finalize() call!
|
|
1001
|
+
}
|
|
1002
|
+
// Case 3: Iterable of frames
|
|
1003
|
+
for (const frame_2 of frames) {
|
|
1004
|
+
const env_2 = { stack: [], error: void 0, hasError: false };
|
|
643
1005
|
try {
|
|
644
|
-
const
|
|
645
|
-
|
|
646
|
-
|
|
1006
|
+
const frame = __addDisposableResource(env_2, frame_2, false);
|
|
1007
|
+
// Check for EOF signal from upstream
|
|
1008
|
+
if (frame === null) {
|
|
1009
|
+
yield* finalize();
|
|
1010
|
+
return;
|
|
647
1011
|
}
|
|
1012
|
+
yield* processFrame(frame);
|
|
1013
|
+
}
|
|
1014
|
+
catch (e_2) {
|
|
1015
|
+
env_2.error = e_2;
|
|
1016
|
+
env_2.hasError = true;
|
|
648
1017
|
}
|
|
649
1018
|
finally {
|
|
650
|
-
|
|
651
|
-
frame.free();
|
|
1019
|
+
__disposeResources(env_2);
|
|
652
1020
|
}
|
|
653
1021
|
}
|
|
654
|
-
//
|
|
655
|
-
this.flushSync();
|
|
656
|
-
while (!this.isClosed) {
|
|
657
|
-
const remaining = this.receiveSync();
|
|
658
|
-
if (!remaining)
|
|
659
|
-
break;
|
|
660
|
-
yield remaining;
|
|
661
|
-
}
|
|
1022
|
+
// No fallback flush - only flush on explicit EOF
|
|
662
1023
|
}
|
|
663
1024
|
/**
|
|
664
1025
|
* Flush encoder and signal end-of-stream.
|
|
@@ -685,11 +1046,32 @@ export class Encoder {
|
|
|
685
1046
|
*
|
|
686
1047
|
* @see {@link flushPackets} For async iteration
|
|
687
1048
|
* @see {@link receive} For getting buffered packets
|
|
1049
|
+
* @see {@link flushSync} For synchronous version
|
|
688
1050
|
*/
|
|
689
1051
|
async flush() {
|
|
690
1052
|
if (this.isClosed || !this.initialized) {
|
|
691
1053
|
return;
|
|
692
1054
|
}
|
|
1055
|
+
// If using AudioFrameBuffer, flush remaining buffered samples first
|
|
1056
|
+
if (this.audioFrameBuffer && this.audioFrameBuffer.size > 0) {
|
|
1057
|
+
// Pull any remaining partial frame (may be less than frameSize)
|
|
1058
|
+
// For the final frame, we pad or truncate as needed
|
|
1059
|
+
let _bufferedFrame;
|
|
1060
|
+
while (!this.isClosed && (_bufferedFrame = await this.audioFrameBuffer.pull()) !== null) {
|
|
1061
|
+
const env_3 = { stack: [], error: void 0, hasError: false };
|
|
1062
|
+
try {
|
|
1063
|
+
const bufferedFrame = __addDisposableResource(env_3, _bufferedFrame, false);
|
|
1064
|
+
await this.codecContext.sendFrame(bufferedFrame);
|
|
1065
|
+
}
|
|
1066
|
+
catch (e_3) {
|
|
1067
|
+
env_3.error = e_3;
|
|
1068
|
+
env_3.hasError = true;
|
|
1069
|
+
}
|
|
1070
|
+
finally {
|
|
1071
|
+
__disposeResources(env_3);
|
|
1072
|
+
}
|
|
1073
|
+
}
|
|
1074
|
+
}
|
|
693
1075
|
// Send flush frame (null)
|
|
694
1076
|
const ret = await this.codecContext.sendFrame(null);
|
|
695
1077
|
if (ret < 0 && ret !== AVERROR_EOF) {
|
|
@@ -722,12 +1104,34 @@ export class Encoder {
|
|
|
722
1104
|
* }
|
|
723
1105
|
* ```
|
|
724
1106
|
*
|
|
1107
|
+
* @see {@link flushPacketsSync} For sync iteration
|
|
1108
|
+
* @see {@link receiveSync} For getting buffered packets
|
|
725
1109
|
* @see {@link flush} For async version
|
|
726
1110
|
*/
|
|
727
1111
|
flushSync() {
|
|
728
1112
|
if (this.isClosed || !this.initialized) {
|
|
729
1113
|
return;
|
|
730
1114
|
}
|
|
1115
|
+
// If using AudioFrameBuffer, flush remaining buffered samples first
|
|
1116
|
+
if (this.audioFrameBuffer && this.audioFrameBuffer.size > 0) {
|
|
1117
|
+
// Pull any remaining partial frame (may be less than frameSize)
|
|
1118
|
+
// For the final frame, we pad or truncate as needed
|
|
1119
|
+
let _bufferedFrame;
|
|
1120
|
+
while (!this.isClosed && (_bufferedFrame = this.audioFrameBuffer.pullSync()) !== null) {
|
|
1121
|
+
const env_4 = { stack: [], error: void 0, hasError: false };
|
|
1122
|
+
try {
|
|
1123
|
+
const bufferedFrame = __addDisposableResource(env_4, _bufferedFrame, false);
|
|
1124
|
+
this.codecContext.sendFrameSync(bufferedFrame);
|
|
1125
|
+
}
|
|
1126
|
+
catch (e_4) {
|
|
1127
|
+
env_4.error = e_4;
|
|
1128
|
+
env_4.hasError = true;
|
|
1129
|
+
}
|
|
1130
|
+
finally {
|
|
1131
|
+
__disposeResources(env_4);
|
|
1132
|
+
}
|
|
1133
|
+
}
|
|
1134
|
+
}
|
|
731
1135
|
// Send flush frame (null)
|
|
732
1136
|
const ret = this.codecContext.sendFrameSync(null);
|
|
733
1137
|
if (ret < 0 && ret !== AVERROR_EOF) {
|
|
@@ -755,15 +1159,19 @@ export class Encoder {
|
|
|
755
1159
|
* }
|
|
756
1160
|
* ```
|
|
757
1161
|
*
|
|
1162
|
+
* @see {@link encode} For sending frames and receiving packets
|
|
758
1163
|
* @see {@link flush} For signaling end-of-stream
|
|
759
|
-
* @see {@link
|
|
1164
|
+
* @see {@link flushPacketsSync} For synchronous version
|
|
760
1165
|
*/
|
|
761
1166
|
async *flushPackets() {
|
|
762
1167
|
// Send flush signal
|
|
763
1168
|
await this.flush();
|
|
764
|
-
|
|
765
|
-
while (
|
|
766
|
-
|
|
1169
|
+
// Yield all remaining packets (filter out null/EAGAIN and EOF)
|
|
1170
|
+
while (true) {
|
|
1171
|
+
const packet = await this.receive();
|
|
1172
|
+
if (!packet)
|
|
1173
|
+
break; // Stop on EAGAIN or EOF
|
|
1174
|
+
yield packet; // Only yield actual packets
|
|
767
1175
|
}
|
|
768
1176
|
}
|
|
769
1177
|
/**
|
|
@@ -786,14 +1194,19 @@ export class Encoder {
|
|
|
786
1194
|
* }
|
|
787
1195
|
* ```
|
|
788
1196
|
*
|
|
1197
|
+
* @see {@link encodeSync} For sending frames and receiving packets
|
|
1198
|
+
* @see {@link flushSync} For signaling end-of-stream
|
|
789
1199
|
* @see {@link flushPackets} For async version
|
|
790
1200
|
*/
|
|
791
1201
|
*flushPacketsSync() {
|
|
792
1202
|
// Send flush signal
|
|
793
1203
|
this.flushSync();
|
|
794
|
-
|
|
795
|
-
while (
|
|
796
|
-
|
|
1204
|
+
// Yield all remaining packets (filter out null/EAGAIN and EOF)
|
|
1205
|
+
while (true) {
|
|
1206
|
+
const packet = this.receiveSync();
|
|
1207
|
+
if (!packet)
|
|
1208
|
+
break; // Stop on EAGAIN or EOF
|
|
1209
|
+
yield packet; // Only yield actual packets
|
|
797
1210
|
}
|
|
798
1211
|
}
|
|
799
1212
|
/**
|
|
@@ -801,19 +1214,25 @@ export class Encoder {
|
|
|
801
1214
|
*
|
|
802
1215
|
* Gets encoded packets from the codec's internal buffer.
|
|
803
1216
|
* Handles packet cloning and error checking.
|
|
804
|
-
*
|
|
805
|
-
*
|
|
1217
|
+
* Implements FFmpeg's send/receive pattern.
|
|
1218
|
+
*
|
|
1219
|
+
* **Return Values:**
|
|
1220
|
+
* - `Packet` - Successfully encoded packet (AVERROR >= 0)
|
|
1221
|
+
* - `null` - Need more input frames (AVERROR_EAGAIN), or encoder not initialized
|
|
1222
|
+
* - `undefined` - End of stream reached (AVERROR_EOF), or encoder is closed
|
|
806
1223
|
*
|
|
807
1224
|
* Direct mapping to avcodec_receive_packet().
|
|
808
1225
|
*
|
|
809
|
-
* @returns Cloned packet or
|
|
1226
|
+
* @returns Cloned packet, null if need more data, or undefined if stream ended
|
|
810
1227
|
*
|
|
811
1228
|
* @throws {FFmpegError} If receive fails with error other than AVERROR_EAGAIN or AVERROR_EOF
|
|
812
1229
|
*
|
|
813
1230
|
* @example
|
|
814
1231
|
* ```typescript
|
|
815
|
-
*
|
|
816
|
-
*
|
|
1232
|
+
* // Process all buffered packets
|
|
1233
|
+
* while (true) {
|
|
1234
|
+
* const packet = await encoder.receive();
|
|
1235
|
+
* if (!packet) break; // Stop on EAGAIN or EOF
|
|
817
1236
|
* console.log(`Got packet with PTS: ${packet.pts}`);
|
|
818
1237
|
* await output.writePacket(packet);
|
|
819
1238
|
* packet.free();
|
|
@@ -822,10 +1241,14 @@ export class Encoder {
|
|
|
822
1241
|
*
|
|
823
1242
|
* @example
|
|
824
1243
|
* ```typescript
|
|
825
|
-
* //
|
|
826
|
-
*
|
|
827
|
-
*
|
|
828
|
-
* console.log(
|
|
1244
|
+
* // Handle each return value explicitly
|
|
1245
|
+
* const packet = await encoder.receive();
|
|
1246
|
+
* if (packet === EOF) {
|
|
1247
|
+
* console.log('Encoder stream ended');
|
|
1248
|
+
* } else if (packet === null) {
|
|
1249
|
+
* console.log('Need more input frames');
|
|
1250
|
+
* } else {
|
|
1251
|
+
* console.log(`Got packet: pts=${packet.pts}`);
|
|
829
1252
|
* await output.writePacket(packet);
|
|
830
1253
|
* packet.free();
|
|
831
1254
|
* }
|
|
@@ -833,22 +1256,51 @@ export class Encoder {
|
|
|
833
1256
|
*
|
|
834
1257
|
* @see {@link encode} For sending frames and receiving packets
|
|
835
1258
|
* @see {@link flush} For signaling end-of-stream
|
|
1259
|
+
* @see {@link receiveSync} For synchronous version
|
|
1260
|
+
* @see {@link EOF} For end-of-stream signal
|
|
836
1261
|
*/
|
|
837
1262
|
async receive() {
|
|
838
|
-
if (this.isClosed
|
|
1263
|
+
if (this.isClosed) {
|
|
1264
|
+
return EOF;
|
|
1265
|
+
}
|
|
1266
|
+
if (!this.initialized) {
|
|
839
1267
|
return null;
|
|
840
1268
|
}
|
|
841
1269
|
// Clear previous packet data
|
|
842
1270
|
this.packet.unref();
|
|
1271
|
+
if (this.audioFrameBuffer?.hasFrame()) {
|
|
1272
|
+
const env_5 = { stack: [], error: void 0, hasError: false };
|
|
1273
|
+
try {
|
|
1274
|
+
const bufferedFrame = __addDisposableResource(env_5, await this.audioFrameBuffer.pull(), false);
|
|
1275
|
+
if (bufferedFrame) {
|
|
1276
|
+
await this.codecContext.sendFrame(bufferedFrame);
|
|
1277
|
+
}
|
|
1278
|
+
}
|
|
1279
|
+
catch (e_5) {
|
|
1280
|
+
env_5.error = e_5;
|
|
1281
|
+
env_5.hasError = true;
|
|
1282
|
+
}
|
|
1283
|
+
finally {
|
|
1284
|
+
__disposeResources(env_5);
|
|
1285
|
+
}
|
|
1286
|
+
}
|
|
843
1287
|
const ret = await this.codecContext.receivePacket(this.packet);
|
|
844
1288
|
if (ret === 0) {
|
|
1289
|
+
// Set packet timebase to codec timebase
|
|
1290
|
+
this.packet.timeBase = this.codecContext.timeBase;
|
|
1291
|
+
// Mark packet as trusted (from encoder)
|
|
1292
|
+
this.packet.setFlags(AV_PKT_FLAG_TRUSTED);
|
|
845
1293
|
// Got a packet, clone it for the user
|
|
846
1294
|
return this.packet.clone();
|
|
847
1295
|
}
|
|
848
|
-
else if (ret === AVERROR_EAGAIN
|
|
849
|
-
// Need more data
|
|
1296
|
+
else if (ret === AVERROR_EAGAIN) {
|
|
1297
|
+
// Need more data
|
|
850
1298
|
return null;
|
|
851
1299
|
}
|
|
1300
|
+
else if (ret === AVERROR_EOF) {
|
|
1301
|
+
// End of stream
|
|
1302
|
+
return EOF;
|
|
1303
|
+
}
|
|
852
1304
|
else {
|
|
853
1305
|
// Error
|
|
854
1306
|
FFmpegError.throwIfError(ret, 'Failed to receive packet');
|
|
@@ -861,19 +1313,25 @@ export class Encoder {
|
|
|
861
1313
|
*
|
|
862
1314
|
* Gets encoded packets from the codec's internal buffer.
|
|
863
1315
|
* Handles packet cloning and error checking.
|
|
864
|
-
*
|
|
865
|
-
*
|
|
1316
|
+
* Implements FFmpeg's send/receive pattern.
|
|
1317
|
+
*
|
|
1318
|
+
* **Return Values:**
|
|
1319
|
+
* - `Packet` - Successfully encoded packet (AVERROR >= 0)
|
|
1320
|
+
* - `null` - Need more input frames (AVERROR_EAGAIN), or encoder not initialized
|
|
1321
|
+
* - `undefined` - End of stream reached (AVERROR_EOF), or encoder is closed
|
|
866
1322
|
*
|
|
867
1323
|
* Direct mapping to avcodec_receive_packet().
|
|
868
1324
|
*
|
|
869
|
-
* @returns Cloned packet or
|
|
1325
|
+
* @returns Cloned packet, null if need more data, or undefined if stream ended
|
|
870
1326
|
*
|
|
871
1327
|
* @throws {FFmpegError} If receive fails with error other than AVERROR_EAGAIN or AVERROR_EOF
|
|
872
1328
|
*
|
|
873
1329
|
* @example
|
|
874
1330
|
* ```typescript
|
|
875
|
-
*
|
|
876
|
-
*
|
|
1331
|
+
* // Process all buffered packets
|
|
1332
|
+
* while (true) {
|
|
1333
|
+
* const packet = encoder.receiveSync();
|
|
1334
|
+
* if (!packet) break; // Stop on EAGAIN or EOF
|
|
877
1335
|
* console.log(`Got packet with PTS: ${packet.pts}`);
|
|
878
1336
|
* output.writePacketSync(packet);
|
|
879
1337
|
* packet.free();
|
|
@@ -882,38 +1340,101 @@ export class Encoder {
  *
  * @example
  * ```typescript
- * //
- *
- *
- * console.log(
+ * // Handle each return value explicitly
+ * const packet = encoder.receiveSync();
+ * if (packet === EOF) {
+ *   console.log('Encoder stream ended');
+ * } else if (packet === null) {
+ *   console.log('Need more input frames');
+ * } else {
+ *   console.log(`Got packet: pts=${packet.pts}`);
  *   output.writePacketSync(packet);
  *   packet.free();
  * }
  * ```
  *
+ * @see {@link encodeSync} For sending frames and receiving packets
+ * @see {@link flushSync} For signaling end-of-stream
  * @see {@link receive} For async version
+ * @see {@link EOF} For end-of-stream signal
  */
 receiveSync() {
-    if (this.isClosed
+    if (this.isClosed) {
+        return EOF;
+    }
+    if (!this.initialized) {
         return null;
     }
     // Clear previous packet data
     this.packet.unref();
+    if (this.audioFrameBuffer?.hasFrame()) {
+        const env_6 = { stack: [], error: void 0, hasError: false };
+        try {
+            const bufferedFrame = __addDisposableResource(env_6, this.audioFrameBuffer.pullSync(), false);
+            if (bufferedFrame) {
+                this.codecContext.sendFrameSync(bufferedFrame);
+            }
+        }
+        catch (e_6) {
+            env_6.error = e_6;
+            env_6.hasError = true;
+        }
+        finally {
+            __disposeResources(env_6);
+        }
+    }
     const ret = this.codecContext.receivePacketSync(this.packet);
     if (ret === 0) {
+        // Set packet timebase to codec timebase
+        this.packet.timeBase = this.codecContext.timeBase;
+        // Mark packet as trusted (from encoder)
+        this.packet.setFlags(AV_PKT_FLAG_TRUSTED);
         // Got a packet, clone it for the user
         return this.packet.clone();
     }
-    else if (ret === AVERROR_EAGAIN
-        // Need more data
+    else if (ret === AVERROR_EAGAIN) {
+        // Need more data
         return null;
     }
+    else if (ret === AVERROR_EOF) {
+        // End of stream
+        return EOF;
+    }
     else {
         // Error
         FFmpegError.throwIfError(ret, 'Failed to receive packet');
         return null;
     }
 }
+/**
+ * Pipe encoded packets to muxer.
+ *
+ * @param target - Media output component to write packets to
+ *
+ * @param streamIndex - Stream index to write packets to
+ *
+ * @returns Scheduler for continued chaining
+ *
+ * @example
+ * ```typescript
+ * decoder.pipeTo(filter).pipeTo(encoder)
+ * ```
+ */
+pipeTo(target, streamIndex) {
+    // Start worker if not already running
+    this.workerPromise ??= this.runWorker();
+    // Start pipe task: encoder.outputQueue -> output
+    this.pipeToPromise = (async () => {
+        while (true) {
+            const packet = await this.receiveFromQueue();
+            if (!packet)
+                break;
+            await target.writePacket(packet, streamIndex);
+        }
+    })();
+    // Return control without pipeTo (terminal stage)
+    return new SchedulerControl(this);
+}
 /**
  * Close encoder and free resources.
  *
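
The new `pipeTo()` above is the terminal stage of the push-based pipeline added in 5.0: it lazily starts the worker, then drains the encoder's output queue into the downstream writer. The sketch below restates that queue-draining pattern with self-contained TypeScript types; `PacketSource`, `PacketSink`, and `pipePackets` are assumptions for illustration, not the package's API.

```typescript
// Illustrative sketch of the drain loop pipeTo() sets up: pull packets from
// the stage's output queue until it yields null (queue closed), writing each
// one to the downstream sink.
interface PacketSource<P> {
  receiveFromQueue(): Promise<P | null>;
}
interface PacketSink<P> {
  writePacket(packet: P, streamIndex?: number): Promise<void>;
}

function pipePackets<P>(source: PacketSource<P>, sink: PacketSink<P>, streamIndex?: number): Promise<void> {
  return (async () => {
    while (true) {
      const packet = await source.receiveFromQueue();
      if (!packet) break; // queue closed: upstream stage has finished
      await sink.writePacket(packet, streamIndex);
    }
  })();
}
```
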
@@ -938,10 +1459,150 @@ export class Encoder {
         return;
     }
     this.isClosed = true;
+    // Close queues
+    this.inputQueue.close();
+    this.outputQueue.close();
     this.packet.free();
     this.codecContext.freeContext();
     this.initialized = false;
 }
+/**
+ * Get encoder codec.
+ *
+ * Returns the codec used by this encoder.
+ * Useful for checking codec capabilities and properties.
+ *
+ * @returns Codec instance
+ *
+ * @internal
+ *
+ * @see {@link Codec} For codec details
+ */
+getCodec() {
+    return this.codec;
+}
+/**
+ * Get underlying codec context.
+ *
+ * Returns the codec context for advanced operations.
+ * Useful for accessing low-level codec properties and settings.
+ * Returns null if encoder is closed or not initialized.
+ *
+ * @returns Codec context or null if closed/not initialized
+ *
+ * @internal
+ *
+ * @see {@link CodecContext} For context details
+ */
+getCodecContext() {
+    return !this.isClosed && this.initialized ? this.codecContext : null;
+}
+/**
+ * Worker loop for push-based processing.
+ *
+ * @internal
+ */
+async runWorker() {
+    try {
+        // Outer loop - receive frames
+        while (!this.inputQueue.isClosed) {
+            const env_7 = { stack: [], error: void 0, hasError: false };
+            try {
+                const frame = __addDisposableResource(env_7, await this.inputQueue.receive(), false);
+                if (!frame)
+                    break;
+                // Open encoder if not already done
+                if (!this.initialized) {
+                    this.initializePromise ??= this.initialize(frame);
+                }
+                await this.initializePromise;
+                // Prepare frame for encoding (set quality, validate channel count)
+                this.prepareFrameForEncoding(frame);
+                await this.encode(frame);
+                // Receive packets
+                while (!this.outputQueue.isClosed) {
+                    const packet = await this.receive();
+                    if (!packet)
+                        break; // Stop on EAGAIN or EOF
+                    await this.outputQueue.send(packet); // Only send actual packets
+                }
+            }
+            catch (e_7) {
+                env_7.error = e_7;
+                env_7.hasError = true;
+            }
+            finally {
+                __disposeResources(env_7);
+            }
+        }
+        // Flush encoder at end
+        await this.flush();
+        while (!this.outputQueue.isClosed) {
+            const packet = await this.receive();
+            if (!packet)
+                break; // Stop on EAGAIN or EOF
+            await this.outputQueue.send(packet); // Only send actual packets
+        }
+    }
+    catch {
+        // Ignore error ?
+    }
+    finally {
+        // Close output queue when done
+        this.outputQueue?.close();
+    }
+}
+/**
+ * Send frame to input queue or flush the pipeline.
+ *
+ * When frame is provided, queues it for encoding.
+ * When null is provided, triggers flush sequence:
+ * - Closes input queue
+ * - Waits for worker completion
+ * - Flushes encoder and sends remaining packets to output queue
+ * - Closes output queue
+ * - Waits for pipeTo task completion (writes to muxer)
+ *
+ * Used by scheduler system for pipeline control.
+ *
+ * @param frame - Frame to send, or null to flush
+ *
+ * @internal
+ */
+async sendToQueue(frame) {
+    if (frame) {
+        await this.inputQueue.send(frame);
+    }
+    else {
+        // Close input queue to signal end of stream to worker
+        this.inputQueue.close();
+        // Wait for worker to finish processing all frames (if exists)
+        if (this.workerPromise) {
+            await this.workerPromise;
+        }
+        // Flush encoder at end
+        await this.flush();
+        while (true) {
+            const packet = await this.receive();
+            if (!packet)
+                break; // Stop on EAGAIN or EOF
+            await this.outputQueue.send(packet); // Only send actual packets
+        }
+        if (this.pipeToPromise) {
+            await this.pipeToPromise;
+        }
+    }
+}
+/**
+ * Receive packet from output queue.
+ *
+ * @returns Packet from output queue
+ *
+ * @internal
+ */
+async receiveFromQueue() {
+    return await this.outputQueue.receive();
+}
 /**
  * Initialize encoder from first frame.
  *
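
The worker and flush logic above is driven by two async queues (`inputQueue`, `outputQueue`) whose `receive()` resolves to `null` once the queue has been closed and drained; that null is what ends the loops in `runWorker()` and the `pipeTo()` task. A minimal, self-contained sketch of such a queue follows; `TinyQueue` is an illustrative stand-in written here, not the package's `AsyncQueue`.

```typescript
// Minimal async queue sketch: send() enqueues, receive() resolves with the
// next item, or with null once close() has been called and the buffer is
// empty - the shape the worker loop above relies on to detect end-of-stream.
class TinyQueue<T> {
  private items: T[] = [];
  private waiters: ((value: T | null) => void)[] = [];
  private closed = false;

  get isClosed(): boolean {
    return this.closed;
  }

  async send(item: T): Promise<void> {
    const waiter = this.waiters.shift();
    if (waiter) waiter(item);
    else this.items.push(item);
  }

  async receive(): Promise<T | null> {
    if (this.items.length > 0) return this.items.shift()!;
    if (this.closed) return null;
    return new Promise((resolve) => this.waiters.push(resolve));
  }

  close(): void {
    this.closed = true;
    for (const waiter of this.waiters.splice(0)) waiter(null);
  }
}
```
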
@@ -956,25 +1617,80 @@ export class Encoder {
  * @internal
  */
 async initialize(frame) {
+    // Get bits_per_raw_sample from decoder if available
+    if (this.options.decoder) {
+        const decoderCtx = this.options.decoder.getCodecContext();
+        if (decoderCtx && decoderCtx.bitsPerRawSample > 0) {
+            this.codecContext.bitsPerRawSample = decoderCtx.bitsPerRawSample;
+        }
+    }
+    // Get framerate from filter if available, otherwise from decoder
+    // This matches FFmpeg CLI behavior where encoder gets frame_rate_filter from FrameData
+    if (this.options.filter && frame.isVideo()) {
+        const filterFrameRate = this.options.filter.frameRate;
+        if (filterFrameRate) {
+            this.codecContext.framerate = new Rational(filterFrameRate.num, filterFrameRate.den);
+        }
+    }
+    // If no filter framerate, try to get from decoder stream
+    if ((!this.codecContext.framerate || this.codecContext.framerate.num === 0) && this.options.decoder && frame.isVideo()) {
+        const decoderCtx = this.options.decoder.getCodecContext();
+        if (decoderCtx?.framerate && decoderCtx.framerate.num > 0) {
+            this.codecContext.framerate = decoderCtx.framerate;
+        }
+    }
     if (frame.isVideo()) {
+        // FFmpeg CLI sets encoder time_base to 1/framerate (inverse of framerate)
+        // This allows encoder to produce sequential PTS (0, 1, 2, 3...) which enables
+        // proper B-frame DTS generation (negative DTS values)
+        if (this.codecContext.framerate && this.codecContext.framerate.num > 0) {
+            // Use inverse of framerate (e.g., framerate=30/1 → timebase=1/30)
+            this.codecContext.timeBase = new Rational(this.codecContext.framerate.den, this.codecContext.framerate.num);
+        }
+        else {
+            // Fallback: use frame timebase if framerate not available
+            this.codecContext.timeBase = frame.timeBase;
+        }
         this.codecContext.width = frame.width;
         this.codecContext.height = frame.height;
         this.codecContext.pixelFormat = frame.format;
         this.codecContext.sampleAspectRatio = frame.sampleAspectRatio;
+        this.codecContext.colorRange = frame.colorRange;
+        this.codecContext.colorPrimaries = frame.colorPrimaries;
+        this.codecContext.colorTrc = frame.colorTrc;
+        this.codecContext.colorSpace = frame.colorSpace;
+        // Only set chroma location if unspecified
+        if (this.codecContext.chromaLocation === AVCHROMA_LOC_UNSPECIFIED) {
+            this.codecContext.chromaLocation = frame.chromaLocation;
+        }
     }
     else {
+        // Audio: Always use frame timebase (which is typically 1/sample_rate)
+        // This ensures correct PTS progression for audio frames
+        this.codecContext.timeBase = frame.timeBase;
         this.codecContext.sampleRate = frame.sampleRate;
         this.codecContext.sampleFormat = frame.format;
         this.codecContext.channelLayout = frame.channelLayout;
     }
-
-    this.
+    // Setup hardware acceleration with validation
+    this.setupHardwareAcceleration(frame);
+    // AV_CODEC_FLAG_COPY_OPAQUE: Copy opaque data from frames to packets if supported
+    if (this.codec.hasCapabilities(AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE)) {
+        this.codecContext.setFlags(AV_CODEC_FLAG_COPY_OPAQUE);
+    }
+    // AV_CODEC_FLAG_FRAME_DURATION: Signal that frame duration matters for timestamps
+    this.codecContext.setFlags(AV_CODEC_FLAG_FRAME_DURATION);
     // Open codec
     const openRet = await this.codecContext.open2(this.codec, this.opts);
     if (openRet < 0) {
        this.codecContext.freeContext();
        FFmpegError.throwIfError(openRet, 'Failed to open encoder');
     }
+    // Check if encoder requires fixed frame size (e.g., Opus, AAC, MP3)
+    // If so, create AudioFrameBuffer to automatically chunk frames
+    if (frame.isAudio() && this.codecContext.frameSize > 0) {
+        this.audioFrameBuffer = AudioFrameBuffer.create(this.codecContext.frameSize, this.codecContext.sampleFormat, this.codecContext.sampleRate, this.codecContext.channelLayout, this.codecContext.channels);
+    }
     this.initialized = true;
 }
 /**
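
`initialize()` now derives the video encoder time base by inverting the frame rate, and timestamps are later rescaled into that time base. The sketch below restates that arithmetic in standalone TypeScript; `Rational`, `encoderTimeBaseFromFramerate`, and `rescaleQ` are illustrative helpers written here (this rescale truncates toward zero, whereas FFmpeg's av_rescale_q defaults to rounding), not the package's utilities.

```typescript
// Illustrative sketch of the time-base bookkeeping used above.
interface Rational { num: number; den: number; }

// e.g. 30/1 fps -> 1/30 s per tick, so encoder PTS can advance 0, 1, 2, ...
function encoderTimeBaseFromFramerate(framerate: Rational): Rational {
  return { num: framerate.den, den: framerate.num };
}

// Rescale a tick count from one time base to another, in BigInt to avoid
// overflow; integer division truncates toward zero.
function rescaleQ(value: bigint, from: Rational, to: Rational): bigint {
  return (value * BigInt(from.num) * BigInt(to.den)) /
         (BigInt(from.den) * BigInt(to.num));
}

const tb = encoderTimeBaseFromFramerate({ num: 25, den: 1 }); // -> 1/25
const pts = rescaleQ(40n, { num: 1, den: 1000 }, tb);         // 40 ms -> tick 1n
```
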
@@ -994,95 +1710,228 @@ export class Encoder {
  * @see {@link initialize} For async version
  */
 initializeSync(frame) {
+    // Get bits_per_raw_sample from decoder if available
+    if (this.options.decoder) {
+        const decoderCtx = this.options.decoder.getCodecContext();
+        if (decoderCtx && decoderCtx.bitsPerRawSample > 0) {
+            this.codecContext.bitsPerRawSample = decoderCtx.bitsPerRawSample;
+        }
+    }
+    // Get framerate from filter if available, otherwise from decoder
+    // This matches FFmpeg CLI behavior where encoder gets frame_rate_filter from FrameData
+    if (this.options.filter && frame.isVideo()) {
+        const filterFrameRate = this.options.filter.frameRate;
+        if (filterFrameRate) {
+            this.codecContext.framerate = new Rational(filterFrameRate.num, filterFrameRate.den);
+        }
+    }
+    // If no filter framerate, try to get from decoder stream
+    if ((!this.codecContext.framerate || this.codecContext.framerate.num === 0) && this.options.decoder && frame.isVideo()) {
+        const decoderCtx = this.options.decoder.getCodecContext();
+        if (decoderCtx?.framerate && decoderCtx.framerate.num > 0) {
+            this.codecContext.framerate = decoderCtx.framerate;
+        }
+    }
     if (frame.isVideo()) {
+        // FFmpeg CLI sets encoder time_base to 1/framerate (inverse of framerate)
+        // This allows encoder to produce sequential PTS (0, 1, 2, 3...) which enables
+        // proper B-frame DTS generation (negative DTS values)
+        if (this.codecContext.framerate && this.codecContext.framerate.num > 0) {
+            // Use inverse of framerate (e.g., framerate=30/1 → timebase=1/30)
+            this.codecContext.timeBase = new Rational(this.codecContext.framerate.den, this.codecContext.framerate.num);
+        }
+        else {
+            // Fallback: use frame timebase if framerate not available
+            this.codecContext.timeBase = frame.timeBase;
+        }
         this.codecContext.width = frame.width;
         this.codecContext.height = frame.height;
         this.codecContext.pixelFormat = frame.format;
         this.codecContext.sampleAspectRatio = frame.sampleAspectRatio;
+        this.codecContext.colorRange = frame.colorRange;
+        this.codecContext.colorPrimaries = frame.colorPrimaries;
+        this.codecContext.colorTrc = frame.colorTrc;
+        this.codecContext.colorSpace = frame.colorSpace;
+        // Only set chroma location if unspecified
+        if (this.codecContext.chromaLocation === AVCHROMA_LOC_UNSPECIFIED) {
+            this.codecContext.chromaLocation = frame.chromaLocation;
+        }
     }
     else {
+        // Audio: Always use frame timebase (which is typically 1/sample_rate)
+        // This ensures correct PTS progression for audio frames
+        this.codecContext.timeBase = frame.timeBase;
         this.codecContext.sampleRate = frame.sampleRate;
         this.codecContext.sampleFormat = frame.format;
         this.codecContext.channelLayout = frame.channelLayout;
     }
-
-    this.
+    // Setup hardware acceleration with validation
+    this.setupHardwareAcceleration(frame);
+    // Set codec flags
+    // AV_CODEC_FLAG_COPY_OPAQUE: Copy opaque data from frames to packets if supported
+    if (this.codec.hasCapabilities(AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE)) {
+        this.codecContext.setFlags(AV_CODEC_FLAG_COPY_OPAQUE);
+    }
+    // AV_CODEC_FLAG_FRAME_DURATION: Signal that frame duration matters for timestamps
+    this.codecContext.setFlags(AV_CODEC_FLAG_FRAME_DURATION);
     // Open codec
     const openRet = this.codecContext.open2Sync(this.codec, this.opts);
     if (openRet < 0) {
         this.codecContext.freeContext();
         FFmpegError.throwIfError(openRet, 'Failed to open encoder');
     }
+    // Check if encoder requires fixed frame size (e.g., Opus, AAC, MP3)
+    // If so, create AudioFrameBuffer to automatically chunk frames
+    if (frame.isAudio() && this.codecContext.frameSize > 0) {
+        this.audioFrameBuffer = AudioFrameBuffer.create(this.codecContext.frameSize, this.codecContext.sampleFormat, this.codecContext.sampleRate, this.codecContext.channelLayout, this.codecContext.channels);
+    }
     this.initialized = true;
 }
 /**
- *
+ * Setup hardware acceleration for encoder.
  *
- *
- *
+ * Implements FFmpeg's hw_device_setup_for_encode logic.
+ * Validates hardware frames context format and codec support.
+ * Falls back to device context if frames context is incompatible.
  *
- * @
+ * @param frame - Frame to get hardware context from
  *
  * @internal
- *
- * @see {@link Codec} For codec details
  */
-
-
+setupHardwareAcceleration(frame) {
+    if (!frame.hwFramesCtx) {
+        // Software encoding
+        return;
+    }
+    const hwFramesCtx = frame.hwFramesCtx;
+    const framesFormat = hwFramesCtx.format;
+    const encoderFormat = this.codecContext.pixelFormat;
+    // Check 1: Format validation
+    if (framesFormat !== encoderFormat) {
+        this.codecContext.hwDeviceCtx = hwFramesCtx.deviceRef;
+        this.codecContext.hwFramesCtx = null;
+        return;
+    }
+    // Check 2: Codec supports HW_FRAMES_CTX?
+    let supportsFramesCtx = false;
+    for (let i = 0;; i++) {
+        const config = this.codec.getHwConfig(i);
+        if (!config)
+            break;
+        // Check if codec supports HW_FRAMES_CTX method
+        if (config.methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) {
+            // Check if pixel format matches or is unspecified
+            if (config.pixFmt === AV_PIX_FMT_NONE || config.pixFmt === encoderFormat) {
+                supportsFramesCtx = true;
+                break;
+            }
+        }
+    }
+    if (supportsFramesCtx) {
+        // Use hw_frames_ctx (best performance - zero copy)
+        this.codecContext.hwFramesCtx = hwFramesCtx;
+        this.codecContext.hwDeviceCtx = hwFramesCtx.deviceRef;
+    }
+    else {
+        // Fallback to hw_device_ctx (still uses HW, but may copy)
+        // Check if codec supports HW_DEVICE_CTX as fallback
+        let supportsDeviceCtx = false;
+        for (let i = 0;; i++) {
+            const config = this.codec.getHwConfig(i);
+            if (!config)
+                break;
+            if (config.methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) {
+                supportsDeviceCtx = true;
+                break;
+            }
+        }
+        if (supportsDeviceCtx) {
+            this.codecContext.hwDeviceCtx = hwFramesCtx.deviceRef;
+            this.codecContext.hwFramesCtx = null;
+        }
+        else {
+            // No hardware support at all - software encoding
+            this.codecContext.hwDeviceCtx = null;
+            this.codecContext.hwFramesCtx = null;
+        }
+    }
 }
 /**
- *
- *
- * Returns the codec context for advanced operations.
- * Useful for accessing low-level codec properties and settings.
- * Returns null if encoder is closed or not initialized.
- *
- * @returns Codec context or null if closed/not initialized
+ * Prepare frame for encoding.
  *
- *
- *
- *
- */
-getCodecContext() {
-    return !this.isClosed && this.initialized ? this.codecContext : null;
-}
-/**
- * Get codec flags even before encoder initialization.
+ * Implements FFmpeg's frame_encode() pre-encoding logic:
+ * 1. Video: Sets frame.quality from encoder's globalQuality (like -qscale)
+ * 2. Audio: Validates channel count consistency for encoders without PARAM_CHANGE capability
  *
- *
+ * This matches FFmpeg CLI behavior where these properties are automatically managed.
  *
- * @
+ * @param frame - Frame to prepare for encoding
  *
- * @throws {Error} If encoder
+ * @throws {Error} If audio channel count changed and encoder doesn't support parameter changes
  *
  * @internal
  */
-
-
-
+prepareFrameForEncoding(frame) {
+    // Clear pict_type - encoder will determine frame types based on its own settings
+    // Input stream's frame type hints are irrelevant when re-encoding
+    frame.pictType = AV_PICTURE_TYPE_NONE;
+    // Adjust frame PTS and timebase to encoder timebase
+    // This matches FFmpeg's adjust_frame_pts_to_encoder_tb() behavior which:
+    // 1. Converts PTS from frame's timebase to encoder's timebase (av_rescale_q)
+    // 2. Sets frame->time_base = tb_dst (so encoder gets correct timebase)
+    // Note: prepareFrameForEncoding is always called AFTER initialize(),
+    // so codecContext.timeBase is already set correctly:
+    // - Video: 1/framerate (if available)
+    // - Audio: frame.timeBase from first frame (typically 1/sample_rate)
+    const encoderTimebase = this.codecContext.timeBase;
+    const oldTimebase = frame.timeBase;
+    // IMPORTANT: Calculate duration BEFORE converting frame timebase
+    // This matches FFmpeg's video_sync_process() which calculates:
+    // duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out)
+    // We need the OLD timebase to convert duration properly
+    let frameDuration;
+    if (frame.duration && frame.duration > 0n) {
+        // Convert duration from frame timebase to encoder timebase
+        // This ensures encoder gets correct frame duration for timestamps
+        frameDuration = avRescaleQ(frame.duration, oldTimebase, encoderTimebase);
     }
-
-
-
-
- *
- * This allows setting flags on the codec context before the encoder is opened,
- * which is necessary for flags that affect initialization behavior (like GLOBAL_HEADER).
- *
- * @param flags - The flags to set
- *
- * @throws {Error} If encoder is already initialized or closed
- *
- * @internal
- */
-setCodecFlags(flags) {
-    if (this.isClosed) {
-        throw new Error('Cannot set flags on closed encoder');
+    else {
+        // Default to 1 (constant frame rate behavior)
+        // Matches FFmpeg's CFR mode: frame->duration = 1
+        frameDuration = 1n;
     }
-    if (
-
+    if (frame.pts !== null && frame.pts !== undefined) {
+        // Convert PTS to encoder timebase
+        frame.pts = avRescaleQ(frame.pts, oldTimebase, encoderTimebase);
+        // IMPORTANT: Set frame timebase to encoder timebase
+        // FFmpeg does this in adjust_frame_pts_to_encoder_tb(): frame->time_base = tb_dst
+        // This ensures encoder gets frames with correct timebase (1/framerate for video, 1/sample_rate for audio)
+        frame.timeBase = encoderTimebase;
+    }
+    // Set frame duration in encoder timebase
+    // This matches FFmpeg's video_sync_process() which sets frame->duration
+    // based on vsync_method (CFR: 1, VFR: calculated, PASSTHROUGH: calculated)
+    // Since we don't have automatic filter like FFmpeg, we always set it here
+    frame.duration = frameDuration;
+    if (this.codecContext.codecType === AVMEDIA_TYPE_VIDEO) {
+        // Video: Set frame quality from encoder's global quality
+        // Only set if encoder has globalQuality configured and frame doesn't already have quality set
+        if (this.codecContext.globalQuality > 0 && frame.quality <= 0) {
+            frame.quality = this.codecContext.globalQuality;
+        }
+    }
+    else if (this.codecContext.codecType === AVMEDIA_TYPE_AUDIO) {
+        // Audio: Validate channel count consistency
+        // If encoder doesn't support AV_CODEC_CAP_PARAM_CHANGE, channel count must remain constant
+        const supportsParamChange = this.codec.hasCapabilities(AV_CODEC_CAP_PARAM_CHANGE);
+        if (!supportsParamChange) {
+            const encoderChannels = this.codecContext.channelLayout.nbChannels;
+            const frameChannels = frame.channelLayout?.nbChannels ?? 0;
+            if (encoderChannels !== frameChannels) {
+                throw new Error(`Audio channel count changed (${encoderChannels} -> ${frameChannels}) and encoder '${this.codec.name}' does not support parameter changes`);
+            }
+        }
     }
-    this.codecContext.flags = flags;
 }
 /**
  * Dispose of encoder.