@revizly/node-av 5.2.2-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/BUILD_LINUX.md +61 -0
- package/LICENSE.md +22 -0
- package/README.md +662 -0
- package/build_mac_local.sh +69 -0
- package/dist/api/audio-frame-buffer.d.ts +205 -0
- package/dist/api/audio-frame-buffer.js +287 -0
- package/dist/api/audio-frame-buffer.js.map +1 -0
- package/dist/api/bitstream-filter.d.ts +820 -0
- package/dist/api/bitstream-filter.js +1242 -0
- package/dist/api/bitstream-filter.js.map +1 -0
- package/dist/api/constants.d.ts +44 -0
- package/dist/api/constants.js +45 -0
- package/dist/api/constants.js.map +1 -0
- package/dist/api/data/test_av1.ivf +0 -0
- package/dist/api/data/test_h264.h264 +0 -0
- package/dist/api/data/test_hevc.h265 +0 -0
- package/dist/api/data/test_mjpeg.mjpeg +0 -0
- package/dist/api/data/test_vp8.ivf +0 -0
- package/dist/api/data/test_vp9.ivf +0 -0
- package/dist/api/decoder.d.ts +1088 -0
- package/dist/api/decoder.js +1775 -0
- package/dist/api/decoder.js.map +1 -0
- package/dist/api/demuxer.d.ts +1219 -0
- package/dist/api/demuxer.js +2081 -0
- package/dist/api/demuxer.js.map +1 -0
- package/dist/api/device.d.ts +586 -0
- package/dist/api/device.js +961 -0
- package/dist/api/device.js.map +1 -0
- package/dist/api/encoder.d.ts +1132 -0
- package/dist/api/encoder.js +1988 -0
- package/dist/api/encoder.js.map +1 -0
- package/dist/api/filter-complex.d.ts +821 -0
- package/dist/api/filter-complex.js +1604 -0
- package/dist/api/filter-complex.js.map +1 -0
- package/dist/api/filter-presets.d.ts +1286 -0
- package/dist/api/filter-presets.js +2152 -0
- package/dist/api/filter-presets.js.map +1 -0
- package/dist/api/filter.d.ts +1234 -0
- package/dist/api/filter.js +1976 -0
- package/dist/api/filter.js.map +1 -0
- package/dist/api/fmp4-stream.d.ts +426 -0
- package/dist/api/fmp4-stream.js +739 -0
- package/dist/api/fmp4-stream.js.map +1 -0
- package/dist/api/hardware.d.ts +651 -0
- package/dist/api/hardware.js +1260 -0
- package/dist/api/hardware.js.map +1 -0
- package/dist/api/index.d.ts +17 -0
- package/dist/api/index.js +32 -0
- package/dist/api/index.js.map +1 -0
- package/dist/api/io-stream.d.ts +307 -0
- package/dist/api/io-stream.js +282 -0
- package/dist/api/io-stream.js.map +1 -0
- package/dist/api/muxer.d.ts +957 -0
- package/dist/api/muxer.js +2002 -0
- package/dist/api/muxer.js.map +1 -0
- package/dist/api/pipeline.d.ts +607 -0
- package/dist/api/pipeline.js +1145 -0
- package/dist/api/pipeline.js.map +1 -0
- package/dist/api/utilities/async-queue.d.ts +120 -0
- package/dist/api/utilities/async-queue.js +211 -0
- package/dist/api/utilities/async-queue.js.map +1 -0
- package/dist/api/utilities/audio-sample.d.ts +117 -0
- package/dist/api/utilities/audio-sample.js +112 -0
- package/dist/api/utilities/audio-sample.js.map +1 -0
- package/dist/api/utilities/channel-layout.d.ts +76 -0
- package/dist/api/utilities/channel-layout.js +80 -0
- package/dist/api/utilities/channel-layout.js.map +1 -0
- package/dist/api/utilities/electron-shared-texture.d.ts +328 -0
- package/dist/api/utilities/electron-shared-texture.js +503 -0
- package/dist/api/utilities/electron-shared-texture.js.map +1 -0
- package/dist/api/utilities/image.d.ts +207 -0
- package/dist/api/utilities/image.js +213 -0
- package/dist/api/utilities/image.js.map +1 -0
- package/dist/api/utilities/index.d.ts +12 -0
- package/dist/api/utilities/index.js +25 -0
- package/dist/api/utilities/index.js.map +1 -0
- package/dist/api/utilities/media-type.d.ts +49 -0
- package/dist/api/utilities/media-type.js +53 -0
- package/dist/api/utilities/media-type.js.map +1 -0
- package/dist/api/utilities/pixel-format.d.ts +89 -0
- package/dist/api/utilities/pixel-format.js +97 -0
- package/dist/api/utilities/pixel-format.js.map +1 -0
- package/dist/api/utilities/sample-format.d.ts +129 -0
- package/dist/api/utilities/sample-format.js +141 -0
- package/dist/api/utilities/sample-format.js.map +1 -0
- package/dist/api/utilities/scheduler.d.ts +138 -0
- package/dist/api/utilities/scheduler.js +98 -0
- package/dist/api/utilities/scheduler.js.map +1 -0
- package/dist/api/utilities/streaming.d.ts +186 -0
- package/dist/api/utilities/streaming.js +309 -0
- package/dist/api/utilities/streaming.js.map +1 -0
- package/dist/api/utilities/timestamp.d.ts +193 -0
- package/dist/api/utilities/timestamp.js +206 -0
- package/dist/api/utilities/timestamp.js.map +1 -0
- package/dist/api/utilities/whisper-model.d.ts +310 -0
- package/dist/api/utilities/whisper-model.js +528 -0
- package/dist/api/utilities/whisper-model.js.map +1 -0
- package/dist/api/utils.d.ts +19 -0
- package/dist/api/utils.js +39 -0
- package/dist/api/utils.js.map +1 -0
- package/dist/api/whisper.d.ts +324 -0
- package/dist/api/whisper.js +362 -0
- package/dist/api/whisper.js.map +1 -0
- package/dist/constants/channel-layouts.d.ts +53 -0
- package/dist/constants/channel-layouts.js +57 -0
- package/dist/constants/channel-layouts.js.map +1 -0
- package/dist/constants/constants.d.ts +2325 -0
- package/dist/constants/constants.js +1887 -0
- package/dist/constants/constants.js.map +1 -0
- package/dist/constants/decoders.d.ts +633 -0
- package/dist/constants/decoders.js +641 -0
- package/dist/constants/decoders.js.map +1 -0
- package/dist/constants/encoders.d.ts +295 -0
- package/dist/constants/encoders.js +308 -0
- package/dist/constants/encoders.js.map +1 -0
- package/dist/constants/hardware.d.ts +26 -0
- package/dist/constants/hardware.js +27 -0
- package/dist/constants/hardware.js.map +1 -0
- package/dist/constants/index.d.ts +5 -0
- package/dist/constants/index.js +6 -0
- package/dist/constants/index.js.map +1 -0
- package/dist/ffmpeg/index.d.ts +99 -0
- package/dist/ffmpeg/index.js +115 -0
- package/dist/ffmpeg/index.js.map +1 -0
- package/dist/ffmpeg/utils.d.ts +31 -0
- package/dist/ffmpeg/utils.js +68 -0
- package/dist/ffmpeg/utils.js.map +1 -0
- package/dist/ffmpeg/version.d.ts +6 -0
- package/dist/ffmpeg/version.js +7 -0
- package/dist/ffmpeg/version.js.map +1 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +9 -0
- package/dist/index.js.map +1 -0
- package/dist/lib/audio-fifo.d.ts +399 -0
- package/dist/lib/audio-fifo.js +431 -0
- package/dist/lib/audio-fifo.js.map +1 -0
- package/dist/lib/binding.d.ts +228 -0
- package/dist/lib/binding.js +60 -0
- package/dist/lib/binding.js.map +1 -0
- package/dist/lib/bitstream-filter-context.d.ts +379 -0
- package/dist/lib/bitstream-filter-context.js +441 -0
- package/dist/lib/bitstream-filter-context.js.map +1 -0
- package/dist/lib/bitstream-filter.d.ts +140 -0
- package/dist/lib/bitstream-filter.js +154 -0
- package/dist/lib/bitstream-filter.js.map +1 -0
- package/dist/lib/codec-context.d.ts +1071 -0
- package/dist/lib/codec-context.js +1354 -0
- package/dist/lib/codec-context.js.map +1 -0
- package/dist/lib/codec-parameters.d.ts +616 -0
- package/dist/lib/codec-parameters.js +761 -0
- package/dist/lib/codec-parameters.js.map +1 -0
- package/dist/lib/codec-parser.d.ts +201 -0
- package/dist/lib/codec-parser.js +213 -0
- package/dist/lib/codec-parser.js.map +1 -0
- package/dist/lib/codec.d.ts +586 -0
- package/dist/lib/codec.js +713 -0
- package/dist/lib/codec.js.map +1 -0
- package/dist/lib/device.d.ts +291 -0
- package/dist/lib/device.js +324 -0
- package/dist/lib/device.js.map +1 -0
- package/dist/lib/dictionary.d.ts +333 -0
- package/dist/lib/dictionary.js +372 -0
- package/dist/lib/dictionary.js.map +1 -0
- package/dist/lib/error.d.ts +242 -0
- package/dist/lib/error.js +303 -0
- package/dist/lib/error.js.map +1 -0
- package/dist/lib/fifo.d.ts +416 -0
- package/dist/lib/fifo.js +453 -0
- package/dist/lib/fifo.js.map +1 -0
- package/dist/lib/filter-context.d.ts +712 -0
- package/dist/lib/filter-context.js +789 -0
- package/dist/lib/filter-context.js.map +1 -0
- package/dist/lib/filter-graph-segment.d.ts +160 -0
- package/dist/lib/filter-graph-segment.js +171 -0
- package/dist/lib/filter-graph-segment.js.map +1 -0
- package/dist/lib/filter-graph.d.ts +641 -0
- package/dist/lib/filter-graph.js +704 -0
- package/dist/lib/filter-graph.js.map +1 -0
- package/dist/lib/filter-inout.d.ts +198 -0
- package/dist/lib/filter-inout.js +257 -0
- package/dist/lib/filter-inout.js.map +1 -0
- package/dist/lib/filter.d.ts +243 -0
- package/dist/lib/filter.js +272 -0
- package/dist/lib/filter.js.map +1 -0
- package/dist/lib/format-context.d.ts +1254 -0
- package/dist/lib/format-context.js +1379 -0
- package/dist/lib/format-context.js.map +1 -0
- package/dist/lib/frame-utils.d.ts +116 -0
- package/dist/lib/frame-utils.js +98 -0
- package/dist/lib/frame-utils.js.map +1 -0
- package/dist/lib/frame.d.ts +1222 -0
- package/dist/lib/frame.js +1435 -0
- package/dist/lib/frame.js.map +1 -0
- package/dist/lib/hardware-device-context.d.ts +362 -0
- package/dist/lib/hardware-device-context.js +383 -0
- package/dist/lib/hardware-device-context.js.map +1 -0
- package/dist/lib/hardware-frames-context.d.ts +419 -0
- package/dist/lib/hardware-frames-context.js +477 -0
- package/dist/lib/hardware-frames-context.js.map +1 -0
- package/dist/lib/index.d.ts +35 -0
- package/dist/lib/index.js +60 -0
- package/dist/lib/index.js.map +1 -0
- package/dist/lib/input-format.d.ts +249 -0
- package/dist/lib/input-format.js +306 -0
- package/dist/lib/input-format.js.map +1 -0
- package/dist/lib/io-context.d.ts +696 -0
- package/dist/lib/io-context.js +769 -0
- package/dist/lib/io-context.js.map +1 -0
- package/dist/lib/log.d.ts +174 -0
- package/dist/lib/log.js +184 -0
- package/dist/lib/log.js.map +1 -0
- package/dist/lib/native-types.d.ts +946 -0
- package/dist/lib/native-types.js +2 -0
- package/dist/lib/native-types.js.map +1 -0
- package/dist/lib/option.d.ts +927 -0
- package/dist/lib/option.js +1583 -0
- package/dist/lib/option.js.map +1 -0
- package/dist/lib/output-format.d.ts +180 -0
- package/dist/lib/output-format.js +213 -0
- package/dist/lib/output-format.js.map +1 -0
- package/dist/lib/packet.d.ts +501 -0
- package/dist/lib/packet.js +590 -0
- package/dist/lib/packet.js.map +1 -0
- package/dist/lib/rational.d.ts +251 -0
- package/dist/lib/rational.js +278 -0
- package/dist/lib/rational.js.map +1 -0
- package/dist/lib/software-resample-context.d.ts +552 -0
- package/dist/lib/software-resample-context.js +592 -0
- package/dist/lib/software-resample-context.js.map +1 -0
- package/dist/lib/software-scale-context.d.ts +344 -0
- package/dist/lib/software-scale-context.js +366 -0
- package/dist/lib/software-scale-context.js.map +1 -0
- package/dist/lib/stream.d.ts +379 -0
- package/dist/lib/stream.js +526 -0
- package/dist/lib/stream.js.map +1 -0
- package/dist/lib/sync-queue.d.ts +179 -0
- package/dist/lib/sync-queue.js +197 -0
- package/dist/lib/sync-queue.js.map +1 -0
- package/dist/lib/types.d.ts +34 -0
- package/dist/lib/types.js +2 -0
- package/dist/lib/types.js.map +1 -0
- package/dist/lib/utilities.d.ts +1127 -0
- package/dist/lib/utilities.js +1225 -0
- package/dist/lib/utilities.js.map +1 -0
- package/dist/utils/electron.d.ts +49 -0
- package/dist/utils/electron.js +63 -0
- package/dist/utils/electron.js.map +1 -0
- package/dist/utils/index.d.ts +4 -0
- package/dist/utils/index.js +5 -0
- package/dist/utils/index.js.map +1 -0
- package/install/check.js +121 -0
- package/install/ffmpeg.js +66 -0
- package/jellyfin-ffmpeg.patch +181 -0
- package/package.json +129 -0
|
@@ -0,0 +1,1988 @@
|
|
|
1
|
+
// Downlevel emit helper (tslib-style) for explicit resource management
// (`using` / `await using` declarations). Registers `value` on the
// environment's disposal stack so __disposeResources can later run its
// [Symbol.dispose] / [Symbol.asyncDispose] method in reverse order.
// Reuses an already-installed global helper when one exists.
var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
    // null/undefined resources are legal: nothing to dispose (but an
    // `await using` still records an async placeholder below so disposal
    // sequencing stays correct).
    if (value !== null && value !== void 0) {
        if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
        var dispose, inner;
        if (async) {
            if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
            dispose = value[Symbol.asyncDispose];
        }
        // Fall back to the sync dispose method when no async one exists
        // (or when this is a plain `using`).
        if (dispose === void 0) {
            if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
            dispose = value[Symbol.dispose];
            // Remember that an async disposal is actually backed by a sync
            // method, so thrown errors can be surfaced as rejections.
            if (async) inner = dispose;
        }
        if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
        // Wrap a sync dispose used in async position: convert a throw into
        // a rejected promise instead of an unwinding exception.
        if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
        env.stack.push({ value: value, dispose: dispose, async: async });
    }
    else if (async) {
        // Placeholder entry: keeps the async tick boundary for
        // `await using x = null/undefined`.
        env.stack.push({ async: true });
    }
    // Returns the resource unchanged so the declaration initializer keeps
    // its value.
    return value;
};
|
|
23
|
+
// Downlevel emit helper (tslib-style) that unwinds the disposal stack
// built by __addDisposableResource when a `using` scope exits. Disposes
// resources in reverse (LIFO) order, chaining async disposals and
// collecting secondary errors into SuppressedError. Reuses an installed
// global helper when present; otherwise uses the native SuppressedError
// or a minimal polyfill of it.
var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
    return function (env) {
        // Record a disposal error; if one is already pending, wrap both in
        // a SuppressedError so neither is lost.
        function fail(e) {
            env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
            env.hasError = true;
        }
        // `s` is a small bit/state flag: 1 = a sync placeholder was seen
        // (forces a microtask boundary), 2 = an async disposal has run.
        var r, s = 0;
        function next() {
            while (r = env.stack.pop()) {
                try {
                    // A sync resource encountered while state 1 is pending:
                    // requeue it and defer to a microtask to preserve the
                    // required async sequencing.
                    if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
                    if (r.dispose) {
                        var result = r.dispose.call(r.value);
                        // Async disposal: continue the unwind after it
                        // settles; a rejection is recorded, not fatal.
                        if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
                    }
                    // Placeholder entry (null/undefined `await using`).
                    else s |= 1;
                }
                catch (e) {
                    fail(e);
                }
            }
            // All done: surface the collected error as a rejection (async
            // path) or a throw (fully synchronous path).
            if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
            if (env.hasError) throw env.error;
        }
        return next();
    };
})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
    // Minimal SuppressedError polyfill for runtimes that predate it.
    var e = new Error(message);
    return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
});
|
|
53
|
+
import { AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, AV_CODEC_CAP_PARAM_CHANGE, AV_CODEC_FLAG_COPY_OPAQUE, AV_CODEC_FLAG_FRAME_DURATION, AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX, AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, AV_PICTURE_TYPE_NONE, AV_PIX_FMT_NONE, AV_PKT_FLAG_TRUSTED, AVCHROMA_LOC_UNSPECIFIED, AVERROR_EAGAIN, AVERROR_ENCODER_NOT_FOUND, AVERROR_EOF, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO, EOF, } from '../constants/constants.js';
|
|
54
|
+
import { CodecContext } from '../lib/codec-context.js';
|
|
55
|
+
import { Codec } from '../lib/codec.js';
|
|
56
|
+
import { Dictionary } from '../lib/dictionary.js';
|
|
57
|
+
import { FFmpegError } from '../lib/error.js';
|
|
58
|
+
import { Frame } from '../lib/frame.js';
|
|
59
|
+
import { Packet } from '../lib/packet.js';
|
|
60
|
+
import { Rational } from '../lib/rational.js';
|
|
61
|
+
import { avRescaleQ } from '../lib/utilities.js';
|
|
62
|
+
import { AudioFrameBuffer } from './audio-frame-buffer.js';
|
|
63
|
+
import { FRAME_THREAD_QUEUE_SIZE, PACKET_THREAD_QUEUE_SIZE } from './constants.js';
|
|
64
|
+
import { AsyncQueue } from './utilities/async-queue.js';
|
|
65
|
+
import { SchedulerControl } from './utilities/scheduler.js';
|
|
66
|
+
import { parseBitrate } from './utils.js';
|
|
67
|
+
/**
|
|
68
|
+
* High-level encoder for audio and video streams.
|
|
69
|
+
*
|
|
70
|
+
* Provides a simplified interface for encoding media frames to packets.
|
|
71
|
+
* Handles codec initialization, hardware acceleration setup, and packet management.
|
|
72
|
+
* Supports both synchronous frame-by-frame encoding and async iteration over packets.
|
|
73
|
+
* Essential component in media processing pipelines for converting raw frames to compressed data.
|
|
74
|
+
*
|
|
75
|
+
* @example
|
|
76
|
+
* ```typescript
|
|
77
|
+
* import { Encoder } from 'node-av/api';
|
|
78
|
+
* import { AV_CODEC_ID_H264, FF_ENCODER_LIBX264 } from 'node-av/constants';
|
|
79
|
+
*
|
|
80
|
+
* // Create H.264 encoder
|
|
81
|
+
* const encoder = await Encoder.create(FF_ENCODER_LIBX264, {
|
|
82
|
+
* type: 'video',
|
|
83
|
+
* width: 1920,
|
|
84
|
+
* height: 1080,
|
|
85
|
+
* pixelFormat: AV_PIX_FMT_YUV420P,
|
|
86
|
+
* timeBase: { num: 1, den: 30 },
|
|
87
|
+
* frameRate: { num: 30, den: 1 }
|
|
88
|
+
* }, {
|
|
89
|
+
* bitrate: '5M',
|
|
90
|
+
* gopSize: 60
|
|
91
|
+
* });
|
|
92
|
+
*
|
|
93
|
+
* // Encode frames
|
|
94
|
+
* const packet = await encoder.encode(frame);
|
|
95
|
+
* if (packet) {
|
|
96
|
+
* await output.writePacket(packet);
|
|
97
|
+
* packet.free();
|
|
98
|
+
* }
|
|
99
|
+
* ```
|
|
100
|
+
*
|
|
101
|
+
* @example
|
|
102
|
+
* ```typescript
|
|
103
|
+
* // Hardware-accelerated encoding with lazy initialization
|
|
104
|
+
* import { HardwareContext } from 'node-av/api';
|
|
105
|
+
* import { FF_ENCODER_H264_VIDEOTOOLBOX } from 'node-av/constants';
|
|
106
|
+
*
|
|
107
|
+
* const hw = HardwareContext.auto();
|
|
108
|
+
* const encoderCodec = hw?.getEncoderCodec('h264') ?? FF_ENCODER_H264_VIDEOTOOLBOX;
|
|
109
|
+
* const encoder = await Encoder.create(encoderCodec, {
|
|
110
|
+
* timeBase: video.timeBase,
|
|
111
|
+
* bitrate: '10M'
|
|
112
|
+
* });
|
|
113
|
+
*
|
|
114
|
+
* // Hardware context will be detected from first frame's hw_frames_ctx
|
|
115
|
+
* for await (const packet of encoder.packets(frames)) {
|
|
116
|
+
* await output.writePacket(packet);
|
|
117
|
+
* packet.free();
|
|
118
|
+
* }
|
|
119
|
+
* ```
|
|
120
|
+
*
|
|
121
|
+
* @see {@link Decoder} For decoding packets to frames
|
|
122
|
+
* @see {@link Muxer} For writing encoded packets
|
|
123
|
+
* @see {@link HardwareContext} For GPU acceleration
|
|
124
|
+
*/
|
|
125
|
+
export class Encoder {
|
|
126
|
+
    // Low-level libavcodec context this encoder drives.
    codecContext;
    // Scratch packet, allocated once in the constructor and reused across
    // encode calls.
    packet;
    // The resolved encoder Codec instance.
    codec;
    // In-flight lazy-initialization promise, if initialization has started.
    initializePromise = null;
    // True once the encoder has been opened (lazy init completed).
    initialized = false;
    // True after close; guards accessors such as codecFlags.
    isClosed = false;
    // Encoder options converted to a Dictionary (passed to codec open).
    opts;
    // Raw EncoderOptions object supplied by the caller.
    options;
    // Buffer for accumulating audio samples — presumably used to feed
    // fixed-size frames to the codec; not initialized in this view, so
    // confirm where it is assigned.
    audioFrameBuffer;
    // Worker pattern for push-based processing
    // Bounded queue of incoming frames (FRAME_THREAD_QUEUE_SIZE slots).
    inputQueue;
    // Bounded queue of produced packets (PACKET_THREAD_QUEUE_SIZE slots).
    outputQueue;
    // Promise for the background worker loop, when running.
    workerPromise = null;
    // Promise for an active pipeTo operation, when running.
    pipeToPromise = null;
    // Optional AbortSignal captured from options for cancellation.
    signal;
|
|
141
|
+
/**
|
|
142
|
+
* @param codecContext - Configured codec context
|
|
143
|
+
*
|
|
144
|
+
* @param codec - Encoder codec
|
|
145
|
+
*
|
|
146
|
+
* @param options - Encoder options
|
|
147
|
+
*
|
|
148
|
+
* @param opts - Encoder options as Dictionary
|
|
149
|
+
*
|
|
150
|
+
* @internal
|
|
151
|
+
*/
|
|
152
|
+
constructor(codecContext, codec, options, opts) {
|
|
153
|
+
this.codecContext = codecContext;
|
|
154
|
+
this.codec = codec;
|
|
155
|
+
this.options = options;
|
|
156
|
+
this.opts = opts;
|
|
157
|
+
this.packet = new Packet();
|
|
158
|
+
this.packet.alloc();
|
|
159
|
+
this.inputQueue = new AsyncQueue(FRAME_THREAD_QUEUE_SIZE);
|
|
160
|
+
this.outputQueue = new AsyncQueue(PACKET_THREAD_QUEUE_SIZE);
|
|
161
|
+
}
|
|
162
|
+
/**
|
|
163
|
+
* Create an encoder with specified codec and options.
|
|
164
|
+
*
|
|
165
|
+
* Initializes an encoder with the appropriate codec and configuration.
|
|
166
|
+
* Uses lazy initialization - encoder is opened when first frame is received.
|
|
167
|
+
* Hardware context will be automatically detected from first frame if not provided.
|
|
168
|
+
*
|
|
169
|
+
* Direct mapping to avcodec_find_encoder_by_name() or avcodec_find_encoder().
|
|
170
|
+
*
|
|
171
|
+
* @param encoderCodec - Codec name, ID, or instance to use for encoding
|
|
172
|
+
*
|
|
173
|
+
* @param options - Optional encoder configuration options including required timeBase
|
|
174
|
+
*
|
|
175
|
+
* @returns Configured encoder instance
|
|
176
|
+
*
|
|
177
|
+
* @throws {Error} If encoder not found
|
|
178
|
+
*
|
|
179
|
+
* @example
|
|
180
|
+
* ```typescript
|
|
181
|
+
* // From decoder stream info
|
|
182
|
+
* const encoder = await Encoder.create(FF_ENCODER_LIBX264, {
|
|
183
|
+
* timeBase: video.timeBase,
|
|
184
|
+
* bitrate: '5M',
|
|
185
|
+
* gopSize: 60,
|
|
186
|
+
* options: {
|
|
187
|
+
* preset: 'fast',
|
|
188
|
+
* crf: '23'
|
|
189
|
+
* }
|
|
190
|
+
* });
|
|
191
|
+
* ```
|
|
192
|
+
*
|
|
193
|
+
* @example
|
|
194
|
+
* ```typescript
|
|
195
|
+
* // With custom stream info
|
|
196
|
+
* const encoder = await Encoder.create(FF_ENCODER_AAC, {
|
|
197
|
+
* timeBase: audio.timeBase,
|
|
198
|
+
* bitrate: '192k'
|
|
199
|
+
* });
|
|
200
|
+
* ```
|
|
201
|
+
*
|
|
202
|
+
* @example
|
|
203
|
+
* ```typescript
|
|
204
|
+
* // Hardware encoder
|
|
205
|
+
* const hw = HardwareContext.auto();
|
|
206
|
+
* const encoderCodec = hw?.getEncoderCodec('h264') ?? FF_ENCODER_H264_VIDEOTOOLBOX;
|
|
207
|
+
* const encoder = await Encoder.create(encoderCodec, {
|
|
208
|
+
* timeBase: video.timeBase,
|
|
209
|
+
* bitrate: '8M'
|
|
210
|
+
* });
|
|
211
|
+
* ```
|
|
212
|
+
*
|
|
213
|
+
* @see {@link EncoderOptions} For configuration options
|
|
214
|
+
* @see {@link createSync} For synchronous version
|
|
215
|
+
*/
|
|
216
|
+
static async create(encoderCodec, options = {}) {
|
|
217
|
+
let codec = null;
|
|
218
|
+
if (encoderCodec instanceof Codec) {
|
|
219
|
+
codec = encoderCodec;
|
|
220
|
+
}
|
|
221
|
+
else if (typeof encoderCodec === 'string') {
|
|
222
|
+
codec = Codec.findEncoderByName(encoderCodec);
|
|
223
|
+
}
|
|
224
|
+
else {
|
|
225
|
+
codec = Codec.findEncoder(encoderCodec);
|
|
226
|
+
}
|
|
227
|
+
if (!codec) {
|
|
228
|
+
throw new FFmpegError(AVERROR_ENCODER_NOT_FOUND);
|
|
229
|
+
}
|
|
230
|
+
// Allocate codec context
|
|
231
|
+
const codecContext = new CodecContext();
|
|
232
|
+
codecContext.allocContext3(codec);
|
|
233
|
+
// Apply encoder-specific options
|
|
234
|
+
if (options.gopSize !== undefined) {
|
|
235
|
+
codecContext.gopSize = options.gopSize;
|
|
236
|
+
}
|
|
237
|
+
if (options.maxBFrames !== undefined) {
|
|
238
|
+
codecContext.maxBFrames = options.maxBFrames;
|
|
239
|
+
}
|
|
240
|
+
// Apply common options with codec-type-specific defaults
|
|
241
|
+
if (options.bitrate === undefined) {
|
|
242
|
+
// Set codec-type-specific default bitrate
|
|
243
|
+
const isAudio = codec.type === AVMEDIA_TYPE_AUDIO;
|
|
244
|
+
options.bitrate = isAudio ? 128_000 : 1_000_000;
|
|
245
|
+
}
|
|
246
|
+
const bitrate = typeof options.bitrate === 'string' ? parseBitrate(options.bitrate) : BigInt(options.bitrate);
|
|
247
|
+
codecContext.bitRate = bitrate;
|
|
248
|
+
if (options.minRate !== undefined) {
|
|
249
|
+
const minRate = typeof options.minRate === 'string' ? parseBitrate(options.minRate) : BigInt(options.minRate);
|
|
250
|
+
codecContext.rcMinRate = minRate;
|
|
251
|
+
}
|
|
252
|
+
if (options.maxRate !== undefined) {
|
|
253
|
+
const maxRate = typeof options.maxRate === 'string' ? parseBitrate(options.maxRate) : BigInt(options.maxRate);
|
|
254
|
+
codecContext.rcMaxRate = maxRate;
|
|
255
|
+
}
|
|
256
|
+
if (options.bufSize !== undefined) {
|
|
257
|
+
const bufSize = typeof options.bufSize === 'string' ? parseBitrate(options.bufSize) : BigInt(options.bufSize);
|
|
258
|
+
codecContext.rcBufferSize = Number(bufSize);
|
|
259
|
+
}
|
|
260
|
+
// Thread parameters need to be set before open
|
|
261
|
+
codecContext.threadCount = options.threadCount ?? 0;
|
|
262
|
+
if (options.threadType !== undefined) {
|
|
263
|
+
codecContext.threadType = options.threadType;
|
|
264
|
+
}
|
|
265
|
+
const opts = options.options ? Dictionary.fromObject(options.options) : undefined;
|
|
266
|
+
const encoder = new Encoder(codecContext, codec, options, opts);
|
|
267
|
+
if (options.signal) {
|
|
268
|
+
options.signal.throwIfAborted();
|
|
269
|
+
encoder.signal = options.signal;
|
|
270
|
+
}
|
|
271
|
+
return encoder;
|
|
272
|
+
}
|
|
273
|
+
/**
|
|
274
|
+
* Create an encoder with specified codec and options synchronously.
|
|
275
|
+
* Synchronous version of create.
|
|
276
|
+
*
|
|
277
|
+
* Initializes an encoder with the appropriate codec and configuration.
|
|
278
|
+
* Uses lazy initialization - encoder is opened when first frame is received.
|
|
279
|
+
* Hardware context will be automatically detected from first frame if not provided.
|
|
280
|
+
*
|
|
281
|
+
* Direct mapping to avcodec_find_encoder_by_name() or avcodec_find_encoder().
|
|
282
|
+
*
|
|
283
|
+
* @param encoderCodec - Codec name, ID, or instance to use for encoding
|
|
284
|
+
*
|
|
285
|
+
* @param options - Optional encoder configuration options including required timeBase
|
|
286
|
+
*
|
|
287
|
+
* @returns Configured encoder instance
|
|
288
|
+
*
|
|
289
|
+
* @throws {Error} If encoder not found or timeBase not provided
|
|
290
|
+
*
|
|
291
|
+
* @throws {FFmpegError} If codec allocation fails
|
|
292
|
+
*
|
|
293
|
+
* @example
|
|
294
|
+
* ```typescript
|
|
295
|
+
* // From decoder stream info
|
|
296
|
+
* const encoder = await Encoder.create(FF_ENCODER_LIBX264, {
|
|
297
|
+
* timeBase: video.timeBase,
|
|
298
|
+
* bitrate: '5M',
|
|
299
|
+
* gopSize: 60,
|
|
300
|
+
* options: {
|
|
301
|
+
* preset: 'fast',
|
|
302
|
+
* crf: '23'
|
|
303
|
+
* }
|
|
304
|
+
* });
|
|
305
|
+
* ```
|
|
306
|
+
*
|
|
307
|
+
* @example
|
|
308
|
+
* ```typescript
|
|
309
|
+
* // With custom stream info
|
|
310
|
+
* const encoder = await Encoder.create(FF_ENCODER_AAC, {
|
|
311
|
+
* timeBase: audio.timeBase,
|
|
312
|
+
* bitrate: '192k'
|
|
313
|
+
* });
|
|
314
|
+
* ```
|
|
315
|
+
*
|
|
316
|
+
* @example
|
|
317
|
+
* ```typescript
|
|
318
|
+
* // Hardware encoder
|
|
319
|
+
* const hw = HardwareContext.auto();
|
|
320
|
+
* const encoderCodec = hw?.getEncoderCodec('h264') ?? FF_ENCODER_H264_VIDEOTOOLBOX;
|
|
321
|
+
* const encoder = await Encoder.create(encoderCodec, {
|
|
322
|
+
* timeBase: video.timeBase,
|
|
323
|
+
* bitrate: '8M'
|
|
324
|
+
* });
|
|
325
|
+
* ```
|
|
326
|
+
*
|
|
327
|
+
* @see {@link EncoderOptions} For configuration options
|
|
328
|
+
* @see {@link create} For async version
|
|
329
|
+
*/
|
|
330
|
+
static createSync(encoderCodec, options = {}) {
|
|
331
|
+
let codec = null;
|
|
332
|
+
if (encoderCodec instanceof Codec) {
|
|
333
|
+
codec = encoderCodec;
|
|
334
|
+
}
|
|
335
|
+
else if (typeof encoderCodec === 'string') {
|
|
336
|
+
codec = Codec.findEncoderByName(encoderCodec);
|
|
337
|
+
}
|
|
338
|
+
else {
|
|
339
|
+
codec = Codec.findEncoder(encoderCodec);
|
|
340
|
+
}
|
|
341
|
+
if (!codec) {
|
|
342
|
+
throw new FFmpegError(AVERROR_ENCODER_NOT_FOUND);
|
|
343
|
+
}
|
|
344
|
+
// Allocate codec context
|
|
345
|
+
const codecContext = new CodecContext();
|
|
346
|
+
codecContext.allocContext3(codec);
|
|
347
|
+
// Apply common options with codec-type-specific defaults
|
|
348
|
+
if (options.bitrate === undefined) {
|
|
349
|
+
// Set codec-type-specific default bitrate
|
|
350
|
+
const isAudio = codec.type === AVMEDIA_TYPE_AUDIO;
|
|
351
|
+
options.bitrate = isAudio ? 128_000 : 1_000_000;
|
|
352
|
+
}
|
|
353
|
+
const bitrate = typeof options.bitrate === 'string' ? parseBitrate(options.bitrate) : BigInt(options.bitrate);
|
|
354
|
+
codecContext.bitRate = bitrate;
|
|
355
|
+
if (options.gopSize !== undefined) {
|
|
356
|
+
codecContext.gopSize = options.gopSize;
|
|
357
|
+
}
|
|
358
|
+
if (options.maxBFrames !== undefined) {
|
|
359
|
+
codecContext.maxBFrames = options.maxBFrames;
|
|
360
|
+
}
|
|
361
|
+
if (options.minRate !== undefined) {
|
|
362
|
+
const minRate = typeof options.minRate === 'string' ? parseBitrate(options.minRate) : BigInt(options.minRate);
|
|
363
|
+
codecContext.rcMinRate = minRate;
|
|
364
|
+
}
|
|
365
|
+
if (options.maxRate !== undefined) {
|
|
366
|
+
const maxRate = typeof options.maxRate === 'string' ? parseBitrate(options.maxRate) : BigInt(options.maxRate);
|
|
367
|
+
codecContext.rcMaxRate = maxRate;
|
|
368
|
+
}
|
|
369
|
+
if (options.bufSize !== undefined) {
|
|
370
|
+
const bufSize = typeof options.bufSize === 'string' ? parseBitrate(options.bufSize) : BigInt(options.bufSize);
|
|
371
|
+
codecContext.rcBufferSize = Number(bufSize);
|
|
372
|
+
}
|
|
373
|
+
// Thread parameters need to be set before open
|
|
374
|
+
codecContext.threadCount = options.threadCount ?? 0;
|
|
375
|
+
if (options.threadType !== undefined) {
|
|
376
|
+
codecContext.threadType = options.threadType;
|
|
377
|
+
}
|
|
378
|
+
const opts = options.options ? Dictionary.fromObject(options.options) : undefined;
|
|
379
|
+
const encoder = new Encoder(codecContext, codec, options, opts);
|
|
380
|
+
if (options.signal) {
|
|
381
|
+
options.signal.throwIfAborted();
|
|
382
|
+
encoder.signal = options.signal;
|
|
383
|
+
}
|
|
384
|
+
return encoder;
|
|
385
|
+
}
|
|
386
|
+
/**
|
|
387
|
+
* Check if encoder is open.
|
|
388
|
+
*
|
|
389
|
+
* @example
|
|
390
|
+
* ```typescript
|
|
391
|
+
* if (encoder.isEncoderOpen) {
|
|
392
|
+
* const packet = await encoder.encode(frame);
|
|
393
|
+
* }
|
|
394
|
+
* ```
|
|
395
|
+
*/
|
|
396
|
+
get isEncoderOpen() {
|
|
397
|
+
return !this.isClosed;
|
|
398
|
+
}
|
|
399
|
+
/**
|
|
400
|
+
* Check if encoder has been initialized.
|
|
401
|
+
*
|
|
402
|
+
* Returns true after first frame has been processed and encoder opened.
|
|
403
|
+
* Useful for checking if encoder has received frame properties.
|
|
404
|
+
*
|
|
405
|
+
* @returns true if encoder has been initialized with frame data
|
|
406
|
+
*
|
|
407
|
+
* @example
|
|
408
|
+
* ```typescript
|
|
409
|
+
* if (!encoder.isEncoderInitialized) {
|
|
410
|
+
* console.log('Encoder will initialize on first frame');
|
|
411
|
+
* }
|
|
412
|
+
* ```
|
|
413
|
+
*/
|
|
414
|
+
get isEncoderInitialized() {
|
|
415
|
+
return this.initialized;
|
|
416
|
+
}
|
|
417
|
+
/**
|
|
418
|
+
* Codec flags.
|
|
419
|
+
*
|
|
420
|
+
* @returns Current codec flags
|
|
421
|
+
*
|
|
422
|
+
* @throws {Error} If encoder is closed
|
|
423
|
+
*
|
|
424
|
+
* @example
|
|
425
|
+
* ```typescript
|
|
426
|
+
* const flags = encoder.codecFlags;
|
|
427
|
+
* console.log('Current flags:', flags);
|
|
428
|
+
* ```
|
|
429
|
+
*
|
|
430
|
+
* @see {@link setCodecFlags} To set flags
|
|
431
|
+
* @see {@link clearCodecFlags} To clear flags
|
|
432
|
+
* @see {@link hasCodecFlags} To check flags
|
|
433
|
+
*/
|
|
434
|
+
get codecFlags() {
|
|
435
|
+
if (this.isClosed) {
|
|
436
|
+
throw new Error('Cannot get flags on closed encoder');
|
|
437
|
+
}
|
|
438
|
+
return this.codecContext.flags;
|
|
439
|
+
}
|
|
440
|
+
/**
|
|
441
|
+
* Set codec flags.
|
|
442
|
+
*
|
|
443
|
+
* @param flags - One or more flag values to set
|
|
444
|
+
*
|
|
445
|
+
* @throws {Error} If encoder is already initialized or closed
|
|
446
|
+
*
|
|
447
|
+
* @example
|
|
448
|
+
* ```typescript
|
|
449
|
+
* import { AV_CODEC_FLAG_GLOBAL_HEADER, AV_CODEC_FLAG_QSCALE } from 'node-av/constants';
|
|
450
|
+
*
|
|
451
|
+
* // Set multiple flags before initialization
|
|
452
|
+
* encoder.setCodecFlags(AV_CODEC_FLAG_GLOBAL_HEADER, AV_CODEC_FLAG_QSCALE);
|
|
453
|
+
* ```
|
|
454
|
+
*
|
|
455
|
+
* @see {@link clearCodecFlags} To clear flags
|
|
456
|
+
* @see {@link hasCodecFlags} To check flags
|
|
457
|
+
* @see {@link codecFlags} For direct flag access
|
|
458
|
+
*/
|
|
459
|
+
setCodecFlags(...flags) {
|
|
460
|
+
if (this.isClosed) {
|
|
461
|
+
throw new Error('Cannot set flags on closed encoder');
|
|
462
|
+
}
|
|
463
|
+
if (this.initialized) {
|
|
464
|
+
throw new Error('Cannot set flags on already initialized encoder');
|
|
465
|
+
}
|
|
466
|
+
this.codecContext.setFlags(...flags);
|
|
467
|
+
}
|
|
468
|
+
/**
|
|
469
|
+
* Clear codec flags.
|
|
470
|
+
*
|
|
471
|
+
* @param flags - One or more flag values to clear
|
|
472
|
+
*
|
|
473
|
+
* @throws {Error} If encoder is already initialized or closed
|
|
474
|
+
*
|
|
475
|
+
* @example
|
|
476
|
+
* ```typescript
|
|
477
|
+
* import { AV_CODEC_FLAG_QSCALE } from 'node-av/constants';
|
|
478
|
+
*
|
|
479
|
+
* // Clear specific flag before initialization
|
|
480
|
+
* encoder.clearCodecFlags(AV_CODEC_FLAG_QSCALE);
|
|
481
|
+
* ```
|
|
482
|
+
*
|
|
483
|
+
* @see {@link setCodecFlags} To set flags
|
|
484
|
+
* @see {@link hasCodecFlags} To check flags
|
|
485
|
+
* @see {@link codecFlags} For direct flag access
|
|
486
|
+
*/
|
|
487
|
+
clearCodecFlags(...flags) {
|
|
488
|
+
if (this.isClosed) {
|
|
489
|
+
throw new Error('Cannot clear flags on closed encoder');
|
|
490
|
+
}
|
|
491
|
+
if (this.initialized) {
|
|
492
|
+
throw new Error('Cannot clear flags on already initialized encoder');
|
|
493
|
+
}
|
|
494
|
+
this.codecContext.clearFlags(...flags);
|
|
495
|
+
}
|
|
496
|
+
/**
|
|
497
|
+
* Check if codec has specific flags.
|
|
498
|
+
*
|
|
499
|
+
* Tests whether all specified codec flags are set using bitwise AND.
|
|
500
|
+
*
|
|
501
|
+
* @param flags - One or more flag values to check
|
|
502
|
+
*
|
|
503
|
+
* @returns true if all specified flags are set, false otherwise
|
|
504
|
+
*
|
|
505
|
+
* @throws {Error} If encoder is closed
|
|
506
|
+
*
|
|
507
|
+
* @example
|
|
508
|
+
* ```typescript
|
|
509
|
+
* import { AV_CODEC_FLAG_GLOBAL_HEADER } from 'node-av/constants';
|
|
510
|
+
*
|
|
511
|
+
* if (encoder.hasCodecFlags(AV_CODEC_FLAG_GLOBAL_HEADER)) {
|
|
512
|
+
* console.log('Global header flag is set');
|
|
513
|
+
* }
|
|
514
|
+
* ```
|
|
515
|
+
*
|
|
516
|
+
* @see {@link setCodecFlags} To set flags
|
|
517
|
+
* @see {@link clearCodecFlags} To clear flags
|
|
518
|
+
* @see {@link codecFlags} For direct flag access
|
|
519
|
+
*/
|
|
520
|
+
hasCodecFlags(...flags) {
|
|
521
|
+
if (this.isClosed) {
|
|
522
|
+
throw new Error('Cannot check flags on closed encoder');
|
|
523
|
+
}
|
|
524
|
+
return this.codecContext.hasFlags(...flags);
|
|
525
|
+
}
|
|
526
|
+
/**
|
|
527
|
+
* Check if encoder uses hardware acceleration.
|
|
528
|
+
*
|
|
529
|
+
* @returns true if hardware-accelerated
|
|
530
|
+
*
|
|
531
|
+
* @example
|
|
532
|
+
* ```typescript
|
|
533
|
+
* if (encoder.isHardware()) {
|
|
534
|
+
* console.log('Using GPU acceleration');
|
|
535
|
+
* }
|
|
536
|
+
* ```
|
|
537
|
+
*
|
|
538
|
+
* @see {@link HardwareContext} For hardware setup
|
|
539
|
+
*/
|
|
540
|
+
isHardware() {
|
|
541
|
+
return this.codec.isHardwareAcceleratedEncoder();
|
|
542
|
+
}
|
|
543
|
+
/**
|
|
544
|
+
* Check if encoder is ready for processing.
|
|
545
|
+
*
|
|
546
|
+
* @returns true if initialized and ready
|
|
547
|
+
*
|
|
548
|
+
* @example
|
|
549
|
+
* ```typescript
|
|
550
|
+
* if (encoder.isReady()) {
|
|
551
|
+
* const packet = await encoder.encode(frame);
|
|
552
|
+
* }
|
|
553
|
+
* ```
|
|
554
|
+
*/
|
|
555
|
+
isReady() {
|
|
556
|
+
return this.initialized && !this.isClosed;
|
|
557
|
+
}
|
|
558
|
+
/**
|
|
559
|
+
* Send a frame to the encoder.
|
|
560
|
+
*
|
|
561
|
+
* Sends a raw frame to the encoder for encoding.
|
|
562
|
+
* Does not return encoded packets - use {@link receive} to retrieve packets.
|
|
563
|
+
* On first frame, automatically initializes encoder with frame properties.
|
|
564
|
+
* A single frame can produce zero, one, or multiple packets depending on codec buffering.
|
|
565
|
+
*
|
|
566
|
+
* **Important**: This method only SENDS the frame to the encoder.
|
|
567
|
+
* You must call {@link receive} separately (potentially multiple times) to get encoded packets.
|
|
568
|
+
*
|
|
569
|
+
* Direct mapping to avcodec_send_frame().
|
|
570
|
+
*
|
|
571
|
+
* @param frame - Raw frame to send to encoder, or null to flush
|
|
572
|
+
*
|
|
573
|
+
* @throws {FFmpegError} If sending frame fails
|
|
574
|
+
*
|
|
575
|
+
* @example
|
|
576
|
+
* ```typescript
|
|
577
|
+
* // Send frame and receive packets
|
|
578
|
+
* await encoder.encode(frame);
|
|
579
|
+
*
|
|
580
|
+
* // Receive all available packets
|
|
581
|
+
* while (true) {
|
|
582
|
+
* const packet = await encoder.receive();
|
|
583
|
+
* if (!packet) break;
|
|
584
|
+
* console.log(`Encoded packet with PTS: ${packet.pts}`);
|
|
585
|
+
* await output.writePacket(packet);
|
|
586
|
+
* packet.free();
|
|
587
|
+
* }
|
|
588
|
+
* ```
|
|
589
|
+
*
|
|
590
|
+
* @example
|
|
591
|
+
* ```typescript
|
|
592
|
+
* for await (const frame of decoder.frames(input.packets())) {
|
|
593
|
+
* // Send frame
|
|
594
|
+
* await encoder.encode(frame);
|
|
595
|
+
*
|
|
596
|
+
* // Receive available packets
|
|
597
|
+
* let packet;
|
|
598
|
+
* while ((packet = await encoder.receive())) {
|
|
599
|
+
* await output.writePacket(packet);
|
|
600
|
+
* packet.free();
|
|
601
|
+
* }
|
|
602
|
+
* frame.free();
|
|
603
|
+
* }
|
|
604
|
+
* ```
|
|
605
|
+
*
|
|
606
|
+
* @see {@link receive} For receiving encoded packets
|
|
607
|
+
* @see {@link encodeAll} For combined send+receive operation
|
|
608
|
+
* @see {@link packets} For automatic frame iteration
|
|
609
|
+
* @see {@link flush} For end-of-stream handling
|
|
610
|
+
* @see {@link encodeSync} For synchronous version
|
|
611
|
+
*/
|
|
612
|
+
async encode(frame) {
|
|
613
|
+
this.signal?.throwIfAborted();
|
|
614
|
+
if (this.isClosed) {
|
|
615
|
+
return;
|
|
616
|
+
}
|
|
617
|
+
// Null frame = flush encoder
|
|
618
|
+
if (frame === null) {
|
|
619
|
+
await this.flush();
|
|
620
|
+
return;
|
|
621
|
+
}
|
|
622
|
+
// Open encoder if not already done
|
|
623
|
+
this.initializePromise ??= this.initialize(frame);
|
|
624
|
+
await this.initializePromise;
|
|
625
|
+
// Prepare frame for encoding (set quality, validate channel count)
|
|
626
|
+
this.prepareFrameForEncoding(frame);
|
|
627
|
+
const encode = async (newFrame) => {
|
|
628
|
+
const sendRet = await this.codecContext.sendFrame(newFrame);
|
|
629
|
+
if (sendRet < 0 && sendRet !== AVERROR_EOF) {
|
|
630
|
+
FFmpegError.throwIfError(sendRet, 'Failed to send frame to encoder');
|
|
631
|
+
return;
|
|
632
|
+
}
|
|
633
|
+
};
|
|
634
|
+
if (this.audioFrameBuffer) {
|
|
635
|
+
// Push frame into buffer - actual sending happens in receive()
|
|
636
|
+
await this.audioFrameBuffer.push(frame);
|
|
637
|
+
}
|
|
638
|
+
else {
|
|
639
|
+
await encode(frame);
|
|
640
|
+
}
|
|
641
|
+
}
|
|
642
|
+
/**
|
|
643
|
+
* Send a frame to the encoder synchronously.
|
|
644
|
+
* Synchronous version of encode.
|
|
645
|
+
*
|
|
646
|
+
* Sends a raw frame to the encoder for encoding.
|
|
647
|
+
* Does not return encoded packets - use {@link receiveSync} to retrieve packets.
|
|
648
|
+
* On first frame, automatically initializes encoder with frame properties.
|
|
649
|
+
* A single frame can produce zero, one, or multiple packets depending on codec buffering.
|
|
650
|
+
*
|
|
651
|
+
* **Important**: This method only SENDS the frame to the encoder.
|
|
652
|
+
* You must call {@link receiveSync} separately (potentially multiple times) to get encoded packets.
|
|
653
|
+
*
|
|
654
|
+
* Direct mapping to avcodec_send_frame().
|
|
655
|
+
*
|
|
656
|
+
* @param frame - Raw frame to send to encoder, or null to flush
|
|
657
|
+
*
|
|
658
|
+
* @throws {FFmpegError} If sending frame fails
|
|
659
|
+
*
|
|
660
|
+
* @example
|
|
661
|
+
* ```typescript
|
|
662
|
+
* // Send frame and receive packets
|
|
663
|
+
* encoder.encodeSync(frame);
|
|
664
|
+
*
|
|
665
|
+
* // Receive all available packets
|
|
666
|
+
* let packet;
|
|
667
|
+
* while ((packet = encoder.receiveSync())) {
|
|
668
|
+
* console.log(`Encoded packet with PTS: ${packet.pts}`);
|
|
669
|
+
* output.writePacketSync(packet);
|
|
670
|
+
* packet.free();
|
|
671
|
+
* }
|
|
672
|
+
* ```
|
|
673
|
+
*
|
|
674
|
+
* @see {@link receiveSync} For receiving encoded packets
|
|
675
|
+
* @see {@link encodeAllSync} For combined send+receive operation
|
|
676
|
+
* @see {@link packetsSync} For automatic frame iteration
|
|
677
|
+
* @see {@link flushSync} For end-of-stream handling
|
|
678
|
+
* @see {@link encode} For async version
|
|
679
|
+
*/
|
|
680
|
+
encodeSync(frame) {
|
|
681
|
+
if (this.isClosed) {
|
|
682
|
+
return;
|
|
683
|
+
}
|
|
684
|
+
// Null frame = flush encoder
|
|
685
|
+
if (frame === null) {
|
|
686
|
+
this.flushSync();
|
|
687
|
+
return;
|
|
688
|
+
}
|
|
689
|
+
// Open encoder if not already done
|
|
690
|
+
if (!this.initialized) {
|
|
691
|
+
this.initializeSync(frame);
|
|
692
|
+
}
|
|
693
|
+
// Prepare frame for encoding (set quality, validate channel count)
|
|
694
|
+
this.prepareFrameForEncoding(frame);
|
|
695
|
+
const encode = (newFrame) => {
|
|
696
|
+
const sendRet = this.codecContext.sendFrameSync(newFrame);
|
|
697
|
+
if (sendRet < 0 && sendRet !== AVERROR_EOF) {
|
|
698
|
+
FFmpegError.throwIfError(sendRet, 'Failed to send frame to encoder');
|
|
699
|
+
return;
|
|
700
|
+
}
|
|
701
|
+
};
|
|
702
|
+
if (this.audioFrameBuffer) {
|
|
703
|
+
// Push frame into buffer - actual sending happens in receiveSync()
|
|
704
|
+
this.audioFrameBuffer.pushSync(frame);
|
|
705
|
+
}
|
|
706
|
+
else {
|
|
707
|
+
encode(frame);
|
|
708
|
+
}
|
|
709
|
+
}
|
|
710
|
+
/**
|
|
711
|
+
* Encode a frame to packets.
|
|
712
|
+
*
|
|
713
|
+
* Sends a frame to the encoder and receives all available encoded packets.
|
|
714
|
+
* Returns array of packets - may be empty if encoder needs more data.
|
|
715
|
+
* On first frame, automatically initializes encoder with frame properties.
|
|
716
|
+
* One frame can produce zero, one, or multiple packets depending on codec.
|
|
717
|
+
*
|
|
718
|
+
* Direct mapping to avcodec_send_frame() and avcodec_receive_packet().
|
|
719
|
+
*
|
|
720
|
+
* @param frame - Raw frame to encode (or null to flush)
|
|
721
|
+
*
|
|
722
|
+
* @returns Array of encoded packets (empty if more data needed or encoder is closed)
|
|
723
|
+
*
|
|
724
|
+
* @throws {FFmpegError} If encoding fails
|
|
725
|
+
*
|
|
726
|
+
* @example
|
|
727
|
+
* ```typescript
|
|
728
|
+
* const packets = await encoder.encodeAll(frame);
|
|
729
|
+
* for (const packet of packets) {
|
|
730
|
+
* console.log(`Encoded packet with PTS: ${packet.pts}`);
|
|
731
|
+
* await output.writePacket(packet);
|
|
732
|
+
* packet.free();
|
|
733
|
+
* }
|
|
734
|
+
* ```
|
|
735
|
+
*
|
|
736
|
+
* @example
|
|
737
|
+
* ```typescript
|
|
738
|
+
* // Encode loop
|
|
739
|
+
* for await (const frame of decoder.frames(input.packets())) {
|
|
740
|
+
* const packets = await encoder.encodeAll(frame);
|
|
741
|
+
* for (const packet of packets) {
|
|
742
|
+
* await output.writePacket(packet);
|
|
743
|
+
* packet.free();
|
|
744
|
+
* }
|
|
745
|
+
* frame.free();
|
|
746
|
+
* }
|
|
747
|
+
* ```
|
|
748
|
+
*
|
|
749
|
+
* @see {@link encode} For single packet encoding
|
|
750
|
+
* @see {@link packets} For automatic frame iteration
|
|
751
|
+
* @see {@link flush} For end-of-stream handling
|
|
752
|
+
* @see {@link encodeAllSync} For synchronous version
|
|
753
|
+
*/
|
|
754
|
+
async encodeAll(frame) {
|
|
755
|
+
this.signal?.throwIfAborted();
|
|
756
|
+
await this.encode(frame);
|
|
757
|
+
// Receive all available packets
|
|
758
|
+
const packets = [];
|
|
759
|
+
while (true) {
|
|
760
|
+
const packet = await this.receive();
|
|
761
|
+
if (!packet)
|
|
762
|
+
break; // Stop on EAGAIN or EOF
|
|
763
|
+
packets.push(packet); // Only push actual packets
|
|
764
|
+
}
|
|
765
|
+
return packets;
|
|
766
|
+
}
|
|
767
|
+
/**
|
|
768
|
+
* Encode a frame to packets synchronously.
|
|
769
|
+
* Synchronous version of encodeAll.
|
|
770
|
+
*
|
|
771
|
+
* Sends a frame to the encoder and receives all available encoded packets.
|
|
772
|
+
* Returns array of packets - may be empty if encoder needs more data.
|
|
773
|
+
* On first frame, automatically initializes encoder with frame properties.
|
|
774
|
+
* One frame can produce zero, one, or multiple packets depending on codec.
|
|
775
|
+
*
|
|
776
|
+
* Direct mapping to avcodec_send_frame() and avcodec_receive_packet().
|
|
777
|
+
*
|
|
778
|
+
* @param frame - Raw frame to encode (or null to flush)
|
|
779
|
+
*
|
|
780
|
+
* @returns Array of encoded packets (empty if more data needed or encoder is closed)
|
|
781
|
+
*
|
|
782
|
+
* @throws {FFmpegError} If encoding fails
|
|
783
|
+
*
|
|
784
|
+
* @example
|
|
785
|
+
* ```typescript
|
|
786
|
+
* const packets = encoder.encodeAllSync(frame);
|
|
787
|
+
* for (const packet of packets) {
|
|
788
|
+
* console.log(`Encoded packet with PTS: ${packet.pts}`);
|
|
789
|
+
* output.writePacketSync(packet);
|
|
790
|
+
* packet.free();
|
|
791
|
+
* }
|
|
792
|
+
* ```
|
|
793
|
+
*
|
|
794
|
+
* @example
|
|
795
|
+
* ```typescript
|
|
796
|
+
* // Encode loop
|
|
797
|
+
* for (const frame of decoder.framesSync(packets)) {
|
|
798
|
+
* const packets = encoder.encodeAllSync(frame);
|
|
799
|
+
* for (const packet of packets) {
|
|
800
|
+
* output.writePacketSync(packet);
|
|
801
|
+
* packet.free();
|
|
802
|
+
* }
|
|
803
|
+
* frame.free();
|
|
804
|
+
* }
|
|
805
|
+
* ```
|
|
806
|
+
*
|
|
807
|
+
* @see {@link encodeSync} For single packet encoding
|
|
808
|
+
* @see {@link packetsSync} For automatic frame iteration
|
|
809
|
+
* @see {@link flushSync} For end-of-stream handling
|
|
810
|
+
* @see {@link encodeAll} For async version
|
|
811
|
+
*/
|
|
812
|
+
encodeAllSync(frame) {
|
|
813
|
+
this.encodeSync(frame);
|
|
814
|
+
// Receive all available packets
|
|
815
|
+
const packets = [];
|
|
816
|
+
while (true) {
|
|
817
|
+
const packet = this.receiveSync();
|
|
818
|
+
if (!packet)
|
|
819
|
+
break; // Stop on EAGAIN or EOF
|
|
820
|
+
packets.push(packet); // Only push actual packets
|
|
821
|
+
}
|
|
822
|
+
return packets;
|
|
823
|
+
}
|
|
824
|
+
/**
|
|
825
|
+
* Encode frame stream to packet stream.
|
|
826
|
+
*
|
|
827
|
+
* High-level async generator for complete encoding pipeline.
|
|
828
|
+
* Encoder is only flushed when EOF (null) signal is explicitly received.
|
|
829
|
+
* Primary interface for stream-based encoding.
|
|
830
|
+
*
|
|
831
|
+
* **EOF Handling:**
|
|
832
|
+
* - Send null to flush encoder and get remaining buffered packets
|
|
833
|
+
* - Generator yields null after flushing when null is received
|
|
834
|
+
* - No automatic flushing - encoder stays open until EOF or close()
|
|
835
|
+
*
|
|
836
|
+
* @param frames - Async iterable of frames, single frame, or null to flush
|
|
837
|
+
*
|
|
838
|
+
* @yields {Packet | null} Encoded packets, followed by null when explicitly flushed
|
|
839
|
+
*
|
|
840
|
+
* @throws {FFmpegError} If encoding fails
|
|
841
|
+
*
|
|
842
|
+
* @example
|
|
843
|
+
* ```typescript
|
|
844
|
+
* // Stream of frames with automatic EOF propagation
|
|
845
|
+
* for await (const packet of encoder.packets(decoder.frames(input.packets()))) {
|
|
846
|
+
* if (packet === null) {
|
|
847
|
+
* console.log('Encoder flushed');
|
|
848
|
+
* break;
|
|
849
|
+
* }
|
|
850
|
+
* await output.writePacket(packet);
|
|
851
|
+
* packet.free(); // Must free output packets
|
|
852
|
+
* }
|
|
853
|
+
* ```
|
|
854
|
+
*
|
|
855
|
+
* @example
|
|
856
|
+
* ```typescript
|
|
857
|
+
* // Single frame - no automatic flush
|
|
858
|
+
* for await (const packet of encoder.packets(singleFrame)) {
|
|
859
|
+
* await output.writePacket(packet);
|
|
860
|
+
* packet.free();
|
|
861
|
+
* }
|
|
862
|
+
* // Encoder remains open, buffered packets not flushed
|
|
863
|
+
* ```
|
|
864
|
+
*
|
|
865
|
+
* @example
|
|
866
|
+
* ```typescript
|
|
867
|
+
* // Explicit flush with EOF
|
|
868
|
+
* for await (const packet of encoder.packets(null)) {
|
|
869
|
+
* if (packet === null) {
|
|
870
|
+
* console.log('All buffered packets flushed');
|
|
871
|
+
* break;
|
|
872
|
+
* }
|
|
873
|
+
* console.log('Buffered packet:', packet.pts);
|
|
874
|
+
* await output.writePacket(packet);
|
|
875
|
+
* packet.free();
|
|
876
|
+
* }
|
|
877
|
+
* ```
|
|
878
|
+
*
|
|
879
|
+
* @see {@link encode} For single frame encoding
|
|
880
|
+
* @see {@link Decoder.frames} For frame source
|
|
881
|
+
* @see {@link packetsSync} For sync version
|
|
882
|
+
*/
|
|
883
|
+
async *packets(frames) {
    // Captured so the nested generator function expressions can reach the
    // encoder instance regardless of their own `this` binding.
    const self = this;
    // Encode one frame, then drain every packet currently available.
    // receive() returns a falsy value on EAGAIN/EOF, ending the inner loop.
    const processFrame = async function* (frame) {
        await self.encode(frame);
        while (true) {
            const packet = await self.receive();
            if (!packet)
                break;
            yield packet;
        }
    }.bind(this);
    // Flush the encoder, yield all buffered packets, then yield null as the
    // explicit end-of-stream marker for downstream consumers.
    const finalize = async function* () {
        for await (const remaining of self.flushPackets()) {
            yield remaining;
        }
        yield null;
    }.bind(this);
    // Case 1: bare null input -> flush only, no frames to encode.
    if (frames === null) {
        yield* finalize();
        return;
    }
    // Case 2: a single Frame -> encode it but do NOT flush; the encoder
    // stays open for more frames.
    if (frames instanceof Frame) {
        yield* processFrame(frames);
        return;
    }
    // Case 3: async iterable of frames. The env_1/__addDisposableResource/
    // __disposeResources triple appears to be compiler-emitted support for a
    // `using` declaration (explicit resource management) — it disposes each
    // frame when its iteration step ends, even on error. NOTE(review): the
    // catch clause records the error into env_1; presumably
    // __disposeResources rethrows it after disposal — confirm against the
    // helper's definition earlier in this file.
    for await (const frame_1 of frames) {
        const env_1 = { stack: [], error: void 0, hasError: false };
        try {
            const frame = __addDisposableResource(env_1, frame_1, false);
            this.signal?.throwIfAborted();
            // A null element in the stream is the upstream EOF signal.
            if (frame === null) {
                yield* finalize();
                return;
            }
            yield* processFrame(frame);
        }
        catch (e_1) {
            env_1.error = e_1;
            env_1.hasError = true;
        }
        finally {
            __disposeResources(env_1);
        }
    }
    // No fallback flush here - the encoder is only flushed on explicit EOF.
}
|
|
928
|
+
/**
|
|
929
|
+
* Encode frame stream to packet stream synchronously.
|
|
930
|
+
* Synchronous version of packets.
|
|
931
|
+
*
|
|
932
|
+
* High-level sync generator for complete encoding pipeline.
|
|
933
|
+
* Encoder is only flushed when EOF (null) signal is explicitly received.
|
|
934
|
+
* Primary interface for stream-based encoding.
|
|
935
|
+
*
|
|
936
|
+
* **EOF Handling:**
|
|
937
|
+
* - Send null to flush encoder and get remaining buffered packets
|
|
938
|
+
* - Generator yields null after flushing when null is received
|
|
939
|
+
* - No automatic flushing - encoder stays open until EOF or close()
|
|
940
|
+
*
|
|
941
|
+
* @param frames - Iterable of frames, single frame, or null to flush
|
|
942
|
+
*
|
|
943
|
+
* @yields {Packet | null} Encoded packets, followed by null when explicitly flushed
|
|
944
|
+
*
|
|
945
|
+
* @throws {FFmpegError} If encoding fails
|
|
946
|
+
*
|
|
947
|
+
* @example
|
|
948
|
+
* ```typescript
|
|
949
|
+
* // Stream of frames with automatic EOF propagation
|
|
950
|
+
* for (const packet of encoder.packetsSync(decoder.framesSync(packets))) {
|
|
951
|
+
* if (packet === null) {
|
|
952
|
+
* console.log('Encoder flushed');
|
|
953
|
+
* break;
|
|
954
|
+
* }
|
|
955
|
+
* output.writePacketSync(packet);
|
|
956
|
+
* packet.free(); // Must free output packets
|
|
957
|
+
* }
|
|
958
|
+
* ```
|
|
959
|
+
*
|
|
960
|
+
* @example
|
|
961
|
+
* ```typescript
|
|
962
|
+
* // Single frame - no automatic flush
|
|
963
|
+
* for (const packet of encoder.packetsSync(singleFrame)) {
|
|
964
|
+
* output.writePacketSync(packet);
|
|
965
|
+
* packet.free();
|
|
966
|
+
* }
|
|
967
|
+
* // Encoder remains open, buffered packets not flushed
|
|
968
|
+
* ```
|
|
969
|
+
*
|
|
970
|
+
* @example
|
|
971
|
+
* ```typescript
|
|
972
|
+
* // Explicit flush with EOF
|
|
973
|
+
* for (const packet of encoder.packetsSync(null)) {
|
|
974
|
+
* if (packet === null) {
|
|
975
|
+
* console.log('All buffered packets flushed');
|
|
976
|
+
* break;
|
|
977
|
+
* }
|
|
978
|
+
* console.log('Buffered packet:', packet.pts);
|
|
979
|
+
* output.writePacketSync(packet);
|
|
980
|
+
* packet.free();
|
|
981
|
+
* }
|
|
982
|
+
* ```
|
|
983
|
+
*
|
|
984
|
+
* @see {@link encodeSync} For single frame encoding
|
|
985
|
+
* @see {@link Decoder.framesSync} For frame source
|
|
986
|
+
* @see {@link packets} For async version
|
|
987
|
+
*/
|
|
988
|
+
*packetsSync(frames) {
    // Captured so the nested generator function expressions can reach the
    // encoder instance regardless of their own `this` binding.
    const self = this;
    // Helper: Encode frame and yield all available packets (filters out EAGAIN nulls and EOF)
    const processFrame = function* (frame) {
        self.encodeSync(frame);
        // Receive ALL packets (filter out null/EAGAIN and EOF)
        while (true) {
            const packet = self.receiveSync();
            if (!packet)
                break; // EAGAIN or EOF - no more packets available
            yield packet; // Only yield actual packets
        }
    }.bind(this);
    // Helper: Flush encoder and signal EOF
    const finalize = function* () {
        for (const remaining of self.flushPacketsSync()) {
            yield remaining; // Only yield actual packets
        }
        yield null; // Signal end-of-stream
    }.bind(this);
    // Case 1: EOF input -> flush only
    if (frames === null) {
        yield* finalize();
        return;
    }
    // Case 2: Single frame - NO AUTOMATIC FLUSH
    if (frames instanceof Frame) {
        yield* processFrame(frames);
        return; // No finalize() call!
    }
    // Case 3: Iterable of frames. The env_2/__addDisposableResource/
    // __disposeResources triple appears to be compiler-emitted support for a
    // `using` declaration — each frame is disposed when its iteration step
    // ends, even on error. NOTE(review): errors are recorded into env_2 and
    // presumably rethrown by __disposeResources — confirm against the
    // helper's definition earlier in this file.
    for (const frame_2 of frames) {
        const env_2 = { stack: [], error: void 0, hasError: false };
        try {
            const frame = __addDisposableResource(env_2, frame_2, false);
            // Check for EOF signal from upstream
            if (frame === null) {
                yield* finalize();
                return;
            }
            yield* processFrame(frame);
        }
        catch (e_2) {
            env_2.error = e_2;
            env_2.hasError = true;
        }
        finally {
            __disposeResources(env_2);
        }
    }
    // No fallback flush - only flush on explicit EOF
}
|
|
1040
|
+
/**
|
|
1041
|
+
* Flush encoder and signal end-of-stream.
|
|
1042
|
+
*
|
|
1043
|
+
* Sends null frame to encoder to signal end-of-stream.
|
|
1044
|
+
* Does nothing if encoder was never initialized or is closed.
|
|
1045
|
+
* Must call receive() to get remaining buffered packets.
|
|
1046
|
+
*
|
|
1047
|
+
* Direct mapping to avcodec_send_frame(NULL).
|
|
1048
|
+
*
|
|
1049
|
+
* @example
|
|
1050
|
+
* ```typescript
|
|
1051
|
+
* // Signal end of stream
|
|
1052
|
+
* await encoder.flush();
|
|
1053
|
+
*
|
|
1054
|
+
* // Then get remaining packets
|
|
1055
|
+
* let packet;
|
|
1056
|
+
* while ((packet = await encoder.receive()) !== null) {
|
|
1057
|
+
* console.log('Got buffered packet');
|
|
1058
|
+
* await output.writePacket(packet);
|
|
1059
|
+
* packet.free();
|
|
1060
|
+
* }
|
|
1061
|
+
* ```
|
|
1062
|
+
*
|
|
1063
|
+
* @see {@link flushPackets} For async iteration
|
|
1064
|
+
* @see {@link receive} For getting buffered packets
|
|
1065
|
+
* @see {@link flushSync} For synchronous version
|
|
1066
|
+
*/
|
|
1067
|
+
async flush() {
    this.signal?.throwIfAborted();
    // A never-initialized or closed encoder has nothing to flush.
    if (this.isClosed || !this.initialized) {
        return;
    }
    // If using AudioFrameBuffer, flush remaining buffered samples first
    if (this.audioFrameBuffer && this.audioFrameBuffer.size > 0) {
        // Pull any remaining partial frame (may be less than frameSize)
        // For the final frame, we pad or truncate as needed
        let _bufferedFrame;
        while (!this.isClosed && (_bufferedFrame = await this.audioFrameBuffer.pull()) !== null) {
            // env_3/__addDisposableResource/__disposeResources appear to be
            // compiler-emitted `using` helpers: each pulled frame is disposed
            // when the loop body ends, even on error. NOTE(review): the
            // sendFrame return code is ignored here — presumably intentional
            // best-effort draining; confirm against the encoder's design.
            const env_3 = { stack: [], error: void 0, hasError: false };
            try {
                const bufferedFrame = __addDisposableResource(env_3, _bufferedFrame, false);
                await this.codecContext.sendFrame(bufferedFrame);
            }
            catch (e_3) {
                env_3.error = e_3;
                env_3.hasError = true;
            }
            finally {
                __disposeResources(env_3);
            }
        }
    }
    // Send flush frame (null)
    const ret = await this.codecContext.sendFrame(null);
    // EOF means the encoder was already flushed; EAGAIN is tolerated too.
    // Any other negative return is surfaced as an FFmpegError.
    if (ret < 0 && ret !== AVERROR_EOF) {
        if (ret !== AVERROR_EAGAIN) {
            FFmpegError.throwIfError(ret, 'Failed to flush encoder');
        }
    }
}
|
|
1100
|
+
/**
|
|
1101
|
+
* Flush encoder and signal end-of-stream synchronously.
|
|
1102
|
+
* Synchronous version of flush.
|
|
1103
|
+
*
|
|
1104
|
+
* Sends null frame to encoder to signal end-of-stream.
|
|
1105
|
+
* Does nothing if encoder was never initialized or is closed.
|
|
1106
|
+
* Must call receiveSync() to get remaining buffered packets.
|
|
1107
|
+
*
|
|
1108
|
+
* Direct mapping to avcodec_send_frame(NULL).
|
|
1109
|
+
*
|
|
1110
|
+
* @example
|
|
1111
|
+
* ```typescript
|
|
1112
|
+
* // Signal end of stream
|
|
1113
|
+
* encoder.flushSync();
|
|
1114
|
+
*
|
|
1115
|
+
* // Then get remaining packets
|
|
1116
|
+
* let packet;
|
|
1117
|
+
* while ((packet = encoder.receiveSync()) !== null) {
|
|
1118
|
+
* console.log('Got buffered packet');
|
|
1119
|
+
* output.writePacketSync(packet);
|
|
1120
|
+
* packet.free();
|
|
1121
|
+
* }
|
|
1122
|
+
* ```
|
|
1123
|
+
*
|
|
1124
|
+
* @see {@link flushPacketsSync} For sync iteration
|
|
1125
|
+
* @see {@link receiveSync} For getting buffered packets
|
|
1126
|
+
* @see {@link flush} For async version
|
|
1127
|
+
*/
|
|
1128
|
+
flushSync() {
    // A never-initialized or closed encoder has nothing to flush.
    if (this.isClosed || !this.initialized) {
        return;
    }
    // If using AudioFrameBuffer, flush remaining buffered samples first
    if (this.audioFrameBuffer && this.audioFrameBuffer.size > 0) {
        // Pull any remaining partial frame (may be less than frameSize)
        // For the final frame, we pad or truncate as needed
        let _bufferedFrame;
        while (!this.isClosed && (_bufferedFrame = this.audioFrameBuffer.pullSync()) !== null) {
            // env_4/__addDisposableResource/__disposeResources appear to be
            // compiler-emitted `using` helpers: each pulled frame is disposed
            // when the loop body ends, even on error. NOTE(review): the
            // sendFrameSync return code is ignored here — presumably
            // intentional best-effort draining; confirm against design.
            const env_4 = { stack: [], error: void 0, hasError: false };
            try {
                const bufferedFrame = __addDisposableResource(env_4, _bufferedFrame, false);
                this.codecContext.sendFrameSync(bufferedFrame);
            }
            catch (e_4) {
                env_4.error = e_4;
                env_4.hasError = true;
            }
            finally {
                __disposeResources(env_4);
            }
        }
    }
    // Send flush frame (null)
    const ret = this.codecContext.sendFrameSync(null);
    // EOF means the encoder was already flushed; EAGAIN is tolerated too.
    // Any other negative return is surfaced as an FFmpegError.
    if (ret < 0 && ret !== AVERROR_EOF) {
        if (ret !== AVERROR_EAGAIN) {
            FFmpegError.throwIfError(ret, 'Failed to flush encoder');
        }
    }
}
|
|
1160
|
+
/**
|
|
1161
|
+
* Flush all buffered packets as async generator.
|
|
1162
|
+
*
|
|
1163
|
+
* Convenient async iteration over remaining packets.
|
|
1164
|
+
* Automatically handles flush and repeated receive calls.
|
|
1165
|
+
* Returns immediately if encoder was never initialized or is closed.
|
|
1166
|
+
*
|
|
1167
|
+
* @yields {Packet} Buffered packets
|
|
1168
|
+
*
|
|
1169
|
+
* @example
|
|
1170
|
+
* ```typescript
|
|
1171
|
+
* // Flush at end of encoding
|
|
1172
|
+
* for await (const packet of encoder.flushPackets()) {
|
|
1173
|
+
* console.log('Processing buffered packet');
|
|
1174
|
+
* await output.writePacket(packet);
|
|
1175
|
+
* packet.free();
|
|
1176
|
+
* }
|
|
1177
|
+
* ```
|
|
1178
|
+
*
|
|
1179
|
+
* @see {@link encode} For sending frames and receiving packets
|
|
1180
|
+
* @see {@link flush} For signaling end-of-stream
|
|
1181
|
+
* @see {@link flushPacketsSync} For synchronous version
|
|
1182
|
+
*/
|
|
1183
|
+
async *flushPackets() {
|
|
1184
|
+
// Send flush signal
|
|
1185
|
+
await this.flush();
|
|
1186
|
+
// Yield all remaining packets (filter out null/EAGAIN and EOF)
|
|
1187
|
+
while (true) {
|
|
1188
|
+
const packet = await this.receive();
|
|
1189
|
+
if (!packet)
|
|
1190
|
+
break; // Stop on EAGAIN or EOF
|
|
1191
|
+
yield packet; // Only yield actual packets
|
|
1192
|
+
}
|
|
1193
|
+
}
|
|
1194
|
+
/**
|
|
1195
|
+
* Flush all buffered packets as generator synchronously.
|
|
1196
|
+
* Synchronous version of flushPackets.
|
|
1197
|
+
*
|
|
1198
|
+
* Convenient sync iteration over remaining packets.
|
|
1199
|
+
* Automatically handles flush and repeated receive calls.
|
|
1200
|
+
* Returns immediately if encoder was never initialized or is closed.
|
|
1201
|
+
*
|
|
1202
|
+
* @yields {Packet} Buffered packets
|
|
1203
|
+
*
|
|
1204
|
+
* @example
|
|
1205
|
+
* ```typescript
|
|
1206
|
+
* // Flush at end of encoding
|
|
1207
|
+
* for (const packet of encoder.flushPacketsSync()) {
|
|
1208
|
+
* console.log('Processing buffered packet');
|
|
1209
|
+
* output.writePacketSync(packet);
|
|
1210
|
+
* packet.free();
|
|
1211
|
+
* }
|
|
1212
|
+
* ```
|
|
1213
|
+
*
|
|
1214
|
+
* @see {@link encodeSync} For sending frames and receiving packets
|
|
1215
|
+
* @see {@link flushSync} For signaling end-of-stream
|
|
1216
|
+
* @see {@link flushPackets} For async version
|
|
1217
|
+
*/
|
|
1218
|
+
*flushPacketsSync() {
|
|
1219
|
+
// Send flush signal
|
|
1220
|
+
this.flushSync();
|
|
1221
|
+
// Yield all remaining packets (filter out null/EAGAIN and EOF)
|
|
1222
|
+
while (true) {
|
|
1223
|
+
const packet = this.receiveSync();
|
|
1224
|
+
if (!packet)
|
|
1225
|
+
break; // Stop on EAGAIN or EOF
|
|
1226
|
+
yield packet; // Only yield actual packets
|
|
1227
|
+
}
|
|
1228
|
+
}
|
|
1229
|
+
/**
|
|
1230
|
+
* Receive packet from encoder.
|
|
1231
|
+
*
|
|
1232
|
+
* Gets encoded packets from the codec's internal buffer.
|
|
1233
|
+
* Handles packet cloning and error checking.
|
|
1234
|
+
* Implements FFmpeg's send/receive pattern.
|
|
1235
|
+
*
|
|
1236
|
+
* **Return Values:**
|
|
1237
|
+
* - `Packet` - Successfully encoded packet (AVERROR >= 0)
|
|
1238
|
+
* - `null` - Need more input frames (AVERROR_EAGAIN), or encoder not initialized
|
|
1239
|
+
* - `undefined` - End of stream reached (AVERROR_EOF), or encoder is closed
|
|
1240
|
+
*
|
|
1241
|
+
* Direct mapping to avcodec_receive_packet().
|
|
1242
|
+
*
|
|
1243
|
+
* @returns Cloned packet, null if need more data, or undefined if stream ended
|
|
1244
|
+
*
|
|
1245
|
+
* @throws {FFmpegError} If receive fails with error other than AVERROR_EAGAIN or AVERROR_EOF
|
|
1246
|
+
*
|
|
1247
|
+
* @throws {Error} If packet cloning fails (out of memory)
|
|
1248
|
+
*
|
|
1249
|
+
* @example
|
|
1250
|
+
* ```typescript
|
|
1251
|
+
* // Process all buffered packets
|
|
1252
|
+
* while (true) {
|
|
1253
|
+
* const packet = await encoder.receive();
|
|
1254
|
+
* if (!packet) break; // Stop on EAGAIN or EOF
|
|
1255
|
+
* console.log(`Got packet with PTS: ${packet.pts}`);
|
|
1256
|
+
* await output.writePacket(packet);
|
|
1257
|
+
* packet.free();
|
|
1258
|
+
* }
|
|
1259
|
+
* ```
|
|
1260
|
+
*
|
|
1261
|
+
* @example
|
|
1262
|
+
* ```typescript
|
|
1263
|
+
* // Handle each return value explicitly
|
|
1264
|
+
* const packet = await encoder.receive();
|
|
1265
|
+
* if (packet === EOF) {
|
|
1266
|
+
* console.log('Encoder stream ended');
|
|
1267
|
+
* } else if (packet === null) {
|
|
1268
|
+
* console.log('Need more input frames');
|
|
1269
|
+
* } else {
|
|
1270
|
+
* console.log(`Got packet: pts=${packet.pts}`);
|
|
1271
|
+
* await output.writePacket(packet);
|
|
1272
|
+
* packet.free();
|
|
1273
|
+
* }
|
|
1274
|
+
* ```
|
|
1275
|
+
*
|
|
1276
|
+
* @see {@link encode} For sending frames and receiving packets
|
|
1277
|
+
* @see {@link flush} For signaling end-of-stream
|
|
1278
|
+
* @see {@link receiveSync} For synchronous version
|
|
1279
|
+
* @see {@link EOF} For end-of-stream signal
|
|
1280
|
+
*/
|
|
1281
|
+
    async receive() {
        // Closed encoder can never produce another packet: report EOF.
        if (this.isClosed) {
            return EOF;
        }
        // Not yet opened: behave like EAGAIN (caller must feed frames first).
        if (!this.initialized) {
            return null;
        }
        // Clear previous packet data (this.packet is reused across calls).
        this.packet.unref();
        // If a fixed-frame-size audio encoder buffered samples, push one
        // complete chunk into the codec before polling for output.
        // NOTE(review): env_5 / __addDisposableResource / __disposeResources is
        // the TypeScript lowering of a `using` declaration — the pulled frame
        // is disposed in the finally block, and __disposeResources presumably
        // rethrows any error captured in env_5 (standard tslib behavior —
        // confirm against the tslib helpers at the top of this file).
        if (this.audioFrameBuffer?.hasFrame()) {
            const env_5 = { stack: [], error: void 0, hasError: false };
            try {
                const bufferedFrame = __addDisposableResource(env_5, await this.audioFrameBuffer.pull(), false);
                if (bufferedFrame) {
                    await this.codecContext.sendFrame(bufferedFrame);
                }
            }
            catch (e_5) {
                env_5.error = e_5;
                env_5.hasError = true;
            }
            finally {
                __disposeResources(env_5);
            }
        }
        // Poll the codec for an encoded packet (avcodec_receive_packet).
        const ret = await this.codecContext.receivePacket(this.packet);
        if (ret === 0) {
            // Set packet timebase to codec timebase so downstream rescaling works.
            this.packet.timeBase = this.codecContext.timeBase;
            // Mark packet as trusted (it originates from our own encoder).
            this.packet.setFlags(AV_PKT_FLAG_TRUSTED);
            // Clone for the user: this.packet is internal and reused next call.
            const cloned = this.packet.clone();
            if (!cloned) {
                throw new Error('Failed to clone packet (out of memory)');
            }
            return cloned;
        }
        else if (ret === AVERROR_EAGAIN) {
            // Encoder needs more input frames before it can emit a packet.
            return null;
        }
        else if (ret === AVERROR_EOF) {
            // Encoder fully drained after flush.
            return EOF;
        }
        else {
            // Any other negative code is a real error; throwIfError raises,
            // so the trailing return is only for control-flow completeness.
            FFmpegError.throwIfError(ret, 'Failed to receive packet');
            return null;
        }
    }
|
|
1333
|
+
/**
|
|
1334
|
+
* Receive packet from encoder synchronously.
|
|
1335
|
+
* Synchronous version of receive.
|
|
1336
|
+
*
|
|
1337
|
+
* Gets encoded packets from the codec's internal buffer.
|
|
1338
|
+
* Handles packet cloning and error checking.
|
|
1339
|
+
* Implements FFmpeg's send/receive pattern.
|
|
1340
|
+
*
|
|
1341
|
+
* **Return Values:**
|
|
1342
|
+
* - `Packet` - Successfully encoded packet (AVERROR >= 0)
|
|
1343
|
+
* - `null` - Need more input frames (AVERROR_EAGAIN), or encoder not initialized
|
|
1344
|
+
* - `undefined` - End of stream reached (AVERROR_EOF), or encoder is closed
|
|
1345
|
+
*
|
|
1346
|
+
* Direct mapping to avcodec_receive_packet().
|
|
1347
|
+
*
|
|
1348
|
+
* @returns Cloned packet, null if need more data, or undefined if stream ended
|
|
1349
|
+
*
|
|
1350
|
+
* @throws {FFmpegError} If receive fails with error other than AVERROR_EAGAIN or AVERROR_EOF
|
|
1351
|
+
*
|
|
1352
|
+
* @throws {Error} If packet cloning fails (out of memory)
|
|
1353
|
+
*
|
|
1354
|
+
* @example
|
|
1355
|
+
* ```typescript
|
|
1356
|
+
* // Process all buffered packets
|
|
1357
|
+
* while (true) {
|
|
1358
|
+
* const packet = encoder.receiveSync();
|
|
1359
|
+
* if (!packet) break; // Stop on EAGAIN or EOF
|
|
1360
|
+
* console.log(`Got packet with PTS: ${packet.pts}`);
|
|
1361
|
+
* output.writePacketSync(packet);
|
|
1362
|
+
* packet.free();
|
|
1363
|
+
* }
|
|
1364
|
+
* ```
|
|
1365
|
+
*
|
|
1366
|
+
* @example
|
|
1367
|
+
* ```typescript
|
|
1368
|
+
* // Handle each return value explicitly
|
|
1369
|
+
* const packet = encoder.receiveSync();
|
|
1370
|
+
* if (packet === EOF) {
|
|
1371
|
+
* console.log('Encoder stream ended');
|
|
1372
|
+
* } else if (packet === null) {
|
|
1373
|
+
* console.log('Need more input frames');
|
|
1374
|
+
* } else {
|
|
1375
|
+
* console.log(`Got packet: pts=${packet.pts}`);
|
|
1376
|
+
* output.writePacketSync(packet);
|
|
1377
|
+
* packet.free();
|
|
1378
|
+
* }
|
|
1379
|
+
* ```
|
|
1380
|
+
*
|
|
1381
|
+
* @see {@link encodeSync} For sending frames and receiving packets
|
|
1382
|
+
* @see {@link flushSync} For signaling end-of-stream
|
|
1383
|
+
* @see {@link receive} For async version
|
|
1384
|
+
* @see {@link EOF} For end-of-stream signal
|
|
1385
|
+
*/
|
|
1386
|
+
    receiveSync() {
        // Closed encoder can never produce another packet: report EOF.
        if (this.isClosed) {
            return EOF;
        }
        // Not yet opened: behave like EAGAIN (caller must feed frames first).
        if (!this.initialized) {
            return null;
        }
        // Clear previous packet data (this.packet is reused across calls).
        this.packet.unref();
        // If a fixed-frame-size audio encoder buffered samples, push one
        // complete chunk into the codec before polling for output.
        // NOTE(review): env_6 / __addDisposableResource / __disposeResources is
        // the TypeScript lowering of a `using` declaration — the pulled frame
        // is disposed in the finally block, and __disposeResources presumably
        // rethrows any error captured in env_6 (standard tslib behavior —
        // confirm against the tslib helpers at the top of this file).
        if (this.audioFrameBuffer?.hasFrame()) {
            const env_6 = { stack: [], error: void 0, hasError: false };
            try {
                const bufferedFrame = __addDisposableResource(env_6, this.audioFrameBuffer.pullSync(), false);
                if (bufferedFrame) {
                    this.codecContext.sendFrameSync(bufferedFrame);
                }
            }
            catch (e_6) {
                env_6.error = e_6;
                env_6.hasError = true;
            }
            finally {
                __disposeResources(env_6);
            }
        }
        // Poll the codec for an encoded packet (avcodec_receive_packet).
        const ret = this.codecContext.receivePacketSync(this.packet);
        if (ret === 0) {
            // Set packet timebase to codec timebase so downstream rescaling works.
            this.packet.timeBase = this.codecContext.timeBase;
            // Mark packet as trusted (it originates from our own encoder).
            this.packet.setFlags(AV_PKT_FLAG_TRUSTED);
            // Clone for the user: this.packet is internal and reused next call.
            const cloned = this.packet.clone();
            if (!cloned) {
                throw new Error('Failed to clone packet (out of memory)');
            }
            return cloned;
        }
        else if (ret === AVERROR_EAGAIN) {
            // Encoder needs more input frames before it can emit a packet.
            return null;
        }
        else if (ret === AVERROR_EOF) {
            // Encoder fully drained after flush.
            return EOF;
        }
        else {
            // Any other negative code is a real error; throwIfError raises,
            // so the trailing return is only for control-flow completeness.
            FFmpegError.throwIfError(ret, 'Failed to receive packet');
            return null;
        }
    }
|
|
1438
|
+
/**
|
|
1439
|
+
* Pipe encoded packets to muxer.
|
|
1440
|
+
*
|
|
1441
|
+
* @param target - Media output component to write packets to
|
|
1442
|
+
*
|
|
1443
|
+
* @param streamIndex - Stream index to write packets to
|
|
1444
|
+
*
|
|
1445
|
+
* @returns Scheduler for continued chaining
|
|
1446
|
+
*
|
|
1447
|
+
* @example
|
|
1448
|
+
* ```typescript
|
|
1449
|
+
* decoder.pipeTo(filter).pipeTo(encoder)
|
|
1450
|
+
* ```
|
|
1451
|
+
*/
|
|
1452
|
+
pipeTo(target, streamIndex) {
|
|
1453
|
+
// Start worker if not already running
|
|
1454
|
+
this.workerPromise ??= this.runWorker();
|
|
1455
|
+
// Start pipe task: encoder.outputQueue -> output
|
|
1456
|
+
this.pipeToPromise = (async () => {
|
|
1457
|
+
while (true) {
|
|
1458
|
+
const packet = await this.receiveFromQueue();
|
|
1459
|
+
if (!packet)
|
|
1460
|
+
break;
|
|
1461
|
+
await target.writePacket(packet, streamIndex);
|
|
1462
|
+
}
|
|
1463
|
+
})();
|
|
1464
|
+
// Return control without pipeTo (terminal stage)
|
|
1465
|
+
return new SchedulerControl(this);
|
|
1466
|
+
}
|
|
1467
|
+
/**
|
|
1468
|
+
* Close encoder and free resources.
|
|
1469
|
+
*
|
|
1470
|
+
* Releases codec context and internal packet buffer.
|
|
1471
|
+
* Safe to call multiple times.
|
|
1472
|
+
* Automatically called by Symbol.dispose.
|
|
1473
|
+
*
|
|
1474
|
+
* @example
|
|
1475
|
+
* ```typescript
|
|
1476
|
+
* const encoder = await Encoder.create(FF_ENCODER_LIBX264, { ... });
|
|
1477
|
+
* try {
|
|
1478
|
+
* // Use encoder
|
|
1479
|
+
* } finally {
|
|
1480
|
+
* encoder.close();
|
|
1481
|
+
* }
|
|
1482
|
+
* ```
|
|
1483
|
+
*
|
|
1484
|
+
* @see {@link Symbol.dispose} For automatic cleanup
|
|
1485
|
+
*/
|
|
1486
|
+
close() {
|
|
1487
|
+
if (this.isClosed) {
|
|
1488
|
+
return;
|
|
1489
|
+
}
|
|
1490
|
+
this.isClosed = true;
|
|
1491
|
+
// Close queues
|
|
1492
|
+
this.inputQueue.close();
|
|
1493
|
+
this.outputQueue.close();
|
|
1494
|
+
this.packet.free();
|
|
1495
|
+
this.codecContext.freeContext();
|
|
1496
|
+
this.initialized = false;
|
|
1497
|
+
}
|
|
1498
|
+
/**
|
|
1499
|
+
* Get encoder codec.
|
|
1500
|
+
*
|
|
1501
|
+
* Returns the codec used by this encoder.
|
|
1502
|
+
* Useful for checking codec capabilities and properties.
|
|
1503
|
+
*
|
|
1504
|
+
* @returns Codec instance
|
|
1505
|
+
*
|
|
1506
|
+
* @internal
|
|
1507
|
+
*
|
|
1508
|
+
* @see {@link Codec} For codec details
|
|
1509
|
+
*/
|
|
1510
|
+
getCodec() {
|
|
1511
|
+
return this.codec;
|
|
1512
|
+
}
|
|
1513
|
+
/**
|
|
1514
|
+
* Get underlying codec context.
|
|
1515
|
+
*
|
|
1516
|
+
* Returns the codec context for advanced operations.
|
|
1517
|
+
* Useful for accessing low-level codec properties and settings.
|
|
1518
|
+
* Returns null if encoder is closed or not initialized.
|
|
1519
|
+
*
|
|
1520
|
+
* @returns Codec context or null if closed/not initialized
|
|
1521
|
+
*
|
|
1522
|
+
* @internal
|
|
1523
|
+
*
|
|
1524
|
+
* @see {@link CodecContext} For context details
|
|
1525
|
+
*/
|
|
1526
|
+
getCodecContext() {
|
|
1527
|
+
return !this.isClosed && this.initialized ? this.codecContext : null;
|
|
1528
|
+
}
|
|
1529
|
+
/**
|
|
1530
|
+
* Worker loop for push-based processing.
|
|
1531
|
+
*
|
|
1532
|
+
* @internal
|
|
1533
|
+
*/
|
|
1534
|
+
    async runWorker() {
        try {
            // Outer loop — pull frames from the input queue until it closes.
            while (!this.inputQueue.isClosed) {
                // env_7 is the TypeScript lowering of a `using` declaration:
                // the received frame is disposed in the finally block below,
                // and __disposeResources presumably rethrows a captured error
                // (standard tslib behavior — confirm against the helpers).
                const env_7 = { stack: [], error: void 0, hasError: false };
                try {
                    const frame = __addDisposableResource(env_7, await this.inputQueue.receive(), false);
                    // Queue drained/closed: leave the outer loop.
                    if (!frame)
                        break;
                    // Lazily open the encoder from the first frame's parameters.
                    // ??= guards against concurrent initialization attempts.
                    if (!this.initialized) {
                        this.initializePromise ??= this.initialize(frame);
                    }
                    await this.initializePromise;
                    // Prepare frame for encoding (set quality, rescale PTS/duration,
                    // clear pict_type).
                    this.prepareFrameForEncoding(frame);
                    await this.encode(frame);
                    // Inner loop — forward every available packet downstream.
                    while (!this.outputQueue.isClosed) {
                        const packet = await this.receive();
                        if (!packet)
                            break; // Stop on EAGAIN or EOF
                        await this.outputQueue.send(packet); // Only send actual packets
                    }
                }
                catch (e_7) {
                    env_7.error = e_7;
                    env_7.hasError = true;
                }
                finally {
                    __disposeResources(env_7);
                }
            }
            // Input exhausted: flush the encoder and drain remaining packets.
            await this.flush();
            while (!this.outputQueue.isClosed) {
                const packet = await this.receive();
                if (!packet)
                    break; // Stop on EAGAIN or EOF
                await this.outputQueue.send(packet); // Only send actual packets
            }
        }
        catch (error) {
            // Propagate the error to BOTH queues so upstream producers and
            // downstream consumers both observe the failure.
            const err = error instanceof Error ? error : new Error(String(error));
            this.inputQueue?.closeWithError(err);
            this.outputQueue?.closeWithError(err);
        }
        finally {
            // Close output queue when done (no-op if already closed with error).
            this.outputQueue?.close();
        }
    }
|
|
1587
|
+
/**
|
|
1588
|
+
* Send frame to input queue or flush the pipeline.
|
|
1589
|
+
*
|
|
1590
|
+
* When frame is provided, queues it for encoding.
|
|
1591
|
+
* When null is provided, triggers flush sequence:
|
|
1592
|
+
* - Closes input queue
|
|
1593
|
+
* - Waits for worker completion
|
|
1594
|
+
* - Flushes encoder and sends remaining packets to output queue
|
|
1595
|
+
* - Closes output queue
|
|
1596
|
+
* - Waits for pipeTo task completion (writes to muxer)
|
|
1597
|
+
*
|
|
1598
|
+
* Used by scheduler system for pipeline control.
|
|
1599
|
+
*
|
|
1600
|
+
* @param frame - Frame to send, or null to flush
|
|
1601
|
+
*
|
|
1602
|
+
* @internal
|
|
1603
|
+
*/
|
|
1604
|
+
async sendToQueue(frame) {
|
|
1605
|
+
if (frame) {
|
|
1606
|
+
await this.inputQueue.send(frame);
|
|
1607
|
+
}
|
|
1608
|
+
else {
|
|
1609
|
+
// Close input queue to signal end of stream to worker
|
|
1610
|
+
this.inputQueue.close();
|
|
1611
|
+
// Wait for worker to finish processing all frames (if exists)
|
|
1612
|
+
if (this.workerPromise) {
|
|
1613
|
+
await this.workerPromise;
|
|
1614
|
+
}
|
|
1615
|
+
// Flush encoder at end
|
|
1616
|
+
await this.flush();
|
|
1617
|
+
while (true) {
|
|
1618
|
+
const packet = await this.receive();
|
|
1619
|
+
if (!packet)
|
|
1620
|
+
break; // Stop on EAGAIN or EOF
|
|
1621
|
+
await this.outputQueue.send(packet); // Only send actual packets
|
|
1622
|
+
}
|
|
1623
|
+
if (this.pipeToPromise) {
|
|
1624
|
+
await this.pipeToPromise;
|
|
1625
|
+
}
|
|
1626
|
+
}
|
|
1627
|
+
}
|
|
1628
|
+
/**
|
|
1629
|
+
* Receive packet from output queue.
|
|
1630
|
+
*
|
|
1631
|
+
* @returns Packet from output queue
|
|
1632
|
+
*
|
|
1633
|
+
* @internal
|
|
1634
|
+
*/
|
|
1635
|
+
async receiveFromQueue() {
|
|
1636
|
+
return await this.outputQueue.receive();
|
|
1637
|
+
}
|
|
1638
|
+
/**
|
|
1639
|
+
* Initialize encoder from first frame.
|
|
1640
|
+
*
|
|
1641
|
+
* Sets codec context parameters from frame properties.
|
|
1642
|
+
* Configures hardware context if present in frame.
|
|
1643
|
+
* Opens encoder with accumulated options.
|
|
1644
|
+
*
|
|
1645
|
+
* @param frame - First frame to encode
|
|
1646
|
+
*
|
|
1647
|
+
* @throws {FFmpegError} If encoder open fails
|
|
1648
|
+
*
|
|
1649
|
+
* @internal
|
|
1650
|
+
*/
|
|
1651
|
+
    async initialize(frame) {
        // Get bits_per_raw_sample from decoder if available (preserves the
        // source's effective bit depth through transcoding).
        if (this.options.decoder) {
            const decoderCtx = this.options.decoder.getCodecContext();
            if (decoderCtx && decoderCtx.bitsPerRawSample > 0) {
                this.codecContext.bitsPerRawSample = decoderCtx.bitsPerRawSample;
            }
        }
        // Get framerate from filter if available, otherwise from decoder.
        // This matches FFmpeg CLI behavior where encoder gets frame_rate_filter from FrameData.
        if (this.options.filter && frame.isVideo()) {
            const filterFrameRate = this.options.filter.frameRate;
            if (filterFrameRate) {
                this.codecContext.framerate = new Rational(filterFrameRate.num, filterFrameRate.den);
            }
        }
        // If no filter framerate, try to get one from the decoder stream.
        if ((!this.codecContext.framerate || this.codecContext.framerate.num === 0) && this.options.decoder && frame.isVideo()) {
            const decoderCtx = this.options.decoder.getCodecContext();
            if (decoderCtx?.framerate && decoderCtx.framerate.num > 0) {
                this.codecContext.framerate = decoderCtx.framerate;
            }
        }
        if (frame.isVideo()) {
            // FFmpeg CLI sets encoder time_base to 1/framerate (inverse of framerate).
            // This allows the encoder to produce sequential PTS (0, 1, 2, 3...) which
            // enables proper B-frame DTS generation (negative DTS values).
            if (this.codecContext.framerate && this.codecContext.framerate.num > 0) {
                // Use inverse of framerate (e.g., framerate=30/1 → timebase=1/30).
                this.codecContext.timeBase = new Rational(this.codecContext.framerate.den, this.codecContext.framerate.num);
            }
            else {
                // Fallback: use frame timebase if framerate not available.
                this.codecContext.timeBase = frame.timeBase;
            }
            // Copy geometry and color description from the first frame.
            this.codecContext.width = frame.width;
            this.codecContext.height = frame.height;
            this.codecContext.pixelFormat = frame.format;
            this.codecContext.sampleAspectRatio = frame.sampleAspectRatio;
            this.codecContext.colorRange = frame.colorRange;
            this.codecContext.colorPrimaries = frame.colorPrimaries;
            this.codecContext.colorTrc = frame.colorTrc;
            this.codecContext.colorSpace = frame.colorSpace;
            // Only set chroma location if unspecified (user settings win).
            if (this.codecContext.chromaLocation === AVCHROMA_LOC_UNSPECIFIED) {
                this.codecContext.chromaLocation = frame.chromaLocation;
            }
        }
        else {
            // Audio: always use frame timebase (typically 1/sample_rate) to
            // ensure correct PTS progression for audio frames.
            this.codecContext.timeBase = frame.timeBase;
            this.codecContext.sampleRate = frame.sampleRate;
            this.codecContext.sampleFormat = frame.format;
            this.codecContext.channelLayout = frame.channelLayout;
        }
        // Setup hardware acceleration with validation (frames-ctx vs device-ctx).
        this.setupHardwareAcceleration(frame);
        // AV_CODEC_FLAG_COPY_OPAQUE: copy opaque data from frames to packets if supported.
        if (this.codec.hasCapabilities(AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE)) {
            this.codecContext.setFlags(AV_CODEC_FLAG_COPY_OPAQUE);
        }
        // AV_CODEC_FLAG_FRAME_DURATION: signal that frame duration matters for timestamps.
        this.codecContext.setFlags(AV_CODEC_FLAG_FRAME_DURATION);
        // Open the codec with the accumulated options (avcodec_open2).
        const openRet = await this.codecContext.open2(this.codec, this.opts);
        if (openRet < 0) {
            // Free the context before throwing so no native memory leaks.
            this.codecContext.freeContext();
            FFmpegError.throwIfError(openRet, 'Failed to open encoder');
        }
        // Check if encoder requires a fixed frame size (e.g., Opus, AAC, MP3).
        // If so, create an AudioFrameBuffer to automatically chunk frames.
        if (frame.isAudio() && this.codecContext.frameSize > 0) {
            this.audioFrameBuffer = AudioFrameBuffer.create(this.codecContext.frameSize, this.codecContext.sampleFormat, this.codecContext.sampleRate, this.codecContext.channelLayout, this.codecContext.channels);
        }
        this.initialized = true;
    }
|
|
1728
|
+
/**
|
|
1729
|
+
* Initialize encoder from first frame synchronously.
|
|
1730
|
+
* Synchronous version of initialize.
|
|
1731
|
+
*
|
|
1732
|
+
* Sets codec context parameters from frame properties.
|
|
1733
|
+
* Configures hardware context if present in frame.
|
|
1734
|
+
* Opens encoder with accumulated options.
|
|
1735
|
+
*
|
|
1736
|
+
* @param frame - First frame to encode
|
|
1737
|
+
*
|
|
1738
|
+
* @throws {FFmpegError} If encoder open fails
|
|
1739
|
+
*
|
|
1740
|
+
* @internal
|
|
1741
|
+
*
|
|
1742
|
+
* @see {@link initialize} For async version
|
|
1743
|
+
*/
|
|
1744
|
+
    initializeSync(frame) {
        // Get bits_per_raw_sample from decoder if available (preserves the
        // source's effective bit depth through transcoding).
        if (this.options.decoder) {
            const decoderCtx = this.options.decoder.getCodecContext();
            if (decoderCtx && decoderCtx.bitsPerRawSample > 0) {
                this.codecContext.bitsPerRawSample = decoderCtx.bitsPerRawSample;
            }
        }
        // Get framerate from filter if available, otherwise from decoder.
        // This matches FFmpeg CLI behavior where encoder gets frame_rate_filter from FrameData.
        if (this.options.filter && frame.isVideo()) {
            const filterFrameRate = this.options.filter.frameRate;
            if (filterFrameRate) {
                this.codecContext.framerate = new Rational(filterFrameRate.num, filterFrameRate.den);
            }
        }
        // If no filter framerate, try to get one from the decoder stream.
        if ((!this.codecContext.framerate || this.codecContext.framerate.num === 0) && this.options.decoder && frame.isVideo()) {
            const decoderCtx = this.options.decoder.getCodecContext();
            if (decoderCtx?.framerate && decoderCtx.framerate.num > 0) {
                this.codecContext.framerate = decoderCtx.framerate;
            }
        }
        if (frame.isVideo()) {
            // FFmpeg CLI sets encoder time_base to 1/framerate (inverse of framerate).
            // This allows the encoder to produce sequential PTS (0, 1, 2, 3...) which
            // enables proper B-frame DTS generation (negative DTS values).
            if (this.codecContext.framerate && this.codecContext.framerate.num > 0) {
                // Use inverse of framerate (e.g., framerate=30/1 → timebase=1/30).
                this.codecContext.timeBase = new Rational(this.codecContext.framerate.den, this.codecContext.framerate.num);
            }
            else {
                // Fallback: use frame timebase if framerate not available.
                this.codecContext.timeBase = frame.timeBase;
            }
            // Copy geometry and color description from the first frame.
            this.codecContext.width = frame.width;
            this.codecContext.height = frame.height;
            this.codecContext.pixelFormat = frame.format;
            this.codecContext.sampleAspectRatio = frame.sampleAspectRatio;
            this.codecContext.colorRange = frame.colorRange;
            this.codecContext.colorPrimaries = frame.colorPrimaries;
            this.codecContext.colorTrc = frame.colorTrc;
            this.codecContext.colorSpace = frame.colorSpace;
            // Only set chroma location if unspecified (user settings win).
            if (this.codecContext.chromaLocation === AVCHROMA_LOC_UNSPECIFIED) {
                this.codecContext.chromaLocation = frame.chromaLocation;
            }
        }
        else {
            // Audio: always use frame timebase (typically 1/sample_rate) to
            // ensure correct PTS progression for audio frames.
            this.codecContext.timeBase = frame.timeBase;
            this.codecContext.sampleRate = frame.sampleRate;
            this.codecContext.sampleFormat = frame.format;
            this.codecContext.channelLayout = frame.channelLayout;
        }
        // Setup hardware acceleration with validation (frames-ctx vs device-ctx).
        this.setupHardwareAcceleration(frame);
        // Set codec flags.
        // AV_CODEC_FLAG_COPY_OPAQUE: copy opaque data from frames to packets if supported.
        if (this.codec.hasCapabilities(AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE)) {
            this.codecContext.setFlags(AV_CODEC_FLAG_COPY_OPAQUE);
        }
        // AV_CODEC_FLAG_FRAME_DURATION: signal that frame duration matters for timestamps.
        this.codecContext.setFlags(AV_CODEC_FLAG_FRAME_DURATION);
        // Open the codec with the accumulated options (avcodec_open2).
        const openRet = this.codecContext.open2Sync(this.codec, this.opts);
        if (openRet < 0) {
            // Free the context before throwing so no native memory leaks.
            this.codecContext.freeContext();
            FFmpegError.throwIfError(openRet, 'Failed to open encoder');
        }
        // Check if encoder requires a fixed frame size (e.g., Opus, AAC, MP3).
        // If so, create an AudioFrameBuffer to automatically chunk frames.
        if (frame.isAudio() && this.codecContext.frameSize > 0) {
            this.audioFrameBuffer = AudioFrameBuffer.create(this.codecContext.frameSize, this.codecContext.sampleFormat, this.codecContext.sampleRate, this.codecContext.channelLayout, this.codecContext.channels);
        }
        this.initialized = true;
    }
|
|
1822
|
+
/**
|
|
1823
|
+
* Setup hardware acceleration for encoder.
|
|
1824
|
+
*
|
|
1825
|
+
* Implements FFmpeg's hw_device_setup_for_encode logic.
|
|
1826
|
+
* Validates hardware frames context format and codec support.
|
|
1827
|
+
* Falls back to device context if frames context is incompatible.
|
|
1828
|
+
*
|
|
1829
|
+
* @param frame - Frame to get hardware context from
|
|
1830
|
+
*
|
|
1831
|
+
* @internal
|
|
1832
|
+
*/
|
|
1833
|
+
setupHardwareAcceleration(frame) {
|
|
1834
|
+
if (!frame.hwFramesCtx) {
|
|
1835
|
+
// Software encoding
|
|
1836
|
+
return;
|
|
1837
|
+
}
|
|
1838
|
+
const hwFramesCtx = frame.hwFramesCtx;
|
|
1839
|
+
const framesFormat = hwFramesCtx.format;
|
|
1840
|
+
const encoderFormat = this.codecContext.pixelFormat;
|
|
1841
|
+
// Check 1: Format validation
|
|
1842
|
+
if (framesFormat !== encoderFormat) {
|
|
1843
|
+
this.codecContext.hwDeviceCtx = hwFramesCtx.deviceRef;
|
|
1844
|
+
this.codecContext.hwFramesCtx = null;
|
|
1845
|
+
return;
|
|
1846
|
+
}
|
|
1847
|
+
// Check 2: Codec supports HW_FRAMES_CTX?
|
|
1848
|
+
let supportsFramesCtx = false;
|
|
1849
|
+
for (let i = 0;; i++) {
|
|
1850
|
+
const config = this.codec.getHwConfig(i);
|
|
1851
|
+
if (!config)
|
|
1852
|
+
break;
|
|
1853
|
+
// Check if codec supports HW_FRAMES_CTX method
|
|
1854
|
+
if (config.methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) {
|
|
1855
|
+
// Check if pixel format matches or is unspecified
|
|
1856
|
+
if (config.pixFmt === AV_PIX_FMT_NONE || config.pixFmt === encoderFormat) {
|
|
1857
|
+
supportsFramesCtx = true;
|
|
1858
|
+
break;
|
|
1859
|
+
}
|
|
1860
|
+
}
|
|
1861
|
+
}
|
|
1862
|
+
if (supportsFramesCtx) {
|
|
1863
|
+
// Use hw_frames_ctx (best performance - zero copy)
|
|
1864
|
+
this.codecContext.hwFramesCtx = hwFramesCtx;
|
|
1865
|
+
this.codecContext.hwDeviceCtx = hwFramesCtx.deviceRef;
|
|
1866
|
+
}
|
|
1867
|
+
else {
|
|
1868
|
+
// Fallback to hw_device_ctx (still uses HW, but may copy)
|
|
1869
|
+
// Check if codec supports HW_DEVICE_CTX as fallback
|
|
1870
|
+
let supportsDeviceCtx = false;
|
|
1871
|
+
for (let i = 0;; i++) {
|
|
1872
|
+
const config = this.codec.getHwConfig(i);
|
|
1873
|
+
if (!config)
|
|
1874
|
+
break;
|
|
1875
|
+
if (config.methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) {
|
|
1876
|
+
supportsDeviceCtx = true;
|
|
1877
|
+
break;
|
|
1878
|
+
}
|
|
1879
|
+
}
|
|
1880
|
+
if (supportsDeviceCtx) {
|
|
1881
|
+
this.codecContext.hwDeviceCtx = hwFramesCtx.deviceRef;
|
|
1882
|
+
this.codecContext.hwFramesCtx = null;
|
|
1883
|
+
}
|
|
1884
|
+
else {
|
|
1885
|
+
// No hardware support at all - software encoding
|
|
1886
|
+
this.codecContext.hwDeviceCtx = null;
|
|
1887
|
+
this.codecContext.hwFramesCtx = null;
|
|
1888
|
+
}
|
|
1889
|
+
}
|
|
1890
|
+
}
|
|
1891
|
+
/**
|
|
1892
|
+
* Prepare frame for encoding.
|
|
1893
|
+
*
|
|
1894
|
+
* Implements FFmpeg's frame_encode() pre-encoding logic:
|
|
1895
|
+
* 1. Video: Sets frame.quality from encoder's globalQuality (like -qscale)
|
|
1896
|
+
* 2. Audio: Validates channel count consistency for encoders without PARAM_CHANGE capability
|
|
1897
|
+
*
|
|
1898
|
+
* This matches FFmpeg CLI behavior where these properties are automatically managed.
|
|
1899
|
+
*
|
|
1900
|
+
* @param frame - Frame to prepare for encoding
|
|
1901
|
+
*
|
|
1902
|
+
* @throws {Error} If audio channel count changed and encoder doesn't support parameter changes
|
|
1903
|
+
*
|
|
1904
|
+
* @internal
|
|
1905
|
+
*/
|
|
1906
|
+
prepareFrameForEncoding(frame) {
|
|
1907
|
+
// Clear pict_type - encoder will determine frame types based on its own settings
|
|
1908
|
+
// Input stream's frame type hints are irrelevant when re-encoding
|
|
1909
|
+
frame.pictType = AV_PICTURE_TYPE_NONE;
|
|
1910
|
+
// Adjust frame PTS and timebase to encoder timebase
|
|
1911
|
+
// This matches FFmpeg's adjust_frame_pts_to_encoder_tb() behavior which:
|
|
1912
|
+
// 1. Converts PTS from frame's timebase to encoder's timebase (av_rescale_q)
|
|
1913
|
+
// 2. Sets frame->time_base = tb_dst (so encoder gets correct timebase)
|
|
1914
|
+
// Note: prepareFrameForEncoding is always called AFTER initialize(),
|
|
1915
|
+
// so codecContext.timeBase is already set correctly:
|
|
1916
|
+
// - Video: 1/framerate (if available)
|
|
1917
|
+
// - Audio: frame.timeBase from first frame (typically 1/sample_rate)
|
|
1918
|
+
const encoderTimebase = this.codecContext.timeBase;
|
|
1919
|
+
const oldTimebase = frame.timeBase;
|
|
1920
|
+
// IMPORTANT: Calculate duration BEFORE converting frame timebase
|
|
1921
|
+
// This matches FFmpeg's video_sync_process() which calculates:
|
|
1922
|
+
// duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out)
|
|
1923
|
+
// We need the OLD timebase to convert duration properly
|
|
1924
|
+
let frameDuration;
|
|
1925
|
+
if (frame.duration && frame.duration > 0n) {
|
|
1926
|
+
// Convert duration from frame timebase to encoder timebase
|
|
1927
|
+
// This ensures encoder gets correct frame duration for timestamps
|
|
1928
|
+
frameDuration = avRescaleQ(frame.duration, oldTimebase, encoderTimebase);
|
|
1929
|
+
}
|
|
1930
|
+
else {
|
|
1931
|
+
// Default to 1 (constant frame rate behavior)
|
|
1932
|
+
// Matches FFmpeg's CFR mode: frame->duration = 1
|
|
1933
|
+
frameDuration = 1n;
|
|
1934
|
+
}
|
|
1935
|
+
if (frame.pts !== null && frame.pts !== undefined) {
|
|
1936
|
+
// Convert PTS to encoder timebase
|
|
1937
|
+
frame.pts = avRescaleQ(frame.pts, oldTimebase, encoderTimebase);
|
|
1938
|
+
// IMPORTANT: Set frame timebase to encoder timebase
|
|
1939
|
+
// FFmpeg does this in adjust_frame_pts_to_encoder_tb(): frame->time_base = tb_dst
|
|
1940
|
+
// This ensures encoder gets frames with correct timebase (1/framerate for video, 1/sample_rate for audio)
|
|
1941
|
+
frame.timeBase = encoderTimebase;
|
|
1942
|
+
}
|
|
1943
|
+
// Set frame duration in encoder timebase
|
|
1944
|
+
// This matches FFmpeg's video_sync_process() which sets frame->duration
|
|
1945
|
+
// based on vsync_method (CFR: 1, VFR: calculated, PASSTHROUGH: calculated)
|
|
1946
|
+
// Since we don't have automatic filter like FFmpeg, we always set it here
|
|
1947
|
+
frame.duration = frameDuration;
|
|
1948
|
+
if (this.codecContext.codecType === AVMEDIA_TYPE_VIDEO) {
|
|
1949
|
+
// Video: Set frame quality from encoder's global quality
|
|
1950
|
+
// Only set if encoder has globalQuality configured and frame doesn't already have quality set
|
|
1951
|
+
if (this.codecContext.globalQuality > 0 && frame.quality <= 0) {
|
|
1952
|
+
frame.quality = this.codecContext.globalQuality;
|
|
1953
|
+
}
|
|
1954
|
+
}
|
|
1955
|
+
else if (this.codecContext.codecType === AVMEDIA_TYPE_AUDIO) {
|
|
1956
|
+
// Audio: Validate channel count consistency
|
|
1957
|
+
// If encoder doesn't support AV_CODEC_CAP_PARAM_CHANGE, channel count must remain constant
|
|
1958
|
+
const supportsParamChange = this.codec.hasCapabilities(AV_CODEC_CAP_PARAM_CHANGE);
|
|
1959
|
+
if (!supportsParamChange) {
|
|
1960
|
+
const encoderChannels = this.codecContext.channelLayout.nbChannels;
|
|
1961
|
+
const frameChannels = frame.channelLayout?.nbChannels ?? 0;
|
|
1962
|
+
if (encoderChannels !== frameChannels) {
|
|
1963
|
+
throw new Error(`Audio channel count changed (${encoderChannels} -> ${frameChannels}) and encoder '${this.codec.name}' does not support parameter changes`);
|
|
1964
|
+
}
|
|
1965
|
+
}
|
|
1966
|
+
}
|
|
1967
|
+
}
|
|
1968
|
+
/**
|
|
1969
|
+
* Dispose of encoder.
|
|
1970
|
+
*
|
|
1971
|
+
* Implements Disposable interface for automatic cleanup.
|
|
1972
|
+
* Equivalent to calling close().
|
|
1973
|
+
*
|
|
1974
|
+
* @example
|
|
1975
|
+
* ```typescript
|
|
1976
|
+
* {
|
|
1977
|
+
* using encoder = await Encoder.create(FF_ENCODER_LIBX264, { ... });
|
|
1978
|
+
* // Encode frames...
|
|
1979
|
+
* } // Automatically closed
|
|
1980
|
+
* ```
|
|
1981
|
+
*
|
|
1982
|
+
* @see {@link close} For manual cleanup
|
|
1983
|
+
*/
|
|
1984
|
+
[Symbol.dispose]() {
|
|
1985
|
+
this.close();
|
|
1986
|
+
}
|
|
1987
|
+
}
|
|
1988
|
+
//# sourceMappingURL=encoder.js.map
|