@revizly/node-av 5.2.2-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/BUILD_LINUX.md +61 -0
- package/LICENSE.md +22 -0
- package/README.md +662 -0
- package/build_mac_local.sh +69 -0
- package/dist/api/audio-frame-buffer.d.ts +205 -0
- package/dist/api/audio-frame-buffer.js +287 -0
- package/dist/api/audio-frame-buffer.js.map +1 -0
- package/dist/api/bitstream-filter.d.ts +820 -0
- package/dist/api/bitstream-filter.js +1242 -0
- package/dist/api/bitstream-filter.js.map +1 -0
- package/dist/api/constants.d.ts +44 -0
- package/dist/api/constants.js +45 -0
- package/dist/api/constants.js.map +1 -0
- package/dist/api/data/test_av1.ivf +0 -0
- package/dist/api/data/test_h264.h264 +0 -0
- package/dist/api/data/test_hevc.h265 +0 -0
- package/dist/api/data/test_mjpeg.mjpeg +0 -0
- package/dist/api/data/test_vp8.ivf +0 -0
- package/dist/api/data/test_vp9.ivf +0 -0
- package/dist/api/decoder.d.ts +1088 -0
- package/dist/api/decoder.js +1775 -0
- package/dist/api/decoder.js.map +1 -0
- package/dist/api/demuxer.d.ts +1219 -0
- package/dist/api/demuxer.js +2081 -0
- package/dist/api/demuxer.js.map +1 -0
- package/dist/api/device.d.ts +586 -0
- package/dist/api/device.js +961 -0
- package/dist/api/device.js.map +1 -0
- package/dist/api/encoder.d.ts +1132 -0
- package/dist/api/encoder.js +1988 -0
- package/dist/api/encoder.js.map +1 -0
- package/dist/api/filter-complex.d.ts +821 -0
- package/dist/api/filter-complex.js +1604 -0
- package/dist/api/filter-complex.js.map +1 -0
- package/dist/api/filter-presets.d.ts +1286 -0
- package/dist/api/filter-presets.js +2152 -0
- package/dist/api/filter-presets.js.map +1 -0
- package/dist/api/filter.d.ts +1234 -0
- package/dist/api/filter.js +1976 -0
- package/dist/api/filter.js.map +1 -0
- package/dist/api/fmp4-stream.d.ts +426 -0
- package/dist/api/fmp4-stream.js +739 -0
- package/dist/api/fmp4-stream.js.map +1 -0
- package/dist/api/hardware.d.ts +651 -0
- package/dist/api/hardware.js +1260 -0
- package/dist/api/hardware.js.map +1 -0
- package/dist/api/index.d.ts +17 -0
- package/dist/api/index.js +32 -0
- package/dist/api/index.js.map +1 -0
- package/dist/api/io-stream.d.ts +307 -0
- package/dist/api/io-stream.js +282 -0
- package/dist/api/io-stream.js.map +1 -0
- package/dist/api/muxer.d.ts +957 -0
- package/dist/api/muxer.js +2002 -0
- package/dist/api/muxer.js.map +1 -0
- package/dist/api/pipeline.d.ts +607 -0
- package/dist/api/pipeline.js +1145 -0
- package/dist/api/pipeline.js.map +1 -0
- package/dist/api/utilities/async-queue.d.ts +120 -0
- package/dist/api/utilities/async-queue.js +211 -0
- package/dist/api/utilities/async-queue.js.map +1 -0
- package/dist/api/utilities/audio-sample.d.ts +117 -0
- package/dist/api/utilities/audio-sample.js +112 -0
- package/dist/api/utilities/audio-sample.js.map +1 -0
- package/dist/api/utilities/channel-layout.d.ts +76 -0
- package/dist/api/utilities/channel-layout.js +80 -0
- package/dist/api/utilities/channel-layout.js.map +1 -0
- package/dist/api/utilities/electron-shared-texture.d.ts +328 -0
- package/dist/api/utilities/electron-shared-texture.js +503 -0
- package/dist/api/utilities/electron-shared-texture.js.map +1 -0
- package/dist/api/utilities/image.d.ts +207 -0
- package/dist/api/utilities/image.js +213 -0
- package/dist/api/utilities/image.js.map +1 -0
- package/dist/api/utilities/index.d.ts +12 -0
- package/dist/api/utilities/index.js +25 -0
- package/dist/api/utilities/index.js.map +1 -0
- package/dist/api/utilities/media-type.d.ts +49 -0
- package/dist/api/utilities/media-type.js +53 -0
- package/dist/api/utilities/media-type.js.map +1 -0
- package/dist/api/utilities/pixel-format.d.ts +89 -0
- package/dist/api/utilities/pixel-format.js +97 -0
- package/dist/api/utilities/pixel-format.js.map +1 -0
- package/dist/api/utilities/sample-format.d.ts +129 -0
- package/dist/api/utilities/sample-format.js +141 -0
- package/dist/api/utilities/sample-format.js.map +1 -0
- package/dist/api/utilities/scheduler.d.ts +138 -0
- package/dist/api/utilities/scheduler.js +98 -0
- package/dist/api/utilities/scheduler.js.map +1 -0
- package/dist/api/utilities/streaming.d.ts +186 -0
- package/dist/api/utilities/streaming.js +309 -0
- package/dist/api/utilities/streaming.js.map +1 -0
- package/dist/api/utilities/timestamp.d.ts +193 -0
- package/dist/api/utilities/timestamp.js +206 -0
- package/dist/api/utilities/timestamp.js.map +1 -0
- package/dist/api/utilities/whisper-model.d.ts +310 -0
- package/dist/api/utilities/whisper-model.js +528 -0
- package/dist/api/utilities/whisper-model.js.map +1 -0
- package/dist/api/utils.d.ts +19 -0
- package/dist/api/utils.js +39 -0
- package/dist/api/utils.js.map +1 -0
- package/dist/api/whisper.d.ts +324 -0
- package/dist/api/whisper.js +362 -0
- package/dist/api/whisper.js.map +1 -0
- package/dist/constants/channel-layouts.d.ts +53 -0
- package/dist/constants/channel-layouts.js +57 -0
- package/dist/constants/channel-layouts.js.map +1 -0
- package/dist/constants/constants.d.ts +2325 -0
- package/dist/constants/constants.js +1887 -0
- package/dist/constants/constants.js.map +1 -0
- package/dist/constants/decoders.d.ts +633 -0
- package/dist/constants/decoders.js +641 -0
- package/dist/constants/decoders.js.map +1 -0
- package/dist/constants/encoders.d.ts +295 -0
- package/dist/constants/encoders.js +308 -0
- package/dist/constants/encoders.js.map +1 -0
- package/dist/constants/hardware.d.ts +26 -0
- package/dist/constants/hardware.js +27 -0
- package/dist/constants/hardware.js.map +1 -0
- package/dist/constants/index.d.ts +5 -0
- package/dist/constants/index.js +6 -0
- package/dist/constants/index.js.map +1 -0
- package/dist/ffmpeg/index.d.ts +99 -0
- package/dist/ffmpeg/index.js +115 -0
- package/dist/ffmpeg/index.js.map +1 -0
- package/dist/ffmpeg/utils.d.ts +31 -0
- package/dist/ffmpeg/utils.js +68 -0
- package/dist/ffmpeg/utils.js.map +1 -0
- package/dist/ffmpeg/version.d.ts +6 -0
- package/dist/ffmpeg/version.js +7 -0
- package/dist/ffmpeg/version.js.map +1 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +9 -0
- package/dist/index.js.map +1 -0
- package/dist/lib/audio-fifo.d.ts +399 -0
- package/dist/lib/audio-fifo.js +431 -0
- package/dist/lib/audio-fifo.js.map +1 -0
- package/dist/lib/binding.d.ts +228 -0
- package/dist/lib/binding.js +60 -0
- package/dist/lib/binding.js.map +1 -0
- package/dist/lib/bitstream-filter-context.d.ts +379 -0
- package/dist/lib/bitstream-filter-context.js +441 -0
- package/dist/lib/bitstream-filter-context.js.map +1 -0
- package/dist/lib/bitstream-filter.d.ts +140 -0
- package/dist/lib/bitstream-filter.js +154 -0
- package/dist/lib/bitstream-filter.js.map +1 -0
- package/dist/lib/codec-context.d.ts +1071 -0
- package/dist/lib/codec-context.js +1354 -0
- package/dist/lib/codec-context.js.map +1 -0
- package/dist/lib/codec-parameters.d.ts +616 -0
- package/dist/lib/codec-parameters.js +761 -0
- package/dist/lib/codec-parameters.js.map +1 -0
- package/dist/lib/codec-parser.d.ts +201 -0
- package/dist/lib/codec-parser.js +213 -0
- package/dist/lib/codec-parser.js.map +1 -0
- package/dist/lib/codec.d.ts +586 -0
- package/dist/lib/codec.js +713 -0
- package/dist/lib/codec.js.map +1 -0
- package/dist/lib/device.d.ts +291 -0
- package/dist/lib/device.js +324 -0
- package/dist/lib/device.js.map +1 -0
- package/dist/lib/dictionary.d.ts +333 -0
- package/dist/lib/dictionary.js +372 -0
- package/dist/lib/dictionary.js.map +1 -0
- package/dist/lib/error.d.ts +242 -0
- package/dist/lib/error.js +303 -0
- package/dist/lib/error.js.map +1 -0
- package/dist/lib/fifo.d.ts +416 -0
- package/dist/lib/fifo.js +453 -0
- package/dist/lib/fifo.js.map +1 -0
- package/dist/lib/filter-context.d.ts +712 -0
- package/dist/lib/filter-context.js +789 -0
- package/dist/lib/filter-context.js.map +1 -0
- package/dist/lib/filter-graph-segment.d.ts +160 -0
- package/dist/lib/filter-graph-segment.js +171 -0
- package/dist/lib/filter-graph-segment.js.map +1 -0
- package/dist/lib/filter-graph.d.ts +641 -0
- package/dist/lib/filter-graph.js +704 -0
- package/dist/lib/filter-graph.js.map +1 -0
- package/dist/lib/filter-inout.d.ts +198 -0
- package/dist/lib/filter-inout.js +257 -0
- package/dist/lib/filter-inout.js.map +1 -0
- package/dist/lib/filter.d.ts +243 -0
- package/dist/lib/filter.js +272 -0
- package/dist/lib/filter.js.map +1 -0
- package/dist/lib/format-context.d.ts +1254 -0
- package/dist/lib/format-context.js +1379 -0
- package/dist/lib/format-context.js.map +1 -0
- package/dist/lib/frame-utils.d.ts +116 -0
- package/dist/lib/frame-utils.js +98 -0
- package/dist/lib/frame-utils.js.map +1 -0
- package/dist/lib/frame.d.ts +1222 -0
- package/dist/lib/frame.js +1435 -0
- package/dist/lib/frame.js.map +1 -0
- package/dist/lib/hardware-device-context.d.ts +362 -0
- package/dist/lib/hardware-device-context.js +383 -0
- package/dist/lib/hardware-device-context.js.map +1 -0
- package/dist/lib/hardware-frames-context.d.ts +419 -0
- package/dist/lib/hardware-frames-context.js +477 -0
- package/dist/lib/hardware-frames-context.js.map +1 -0
- package/dist/lib/index.d.ts +35 -0
- package/dist/lib/index.js +60 -0
- package/dist/lib/index.js.map +1 -0
- package/dist/lib/input-format.d.ts +249 -0
- package/dist/lib/input-format.js +306 -0
- package/dist/lib/input-format.js.map +1 -0
- package/dist/lib/io-context.d.ts +696 -0
- package/dist/lib/io-context.js +769 -0
- package/dist/lib/io-context.js.map +1 -0
- package/dist/lib/log.d.ts +174 -0
- package/dist/lib/log.js +184 -0
- package/dist/lib/log.js.map +1 -0
- package/dist/lib/native-types.d.ts +946 -0
- package/dist/lib/native-types.js +2 -0
- package/dist/lib/native-types.js.map +1 -0
- package/dist/lib/option.d.ts +927 -0
- package/dist/lib/option.js +1583 -0
- package/dist/lib/option.js.map +1 -0
- package/dist/lib/output-format.d.ts +180 -0
- package/dist/lib/output-format.js +213 -0
- package/dist/lib/output-format.js.map +1 -0
- package/dist/lib/packet.d.ts +501 -0
- package/dist/lib/packet.js +590 -0
- package/dist/lib/packet.js.map +1 -0
- package/dist/lib/rational.d.ts +251 -0
- package/dist/lib/rational.js +278 -0
- package/dist/lib/rational.js.map +1 -0
- package/dist/lib/software-resample-context.d.ts +552 -0
- package/dist/lib/software-resample-context.js +592 -0
- package/dist/lib/software-resample-context.js.map +1 -0
- package/dist/lib/software-scale-context.d.ts +344 -0
- package/dist/lib/software-scale-context.js +366 -0
- package/dist/lib/software-scale-context.js.map +1 -0
- package/dist/lib/stream.d.ts +379 -0
- package/dist/lib/stream.js +526 -0
- package/dist/lib/stream.js.map +1 -0
- package/dist/lib/sync-queue.d.ts +179 -0
- package/dist/lib/sync-queue.js +197 -0
- package/dist/lib/sync-queue.js.map +1 -0
- package/dist/lib/types.d.ts +34 -0
- package/dist/lib/types.js +2 -0
- package/dist/lib/types.js.map +1 -0
- package/dist/lib/utilities.d.ts +1127 -0
- package/dist/lib/utilities.js +1225 -0
- package/dist/lib/utilities.js.map +1 -0
- package/dist/utils/electron.d.ts +49 -0
- package/dist/utils/electron.js +63 -0
- package/dist/utils/electron.js.map +1 -0
- package/dist/utils/index.d.ts +4 -0
- package/dist/utils/index.js +5 -0
- package/dist/utils/index.js.map +1 -0
- package/install/check.js +121 -0
- package/install/ffmpeg.js +66 -0
- package/jellyfin-ffmpeg.patch +181 -0
- package/package.json +129 -0
|
@@ -0,0 +1,1775 @@
|
|
|
1
|
+
// TypeScript-emitted helper for `using`/`await using`: records a resource and
// its dispose function on env.stack so __disposeResources can run them later.
// Returns the resource unchanged so it can be assigned through.
var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
    // Nullish resources carry nothing to dispose; async ones still get a
    // placeholder entry so disposal ordering is preserved.
    if (value === null || value === void 0) {
        if (async) {
            env.stack.push({ async: true });
        }
        return value;
    }
    if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
    var dispose;
    var inner;
    if (async) {
        if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
        dispose = value[Symbol.asyncDispose];
    }
    // Fall back to the sync dispose method (also the async-over-sync case).
    if (dispose === void 0) {
        if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
        dispose = value[Symbol.dispose];
        if (async) inner = dispose;
    }
    if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
    // Wrap a sync dispose used in async position so a throw becomes a rejection.
    if (inner) {
        dispose = function () {
            try {
                inner.call(this);
            } catch (e) {
                return Promise.reject(e);
            }
        };
    }
    env.stack.push({ value: value, dispose: dispose, async: async });
    return value;
};
|
|
23
|
+
// TypeScript-emitted helper: pops env.stack and disposes each recorded
// resource in reverse order, chaining any errors via SuppressedError.
var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
    return function (env) {
        // Record an error; an earlier error is preserved as the suppressed one.
        function fail(e) {
            env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
            env.hasError = true;
        }
        // s is a bit-flag state: 1 = a value-less async marker entry was seen,
        // 2 = at least one async dispose has been awaited.
        var r, s = 0;
        function next() {
            while (r = env.stack.pop()) {
                try {
                    // After an async marker (s === 1), defer the sync dispose to a
                    // microtask so ordering matches the async disposal sequence.
                    if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
                    if (r.dispose) {
                        var result = r.dispose.call(r.value);
                        // Await async disposals; a rejection is recorded and
                        // disposal of the remaining stack continues.
                        if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
                    }
                    else s |= 1;
                }
                catch (e) {
                    fail(e);
                }
            }
            // Settle as a promise when async work is pending; otherwise
            // throw or return synchronously.
            if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
            if (env.hasError) throw env.error;
        }
        return next();
    };
})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
    // Fallback for runtimes without the global SuppressedError.
    var e = new Error(message);
    return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
});
|
|
53
|
+
import { AV_CODEC_FLAG_COPY_OPAQUE, AV_FRAME_FLAG_CORRUPT, AV_NOPTS_VALUE, AV_ROUND_UP, AVERROR_DECODER_NOT_FOUND, AVERROR_EAGAIN, AVERROR_EOF, AVERROR_INVALIDDATA, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO, EOF, INT_MAX, } from '../constants/constants.js';
|
|
54
|
+
import { CodecContext } from '../lib/codec-context.js';
|
|
55
|
+
import { Codec } from '../lib/codec.js';
|
|
56
|
+
import { Dictionary } from '../lib/dictionary.js';
|
|
57
|
+
import { FFmpegError } from '../lib/error.js';
|
|
58
|
+
import { Frame } from '../lib/frame.js';
|
|
59
|
+
import { Packet } from '../lib/packet.js';
|
|
60
|
+
import { Rational } from '../lib/rational.js';
|
|
61
|
+
import { avGcd, avInvQ, avMulQ, avRescaleDelta, avRescaleQ, avRescaleQRnd } from '../lib/utilities.js';
|
|
62
|
+
import { FRAME_THREAD_QUEUE_SIZE, PACKET_THREAD_QUEUE_SIZE } from './constants.js';
|
|
63
|
+
import { AsyncQueue } from './utilities/async-queue.js';
|
|
64
|
+
import { Scheduler } from './utilities/scheduler.js';
|
|
65
|
+
/**
|
|
66
|
+
* High-level decoder for audio and video streams.
|
|
67
|
+
*
|
|
68
|
+
* Provides a simplified interface for decoding media streams from packets to frames.
|
|
69
|
+
* Handles codec initialization, hardware acceleration setup, and frame management.
|
|
70
|
+
* Supports both synchronous packet-by-packet decoding and async iteration over frames.
|
|
71
|
+
* Essential component in media processing pipelines for converting compressed data to raw frames.
|
|
72
|
+
*
|
|
73
|
+
* @example
|
|
74
|
+
* ```typescript
|
|
75
|
+
* import { Demuxer, Decoder } from 'node-av/api';
|
|
76
|
+
*
|
|
77
|
+
* // Open media and create decoder
|
|
78
|
+
* await using input = await Demuxer.open('video.mp4');
|
|
79
|
+
* using decoder = await Decoder.create(input.video());
|
|
80
|
+
*
|
|
81
|
+
* // Decode frames
|
|
82
|
+
* for await (const frame of decoder.frames(input.packets())) {
|
|
83
|
+
* console.log(`Decoded frame: ${frame.width}x${frame.height}`);
|
|
84
|
+
* frame.free();
|
|
85
|
+
* }
|
|
86
|
+
* ```
|
|
87
|
+
*
|
|
88
|
+
* @example
|
|
89
|
+
* ```typescript
|
|
90
|
+
* import { HardwareContext } from 'node-av/api';
|
|
91
|
+
* import { AV_HWDEVICE_TYPE_CUDA } from 'node-av/constants';
|
|
92
|
+
*
|
|
93
|
+
* // Setup hardware acceleration
|
|
94
|
+
* const hw = HardwareContext.create(AV_HWDEVICE_TYPE_CUDA);
|
|
95
|
+
* using decoder = await Decoder.create(stream, { hardware: hw });
|
|
96
|
+
*
|
|
97
|
+
* // Frames will be decoded on GPU
|
|
98
|
+
* for await (const frame of decoder.frames(packets)) {
|
|
99
|
+
* // frame.hwFramesCtx contains GPU memory reference
|
|
100
|
+
* }
|
|
101
|
+
* ```
|
|
102
|
+
*
|
|
103
|
+
* @see {@link Encoder} For encoding frames to packets
|
|
104
|
+
* @see {@link Demuxer} For reading media files
|
|
105
|
+
* @see {@link HardwareContext} For GPU acceleration
|
|
106
|
+
*/
|
|
107
|
+
export class Decoder {
    // Low-level FFmpeg codec context backing this decoder.
    codecContext;
    // Codec selected for decoding (software or hardware variant).
    codec;
    // Scratch frame allocated in the constructor.
    frame;
    // Media stream being decoded; decode() ignores packets from other streams.
    stream;
    // Decoders are pre-initialized from stream parameters, so this starts true.
    initialized = true;
    // Set once the decoder is closed; decode() becomes a no-op afterwards.
    isClosed = false;
    // Options passed to create()/createSync().
    options;
    // Frame tracking for PTS/duration estimation
    lastFramePts = AV_NOPTS_VALUE;
    lastFrameDurationEst = 0n;
    lastFrameTb;
    // Audio-specific frame tracking
    lastFrameSampleRate = 0;
    lastFilterInRescaleDelta = AV_NOPTS_VALUE;
    // Worker pattern for push-based processing
    inputQueue;
    outputQueue;
    workerPromise = null;
    // NOTE(review): presumably the downstream component for pipeTo-style
    // chaining — confirm against pipeline usage.
    nextComponent = null;
    pipeToPromise = null;
    // Optional AbortSignal; decode() checks it before doing any work.
    signal;
|
|
129
|
+
/**
|
|
130
|
+
* @param codecContext - Configured codec context
|
|
131
|
+
*
|
|
132
|
+
* @param codec - Codec being used
|
|
133
|
+
*
|
|
134
|
+
* @param stream - Media stream being decoded
|
|
135
|
+
*
|
|
136
|
+
* @param options - Decoder options
|
|
137
|
+
*
|
|
138
|
+
* Use {@link create} factory method
|
|
139
|
+
*
|
|
140
|
+
* @internal
|
|
141
|
+
*/
|
|
142
|
+
constructor(codecContext, codec, stream, options = {}) {
|
|
143
|
+
this.codecContext = codecContext;
|
|
144
|
+
this.codec = codec;
|
|
145
|
+
this.stream = stream;
|
|
146
|
+
this.options = options;
|
|
147
|
+
this.frame = new Frame();
|
|
148
|
+
this.frame.alloc();
|
|
149
|
+
this.lastFrameTb = new Rational(0, 1);
|
|
150
|
+
this.inputQueue = new AsyncQueue(PACKET_THREAD_QUEUE_SIZE);
|
|
151
|
+
this.outputQueue = new AsyncQueue(FRAME_THREAD_QUEUE_SIZE);
|
|
152
|
+
}
|
|
153
|
+
static async create(stream, optionsOrCodec, maybeOptions) {
|
|
154
|
+
// Parse arguments
|
|
155
|
+
let options = {};
|
|
156
|
+
let explicitCodec;
|
|
157
|
+
if (optionsOrCodec !== undefined) {
|
|
158
|
+
// Check if first argument is a codec or options
|
|
159
|
+
if (typeof optionsOrCodec === 'string' || // FFDecoderCodec
|
|
160
|
+
typeof optionsOrCodec === 'number' || // AVCodecID
|
|
161
|
+
optionsOrCodec instanceof Codec // Codec instance
|
|
162
|
+
) {
|
|
163
|
+
// First argument is a codec
|
|
164
|
+
explicitCodec = optionsOrCodec;
|
|
165
|
+
options = maybeOptions ?? {};
|
|
166
|
+
}
|
|
167
|
+
else {
|
|
168
|
+
// First argument is options
|
|
169
|
+
options = optionsOrCodec;
|
|
170
|
+
}
|
|
171
|
+
}
|
|
172
|
+
let codec = null;
|
|
173
|
+
// If explicit codec provided, use it
|
|
174
|
+
if (explicitCodec !== undefined) {
|
|
175
|
+
if (typeof explicitCodec === 'object' && 'id' in explicitCodec) {
|
|
176
|
+
// Already a Codec instance
|
|
177
|
+
codec = explicitCodec;
|
|
178
|
+
}
|
|
179
|
+
else if (typeof explicitCodec === 'string') {
|
|
180
|
+
// FFDecoderCodec string
|
|
181
|
+
codec = Codec.findDecoderByName(explicitCodec);
|
|
182
|
+
if (!codec) {
|
|
183
|
+
throw new FFmpegError(AVERROR_DECODER_NOT_FOUND);
|
|
184
|
+
}
|
|
185
|
+
}
|
|
186
|
+
else {
|
|
187
|
+
// AVCodecID number
|
|
188
|
+
codec = Codec.findDecoder(explicitCodec);
|
|
189
|
+
if (!codec) {
|
|
190
|
+
throw new FFmpegError(AVERROR_DECODER_NOT_FOUND);
|
|
191
|
+
}
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
else {
|
|
195
|
+
// No explicit codec - use auto-detection logic
|
|
196
|
+
// If hardware acceleration requested, try to find hardware decoder first
|
|
197
|
+
if (options.hardware) {
|
|
198
|
+
codec = options.hardware.getDecoderCodec(stream.codecpar.codecId);
|
|
199
|
+
if (!codec) {
|
|
200
|
+
// No hardware decoder available, fall back to software
|
|
201
|
+
options.hardware = undefined;
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
// If no hardware decoder or no hardware requested, use software decoder
|
|
205
|
+
if (!codec) {
|
|
206
|
+
codec = Codec.findDecoder(stream.codecpar.codecId);
|
|
207
|
+
if (!codec) {
|
|
208
|
+
throw new FFmpegError(AVERROR_DECODER_NOT_FOUND);
|
|
209
|
+
}
|
|
210
|
+
}
|
|
211
|
+
}
|
|
212
|
+
// Allocate and configure codec context
|
|
213
|
+
const codecContext = new CodecContext();
|
|
214
|
+
codecContext.allocContext3(codec);
|
|
215
|
+
// Copy codec parameters to context
|
|
216
|
+
const ret = codecContext.parametersToContext(stream.codecpar);
|
|
217
|
+
if (ret < 0) {
|
|
218
|
+
codecContext.freeContext();
|
|
219
|
+
FFmpegError.throwIfError(ret, 'Failed to copy codec parameters');
|
|
220
|
+
}
|
|
221
|
+
// Set packet time base
|
|
222
|
+
codecContext.pktTimebase = stream.timeBase;
|
|
223
|
+
// Check if this decoder supports hardware acceleration
|
|
224
|
+
// Only apply hardware acceleration if the decoder supports it
|
|
225
|
+
// Silently ignore hardware for software decoders
|
|
226
|
+
const isHWDecoder = codec.isHardwareAcceleratedDecoder();
|
|
227
|
+
if (isHWDecoder && options.hardware) {
|
|
228
|
+
codecContext.hwDeviceCtx = options.hardware.deviceContext;
|
|
229
|
+
// Set hardware pixel format
|
|
230
|
+
codecContext.setHardwarePixelFormat(options.hardware.devicePixelFormat);
|
|
231
|
+
// Set extra_hw_frames if specified
|
|
232
|
+
if (options.extraHWFrames !== undefined && options.extraHWFrames > 0) {
|
|
233
|
+
codecContext.extraHWFrames = options.extraHWFrames;
|
|
234
|
+
}
|
|
235
|
+
}
|
|
236
|
+
else {
|
|
237
|
+
options.hardware = undefined;
|
|
238
|
+
}
|
|
239
|
+
options.exitOnError = options.exitOnError ?? true;
|
|
240
|
+
// Enable COPY_OPAQUE flag to copy packet.opaque to frame.opaque
|
|
241
|
+
codecContext.setFlags(AV_CODEC_FLAG_COPY_OPAQUE);
|
|
242
|
+
// Thread parameters need to be set before open2
|
|
243
|
+
codecContext.threadCount = options.threadCount ?? 0;
|
|
244
|
+
if (options.threadType !== undefined) {
|
|
245
|
+
codecContext.threadType = options.threadType;
|
|
246
|
+
}
|
|
247
|
+
const opts = options.options ? Dictionary.fromObject(options.options) : undefined;
|
|
248
|
+
// Open codec
|
|
249
|
+
const openRet = await codecContext.open2(codec, opts);
|
|
250
|
+
if (openRet < 0) {
|
|
251
|
+
codecContext.freeContext();
|
|
252
|
+
FFmpegError.throwIfError(openRet, 'Failed to open codec');
|
|
253
|
+
}
|
|
254
|
+
// Adjust extra_hw_frames for queuing
|
|
255
|
+
// This is done AFTER open2 because the decoder validates extra_hw_frames during open
|
|
256
|
+
if (isHWDecoder && options.hardware) {
|
|
257
|
+
const currentExtraFrames = codecContext.extraHWFrames;
|
|
258
|
+
if (currentExtraFrames >= 0) {
|
|
259
|
+
codecContext.extraHWFrames = currentExtraFrames + FRAME_THREAD_QUEUE_SIZE;
|
|
260
|
+
}
|
|
261
|
+
else {
|
|
262
|
+
codecContext.extraHWFrames = 1;
|
|
263
|
+
}
|
|
264
|
+
}
|
|
265
|
+
const decoder = new Decoder(codecContext, codec, stream, options);
|
|
266
|
+
if (options.signal) {
|
|
267
|
+
options.signal.throwIfAborted();
|
|
268
|
+
decoder.signal = options.signal;
|
|
269
|
+
}
|
|
270
|
+
return decoder;
|
|
271
|
+
}
|
|
272
|
+
static createSync(stream, optionsOrCodec, maybeOptions) {
|
|
273
|
+
// Parse arguments
|
|
274
|
+
let options = {};
|
|
275
|
+
let explicitCodec;
|
|
276
|
+
if (optionsOrCodec !== undefined) {
|
|
277
|
+
// Check if first argument is a codec or options
|
|
278
|
+
if (typeof optionsOrCodec === 'string' || // FFDecoderCodec
|
|
279
|
+
typeof optionsOrCodec === 'number' || // AVCodecID
|
|
280
|
+
optionsOrCodec instanceof Codec // Codec instance
|
|
281
|
+
) {
|
|
282
|
+
// First argument is a codec
|
|
283
|
+
explicitCodec = optionsOrCodec;
|
|
284
|
+
options = maybeOptions ?? {};
|
|
285
|
+
}
|
|
286
|
+
else {
|
|
287
|
+
// First argument is options
|
|
288
|
+
options = optionsOrCodec;
|
|
289
|
+
}
|
|
290
|
+
}
|
|
291
|
+
let codec = null;
|
|
292
|
+
// If explicit codec provided, use it
|
|
293
|
+
if (explicitCodec !== undefined) {
|
|
294
|
+
if (typeof explicitCodec === 'object' && 'id' in explicitCodec) {
|
|
295
|
+
// Already a Codec instance
|
|
296
|
+
codec = explicitCodec;
|
|
297
|
+
}
|
|
298
|
+
else if (typeof explicitCodec === 'string') {
|
|
299
|
+
// FFDecoderCodec string
|
|
300
|
+
codec = Codec.findDecoderByName(explicitCodec);
|
|
301
|
+
if (!codec) {
|
|
302
|
+
throw new FFmpegError(AVERROR_DECODER_NOT_FOUND);
|
|
303
|
+
}
|
|
304
|
+
}
|
|
305
|
+
else {
|
|
306
|
+
// AVCodecID number
|
|
307
|
+
codec = Codec.findDecoder(explicitCodec);
|
|
308
|
+
if (!codec) {
|
|
309
|
+
throw new FFmpegError(AVERROR_DECODER_NOT_FOUND);
|
|
310
|
+
}
|
|
311
|
+
}
|
|
312
|
+
}
|
|
313
|
+
else {
|
|
314
|
+
// No explicit codec - use auto-detection logic
|
|
315
|
+
// If hardware acceleration requested, try to find hardware decoder first
|
|
316
|
+
if (options.hardware) {
|
|
317
|
+
codec = options.hardware.getDecoderCodec(stream.codecpar.codecId);
|
|
318
|
+
if (!codec) {
|
|
319
|
+
// No hardware decoder available, fall back to software
|
|
320
|
+
options.hardware = undefined;
|
|
321
|
+
}
|
|
322
|
+
}
|
|
323
|
+
// If no hardware decoder or no hardware requested, use software decoder
|
|
324
|
+
if (!codec) {
|
|
325
|
+
codec = Codec.findDecoder(stream.codecpar.codecId);
|
|
326
|
+
if (!codec) {
|
|
327
|
+
throw new FFmpegError(AVERROR_DECODER_NOT_FOUND);
|
|
328
|
+
}
|
|
329
|
+
}
|
|
330
|
+
}
|
|
331
|
+
// Allocate and configure codec context
|
|
332
|
+
const codecContext = new CodecContext();
|
|
333
|
+
codecContext.allocContext3(codec);
|
|
334
|
+
// Copy codec parameters to context
|
|
335
|
+
const ret = codecContext.parametersToContext(stream.codecpar);
|
|
336
|
+
if (ret < 0) {
|
|
337
|
+
codecContext.freeContext();
|
|
338
|
+
FFmpegError.throwIfError(ret, 'Failed to copy codec parameters');
|
|
339
|
+
}
|
|
340
|
+
// Set packet time base
|
|
341
|
+
codecContext.pktTimebase = stream.timeBase;
|
|
342
|
+
// Check if this decoder supports hardware acceleration
|
|
343
|
+
// Only apply hardware acceleration if the decoder supports it
|
|
344
|
+
// Silently ignore hardware for software decoders
|
|
345
|
+
const isHWDecoder = codec.isHardwareAcceleratedDecoder();
|
|
346
|
+
if (isHWDecoder && options.hardware) {
|
|
347
|
+
codecContext.hwDeviceCtx = options.hardware.deviceContext;
|
|
348
|
+
// Set hardware pixel format and get_format callback
|
|
349
|
+
codecContext.setHardwarePixelFormat(options.hardware.devicePixelFormat);
|
|
350
|
+
// Set extra_hw_frames if specified
|
|
351
|
+
if (options.extraHWFrames !== undefined && options.extraHWFrames > 0) {
|
|
352
|
+
codecContext.extraHWFrames = options.extraHWFrames;
|
|
353
|
+
}
|
|
354
|
+
}
|
|
355
|
+
else {
|
|
356
|
+
options.hardware = undefined;
|
|
357
|
+
}
|
|
358
|
+
options.exitOnError = options.exitOnError ?? true;
|
|
359
|
+
// Enable COPY_OPAQUE flag to copy packet.opaque to frame.opaque
|
|
360
|
+
// codecContext.setFlags(AV_CODEC_FLAG_COPY_OPAQUE);
|
|
361
|
+
// Thread parameters need to be set before open2
|
|
362
|
+
codecContext.threadCount = options.threadCount ?? 0;
|
|
363
|
+
if (options.threadType !== undefined) {
|
|
364
|
+
codecContext.threadType = options.threadType;
|
|
365
|
+
}
|
|
366
|
+
const opts = options.options ? Dictionary.fromObject(options.options) : undefined;
|
|
367
|
+
// Open codec synchronously
|
|
368
|
+
const openRet = codecContext.open2Sync(codec, opts);
|
|
369
|
+
if (openRet < 0) {
|
|
370
|
+
codecContext.freeContext();
|
|
371
|
+
FFmpegError.throwIfError(openRet, 'Failed to open codec');
|
|
372
|
+
}
|
|
373
|
+
// Adjust extra_hw_frames for queuing
|
|
374
|
+
// This is done AFTER open2 because the decoder validates extra_hw_frames during open
|
|
375
|
+
if (isHWDecoder && options.hardware) {
|
|
376
|
+
const currentExtraFrames = codecContext.extraHWFrames;
|
|
377
|
+
if (currentExtraFrames >= 0) {
|
|
378
|
+
codecContext.extraHWFrames = currentExtraFrames + FRAME_THREAD_QUEUE_SIZE;
|
|
379
|
+
}
|
|
380
|
+
else {
|
|
381
|
+
codecContext.extraHWFrames = 1;
|
|
382
|
+
}
|
|
383
|
+
}
|
|
384
|
+
const decoder = new Decoder(codecContext, codec, stream, options);
|
|
385
|
+
if (options.signal) {
|
|
386
|
+
options.signal.throwIfAborted();
|
|
387
|
+
decoder.signal = options.signal;
|
|
388
|
+
}
|
|
389
|
+
return decoder;
|
|
390
|
+
}
|
|
391
|
+
/**
|
|
392
|
+
* Check if decoder is open.
|
|
393
|
+
*
|
|
394
|
+
* @returns true if decoder is open and ready
|
|
395
|
+
*
|
|
396
|
+
* @example
|
|
397
|
+
* ```typescript
|
|
398
|
+
* if (decoder.isDecoderOpen) {
|
|
399
|
+
* const frame = await decoder.decode(packet);
|
|
400
|
+
* }
|
|
401
|
+
* ```
|
|
402
|
+
*/
|
|
403
|
+
get isDecoderOpen() {
|
|
404
|
+
return !this.isClosed;
|
|
405
|
+
}
|
|
406
|
+
/**
|
|
407
|
+
* Check if decoder has been initialized.
|
|
408
|
+
*
|
|
409
|
+
* Returns true if decoder is initialized (true by default for decoders).
|
|
410
|
+
* Decoders are pre-initialized from stream parameters.
|
|
411
|
+
*
|
|
412
|
+
* @returns true if decoder has been initialized
|
|
413
|
+
*
|
|
414
|
+
* @example
|
|
415
|
+
* ```typescript
|
|
416
|
+
* if (decoder.isDecoderInitialized) {
|
|
417
|
+
* console.log('Decoder is ready to process frames');
|
|
418
|
+
* }
|
|
419
|
+
* ```
|
|
420
|
+
*/
|
|
421
|
+
get isDecoderInitialized() {
|
|
422
|
+
return this.initialized;
|
|
423
|
+
}
|
|
424
|
+
/**
|
|
425
|
+
* Check if decoder uses hardware acceleration.
|
|
426
|
+
*
|
|
427
|
+
* @returns true if hardware-accelerated
|
|
428
|
+
*
|
|
429
|
+
* @example
|
|
430
|
+
* ```typescript
|
|
431
|
+
* if (decoder.isHardware()) {
|
|
432
|
+
* console.log('Using GPU acceleration');
|
|
433
|
+
* }
|
|
434
|
+
* ```
|
|
435
|
+
*
|
|
436
|
+
* @see {@link HardwareContext} For hardware setup
|
|
437
|
+
*/
|
|
438
|
+
isHardware() {
|
|
439
|
+
return !!this.options.hardware && this.codec.isHardwareAcceleratedDecoder();
|
|
440
|
+
}
|
|
441
|
+
/**
|
|
442
|
+
* Check if decoder is ready for processing.
|
|
443
|
+
*
|
|
444
|
+
* @returns true if initialized and ready
|
|
445
|
+
*
|
|
446
|
+
* @example
|
|
447
|
+
* ```typescript
|
|
448
|
+
* if (decoder.isReady()) {
|
|
449
|
+
* const frame = await decoder.decode(packet);
|
|
450
|
+
* }
|
|
451
|
+
* ```
|
|
452
|
+
*/
|
|
453
|
+
isReady() {
|
|
454
|
+
return this.initialized && !this.isClosed;
|
|
455
|
+
}
|
|
456
|
+
/**
|
|
457
|
+
* Send a packet to the decoder.
|
|
458
|
+
*
|
|
459
|
+
* Sends a compressed packet to the decoder for decoding.
|
|
460
|
+
* Does not return decoded frames - use {@link receive} to retrieve frames.
|
|
461
|
+
* A single packet can produce zero, one, or multiple frames depending on codec buffering.
|
|
462
|
+
* Automatically manages decoder state and error recovery.
|
|
463
|
+
*
|
|
464
|
+
* **Important**: This method only SENDS the packet to the decoder.
|
|
465
|
+
* You must call {@link receive} separately (potentially multiple times) to get decoded frames.
|
|
466
|
+
*
|
|
467
|
+
* Direct mapping to avcodec_send_packet().
|
|
468
|
+
*
|
|
469
|
+
* @param packet - Compressed packet to send to decoder, or null to flush
|
|
470
|
+
*
|
|
471
|
+
* @throws {FFmpegError} If sending packet fails
|
|
472
|
+
*
|
|
473
|
+
* @example
|
|
474
|
+
* ```typescript
|
|
475
|
+
* // Send packet and receive frames
|
|
476
|
+
* await decoder.decode(packet);
|
|
477
|
+
*
|
|
478
|
+
* // Receive all available frames
|
|
479
|
+
* while (true) {
|
|
480
|
+
* const frame = await decoder.receive();
|
|
481
|
+
* if (!frame) break;
|
|
482
|
+
* console.log(`Decoded frame with PTS: ${frame.pts}`);
|
|
483
|
+
* frame.free();
|
|
484
|
+
* }
|
|
485
|
+
* ```
|
|
486
|
+
*
|
|
487
|
+
* @example
|
|
488
|
+
* ```typescript
|
|
489
|
+
* for await (const packet of input.packets()) {
|
|
490
|
+
* // packet is null at end of stream - automatically flushes decoder
|
|
491
|
+
* await decoder.decode(packet);
|
|
492
|
+
*
|
|
493
|
+
* // Receive available frames
|
|
494
|
+
* let frame;
|
|
495
|
+
* while ((frame = await decoder.receive())) {
|
|
496
|
+
* await processFrame(frame);
|
|
497
|
+
* frame.free();
|
|
498
|
+
* }
|
|
499
|
+
* }
|
|
500
|
+
* ```
|
|
501
|
+
*
|
|
502
|
+
* @see {@link receive} For receiving decoded frames
|
|
503
|
+
* @see {@link decodeAll} For combined send+receive operation
|
|
504
|
+
* @see {@link frames} For automatic packet iteration
|
|
505
|
+
* @see {@link flush} For end-of-stream handling
|
|
506
|
+
* @see {@link decodeSync} For synchronous version
|
|
507
|
+
*/
|
|
508
|
+
async decode(packet) {
|
|
509
|
+
this.signal?.throwIfAborted();
|
|
510
|
+
if (this.isClosed) {
|
|
511
|
+
return;
|
|
512
|
+
}
|
|
513
|
+
// Null packet = flush decoder
|
|
514
|
+
if (packet === null) {
|
|
515
|
+
await this.flush();
|
|
516
|
+
return;
|
|
517
|
+
}
|
|
518
|
+
if (packet.streamIndex !== this.stream.index) {
|
|
519
|
+
return;
|
|
520
|
+
}
|
|
521
|
+
// Skip 0-sized packets
|
|
522
|
+
if (packet.size === 0) {
|
|
523
|
+
return;
|
|
524
|
+
}
|
|
525
|
+
// Send packet to decoder
|
|
526
|
+
const sendRet = await this.codecContext.sendPacket(packet);
|
|
527
|
+
// EAGAIN during send_packet is a decoder bug (FFmpeg treats this as AVERROR_BUG)
|
|
528
|
+
// We read all decoded frames with receive() until done, so decoder should never be full
|
|
529
|
+
if (sendRet === AVERROR_EAGAIN) {
|
|
530
|
+
FFmpegError.throwIfError(sendRet, 'Decoder returned EAGAIN on send - this is a decoder bug');
|
|
531
|
+
}
|
|
532
|
+
// Handle send errors
|
|
533
|
+
if (sendRet < 0 && sendRet !== AVERROR_EOF) {
|
|
534
|
+
if (this.options.exitOnError) {
|
|
535
|
+
FFmpegError.throwIfError(sendRet, 'Failed to send packet to decoder');
|
|
536
|
+
}
|
|
537
|
+
// exitOnError=false: Continue to receive loop to drain any buffered frames
|
|
538
|
+
}
|
|
539
|
+
}
|
|
540
|
+
/**
|
|
541
|
+
* Send a packet to the decoder synchronously.
|
|
542
|
+
* Synchronous version of decode.
|
|
543
|
+
*
|
|
544
|
+
* Sends a compressed packet to the decoder for decoding.
|
|
545
|
+
* Does not return decoded frames - use {@link receiveSync} to retrieve frames.
|
|
546
|
+
* A single packet can produce zero, one, or multiple frames depending on codec buffering.
|
|
547
|
+
* Automatically manages decoder state and error recovery.
|
|
548
|
+
*
|
|
549
|
+
* **Important**: This method only SENDS the packet to the decoder.
|
|
550
|
+
* You must call {@link receiveSync} separately (potentially multiple times) to get decoded frames.
|
|
551
|
+
*
|
|
552
|
+
* Direct mapping to avcodec_send_packet().
|
|
553
|
+
*
|
|
554
|
+
* @param packet - Compressed packet to send to decoder, or null to flush
|
|
555
|
+
*
|
|
556
|
+
* @throws {FFmpegError} If sending packet fails
|
|
557
|
+
*
|
|
558
|
+
* @example
|
|
559
|
+
* ```typescript
|
|
560
|
+
* // Send packet and receive frames
|
|
561
|
+
* decoder.decodeSync(packet);
|
|
562
|
+
*
|
|
563
|
+
* // Receive all available frames
|
|
564
|
+
* while (true) {
|
|
565
|
+
* const frame = decoder.receiveSync();
|
|
566
|
+
* if (!frame) break;
|
|
567
|
+
* console.log(`Decoded frame with PTS: ${frame.pts}`);
|
|
568
|
+
* frame.free();
|
|
569
|
+
* }
|
|
570
|
+
* ```
|
|
571
|
+
*
|
|
572
|
+
* @example
|
|
573
|
+
* ```typescript
|
|
574
|
+
* for (const packet of input.packetsSync()) {
|
|
575
|
+
* // packet is null at end of stream - automatically flushes decoder
|
|
576
|
+
* decoder.decodeSync(packet);
|
|
577
|
+
*
|
|
578
|
+
* // Receive available frames
|
|
579
|
+
* let frame;
|
|
580
|
+
* while ((frame = decoder.receiveSync())) {
|
|
581
|
+
* processFrame(frame);
|
|
582
|
+
* frame.free();
|
|
583
|
+
* }
|
|
584
|
+
* }
|
|
585
|
+
* ```
|
|
586
|
+
*
|
|
587
|
+
* @see {@link receiveSync} For receiving decoded frames
|
|
588
|
+
* @see {@link decodeAllSync} For combined send+receive operation
|
|
589
|
+
* @see {@link framesSync} For automatic packet iteration
|
|
590
|
+
* @see {@link flushSync} For end-of-stream handling
|
|
591
|
+
* @see {@link decode} For async version
|
|
592
|
+
*/
|
|
593
|
+
decodeSync(packet) {
|
|
594
|
+
if (this.isClosed) {
|
|
595
|
+
return;
|
|
596
|
+
}
|
|
597
|
+
// Null packet = flush decoder
|
|
598
|
+
if (packet === null) {
|
|
599
|
+
this.flushSync();
|
|
600
|
+
return;
|
|
601
|
+
}
|
|
602
|
+
if (packet.streamIndex !== this.stream.index) {
|
|
603
|
+
return;
|
|
604
|
+
}
|
|
605
|
+
// Skip 0-sized packets
|
|
606
|
+
if (packet.size === 0) {
|
|
607
|
+
return;
|
|
608
|
+
}
|
|
609
|
+
// Send packet to decoder
|
|
610
|
+
const sendRet = this.codecContext.sendPacketSync(packet);
|
|
611
|
+
// EAGAIN during send_packet is a decoder bug (FFmpeg treats this as AVERROR_BUG)
|
|
612
|
+
// We read all decoded frames with receive() until done, so decoder should never be full
|
|
613
|
+
if (sendRet === AVERROR_EAGAIN) {
|
|
614
|
+
FFmpegError.throwIfError(AVERROR_EAGAIN, 'Decoder returned EAGAIN on send - this is a decoder bug');
|
|
615
|
+
}
|
|
616
|
+
// Handle send errors
|
|
617
|
+
if (sendRet < 0 && sendRet !== AVERROR_EOF) {
|
|
618
|
+
if (this.options.exitOnError) {
|
|
619
|
+
FFmpegError.throwIfError(sendRet, 'Failed to send packet to decoder');
|
|
620
|
+
}
|
|
621
|
+
// exitOnError=false: Continue to receive loop to drain any buffered frames
|
|
622
|
+
}
|
|
623
|
+
}
|
|
624
|
+
/**
|
|
625
|
+
* Decode a packet to frames.
|
|
626
|
+
*
|
|
627
|
+
* Sends a packet to the decoder and receives all available decoded frames.
|
|
628
|
+
* Returns array of frames - may be empty if decoder needs more data.
|
|
629
|
+
* One packet can produce zero, one, or multiple frames depending on codec.
|
|
630
|
+
* Automatically manages decoder state and error recovery.
|
|
631
|
+
*
|
|
632
|
+
* Direct mapping to avcodec_send_packet() and avcodec_receive_frame().
|
|
633
|
+
*
|
|
634
|
+
* @param packet - Compressed packet to decode
|
|
635
|
+
*
|
|
636
|
+
* @returns Array of decoded frames (empty if more data needed or decoder is closed)
|
|
637
|
+
*
|
|
638
|
+
* @throws {FFmpegError} If decoding fails
|
|
639
|
+
*
|
|
640
|
+
* @example
|
|
641
|
+
* ```typescript
|
|
642
|
+
* const frames = await decoder.decodeAll(packet);
|
|
643
|
+
* for (const frame of frames) {
|
|
644
|
+
* console.log(`Decoded frame with PTS: ${frame.pts}`);
|
|
645
|
+
* frame.free();
|
|
646
|
+
* }
|
|
647
|
+
* ```
|
|
648
|
+
*
|
|
649
|
+
* @example
|
|
650
|
+
* ```typescript
|
|
651
|
+
* for await (const packet of input.packets()) {
|
|
652
|
+
* const frames = await decoder.decodeAll(packet);
|
|
653
|
+
* for (const frame of frames) {
|
|
654
|
+
* await processFrame(frame);
|
|
655
|
+
* frame.free();
|
|
656
|
+
* }
|
|
657
|
+
* packet.free();
|
|
658
|
+
* }
|
|
659
|
+
* ```
|
|
660
|
+
*
|
|
661
|
+
* @see {@link decode} For single packet decoding
|
|
662
|
+
* @see {@link frames} For automatic packet iteration
|
|
663
|
+
* @see {@link flush} For end-of-stream handling
|
|
664
|
+
* @see {@link decodeAllSync} For synchronous version
|
|
665
|
+
*/
|
|
666
|
+
async decodeAll(packet) {
|
|
667
|
+
this.signal?.throwIfAborted();
|
|
668
|
+
await this.decode(packet);
|
|
669
|
+
// Receive all available frames
|
|
670
|
+
const frames = [];
|
|
671
|
+
while (true) {
|
|
672
|
+
const remaining = await this.receive();
|
|
673
|
+
if (!remaining)
|
|
674
|
+
break;
|
|
675
|
+
frames.push(remaining);
|
|
676
|
+
}
|
|
677
|
+
return frames;
|
|
678
|
+
}
|
|
679
|
+
/**
|
|
680
|
+
* Decode a packet to frames synchronously.
|
|
681
|
+
* Synchronous version of decodeAll.
|
|
682
|
+
*
|
|
683
|
+
* Sends packet to decoder and receives all available decoded frames.
|
|
684
|
+
* Returns array of frames - may be empty if decoder needs more data.
|
|
685
|
+
* One packet can produce zero, one, or multiple frames depending on codec.
|
|
686
|
+
*
|
|
687
|
+
* @param packet - Compressed packet to decode
|
|
688
|
+
*
|
|
689
|
+
* @returns Array of decoded frames (empty if more data needed or decoder is closed)
|
|
690
|
+
*
|
|
691
|
+
* @throws {FFmpegError} If decoding fails
|
|
692
|
+
*
|
|
693
|
+
* @example
|
|
694
|
+
* ```typescript
|
|
695
|
+
* const frames = decoder.decodeAllSync(packet);
|
|
696
|
+
* for (const frame of frames) {
|
|
697
|
+
* console.log(`Decoded: ${frame.width}x${frame.height}`);
|
|
698
|
+
* frame.free();
|
|
699
|
+
* }
|
|
700
|
+
     * ```
     *
|
|
701
|
+
* @example
|
|
702
|
+
* ```typescript
|
|
703
|
+
* for (const packet of input.packetsSync()) {
|
|
704
|
+
     * const frames = decoder.decodeAllSync(packet);
|
|
705
|
+
* for (const frame of frames) {
|
|
706
|
+
* processFrame(frame);
|
|
707
|
+
* frame.free();
|
|
708
|
+
* }
|
|
709
|
+
* packet.free();
|
|
710
|
+
* }
|
|
711
|
+
* ```
|
|
712
|
+
*
|
|
713
|
+
* @see {@link decodeSync} For single packet decoding
|
|
714
|
+
* @see {@link framesSync} For automatic packet iteration
|
|
715
|
+
* @see {@link flushSync} For end-of-stream handling
|
|
716
|
+
* @see {@link decodeAll} For async version
|
|
717
|
+
*/
|
|
718
|
+
decodeAllSync(packet) {
|
|
719
|
+
this.decodeSync(packet);
|
|
720
|
+
// Receive all available frames
|
|
721
|
+
const frames = [];
|
|
722
|
+
while (true) {
|
|
723
|
+
const remaining = this.receiveSync();
|
|
724
|
+
if (!remaining)
|
|
725
|
+
break;
|
|
726
|
+
frames.push(remaining);
|
|
727
|
+
}
|
|
728
|
+
return frames;
|
|
729
|
+
}
|
|
730
|
+
/**
|
|
731
|
+
* Decode packet stream to frame stream.
|
|
732
|
+
*
|
|
733
|
+
* High-level async generator for complete decoding pipeline.
|
|
734
|
+
* Decoder is only flushed when EOF (null) signal is explicitly received.
|
|
735
|
+
* Primary interface for stream-based decoding.
|
|
736
|
+
*
|
|
737
|
+
* **EOF Handling:**
|
|
738
|
+
* - Send null to flush decoder and get remaining buffered frames
|
|
739
|
+
* - Generator yields null after flushing when null is received
|
|
740
|
+
* - No automatic flushing - decoder stays open until EOF or close()
|
|
741
|
+
*
|
|
742
|
+
* @param packets - Async iterable of packets, single packet, or null to flush
|
|
743
|
+
*
|
|
744
|
+
* @yields {Frame | null} Decoded frames, followed by null when explicitly flushed
|
|
745
|
+
*
|
|
746
|
+
* @throws {Error} If decoder is closed
|
|
747
|
+
*
|
|
748
|
+
* @throws {FFmpegError} If decoding fails
|
|
749
|
+
*
|
|
750
|
+
* @example
|
|
751
|
+
* ```typescript
|
|
752
|
+
* // Stream of packets with automatic EOF propagation
|
|
753
|
+
* await using input = await Demuxer.open('video.mp4');
|
|
754
|
+
* using decoder = await Decoder.create(input.video());
|
|
755
|
+
*
|
|
756
|
+
* for await (const frame of decoder.frames(input.packets())) {
|
|
757
|
+
* if (frame === null) {
|
|
758
|
+
* console.log('Decoding complete');
|
|
759
|
+
* break;
|
|
760
|
+
* }
|
|
761
|
+
* console.log(`Frame: ${frame.width}x${frame.height}`);
|
|
762
|
+
* frame.free();
|
|
763
|
+
* }
|
|
764
|
+
* ```
|
|
765
|
+
*
|
|
766
|
+
* @example
|
|
767
|
+
* ```typescript
|
|
768
|
+
* // Single packet (no automatic flush)
|
|
769
|
+
* for await (const frame of decoder.frames(singlePacket)) {
|
|
770
|
+
* await encoder.encode(frame);
|
|
771
|
+
* frame.free();
|
|
772
|
+
* }
|
|
773
|
+
* // Decoder still has buffered frames - send null to flush
|
|
774
|
+
* for await (const frame of decoder.frames(null)) {
|
|
775
|
+
* if (frame === null) break;
|
|
776
|
+
* await encoder.encode(frame);
|
|
777
|
+
* frame.free();
|
|
778
|
+
* }
|
|
779
|
+
* ```
|
|
780
|
+
*
|
|
781
|
+
* @example
|
|
782
|
+
* ```typescript
|
|
783
|
+
* // Explicit flush with EOF
|
|
784
|
+
* for await (const frame of decoder.frames(null)) {
|
|
785
|
+
* if (frame === null) {
|
|
786
|
+
* console.log('All buffered frames flushed');
|
|
787
|
+
* break;
|
|
788
|
+
* }
|
|
789
|
+
* console.log('Buffered frame:', frame.pts);
|
|
790
|
+
* frame.free();
|
|
791
|
+
* }
|
|
792
|
+
* ```
|
|
793
|
+
*
|
|
794
|
+
* @see {@link decode} For single packet decoding
|
|
795
|
+
* @see {@link Demuxer.packets} For packet source
|
|
796
|
+
* @see {@link framesSync} For sync version
|
|
797
|
+
*/
|
|
798
|
+
    async *frames(packets) {
        const self = this;
        // Decode one packet, then drain every frame it produced.
        // (bind is redundant given the `self` closure; kept from compilation)
        const processPacket = async function* (packet) {
            await self.decode(packet);
            while (true) {
                const frame = await self.receive();
                if (!frame)
                    break;
                yield frame;
            }
        }.bind(this);
        // Flush buffered frames, then yield a trailing null as the EOF marker.
        const finalize = async function* () {
            for await (const remaining of self.flushFrames()) {
                yield remaining;
            }
            yield null;
        }.bind(this);
        // Explicit EOF signal: flush and stop.
        if (packets === null) {
            yield* finalize();
            return;
        }
        // Single packet: decode it, but do NOT flush afterwards.
        if (packets instanceof Packet) {
            yield* processPacket(packets);
            return;
        }
        // Async iterable of packets: each packet is treated as a disposable
        // resource (env_1 bookkeeping is a compiled `using` declaration).
        for await (const packet_1 of packets) {
            const env_1 = { stack: [], error: void 0, hasError: false };
            try {
                const packet = __addDisposableResource(env_1, packet_1, false);
                this.signal?.throwIfAborted();
                // A null packet inside the stream also means EOF.
                if (packet === null) {
                    yield* finalize();
                    return;
                }
                yield* processPacket(packet);
            }
            catch (e_1) {
                env_1.error = e_1;
                env_1.hasError = true;
            }
            finally {
                // Dispose the packet resource (tslib helper; presumably rethrows
                // a captured error — confirm against tslib semantics).
                __disposeResources(env_1);
            }
        }
    }
|
|
843
|
+
/**
|
|
844
|
+
* Decode packet stream to frame stream synchronously.
|
|
845
|
+
* Synchronous version of frames.
|
|
846
|
+
*
|
|
847
|
+
     * High-level sync generator for complete decoding pipeline.
|
|
848
|
+
* Decoder is only flushed when EOF (null) signal is explicitly received.
|
|
849
|
+
* Primary interface for stream-based decoding.
|
|
850
|
+
*
|
|
851
|
+
* **EOF Handling:**
|
|
852
|
+
* - Send null to flush decoder and get remaining buffered frames
|
|
853
|
+
* - Generator yields null after flushing when null is received
|
|
854
|
+
* - No automatic flushing - decoder stays open until EOF or close()
|
|
855
|
+
*
|
|
856
|
+
* @param packets - Iterable of packets, single packet, or null to flush
|
|
857
|
+
*
|
|
858
|
+
* @yields {Frame | null} Decoded frames, followed by null when explicitly flushed
|
|
859
|
+
*
|
|
860
|
+
* @throws {Error} If decoder is closed
|
|
861
|
+
*
|
|
862
|
+
* @throws {FFmpegError} If decoding fails
|
|
863
|
+
*
|
|
864
|
+
* @example
|
|
865
|
+
* ```typescript
|
|
866
|
+
* // Stream of packets with automatic EOF propagation
|
|
867
|
+
* await using input = await Demuxer.open('video.mp4');
|
|
868
|
+
* using decoder = await Decoder.create(input.video());
|
|
869
|
+
*
|
|
870
|
+
* for (const frame of decoder.framesSync(input.packetsSync())) {
|
|
871
|
+
* if (frame === null) {
|
|
872
|
+
* console.log('Decoding complete');
|
|
873
|
+
* break;
|
|
874
|
+
* }
|
|
875
|
+
* console.log(`Frame: ${frame.width}x${frame.height}`);
|
|
876
|
+
* frame.free();
|
|
877
|
+
* }
|
|
878
|
+
* ```
|
|
879
|
+
*
|
|
880
|
+
* @example
|
|
881
|
+
* ```typescript
|
|
882
|
+
* // Single packet (no automatic flush)
|
|
883
|
+
* for (const frame of decoder.framesSync(singlePacket)) {
|
|
884
|
+
* encoder.encodeSync(frame);
|
|
885
|
+
* frame.free();
|
|
886
|
+
* }
|
|
887
|
+
* // Decoder still has buffered frames - send null to flush
|
|
888
|
+
* for (const frame of decoder.framesSync(null)) {
|
|
889
|
+
* if (frame === null) break;
|
|
890
|
+
* encoder.encodeSync(frame);
|
|
891
|
+
* frame.free();
|
|
892
|
+
* }
|
|
893
|
+
* ```
|
|
894
|
+
*
|
|
895
|
+
* @example
|
|
896
|
+
* ```typescript
|
|
897
|
+
* // Explicit flush with EOF
|
|
898
|
+
* for (const frame of decoder.framesSync(null)) {
|
|
899
|
+
* if (frame === null) {
|
|
900
|
+
* console.log('All buffered frames flushed');
|
|
901
|
+
* break;
|
|
902
|
+
* }
|
|
903
|
+
* console.log('Buffered frame:', frame.pts);
|
|
904
|
+
* frame.free();
|
|
905
|
+
* }
|
|
906
|
+
* ```
|
|
907
|
+
*/
|
|
908
|
+
    *framesSync(packets) {
        const self = this;
        // Decode one packet, then drain every frame it produced.
        // (bind is redundant given the `self` closure; kept from compilation)
        const processPacket = function* (packet) {
            self.decodeSync(packet);
            while (true) {
                const frame = self.receiveSync();
                if (!frame)
                    break;
                yield frame;
            }
        }.bind(this);
        // Flush buffered frames, then yield a trailing null as the EOF marker.
        const finalize = function* () {
            for (const remaining of self.flushFramesSync()) {
                yield remaining;
            }
            yield null;
        }.bind(this);
        // Explicit EOF signal: flush and stop.
        if (packets === null) {
            yield* finalize();
            return;
        }
        // Single packet: decode it, but do NOT flush afterwards.
        if (packets instanceof Packet) {
            yield* processPacket(packets);
            return;
        }
        // Iterable of packets: each packet is treated as a disposable
        // resource (env_2 bookkeeping is a compiled `using` declaration).
        for (const packet_2 of packets) {
            const env_2 = { stack: [], error: void 0, hasError: false };
            try {
                const packet = __addDisposableResource(env_2, packet_2, false);
                // A null packet inside the stream also means EOF.
                if (packet === null) {
                    yield* finalize();
                    return;
                }
                yield* processPacket(packet);
            }
            catch (e_2) {
                env_2.error = e_2;
                env_2.hasError = true;
            }
            finally {
                // Dispose the packet resource (tslib helper).
                __disposeResources(env_2);
            }
        }
    }
|
|
952
|
+
/**
|
|
953
|
+
* Flush decoder and signal end-of-stream.
|
|
954
|
+
*
|
|
955
|
+
* Sends null packet to decoder to signal end-of-stream.
|
|
956
|
+
* Does nothing if decoder is closed.
|
|
957
|
+
* Must use receive() or flushFrames() to get remaining buffered frames.
|
|
958
|
+
*
|
|
959
|
+
* Direct mapping to avcodec_send_packet(NULL).
|
|
960
|
+
*
|
|
961
|
+
* @throws {FFmpegError} If flush fails
|
|
962
|
+
*
|
|
963
|
+
* @example
|
|
964
|
+
* ```typescript
|
|
965
|
+
* // Signal end of stream
|
|
966
|
+
* await decoder.flush();
|
|
967
|
+
*
|
|
968
|
+
* // Then get remaining frames
|
|
969
|
+
* let frame;
|
|
970
|
+
     * while ((frame = await decoder.receive())) {
|
|
971
|
+
* console.log('Got buffered frame');
|
|
972
|
+
* frame.free();
|
|
973
|
+
* }
|
|
974
|
+
* ```
|
|
975
|
+
*
|
|
976
|
+
* @see {@link flushFrames} For convenient async iteration
|
|
977
|
+
* @see {@link receive} For getting buffered frames
|
|
978
|
+
* @see {@link flushSync} For synchronous version
|
|
979
|
+
*/
|
|
980
|
+
async flush() {
|
|
981
|
+
this.signal?.throwIfAborted();
|
|
982
|
+
if (this.isClosed) {
|
|
983
|
+
return;
|
|
984
|
+
}
|
|
985
|
+
// Send flush packet (null)
|
|
986
|
+
const ret = await this.codecContext.sendPacket(null);
|
|
987
|
+
if (ret < 0 && ret !== AVERROR_EOF) {
|
|
988
|
+
if (ret !== AVERROR_EAGAIN) {
|
|
989
|
+
FFmpegError.throwIfError(ret, 'Failed to flush decoder');
|
|
990
|
+
}
|
|
991
|
+
}
|
|
992
|
+
}
|
|
993
|
+
/**
|
|
994
|
+
* Flush decoder and signal end-of-stream synchronously.
|
|
995
|
+
* Synchronous version of flush.
|
|
996
|
+
*
|
|
997
|
+
* Send null packet to signal end of input stream.
|
|
998
|
+
* Decoder may still have buffered frames.
|
|
999
|
+
* Call receiveSync() repeatedly to get remaining frames.
|
|
1000
|
+
*
|
|
1001
|
+
* @throws {FFmpegError} If flush fails
|
|
1002
|
+
*
|
|
1003
|
+
* @example
|
|
1004
|
+
* ```typescript
|
|
1005
|
+
* decoder.flushSync();
|
|
1006
|
+
* // Get remaining frames
|
|
1007
|
+
* let frame;
|
|
1008
|
+
     * while ((frame = decoder.receiveSync())) {
|
|
1009
|
+
* console.log('Buffered frame');
|
|
1010
|
+
* }
|
|
1011
|
+
* ```
|
|
1012
|
+
*
|
|
1013
|
+
* @see {@link flushFramesSync} For convenient sync iteration
|
|
1014
|
+
* @see {@link receiveSync} For getting buffered frames
|
|
1015
|
+
* @see {@link flush} For async version
|
|
1016
|
+
*/
|
|
1017
|
+
flushSync() {
|
|
1018
|
+
if (this.isClosed) {
|
|
1019
|
+
return;
|
|
1020
|
+
}
|
|
1021
|
+
// Send flush packet (null)
|
|
1022
|
+
const ret = this.codecContext.sendPacketSync(null);
|
|
1023
|
+
if (ret < 0 && ret !== AVERROR_EOF) {
|
|
1024
|
+
if (ret !== AVERROR_EAGAIN) {
|
|
1025
|
+
FFmpegError.throwIfError(ret, 'Failed to flush decoder');
|
|
1026
|
+
}
|
|
1027
|
+
}
|
|
1028
|
+
}
|
|
1029
|
+
/**
|
|
1030
|
+
* Flush all buffered frames as async generator.
|
|
1031
|
+
*
|
|
1032
|
+
* Convenient async iteration over remaining frames.
|
|
1033
|
+
* Automatically sends flush signal and retrieves buffered frames.
|
|
1034
|
+
* Useful for end-of-stream processing.
|
|
1035
|
+
*
|
|
1036
|
+
* @yields {Frame} Buffered frames
|
|
1037
|
+
*
|
|
1038
|
+
* @example
|
|
1039
|
+
* ```typescript
|
|
1040
|
+
* // Flush at end of decoding
|
|
1041
|
+
* for await (const frame of decoder.flushFrames()) {
|
|
1042
|
+
* console.log('Processing buffered frame');
|
|
1043
|
+
* await encoder.encode(frame);
|
|
1044
|
+
* frame.free();
|
|
1045
|
+
* }
|
|
1046
|
+
* ```
|
|
1047
|
+
*
|
|
1048
|
+
* @see {@link decode} For sending packets and receiving frames
|
|
1049
|
+
* @see {@link flush} For signaling end-of-stream
|
|
1050
|
+
* @see {@link flushFramesSync} For synchronous version
|
|
1051
|
+
*/
|
|
1052
|
+
async *flushFrames() {
|
|
1053
|
+
// Send flush signal
|
|
1054
|
+
await this.flush();
|
|
1055
|
+
while (true) {
|
|
1056
|
+
const remaining = await this.receive();
|
|
1057
|
+
if (!remaining)
|
|
1058
|
+
break;
|
|
1059
|
+
yield remaining;
|
|
1060
|
+
}
|
|
1061
|
+
}
|
|
1062
|
+
/**
|
|
1063
|
+
* Flush all buffered frames as generator synchronously.
|
|
1064
|
+
* Synchronous version of flushFrames.
|
|
1065
|
+
*
|
|
1066
|
+
* Convenient sync iteration over remaining frames.
|
|
1067
|
+
* Automatically sends flush signal and retrieves buffered frames.
|
|
1068
|
+
* Useful for end-of-stream processing.
|
|
1069
|
+
*
|
|
1070
|
+
* @yields {Frame} Buffered frames
|
|
1071
|
+
*
|
|
1072
|
+
* @example
|
|
1073
|
+
* ```typescript
|
|
1074
|
+
* // Flush at end of decoding
|
|
1075
|
+
* for (const frame of decoder.flushFramesSync()) {
|
|
1076
|
+
* console.log('Processing buffered frame');
|
|
1077
|
+
* encoder.encodeSync(frame);
|
|
1078
|
+
* frame.free();
|
|
1079
|
+
* }
|
|
1080
|
+
* ```
|
|
1081
|
+
*
|
|
1082
|
+
* @see {@link decodeSync} For sending packets and receiving frames
|
|
1083
|
+
* @see {@link flushSync} For signaling end-of-stream
|
|
1084
|
+
* @see {@link flushFrames} For async version
|
|
1085
|
+
*/
|
|
1086
|
+
*flushFramesSync() {
|
|
1087
|
+
// Send flush signal
|
|
1088
|
+
this.flushSync();
|
|
1089
|
+
while (true) {
|
|
1090
|
+
const remaining = this.receiveSync();
|
|
1091
|
+
if (!remaining)
|
|
1092
|
+
break;
|
|
1093
|
+
yield remaining;
|
|
1094
|
+
}
|
|
1095
|
+
}
|
|
1096
|
+
/**
|
|
1097
|
+
* Receive frame from decoder.
|
|
1098
|
+
*
|
|
1099
|
+
* Gets decoded frames from the codec's internal buffer.
|
|
1100
|
+
* Handles frame cloning and error checking.
|
|
1101
|
+
* Hardware frames include hw_frames_ctx reference.
|
|
1102
|
+
* Call repeatedly to drain all buffered frames.
|
|
1103
|
+
*
|
|
1104
|
+
* **Return Values:**
|
|
1105
|
+
* - `Frame` - Successfully decoded frame
|
|
1106
|
+
* - `null` - No frame available (AVERROR_EAGAIN), send more packets
|
|
1107
|
+
* - `undefined` - End of stream reached (AVERROR_EOF), decoder flushed
|
|
1108
|
+
*
|
|
1109
|
+
* Direct mapping to avcodec_receive_frame().
|
|
1110
|
+
*
|
|
1111
|
+
* @returns Decoded frame, null (need more data), or undefined (end of stream)
|
|
1112
|
+
*
|
|
1113
|
+
* @throws {FFmpegError} If receive fails with error other than AVERROR_EAGAIN or AVERROR_EOF
|
|
1114
|
+
*
|
|
1115
|
+
* @throws {Error} If frame cloning fails (out of memory)
|
|
1116
|
+
*
|
|
1117
|
+
* @example
|
|
1118
|
+
* ```typescript
|
|
1119
|
+
* const frame = await decoder.receive();
|
|
1120
|
+
* if (frame === EOF) {
|
|
1121
|
+
* console.log('Decoder flushed, no more frames');
|
|
1122
|
+
* } else if (frame) {
|
|
1123
|
+
* console.log('Got decoded frame');
|
|
1124
|
+
* frame.free();
|
|
1125
|
+
* } else {
|
|
1126
|
+
* console.log('Need more packets');
|
|
1127
|
+
* }
|
|
1128
|
+
* ```
|
|
1129
|
+
*
|
|
1130
|
+
* @example
|
|
1131
|
+
* ```typescript
|
|
1132
|
+
* // Drain all buffered frames (stop on null or EOF)
|
|
1133
|
+
* let frame;
|
|
1134
|
+
* while ((frame = await decoder.receive()) && frame !== EOF) {
|
|
1135
|
+
* console.log(`Frame PTS: ${frame.pts}`);
|
|
1136
|
+
* frame.free();
|
|
1137
|
+
* }
|
|
1138
|
+
* ```
|
|
1139
|
+
*
|
|
1140
|
+
* @see {@link decode} For sending packets
|
|
1141
|
+
* @see {@link flush} For signaling end-of-stream
|
|
1142
|
+
* @see {@link receiveSync} For synchronous version
|
|
1143
|
+
* @see {@link EOF} For end-of-stream signal
|
|
1144
|
+
*/
|
|
1145
|
+
async receive() {
|
|
1146
|
+
if (this.isClosed) {
|
|
1147
|
+
return EOF;
|
|
1148
|
+
}
|
|
1149
|
+
// Clear previous frame data
|
|
1150
|
+
this.frame.unref();
|
|
1151
|
+
const ret = await this.codecContext.receiveFrame(this.frame);
|
|
1152
|
+
if (ret === 0) {
|
|
1153
|
+
// Set frame time_base to decoder's packet timebase
|
|
1154
|
+
this.frame.timeBase = this.codecContext.pktTimebase;
|
|
1155
|
+
// Check for corrupt frame
|
|
1156
|
+
if (this.frame.decodeErrorFlags || this.frame.hasFlags(AV_FRAME_FLAG_CORRUPT)) {
|
|
1157
|
+
if (this.options.exitOnError) {
|
|
1158
|
+
throw new FFmpegError(AVERROR_INVALIDDATA);
|
|
1159
|
+
}
|
|
1160
|
+
// exitOnError=false: skip corrupt frame
|
|
1161
|
+
return null;
|
|
1162
|
+
}
|
|
1163
|
+
// Handles PTS assignment, duration estimation, and frame tracking
|
|
1164
|
+
if (this.codecContext.codecType === AVMEDIA_TYPE_VIDEO) {
|
|
1165
|
+
this.processVideoFrame(this.frame);
|
|
1166
|
+
}
|
|
1167
|
+
// Handles timestamp extrapolation, sample rate changes, and duration calculation
|
|
1168
|
+
if (this.codecContext.codecType === AVMEDIA_TYPE_AUDIO) {
|
|
1169
|
+
this.processAudioFrame(this.frame);
|
|
1170
|
+
}
|
|
1171
|
+
// Got a frame, clone it for the user
|
|
1172
|
+
const cloned = this.frame.clone();
|
|
1173
|
+
if (!cloned) {
|
|
1174
|
+
throw new Error('Failed to clone frame (out of memory)');
|
|
1175
|
+
}
|
|
1176
|
+
return cloned;
|
|
1177
|
+
}
|
|
1178
|
+
else if (ret === AVERROR_EAGAIN) {
|
|
1179
|
+
// Need more data
|
|
1180
|
+
return null;
|
|
1181
|
+
}
|
|
1182
|
+
else if (ret === AVERROR_EOF) {
|
|
1183
|
+
// End of stream
|
|
1184
|
+
return EOF;
|
|
1185
|
+
}
|
|
1186
|
+
else {
|
|
1187
|
+
// Error during receive
|
|
1188
|
+
if (this.options.exitOnError) {
|
|
1189
|
+
FFmpegError.throwIfError(ret, 'Failed to receive frame');
|
|
1190
|
+
}
|
|
1191
|
+
// exitOnError=false: return null, caller can retry if desired
|
|
1192
|
+
return null;
|
|
1193
|
+
}
|
|
1194
|
+
}
|
|
1195
|
+
/**
|
|
1196
|
+
* Receive frame from decoder synchronously.
|
|
1197
|
+
* Synchronous version of receive.
|
|
1198
|
+
*
|
|
1199
|
+
* Gets decoded frames from the codec's internal buffer.
|
|
1200
|
+
* Handles frame cloning and error checking.
|
|
1201
|
+
* Hardware frames include hw_frames_ctx reference.
|
|
1202
|
+
* Call repeatedly to drain all buffered frames.
|
|
1203
|
+
*
|
|
1204
|
+
* **Return Values:**
|
|
1205
|
+
* - `Frame` - Successfully decoded frame
|
|
1206
|
+
* - `null` - No frame available (AVERROR_EAGAIN), send more packets
|
|
1207
|
+
* - `undefined` - End of stream reached (AVERROR_EOF), decoder flushed
|
|
1208
|
+
*
|
|
1209
|
+
* Direct mapping to avcodec_receive_frame().
|
|
1210
|
+
*
|
|
1211
|
+
* @returns Decoded frame, null (need more data), or undefined (end of stream)
|
|
1212
|
+
*
|
|
1213
|
+
* @throws {FFmpegError} If receive fails with error other than AVERROR_EAGAIN or AVERROR_EOF
|
|
1214
|
+
*
|
|
1215
|
+
* @throws {Error} If frame cloning fails (out of memory)
|
|
1216
|
+
*
|
|
1217
|
+
* @example
|
|
1218
|
+
* ```typescript
|
|
1219
|
+
* const frame = decoder.receiveSync();
|
|
1220
|
+
* if (frame === EOF) {
|
|
1221
|
+
* console.log('Decoder flushed, no more frames');
|
|
1222
|
+
* } else if (frame) {
|
|
1223
|
+
* console.log('Got decoded frame');
|
|
1224
|
+
* frame.free();
|
|
1225
|
+
* } else {
|
|
1226
|
+
* console.log('Need more packets');
|
|
1227
|
+
* }
|
|
1228
|
+
* ```
|
|
1229
|
+
*
|
|
1230
|
+
* @example
|
|
1231
|
+
* ```typescript
|
|
1232
|
+
* // Drain all buffered frames (stop on null or EOF)
|
|
1233
|
+
* let frame;
|
|
1234
|
+
* while ((frame = decoder.receiveSync()) && frame !== EOF) {
|
|
1235
|
+
* console.log(`Frame PTS: ${frame.pts}`);
|
|
1236
|
+
* frame.free();
|
|
1237
|
+
* }
|
|
1238
|
+
* ```
|
|
1239
|
+
*
|
|
1240
|
+
* @see {@link decodeSync} For sending packets
|
|
1241
|
+
* @see {@link flushSync} For signaling end-of-stream
|
|
1242
|
+
* @see {@link receive} For async version
|
|
1243
|
+
* @see {@link EOF} For end-of-stream signal
|
|
1244
|
+
*/
|
|
1245
|
+
receiveSync() {
|
|
1246
|
+
if (this.isClosed) {
|
|
1247
|
+
return EOF;
|
|
1248
|
+
}
|
|
1249
|
+
// Clear previous frame data
|
|
1250
|
+
this.frame.unref();
|
|
1251
|
+
const ret = this.codecContext.receiveFrameSync(this.frame);
|
|
1252
|
+
if (ret === 0) {
|
|
1253
|
+
// Set frame time_base to decoder's packet timebase
|
|
1254
|
+
this.frame.timeBase = this.codecContext.pktTimebase;
|
|
1255
|
+
// Check for corrupt frame
|
|
1256
|
+
if (this.frame.decodeErrorFlags || this.frame.hasFlags(AV_FRAME_FLAG_CORRUPT)) {
|
|
1257
|
+
if (this.options.exitOnError) {
|
|
1258
|
+
throw new FFmpegError(AVERROR_INVALIDDATA);
|
|
1259
|
+
}
|
|
1260
|
+
// exitOnError=false: skip corrupt frame
|
|
1261
|
+
return null;
|
|
1262
|
+
}
|
|
1263
|
+
// Process video frame
|
|
1264
|
+
// Handles PTS assignment, duration estimation, and frame tracking
|
|
1265
|
+
if (this.codecContext.codecType === AVMEDIA_TYPE_VIDEO) {
|
|
1266
|
+
this.processVideoFrame(this.frame);
|
|
1267
|
+
}
|
|
1268
|
+
// Process audio frame
|
|
1269
|
+
// Handles timestamp extrapolation, sample rate changes, and duration calculation
|
|
1270
|
+
if (this.codecContext.codecType === AVMEDIA_TYPE_AUDIO) {
|
|
1271
|
+
this.processAudioFrame(this.frame);
|
|
1272
|
+
}
|
|
1273
|
+
// Got a frame, clone it for the user
|
|
1274
|
+
const cloned = this.frame.clone();
|
|
1275
|
+
if (!cloned) {
|
|
1276
|
+
throw new Error('Failed to clone frame (out of memory)');
|
|
1277
|
+
}
|
|
1278
|
+
return cloned;
|
|
1279
|
+
}
|
|
1280
|
+
else if (ret === AVERROR_EAGAIN) {
|
|
1281
|
+
// Need more data
|
|
1282
|
+
return null;
|
|
1283
|
+
}
|
|
1284
|
+
else if (ret === AVERROR_EOF) {
|
|
1285
|
+
// End of stream
|
|
1286
|
+
return EOF;
|
|
1287
|
+
}
|
|
1288
|
+
else {
|
|
1289
|
+
// Error during receive
|
|
1290
|
+
if (this.options.exitOnError) {
|
|
1291
|
+
FFmpegError.throwIfError(ret, 'Failed to receive frame');
|
|
1292
|
+
}
|
|
1293
|
+
// exitOnError=false: return null, caller can retry if desired
|
|
1294
|
+
return null;
|
|
1295
|
+
}
|
|
1296
|
+
}
|
|
1297
|
+
    /**
     * Pipe decoded frames into a downstream pipeline component.
     *
     * Registers `target` as the next component, starts this decoder's worker
     * if it is not already running, and launches a background task that moves
     * frames from this decoder's output queue into the target's input queue.
     *
     * NOTE(review): `runWorker`, `receiveFromQueue`, `sendToQueue` and
     * `Scheduler` are defined elsewhere in this package — the exact queue
     * semantics (backpressure, termination value) should be confirmed there.
     * The loop below stops when `receiveFromQueue()` yields a falsy value,
     * which appears to be the queue-closed/EOF signal.
     *
     * @param target - Downstream component exposing `sendToQueue`
     *
     * @returns Scheduler chaining this decoder with the target
     */
    pipeTo(target) {
        const t = target;
        // Store reference to next component for flush propagation
        this.nextComponent = t;
        // Start worker if not already running
        this.workerPromise ??= this.runWorker();
        // Start pipe task: decoder.outputQueue -> target.inputQueue (via target.send)
        this.pipeToPromise = (async () => {
            while (true) {
                const frame = await this.receiveFromQueue();
                if (!frame)
                    break;
                await t.sendToQueue(frame);
            }
        })();
        // Return scheduler for chaining (target is now the last component)
        return new Scheduler(this, t);
    }
|
|
1315
|
+
/**
|
|
1316
|
+
* Close decoder and free resources.
|
|
1317
|
+
*
|
|
1318
|
+
* Releases codec context and internal frame buffer.
|
|
1319
|
+
* Safe to call multiple times.
|
|
1320
|
+
* Automatically called by Symbol.dispose.
|
|
1321
|
+
*
|
|
1322
|
+
* @example
|
|
1323
|
+
* ```typescript
|
|
1324
|
+
* const decoder = await Decoder.create(stream);
|
|
1325
|
+
* try {
|
|
1326
|
+
* // Use decoder
|
|
1327
|
+
* } finally {
|
|
1328
|
+
* decoder.close();
|
|
1329
|
+
* }
|
|
1330
|
+
* ```
|
|
1331
|
+
*
|
|
1332
|
+
* @see {@link Symbol.dispose} For automatic cleanup
|
|
1333
|
+
*/
|
|
1334
|
+
close() {
|
|
1335
|
+
if (this.isClosed) {
|
|
1336
|
+
return;
|
|
1337
|
+
}
|
|
1338
|
+
this.isClosed = true;
|
|
1339
|
+
this.inputQueue?.close();
|
|
1340
|
+
this.outputQueue?.close();
|
|
1341
|
+
this.frame.free();
|
|
1342
|
+
this.codecContext.freeContext();
|
|
1343
|
+
this.initialized = false;
|
|
1344
|
+
}
|
|
1345
|
+
/**
|
|
1346
|
+
* Get stream object.
|
|
1347
|
+
*
|
|
1348
|
+
* Returns the underlying stream being decoded.
|
|
1349
|
+
* Provides access to stream metadata and parameters.
|
|
1350
|
+
*
|
|
1351
|
+
* @returns Stream object
|
|
1352
|
+
*
|
|
1353
|
+
* @internal
|
|
1354
|
+
*
|
|
1355
|
+
* @see {@link Stream} For stream details
|
|
1356
|
+
*/
|
|
1357
|
+
getStream() {
|
|
1358
|
+
return this.stream;
|
|
1359
|
+
}
|
|
1360
|
+
/**
|
|
1361
|
+
* Get decoder codec.
|
|
1362
|
+
*
|
|
1363
|
+
* Returns the codec used by this decoder.
|
|
1364
|
+
* Useful for checking codec capabilities and properties.
|
|
1365
|
+
*
|
|
1366
|
+
* @returns Codec instance
|
|
1367
|
+
*
|
|
1368
|
+
* @internal
|
|
1369
|
+
*
|
|
1370
|
+
* @see {@link Codec} For codec details
|
|
1371
|
+
*/
|
|
1372
|
+
getCodec() {
|
|
1373
|
+
return this.codec;
|
|
1374
|
+
}
|
|
1375
|
+
/**
|
|
1376
|
+
* Get underlying codec context.
|
|
1377
|
+
*
|
|
1378
|
+
* Returns the codec context for advanced operations.
|
|
1379
|
+
* Useful for accessing low-level codec properties and settings.
|
|
1380
|
+
* Returns null if decoder is closed.
|
|
1381
|
+
*
|
|
1382
|
+
* @returns Codec context or null if closed
|
|
1383
|
+
*
|
|
1384
|
+
* @internal
|
|
1385
|
+
*
|
|
1386
|
+
* @see {@link CodecContext} For context details
|
|
1387
|
+
*/
|
|
1388
|
+
getCodecContext() {
|
|
1389
|
+
return !this.isClosed && this.initialized ? this.codecContext : null;
|
|
1390
|
+
}
|
|
1391
|
+
    /**
     * Worker loop for push-based processing.
     *
     * Pulls packets from the input queue, decodes them, and immediately
     * forwards every available frame to the output queue. Once the input
     * queue closes, flushes the decoder, drains the remaining frames, and
     * closes the output queue. Any thrown error is propagated to both
     * queues so upstream and downstream both observe the failure.
     *
     * @internal
     */
    async runWorker() {
        try {
            // Outer loop - receive packets until the input queue is closed
            while (!this.inputQueue.isClosed) {
                // Downleveled `using` declaration: env_3 tracks the disposable
                // packet so it is always released in the finally block below.
                const env_3 = { stack: [], error: void 0, hasError: false };
                try {
                    const packet = __addDisposableResource(env_3, await this.inputQueue.receive(), false);
                    if (!packet)
                        break;
                    // Skip packets for other streams
                    if (packet.streamIndex !== this.stream.index) {
                        continue;
                    }
                    // Skip empty packets - nothing to decode
                    if (packet.size === 0) {
                        continue;
                    }
                    await this.decode(packet);
                    // Receive ALL available frames immediately
                    // This ensures frames are yielded ASAP without latency
                    while (!this.outputQueue.isClosed) {
                        const frame = await this.receive();
                        if (!frame)
                            break; // EAGAIN or EOF
                        await this.outputQueue.send(frame);
                    }
                }
                catch (e_3) {
                    // Stash the error; __disposeResources disposes the packet
                    // and then rethrows it (standard TypeScript `using` emit).
                    env_3.error = e_3;
                    env_3.hasError = true;
                }
                finally {
                    __disposeResources(env_3);
                }
            }
            // Input exhausted: flush decoder and drain any buffered frames
            await this.flush();
            while (!this.outputQueue.isClosed) {
                const frame = await this.receive();
                if (!frame)
                    break;
                await this.outputQueue.send(frame);
            }
        }
        catch (error) {
            // Propagate error to both queues so upstream and downstream know
            const err = error instanceof Error ? error : new Error(String(error));
            this.inputQueue?.closeWithError(err);
            this.outputQueue?.closeWithError(err);
        }
        finally {
            // Close output queue when done (if not already closed with error)
            this.outputQueue?.close();
        }
    }
|
|
1450
|
+
    /**
     * Send packet to input queue or flush the pipeline.
     *
     * When packet is provided, queues it for processing.
     * When null is provided, triggers flush sequence:
     * - Closes input queue
     * - Waits for worker completion
     * - Flushes decoder and sends remaining frames to output queue
     * - Closes output queue
     * - Waits for pipeTo task completion
     * - Propagates flush to next component (if any)
     *
     * Used by scheduler system for pipeline control.
     *
     * @param packet - Packet to send, or null to flush
     *
     * @internal
     */
    async sendToQueue(packet) {
        if (packet) {
            await this.inputQueue.send(packet);
        }
        else {
            // Close input queue to signal end of stream to worker
            this.inputQueue.close();
            // Wait for worker to finish processing all packets (if exists)
            if (this.workerPromise) {
                await this.workerPromise;
            }
            // Flush decoder at end
            // NOTE(review): when a worker ran, runWorker() has already flushed
            // the decoder and closed the output queue in its finally block, so
            // this second flush/drain targets a closed queue - presumably a
            // no-op, but confirm outputQueue.send() tolerates a closed queue.
            await this.flush();
            // Send all flushed frames to output queue
            while (true) {
                const frame = await this.receive();
                if (!frame)
                    break;
                await this.outputQueue.send(frame);
            }
            // Close output queue to signal end of stream to pipeTo() task
            this.outputQueue.close();
            // Wait for pipeTo() task to finish processing all frames (if exists)
            if (this.pipeToPromise) {
                await this.pipeToPromise;
            }
            // Then propagate flush to next component
            if (this.nextComponent) {
                await this.nextComponent.sendToQueue(null);
            }
        }
    }
|
|
1500
|
+
/**
|
|
1501
|
+
* Receive frame from output queue.
|
|
1502
|
+
*
|
|
1503
|
+
* @returns Frame from output queue or null if closed
|
|
1504
|
+
*
|
|
1505
|
+
* @internal
|
|
1506
|
+
*/
|
|
1507
|
+
async receiveFromQueue() {
|
|
1508
|
+
return await this.outputQueue.receive();
|
|
1509
|
+
}
|
|
1510
|
+
    /**
     * Estimate video frame duration.
     *
     * Implements FFmpeg CLI's video_duration_estimate() logic.
     * Uses multiple heuristics, tried in priority order, to determine frame
     * duration when not explicitly available:
     * 1. Frame duration from container (if reliable)
     * 2. PTS difference between consecutive frames
     * 3. Frame duration even if flagged unreliable
     * 4. Duration derived from codec framerate (field-rate based)
     * 5. Stream framerate (avg, falling back to real frame rate)
     * 6. Last frame's estimated duration, then 1
     *
     * The order of the checks below is semantic - do not reorder.
     *
     * @param frame - Frame to estimate duration for
     *
     * @returns Estimated duration in frame's timebase units (BigInt, >= 1n)
     *
     * @internal
     */
    estimateVideoDuration(frame) {
        // Difference between this and last frame's timestamps
        // (-1n sentinel when either timestamp is missing)
        const tsDiff = frame.pts !== AV_NOPTS_VALUE && this.lastFramePts !== AV_NOPTS_VALUE ? frame.pts - this.lastFramePts : -1n;
        // Frame duration is unreliable (typically guessed by lavf) when it is equal
        // to 1 and the actual duration of the last frame is more than 2x larger
        const durationUnreliable = frame.duration === 1n && tsDiff > 2n * frame.duration;
        // Prefer frame duration for containers with timestamps
        if (frame.duration > 0n && !durationUnreliable) {
            return frame.duration;
        }
        // Calculate codec duration from framerate (used further below)
        let codecDuration = 0n;
        const framerate = this.codecContext.framerate;
        if (framerate && framerate.den > 0 && framerate.num > 0) {
            // fields = repeated fields + 2 (a full frame is two fields)
            const fields = (frame.repeatPict ?? 0) + 2;
            // field rate = 2 * framerate
            const fieldRate = avMulQ(framerate, { num: 2, den: 1 });
            codecDuration = avRescaleQ(fields, avInvQ(fieldRate), frame.timeBase);
        }
        // When timestamps are available, repeat last frame's actual duration
        if (tsDiff > 0n) {
            return tsDiff;
        }
        // Try frame/codec duration
        if (frame.duration > 0n) {
            return frame.duration;
        }
        if (codecDuration > 0n) {
            return codecDuration;
        }
        // Try stream framerate (average, else real base frame rate)
        const streamFramerate = this.stream.avgFrameRate ?? this.stream.rFrameRate;
        if (streamFramerate && streamFramerate.num > 0 && streamFramerate.den > 0) {
            const d = avRescaleQ(1, avInvQ(streamFramerate), frame.timeBase);
            if (d > 0n) {
                return d;
            }
        }
        // Last resort is last frame's estimated duration, and 1
        return this.lastFrameDurationEst > 0n ? this.lastFrameDurationEst : 1n;
    }
|
|
1567
|
+
    /**
     * Process video frame after decoding.
     *
     * Implements FFmpeg CLI's video_frame_process() logic.
     * Handles:
     * - Hardware frame transfer to software format
     * - PTS assignment from best_effort_timestamp
     * - Forced constant-framerate timestamps (forcedFramerate option)
     * - PTS extrapolation when missing
     * - Duration estimation and frame tracking state
     * - SAR override and cropping
     *
     * Mutates the frame in place; on recoverable errors (with
     * exitOnError=false) it returns early, leaving the frame as-is.
     *
     * @param frame - Decoded frame to process (mutated in place)
     *
     * @internal
     */
    processVideoFrame(frame) {
        // Hardware acceleration retrieve
        // If hwaccel_output_format is set and frame is in hardware format, transfer to software format
        if (this.options.hwaccelOutputFormat !== undefined && frame.isHwFrame()) {
            const swFrame = new Frame();
            swFrame.alloc();
            swFrame.format = this.options.hwaccelOutputFormat;
            // Transfer data from hardware to software frame
            const ret = frame.hwframeTransferDataSync(swFrame, 0);
            if (ret < 0) {
                // Free the temporary frame before reporting/ignoring the error
                swFrame.free();
                if (this.options.exitOnError) {
                    FFmpegError.throwIfError(ret, 'Failed to transfer hardware frame data');
                }
                return;
            }
            // Copy properties from hw frame to sw frame
            swFrame.copyProps(frame);
            // Replace frame with software version (unref old, move ref)
            frame.unref();
            const refRet = frame.ref(swFrame);
            // swFrame is no longer needed once frame references its data
            swFrame.free();
            if (refRet < 0) {
                if (this.options.exitOnError) {
                    FFmpegError.throwIfError(refRet, 'Failed to reference software frame');
                }
                return;
            }
        }
        // Set PTS from best_effort_timestamp
        frame.pts = frame.bestEffortTimestamp;
        // DECODER_FLAG_FRAMERATE_FORCED: Ignores all timestamps and generates constant framerate
        if (this.options.forcedFramerate) {
            // Clear PTS so the extrapolation branch below assigns it
            frame.pts = AV_NOPTS_VALUE;
            frame.duration = 1n;
            // Timebase becomes 1/framerate so each frame advances PTS by 1
            const invFramerate = avInvQ(this.options.forcedFramerate);
            frame.timeBase = new Rational(invFramerate.num, invFramerate.den);
        }
        // No timestamp available - extrapolate from previous frame duration
        if (frame.pts === AV_NOPTS_VALUE) {
            frame.pts = this.lastFramePts === AV_NOPTS_VALUE ? 0n : this.lastFramePts + this.lastFrameDurationEst;
        }
        // Update timestamp history for the next frame's estimation
        this.lastFrameDurationEst = this.estimateVideoDuration(frame);
        this.lastFramePts = frame.pts;
        this.lastFrameTb = new Rational(frame.timeBase.num, frame.timeBase.den);
        // SAR override
        if (this.options.sarOverride) {
            frame.sampleAspectRatio = new Rational(this.options.sarOverride.num, this.options.sarOverride.den);
        }
        // Apply cropping
        if (this.options.applyCropping) {
            const ret = frame.applyCropping(1); // AV_FRAME_CROP_UNALIGNED = 1
            if (ret < 0) {
                if (this.options.exitOnError) {
                    FFmpegError.throwIfError(ret, 'Error applying decoder cropping');
                }
                // exitOnError=false: cropping failure is ignored
            }
        }
    }
|
|
1642
|
+
    /**
     * Audio samplerate update - handles sample rate changes.
     *
     * Based on FFmpeg's audio_samplerate_update().
     *
     * On sample rate change, chooses a new internal timebase that can represent
     * timestamps from all sample rates seen so far. Uses GCD to find minimal
     * common timebase, with fallback to LCM of common sample rates (28224000).
     *
     * Handles:
     * - Sample rate change detection
     * - Timebase calculation via GCD
     * - Overflow detection and fallback
     * - Frame timebase optimization
     * - Rescaling existing timestamps
     *
     * @param frame - Audio frame to process
     *
     * @returns Timebase to use for this frame (Rational)
     *
     * @internal
     */
    audioSamplerateUpdate(frame) {
        // Denominator of the current internal timebase (previous rate factor)
        const prev = this.lastFrameTb.den;
        const sr = frame.sampleRate;
        // No change - return existing timebase
        if (frame.sampleRate === this.lastFrameSampleRate) {
            return this.lastFrameTb;
        }
        // Calculate GCD to find minimal common timebase
        const gcd = avGcd(prev, sr);
        let tbNew;
        // Check for overflow: prev/gcd * sr must stay below INT_MAX
        if (Number(prev) / Number(gcd) >= INT_MAX / sr) {
            // LCM of 192000, 44100 - represents all common sample rates
            tbNew = { num: 1, den: 28224000 };
        }
        else {
            // Normal case: den = lcm(prev, sr)
            tbNew = { num: 1, den: (Number(prev) / Number(gcd)) * sr };
        }
        // Keep frame's timebase if strictly better
        // "Strictly better" means: num=1, den > tbNew.den, and tbNew.den divides den evenly
        if (frame.timeBase.num === 1 && frame.timeBase.den > tbNew.den && frame.timeBase.den % tbNew.den === 0) {
            tbNew = { num: frame.timeBase.num, den: frame.timeBase.den };
        }
        // Rescale existing timestamps to new timebase
        if (this.lastFramePts !== AV_NOPTS_VALUE) {
            this.lastFramePts = avRescaleQ(this.lastFramePts, this.lastFrameTb, tbNew);
        }
        this.lastFrameDurationEst = avRescaleQ(this.lastFrameDurationEst, this.lastFrameTb, tbNew);
        // Commit the new timebase and remember the new sample rate
        this.lastFrameTb = new Rational(tbNew.num, tbNew.den);
        this.lastFrameSampleRate = frame.sampleRate;
        return this.lastFrameTb;
    }
|
|
1697
|
+
    /**
     * Audio timestamp processing - handles audio frame timestamps.
     *
     * Based on FFmpeg's audio_ts_process().
     *
     * Processes audio frame timestamps with:
     * - Sample rate change handling via audioSamplerateUpdate()
     * - PTS extrapolation when missing (pts_pred)
     * - Gap detection (resets av_rescale_delta state)
     * - Smooth timestamp conversion via av_rescale_delta
     * - Duration calculation from nb_samples
     * - Conversion to filtering timebase {1, sample_rate}
     *
     * Handles:
     * - Dynamic sample rate changes
     * - Missing timestamps (AV_NOPTS_VALUE)
     * - Timestamp gaps/discontinuities
     * - Sample-accurate timestamp generation
     * - Frame duration calculation
     *
     * Mutates frame.pts, frame.duration, and frame.timeBase in place.
     *
     * @param frame - Decoded audio frame to process (mutated in place)
     *
     * @internal
     */
    processAudioFrame(frame) {
        // Filtering timebase is always {1, sample_rate} for audio
        const tbFilter = { num: 1, den: frame.sampleRate };
        // Handle sample rate change - updates internal timebase
        const tb = this.audioSamplerateUpdate(frame);
        // Predict next PTS based on last frame + duration (0n for the first frame)
        const ptsPred = this.lastFramePts === AV_NOPTS_VALUE ? 0n : this.lastFramePts + this.lastFrameDurationEst;
        // No timestamp - use predicted value
        if (frame.pts === AV_NOPTS_VALUE) {
            frame.pts = ptsPred;
            frame.timeBase = new Rational(tb.num, tb.den);
        }
        else if (this.lastFramePts !== AV_NOPTS_VALUE) {
            // Detect timestamp gap - compare with predicted timestamp
            // (rounded up into the frame's own timebase)
            const ptsPredInFrameTb = avRescaleQRnd(ptsPred, tb, frame.timeBase, AV_ROUND_UP);
            if (frame.pts > ptsPredInFrameTb) {
                // Gap detected - reset rescale_delta state for smooth conversion
                this.lastFilterInRescaleDelta = AV_NOPTS_VALUE;
            }
        }
        // Smooth timestamp conversion with av_rescale_delta
        // This maintains fractional sample accuracy across timebase conversions
        // avRescaleDelta modifies lastRef in place (simulates C's &last_filter_in_rescale_delta)
        const lastRef = { value: this.lastFilterInRescaleDelta };
        frame.pts = avRescaleDelta(frame.timeBase, frame.pts, tb, frame.nbSamples, lastRef, tb);
        this.lastFilterInRescaleDelta = lastRef.value;
        // Update frame tracking (PTS in internal timebase, duration from sample count)
        this.lastFramePts = frame.pts;
        this.lastFrameDurationEst = avRescaleQ(BigInt(frame.nbSamples), tbFilter, tb);
        // Convert to filtering timebase: in {1, sample_rate} one sample == one tick
        frame.pts = avRescaleQ(frame.pts, tb, tbFilter);
        frame.duration = BigInt(frame.nbSamples);
        frame.timeBase = new Rational(tbFilter.num, tbFilter.den);
    }
|
|
1755
|
+
    /**
     * Dispose of decoder.
     *
     * Implements Disposable interface for automatic cleanup.
     * Equivalent to calling close().
     *
     * @example
     * ```typescript
     * {
     *   using decoder = await Decoder.create(stream);
     *   // Decode frames...
     * } // Automatically closed
     * ```
     *
     * @see {@link close} For manual cleanup
     */
    [Symbol.dispose]() {
        // Delegate to close(), which is guarded and safe to call repeatedly
        this.close();
    }
|
|
1774
|
+
}
|
|
1775
|
+
//# sourceMappingURL=decoder.js.map
|