torchcodec 0.8.0__cp313-cp313-macosx_12_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of torchcodec might be problematic. Click here for more details.
- torchcodec/.dylibs/libc++.1.0.dylib +0 -0
- torchcodec/.dylibs/libpython3.13.dylib +0 -0
- torchcodec/__init__.py +16 -0
- torchcodec/_core/AVIOContextHolder.cpp +60 -0
- torchcodec/_core/AVIOContextHolder.h +64 -0
- torchcodec/_core/AVIOFileLikeContext.cpp +98 -0
- torchcodec/_core/AVIOFileLikeContext.h +55 -0
- torchcodec/_core/AVIOTensorContext.cpp +123 -0
- torchcodec/_core/AVIOTensorContext.h +43 -0
- torchcodec/_core/BetaCudaDeviceInterface.cpp +636 -0
- torchcodec/_core/BetaCudaDeviceInterface.h +191 -0
- torchcodec/_core/CMakeLists.txt +325 -0
- torchcodec/_core/CUDACommon.cpp +315 -0
- torchcodec/_core/CUDACommon.h +46 -0
- torchcodec/_core/Cache.h +138 -0
- torchcodec/_core/CpuDeviceInterface.cpp +347 -0
- torchcodec/_core/CpuDeviceInterface.h +132 -0
- torchcodec/_core/CudaDeviceInterface.cpp +357 -0
- torchcodec/_core/CudaDeviceInterface.h +64 -0
- torchcodec/_core/DeviceInterface.cpp +117 -0
- torchcodec/_core/DeviceInterface.h +148 -0
- torchcodec/_core/Encoder.cpp +807 -0
- torchcodec/_core/Encoder.h +173 -0
- torchcodec/_core/FFMPEGCommon.cpp +608 -0
- torchcodec/_core/FFMPEGCommon.h +245 -0
- torchcodec/_core/FilterGraph.cpp +149 -0
- torchcodec/_core/FilterGraph.h +59 -0
- torchcodec/_core/Frame.cpp +42 -0
- torchcodec/_core/Frame.h +72 -0
- torchcodec/_core/Metadata.h +72 -0
- torchcodec/_core/NVDECCache.cpp +70 -0
- torchcodec/_core/NVDECCache.h +104 -0
- torchcodec/_core/SingleStreamDecoder.cpp +1719 -0
- torchcodec/_core/SingleStreamDecoder.h +405 -0
- torchcodec/_core/StreamOptions.h +63 -0
- torchcodec/_core/Transform.cpp +60 -0
- torchcodec/_core/Transform.h +59 -0
- torchcodec/_core/ValidationUtils.cpp +35 -0
- torchcodec/_core/ValidationUtils.h +21 -0
- torchcodec/_core/__init__.py +41 -0
- torchcodec/_core/_metadata.py +317 -0
- torchcodec/_core/custom_ops.cpp +875 -0
- torchcodec/_core/fetch_and_expose_non_gpl_ffmpeg_libs.cmake +360 -0
- torchcodec/_core/nvcuvid_include/cuviddec.h +1374 -0
- torchcodec/_core/nvcuvid_include/nvcuvid.h +610 -0
- torchcodec/_core/ops.py +498 -0
- torchcodec/_core/pybind_ops.cpp +50 -0
- torchcodec/_frame.py +145 -0
- torchcodec/_internally_replaced_utils.py +67 -0
- torchcodec/_samplers/__init__.py +7 -0
- torchcodec/_samplers/video_clip_sampler.py +418 -0
- torchcodec/decoders/__init__.py +12 -0
- torchcodec/decoders/_audio_decoder.py +177 -0
- torchcodec/decoders/_decoder_utils.py +112 -0
- torchcodec/decoders/_video_decoder.py +500 -0
- torchcodec/encoders/__init__.py +1 -0
- torchcodec/encoders/_audio_encoder.py +150 -0
- torchcodec/libtorchcodec_core4.dylib +0 -0
- torchcodec/libtorchcodec_core5.dylib +0 -0
- torchcodec/libtorchcodec_core6.dylib +0 -0
- torchcodec/libtorchcodec_core7.dylib +0 -0
- torchcodec/libtorchcodec_core8.dylib +0 -0
- torchcodec/libtorchcodec_custom_ops4.dylib +0 -0
- torchcodec/libtorchcodec_custom_ops5.dylib +0 -0
- torchcodec/libtorchcodec_custom_ops6.dylib +0 -0
- torchcodec/libtorchcodec_custom_ops7.dylib +0 -0
- torchcodec/libtorchcodec_custom_ops8.dylib +0 -0
- torchcodec/libtorchcodec_pybind_ops4.so +0 -0
- torchcodec/libtorchcodec_pybind_ops5.so +0 -0
- torchcodec/libtorchcodec_pybind_ops6.so +0 -0
- torchcodec/libtorchcodec_pybind_ops7.so +0 -0
- torchcodec/libtorchcodec_pybind_ops8.so +0 -0
- torchcodec/samplers/__init__.py +2 -0
- torchcodec/samplers/_common.py +84 -0
- torchcodec/samplers/_index_based.py +287 -0
- torchcodec/samplers/_time_based.py +358 -0
- torchcodec/version.py +2 -0
- torchcodec-0.8.0.dist-info/METADATA +253 -0
- torchcodec-0.8.0.dist-info/RECORD +82 -0
- torchcodec-0.8.0.dist-info/WHEEL +5 -0
- torchcodec-0.8.0.dist-info/licenses/LICENSE +28 -0
- torchcodec-0.8.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,807 @@
|
|
|
1
|
+
#include <sstream>
|
|
2
|
+
|
|
3
|
+
#include "src/torchcodec/_core/AVIOTensorContext.h"
|
|
4
|
+
#include "src/torchcodec/_core/Encoder.h"
|
|
5
|
+
#include "torch/types.h"
|
|
6
|
+
|
|
7
|
+
extern "C" {
|
|
8
|
+
#include <libavutil/pixdesc.h>
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
namespace facebook::torchcodec {
|
|
12
|
+
|
|
13
|
+
namespace {
|
|
14
|
+
|
|
15
|
+
// Validates the input samples tensor: it must be float32 and 2D
// (numChannels, numSamples), with a channel count an AVFrame can hold.
// Returns a contiguous view/copy of the tensor.
torch::Tensor validateSamples(const torch::Tensor& samples) {
  const auto dtype = samples.dtype();
  TORCH_CHECK(
      dtype == torch::kFloat32,
      "samples must have float32 dtype, got ",
      dtype);
  const auto numDims = samples.dim();
  TORCH_CHECK(
      numDims == 2,
      "samples must have 2 dimensions, got ",
      numDims);

  // We enforce this, but if we get user reports we should investigate whether
  // that's actually needed.
  const int numChannels = static_cast<int>(samples.sizes()[0]);
  TORCH_CHECK(
      numChannels <= AV_NUM_DATA_POINTERS,
      "Trying to encode ",
      numChannels,
      " channels, but FFmpeg only supports ",
      AV_NUM_DATA_POINTERS,
      " channels per frame.");

  return samples.contiguous();
}
|
|
38
|
+
|
|
39
|
+
void validateSampleRate(const AVCodec& avCodec, int sampleRate) {
|
|
40
|
+
const int* supportedSampleRates = getSupportedSampleRates(avCodec);
|
|
41
|
+
if (supportedSampleRates == nullptr) {
|
|
42
|
+
return;
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
for (auto i = 0; supportedSampleRates[i] != 0; ++i) {
|
|
46
|
+
if (sampleRate == supportedSampleRates[i]) {
|
|
47
|
+
return;
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
std::stringstream supportedRates;
|
|
51
|
+
for (auto i = 0; supportedSampleRates[i] != 0; ++i) {
|
|
52
|
+
if (i > 0) {
|
|
53
|
+
supportedRates << ", ";
|
|
54
|
+
}
|
|
55
|
+
supportedRates << supportedSampleRates[i];
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
TORCH_CHECK(
|
|
59
|
+
false,
|
|
60
|
+
"invalid sample rate=",
|
|
61
|
+
sampleRate,
|
|
62
|
+
". Supported sample rate values are: ",
|
|
63
|
+
supportedRates.str());
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
// Output sample formats, most preferred first. FLT[P] comes first because it
// matches the input samples' format; the rest are ordered by decreasing
// sample resolution (see findBestOutputSampleFormat below, which scans this
// list in order).
static const std::vector<AVSampleFormat> preferredFormatsOrder = {
    AV_SAMPLE_FMT_FLTP,
    AV_SAMPLE_FMT_FLT,
    AV_SAMPLE_FMT_DBLP,
    AV_SAMPLE_FMT_DBL,
    AV_SAMPLE_FMT_S64P,
    AV_SAMPLE_FMT_S64,
    AV_SAMPLE_FMT_S32P,
    AV_SAMPLE_FMT_S32,
    AV_SAMPLE_FMT_S16P,
    AV_SAMPLE_FMT_S16,
    AV_SAMPLE_FMT_U8P,
    AV_SAMPLE_FMT_U8};
|
|
79
|
+
|
|
80
|
+
// Picks the sample format the encoder should output. We prefer using FLT[P],
// since this is the format of the input samples. If FLTP isn't supported then
// we'll need to convert the AVFrame's format; our heuristic is to encode into
// the format with the highest resolution (see preferredFormatsOrder).
AVSampleFormat findBestOutputSampleFormat(const AVCodec& avCodec) {
  const AVSampleFormat* supportedSampleFormats =
      getSupportedOutputSampleFormats(avCodec);

  if (supportedSampleFormats == nullptr) {
    // Can't really validate anything in this case, best we can do is hope that
    // FLTP is supported by the encoder. If not, FFmpeg will raise.
    return AV_SAMPLE_FMT_FLTP;
  }

  for (AVSampleFormat candidate : preferredFormatsOrder) {
    for (int i = 0; supportedSampleFormats[i] != -1; ++i) {
      if (supportedSampleFormats[i] == candidate) {
        return candidate;
      }
    }
  }

  // We should always find a match in preferredFormatsOrder, so we should always
  // return earlier. But in the event that a future FFmpeg version defines an
  // additional sample format that isn't in preferredFormatsOrder, we fallback:
  return supportedSampleFormats[0];
}
|
|
106
|
+
|
|
107
|
+
} // namespace
|
|
108
|
+
|
|
109
|
+
// Destructor: makes sure the AVIO resources are flushed and released even when
// encode() was never called, or failed partway through.
AudioEncoder::~AudioEncoder() {
  close_avio();
}
|
|
112
|
+
|
|
113
|
+
void AudioEncoder::close_avio() {
|
|
114
|
+
if (avFormatContext_ && avFormatContext_->pb) {
|
|
115
|
+
if (avFormatContext_->pb->error == 0) {
|
|
116
|
+
avio_flush(avFormatContext_->pb);
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
if (!avioContextHolder_) {
|
|
120
|
+
if (avFormatContext_->pb->error == 0) {
|
|
121
|
+
avio_close(avFormatContext_->pb);
|
|
122
|
+
}
|
|
123
|
+
// avoids closing again in destructor, which would segfault.
|
|
124
|
+
avFormatContext_->pb = nullptr;
|
|
125
|
+
}
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
// Constructor for encoding to a file on disk. The container/format is deduced
// by FFmpeg from the file extension, and the AVIO context is opened for
// writing on that path.
AudioEncoder::AudioEncoder(
    const torch::Tensor& samples,
    int sampleRate,
    std::string_view fileName,
    const AudioStreamOptions& audioStreamOptions)
    : samples_(validateSamples(samples)), inSampleRate_(sampleRate) {
  setFFmpegLogLevel();
  AVFormatContext* avFormatContext = nullptr;
  // nullptr format/format_name: FFmpeg infers the muxer from fileName.
  int status = avformat_alloc_output_context2(
      &avFormatContext, nullptr, nullptr, fileName.data());

  TORCH_CHECK(
      avFormatContext != nullptr,
      "Couldn't allocate AVFormatContext. ",
      "The destination file is ",
      fileName,
      ", check the desired extension? ",
      getFFMPEGErrorStringFromErrorCode(status));
  avFormatContext_.reset(avFormatContext);

  status = avio_open(&avFormatContext_->pb, fileName.data(), AVIO_FLAG_WRITE);
  TORCH_CHECK(
      status >= 0,
      "avio_open failed. The destination file is ",
      fileName,
      ", make sure it's a valid path? ",
      getFFMPEGErrorStringFromErrorCode(status));

  initializeEncoder(audioStreamOptions);
}
|
|
159
|
+
|
|
160
|
+
// Constructor for encoding through a custom AVIO context (e.g. to a tensor or
// a file-like object). Here the format must be given explicitly since there is
// no file extension to infer it from, and the holder owns the AVIO context.
AudioEncoder::AudioEncoder(
    const torch::Tensor& samples,
    int sampleRate,
    std::string_view formatName,
    std::unique_ptr<AVIOContextHolder> avioContextHolder,
    const AudioStreamOptions& audioStreamOptions)
    : samples_(validateSamples(samples)),
      inSampleRate_(sampleRate),
      avioContextHolder_(std::move(avioContextHolder)) {
  setFFmpegLogLevel();
  AVFormatContext* avFormatContext = nullptr;
  int status = avformat_alloc_output_context2(
      &avFormatContext, nullptr, formatName.data(), nullptr);

  TORCH_CHECK(
      avFormatContext != nullptr,
      "Couldn't allocate AVFormatContext. ",
      "Check the desired format? Got format=",
      formatName,
      ". ",
      getFFMPEGErrorStringFromErrorCode(status));
  avFormatContext_.reset(avFormatContext);

  // The holder keeps ownership of the AVIO context; close_avio() knows not to
  // close pb in this case.
  avFormatContext_->pb = avioContextHolder_->getAVIOContext();

  initializeEncoder(audioStreamOptions);
}
|
|
187
|
+
|
|
188
|
+
// Sets up the codec context, output stream, and (when needed) the sample FIFO.
// Must be called after avFormatContext_ is allocated; reads the desired
// bit rate, channel count and sample rate from audioStreamOptions, defaulting
// to the input's properties.
void AudioEncoder::initializeEncoder(
    const AudioStreamOptions& audioStreamOptions) {
  // We use the AVFormatContext's default codec for that
  // specific format/container.
  const AVCodec* avCodec =
      avcodec_find_encoder(avFormatContext_->oformat->audio_codec);
  TORCH_CHECK(avCodec != nullptr, "Codec not found");

  AVCodecContext* avCodecContext = avcodec_alloc_context3(avCodec);
  TORCH_CHECK(avCodecContext != nullptr, "Couldn't allocate codec context.");
  avCodecContext_.reset(avCodecContext);

  auto desiredBitRate = audioStreamOptions.bitRate;
  if (desiredBitRate.has_value()) {
    TORCH_CHECK(
        *desiredBitRate >= 0, "bit_rate=", *desiredBitRate, " must be >= 0.");
  }
  // bit_rate=None defaults to 0, which is what the FFmpeg CLI seems to use as
  // well when "-b:a" isn't specified.
  avCodecContext_->bit_rate = desiredBitRate.value_or(0);

  outNumChannels_ = static_cast<int>(
      audioStreamOptions.numChannels.value_or(samples_.sizes()[0]));
  validateNumChannels(*avCodec, outNumChannels_);
  // The avCodecContext layout defines the layout of the encoded output, it's
  // not related to the input samples.
  setDefaultChannelLayout(avCodecContext_, outNumChannels_);

  outSampleRate_ = audioStreamOptions.sampleRate.value_or(inSampleRate_);
  validateSampleRate(*avCodec, outSampleRate_);
  avCodecContext_->sample_rate = outSampleRate_;

  // Input samples are expected to be FLTP. Not all encoders support FLTP, so we
  // may need to convert the samples into a supported output sample format,
  // which is what the `.sample_fmt` defines.
  avCodecContext_->sample_fmt = findBestOutputSampleFormat(*avCodec);

  int status = avcodec_open2(avCodecContext_.get(), avCodec, nullptr);
  TORCH_CHECK(
      status == AVSUCCESS,
      "avcodec_open2 failed: ",
      getFFMPEGErrorStringFromErrorCode(status));

  // We're allocating the stream here. Streams are meant to be freed by
  // avformat_free_context(avFormatContext), which we call in the
  // avFormatContext_'s destructor.
  AVStream* avStream = avformat_new_stream(avFormatContext_.get(), nullptr);
  TORCH_CHECK(avStream != nullptr, "Couldn't create new stream.");
  status = avcodec_parameters_from_context(
      avStream->codecpar, avCodecContext_.get());
  TORCH_CHECK(
      status == AVSUCCESS,
      "avcodec_parameters_from_context failed: ",
      getFFMPEGErrorStringFromErrorCode(status));
  streamIndex_ = avStream->index;

  // If sample rate conversion is needed and the encoder doesn't support
  // variable frame size, we need to create an intermediate FIFO. See
  // [Encoding loop, sample rate conversion and FIFO].
  if (((avCodec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE) == 0) &&
      (inSampleRate_ != outSampleRate_)) {
    // frame_size * 2 is a decent default size. FFmpeg automatically
    // re-allocates the fifo if more space is needed.
    auto avAudioFifo = av_audio_fifo_alloc(
        avCodecContext_->sample_fmt,
        outNumChannels_,
        avCodecContext_->frame_size * 2);
    TORCH_CHECK(avAudioFifo != nullptr, "Couldn't create AVAudioFifo.");
    avAudioFifo_.reset(avAudioFifo);
  }
}
|
|
259
|
+
|
|
260
|
+
// Runs the full encoding pipeline and returns the encoded bytes as a tensor.
// Only valid when the encoder was constructed with an AVIOToTensorContext.
torch::Tensor AudioEncoder::encodeToTensor() {
  TORCH_CHECK(
      avioContextHolder_ != nullptr,
      "Cannot encode to tensor, avio tensor context doesn't exist.");
  encode();
  auto* tensorContext =
      dynamic_cast<AVIOToTensorContext*>(avioContextHolder_.get());
  TORCH_CHECK(tensorContext != nullptr, "Invalid AVIO context holder.");
  return tensorContext->getOutputTensor();
}
|
|
270
|
+
|
|
271
|
+
// Main encoding loop: writes the header, slices samples_ into frame-sized
// chunks, converts/encodes each chunk, flushes all buffers, writes the
// trailer, and closes the AVIO context.
void AudioEncoder::encode() {
  // To be on the safe side we enforce that encode() can only be called once on
  // an encoder object. Whether this is actually necessary is unknown, so this
  // may be relaxed if needed.
  TORCH_CHECK(!encodeWasCalled_, "Cannot call encode() twice.");
  encodeWasCalled_ = true;

  // Default to 256 like in torchaudio
  int numSamplesAllocatedPerFrame =
      avCodecContext_->frame_size > 0 ? avCodecContext_->frame_size : 256;
  // The AVFrame is allocated once and re-filled on every loop iteration.
  UniqueAVFrame avFrame = allocateAVFrame(
      numSamplesAllocatedPerFrame,
      inSampleRate_,
      static_cast<int>(samples_.sizes()[0]),
      AV_SAMPLE_FMT_FLTP);
  avFrame->pts = 0;

  AutoAVPacket autoAVPacket;

  // samples_ is contiguous (validateSamples), so each channel is a contiguous
  // run of numBytesPerChannel bytes.
  uint8_t* psamples = static_cast<uint8_t*>(samples_.data_ptr());
  int numSamples = static_cast<int>(samples_.sizes()[1]); // per channel
  int numEncodedSamples = 0; // per channel
  int numBytesPerSample = static_cast<int>(samples_.element_size());
  int numBytesPerChannel = numSamples * numBytesPerSample;

  auto status = avformat_write_header(avFormatContext_.get(), nullptr);
  TORCH_CHECK(
      status == AVSUCCESS,
      "Error in avformat_write_header: ",
      getFFMPEGErrorStringFromErrorCode(status));

  while (numEncodedSamples < numSamples) {
    int numSamplesToEncode =
        std::min(numSamplesAllocatedPerFrame, numSamples - numEncodedSamples);
    int numBytesToEncode = numSamplesToEncode * numBytesPerSample;

    // Copy the current chunk of each channel into the AVFrame's planar data.
    for (int ch = 0; ch < samples_.sizes()[0]; ch++) {
      std::memcpy(
          avFrame->data[ch],
          psamples + ch * numBytesPerChannel,
          numBytesToEncode);
    }
    psamples += numBytesToEncode;

    // Above, we set the AVFrame's .nb_samples to AVCodecContext.frame_size so
    // that the frame buffers are allocated to a big enough size. Here, we reset
    // it to the exact number of samples that need to be encoded, otherwise the
    // encoded frame would contain more samples than necessary and our results
    // wouldn't match the ffmpeg CLI.
    avFrame->nb_samples = numSamplesToEncode;

    UniqueAVFrame convertedAVFrame = maybeConvertAVFrame(avFrame);
    encodeFrameThroughFifo(autoAVPacket, convertedAVFrame);

    numEncodedSamples += numSamplesToEncode;
  }
  TORCH_CHECK(numEncodedSamples == numSamples, "Hmmmmmm something went wrong.");

  flushBuffers();

  status = av_write_trailer(avFormatContext_.get());
  TORCH_CHECK(
      status == AVSUCCESS,
      "Error in: av_write_trailer",
      getFFMPEGErrorStringFromErrorCode(status));

  close_avio();
}
|
|
339
|
+
|
|
340
|
+
// Converts avFrame to the encoder's output sample format / sample rate /
// channel count when any of those differ from the input, lazily creating the
// swr context on first use. When no conversion is needed, returns a cheap
// clone that references the same underlying data.
UniqueAVFrame AudioEncoder::maybeConvertAVFrame(const UniqueAVFrame& avFrame) {
  if (static_cast<AVSampleFormat>(avFrame->format) ==
          avCodecContext_->sample_fmt &&
      getNumChannels(avFrame) == outNumChannels_ &&
      avFrame->sample_rate == outSampleRate_) {
    // Note: the clone references the same underlying data, it's a cheap copy.
    return UniqueAVFrame(av_frame_clone(avFrame.get()));
  }

  if (!swrContext_) {
    swrContext_.reset(createSwrContext(
        static_cast<AVSampleFormat>(avFrame->format),
        avCodecContext_->sample_fmt,
        avFrame->sample_rate,
        outSampleRate_,
        avFrame,
        outNumChannels_));
  }
  UniqueAVFrame convertedAVFrame = convertAudioAVFrameSamples(
      swrContext_,
      avFrame,
      avCodecContext_->sample_fmt,
      outSampleRate_,
      outNumChannels_);

  // Without rate conversion, swr must output exactly as many samples as it
  // was given; anything else indicates a bug.
  if (avFrame->sample_rate == outSampleRate_) {
    TORCH_CHECK(
        convertedAVFrame->nb_samples == avFrame->nb_samples,
        "convertedAVFrame->nb_samples=",
        convertedAVFrame->nb_samples,
        " differs from ",
        "avFrame->nb_samples=",
        avFrame->nb_samples,
        // Fixed: previous message was missing the ". " separator, producing
        // e.g. "...nb_samples=4This is unexpected...".
        ". This is unexpected, please report on the TorchCodec bug tracker.");
  }
  return convertedAVFrame;
}
|
|
377
|
+
|
|
378
|
+
// Sends avFrame to the encoder, going through the intermediate FIFO when one
// exists (i.e. when sample rate conversion is combined with a fixed-frame-size
// encoder). Without a FIFO this is a plain passthrough to encodeFrame().
void AudioEncoder::encodeFrameThroughFifo(
    AutoAVPacket& autoAVPacket,
    const UniqueAVFrame& avFrame,
    // flushFifo is only set to true in maybeFlushSwrBuffers(), i.e. at the very
    // end of the encoding process when we're flushing buffers. We also want to
    // flush the FIFO so as to not leave any remaining samples in it.
    bool flushFifo) {
  if (avAudioFifo_ == nullptr) {
    encodeFrame(autoAVPacket, avFrame);
    return;
  }
  int numSamplesWritten = av_audio_fifo_write(
      avAudioFifo_.get(),
      reinterpret_cast<void**>(avFrame->data),
      avFrame->nb_samples);
  TORCH_CHECK(
      numSamplesWritten == avFrame->nb_samples,
      "Tried to write ",
      avFrame->nb_samples,
      " samples, but only wrote ",
      numSamplesWritten);

  // Frame used to pull `frame_size`-sized chunks back out of the FIFO.
  UniqueAVFrame newavFrame = allocateAVFrame(
      avCodecContext_->frame_size,
      outSampleRate_,
      outNumChannels_,
      avCodecContext_->sample_fmt);

  // Explaining the while bound:
  // - if we're not flushing the FIFO, i.e. in most cases, we want to pull
  // exactly `frame_size` samples from the FIFO, so we have to stop before it
  // contains less than `frame_size` samples.
  // - if we're flushing the FIFO, we want to read from the FIFO until the very
  // last sample it contains.
  //
  // In both cases, for as long as we can, we're trying to pull exactly
  // `frame_size` samples from the FIFO and send each `frame_size`-sized avFrame
  // to encodeFrame(). Only the very last avFrame of the encoding process is
  // allowed to contain less than frame_size samples. That only happens when
  // flushFifo is true.
  while (av_audio_fifo_size(avAudioFifo_.get()) >=
         (flushFifo ? 1 : avCodecContext_->frame_size)) {
    int samplesToRead = std::min(
        av_audio_fifo_size(avAudioFifo_.get()), newavFrame->nb_samples);
    int numSamplesRead = av_audio_fifo_read(
        avAudioFifo_.get(),
        reinterpret_cast<void**>(newavFrame->data),
        samplesToRead);
    TORCH_CHECK(
        numSamplesRead == samplesToRead,
        "Tried to read ",
        samplesToRead,
        " samples, but only read ",
        numSamplesRead);

    newavFrame->nb_samples = numSamplesRead;
    encodeFrame(autoAVPacket, newavFrame);
  }
}
|
|
437
|
+
|
|
438
|
+
// Sends one frame to the encoder and drains all packets it produces, writing
// them to the muxer. Passing a null avFrame enters FFmpeg's drain mode, which
// flushes the encoder's internal buffers (see flushBuffers()).
void AudioEncoder::encodeFrame(
    AutoAVPacket& autoAVPacket,
    const UniqueAVFrame& avFrame) {
  if (avFrame != nullptr) {
    // pts is expressed in samples: each frame starts right after the previous
    // one ended.
    avFrame->pts = lastEncodedAVFramePts_;
    lastEncodedAVFramePts_ += avFrame->nb_samples;
  }

  auto status = avcodec_send_frame(avCodecContext_.get(), avFrame.get());
  TORCH_CHECK(
      status == AVSUCCESS,
      "Error while sending frame: ",
      getFFMPEGErrorStringFromErrorCode(status));

  while (status >= 0) {
    ReferenceAVPacket packet(autoAVPacket);
    status = avcodec_receive_packet(avCodecContext_.get(), packet.get());
    if (status == AVERROR(EAGAIN) || status == AVERROR_EOF) {
      if (status == AVERROR_EOF) {
        // Flush the packets that were potentially buffered by
        // av_interleaved_write_frame(). See corresponding block in
        // TorchAudio:
        // https://github.com/pytorch/audio/blob/d60ce09e2c532d5bf2e05619e700ab520543465e/src/libtorio/ffmpeg/stream_writer/encoder.cpp#L21
        status = av_interleaved_write_frame(avFormatContext_.get(), nullptr);
        TORCH_CHECK(
            status == AVSUCCESS,
            "Failed to flush packet: ",
            getFFMPEGErrorStringFromErrorCode(status));
      }
      return;
    }
    TORCH_CHECK(
        status >= 0,
        "Error receiving packet: ",
        getFFMPEGErrorStringFromErrorCode(status));

    packet->stream_index = streamIndex_;

    status = av_interleaved_write_frame(avFormatContext_.get(), packet.get());
    TORCH_CHECK(
        status == AVSUCCESS,
        "Error in av_interleaved_write_frame: ",
        getFFMPEGErrorStringFromErrorCode(status));
  }
}
|
|
483
|
+
|
|
484
|
+
// Similar to the decoder's method with the same name, but for encoding this
// time. That is, when sample conversion is involved, libswresample may have
// buffered some samples that we now need to flush and send to the encoder.
void AudioEncoder::maybeFlushSwrBuffers(AutoAVPacket& autoAVPacket) {
  if (swrContext_ == nullptr && inSampleRate_ == outSampleRate_) {
    return;
  }
  TORCH_CHECK(
      swrContext_ != nullptr,
      "swrContext is null, but sample rate conversion is needed. ",
      "This is unexpected, please report on the TorchCodec bug tracker.");

  int numRemainingSamples = // this is an upper bound
      swr_get_out_samples(swrContext_.get(), 0);
  // swr_get_out_samples() returns a negative AVERROR code on failure; the
  // previous code would have silently passed that negative value to
  // allocateAVFrame().
  TORCH_CHECK(
      numRemainingSamples >= 0,
      "swr_get_out_samples failed: ",
      getFFMPEGErrorStringFromErrorCode(numRemainingSamples));
  if (numRemainingSamples == 0) {
    return;
  }

  UniqueAVFrame avFrame = allocateAVFrame(
      numRemainingSamples,
      outSampleRate_,
      outNumChannels_,
      avCodecContext_->sample_fmt);
  int actualNumRemainingSamples = swr_convert(
      swrContext_.get(), avFrame->data, avFrame->nb_samples, nullptr, 0);
  // swr_convert() also returns a negative AVERROR code on failure; don't let
  // an error code end up in nb_samples.
  TORCH_CHECK(
      actualNumRemainingSamples >= 0,
      "swr_convert failed: ",
      getFFMPEGErrorStringFromErrorCode(actualNumRemainingSamples));
  avFrame->nb_samples = actualNumRemainingSamples;

  // We're potentially sending avFrame through the FIFO (if it exists), in which
  // case we also want to flush the FIFO itself.
  encodeFrameThroughFifo(autoAVPacket, avFrame, /*flushFifo=*/true);
}
|
|
515
|
+
|
|
516
|
+
// End-of-stream flush: drains libswresample's buffered samples first, then
// sends a null frame to the encoder to drain its internal buffers.
void AudioEncoder::flushBuffers() {
  AutoAVPacket autoAVPacket;
  maybeFlushSwrBuffers(autoAVPacket);
  encodeFrame(autoAVPacket, UniqueAVFrame(nullptr));
}
|
|
522
|
+
|
|
523
|
+
namespace {
|
|
524
|
+
|
|
525
|
+
// Validates the input frames tensor: it must be uint8 and 4D (N, C, H, W)
// with exactly 3 channels. Returns a contiguous view/copy of the tensor.
torch::Tensor validateFrames(const torch::Tensor& frames) {
  const auto dtype = frames.dtype();
  TORCH_CHECK(
      dtype == torch::kUInt8,
      "frames must have uint8 dtype, got ",
      dtype);
  const auto numDims = frames.dim();
  TORCH_CHECK(
      numDims == 4,
      "frames must have 4 dimensions (N, C, H, W), got ",
      numDims);
  TORCH_CHECK(
      frames.sizes()[1] == 3,
      "frame must have 3 channels (R, G, B), got ",
      frames.sizes()[1]);
  // TODO-VideoEncoder: Investigate if non-contiguous frames can be accepted
  return frames.contiguous();
}
|
|
541
|
+
|
|
542
|
+
} // namespace
|
|
543
|
+
|
|
544
|
+
// Destructor: flushes and closes the file-based AVIO context if it is still
// open, then nulls pb so avformat teardown doesn't touch it again.
// NOTE(review): unlike AudioEncoder::close_avio(), this path doesn't check
// pb->error before flushing/closing — confirm whether that's intentional.
VideoEncoder::~VideoEncoder() {
  if (avFormatContext_ && avFormatContext_->pb) {
    avio_flush(avFormatContext_->pb);
    avio_close(avFormatContext_->pb);
    avFormatContext_->pb = nullptr;
  }
}
|
|
551
|
+
|
|
552
|
+
// Constructor for encoding video to a file on disk. The container/format is
// deduced by FFmpeg from the file extension, and the AVIO context is opened
// for writing on that path.
VideoEncoder::VideoEncoder(
    const torch::Tensor& frames,
    int frameRate,
    std::string_view fileName,
    const VideoStreamOptions& videoStreamOptions)
    : frames_(validateFrames(frames)), inFrameRate_(frameRate) {
  setFFmpegLogLevel();

  // Allocate output format context
  AVFormatContext* avFormatContext = nullptr;
  int status = avformat_alloc_output_context2(
      &avFormatContext, nullptr, nullptr, fileName.data());

  TORCH_CHECK(
      avFormatContext != nullptr,
      "Couldn't allocate AVFormatContext. ",
      "The destination file is ",
      fileName,
      ", check the desired extension? ",
      getFFMPEGErrorStringFromErrorCode(status));
  avFormatContext_.reset(avFormatContext);

  status = avio_open(&avFormatContext_->pb, fileName.data(), AVIO_FLAG_WRITE);
  TORCH_CHECK(
      status >= 0,
      "avio_open failed. The destination file is ",
      fileName,
      ", make sure it's a valid path? ",
      getFFMPEGErrorStringFromErrorCode(status));
  initializeEncoder(videoStreamOptions);
}
|
|
583
|
+
|
|
584
|
+
// Sets up the video codec context and output stream: picks the container's
// default encoder, chooses the output pixel format closest to our GBRP input,
// applies videoStreamOptions (currently CRF), opens the codec, and creates
// the stream.
void VideoEncoder::initializeEncoder(
    const VideoStreamOptions& videoStreamOptions) {
  const AVCodec* avCodec =
      avcodec_find_encoder(avFormatContext_->oformat->video_codec);
  TORCH_CHECK(avCodec != nullptr, "Video codec not found");

  AVCodecContext* avCodecContext = avcodec_alloc_context3(avCodec);
  TORCH_CHECK(avCodecContext != nullptr, "Couldn't allocate codec context.");
  avCodecContext_.reset(avCodecContext);

  // Store dimension order and input pixel format
  // TODO-VideoEncoder: Remove assumption that tensor in NCHW format
  auto sizes = frames_.sizes();
  inPixelFormat_ = AV_PIX_FMT_GBRP;
  inHeight_ = static_cast<int>(sizes[2]);
  inWidth_ = static_cast<int>(sizes[3]);

  // Use specified dimensions or input dimensions
  // TODO-VideoEncoder: Allow height and width to be set
  outWidth_ = inWidth_;
  outHeight_ = inHeight_;

  // TODO-VideoEncoder: Enable other pixel formats
  // Let FFmpeg choose best pixel format to minimize loss
  outPixelFormat_ = avcodec_find_best_pix_fmt_of_list(
      getSupportedPixelFormats(*avCodec), // List of supported formats
      AV_PIX_FMT_GBRP, // We reorder input to GBRP currently
      0, // No alpha channel
      nullptr // Discard conversion loss information
  );
  TORCH_CHECK(outPixelFormat_ != -1, "Failed to find best pix fmt")

  // Configure codec parameters
  avCodecContext_->codec_id = avCodec->id;
  avCodecContext_->width = outWidth_;
  avCodecContext_->height = outHeight_;
  avCodecContext_->pix_fmt = outPixelFormat_;
  // TODO-VideoEncoder: Verify that frame_rate and time_base are correct
  avCodecContext_->time_base = {1, inFrameRate_};
  avCodecContext_->framerate = {inFrameRate_, 1};

  // Set flag for containers that require extradata to be in the codec context
  if (avFormatContext_->oformat->flags & AVFMT_GLOBALHEADER) {
    avCodecContext_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  }

  // Apply videoStreamOptions
  AVDictionary* options = nullptr;
  if (videoStreamOptions.crf.has_value()) {
    av_dict_set(
        &options,
        "crf",
        std::to_string(videoStreamOptions.crf.value()).c_str(),
        0);
  }
  int status = avcodec_open2(avCodecContext_.get(), avCodec, &options);
  av_dict_free(&options);

  TORCH_CHECK(
      status == AVSUCCESS,
      "avcodec_open2 failed: ",
      getFFMPEGErrorStringFromErrorCode(status));

  avStream_ = avformat_new_stream(avFormatContext_.get(), nullptr);
  TORCH_CHECK(avStream_ != nullptr, "Couldn't create new stream.");

  // Set the stream time base to encode correct frame timestamps
  avStream_->time_base = avCodecContext_->time_base;
  status = avcodec_parameters_from_context(
      avStream_->codecpar, avCodecContext_.get());
  TORCH_CHECK(
      status == AVSUCCESS,
      "avcodec_parameters_from_context failed: ",
      getFFMPEGErrorStringFromErrorCode(status));
}
|
|
659
|
+
|
|
660
|
+
void VideoEncoder::encode() {
|
|
661
|
+
// To be on the safe side we enforce that encode() can only be called once
|
|
662
|
+
TORCH_CHECK(!encodeWasCalled_, "Cannot call encode() twice.");
|
|
663
|
+
encodeWasCalled_ = true;
|
|
664
|
+
|
|
665
|
+
int status = avformat_write_header(avFormatContext_.get(), nullptr);
|
|
666
|
+
TORCH_CHECK(
|
|
667
|
+
status == AVSUCCESS,
|
|
668
|
+
"Error in avformat_write_header: ",
|
|
669
|
+
getFFMPEGErrorStringFromErrorCode(status));
|
|
670
|
+
|
|
671
|
+
AutoAVPacket autoAVPacket;
|
|
672
|
+
int numFrames = static_cast<int>(frames_.sizes()[0]);
|
|
673
|
+
for (int i = 0; i < numFrames; ++i) {
|
|
674
|
+
torch::Tensor currFrame = frames_[i];
|
|
675
|
+
UniqueAVFrame avFrame = convertTensorToAVFrame(currFrame, i);
|
|
676
|
+
encodeFrame(autoAVPacket, avFrame);
|
|
677
|
+
}
|
|
678
|
+
|
|
679
|
+
flushBuffers();
|
|
680
|
+
|
|
681
|
+
status = av_write_trailer(avFormatContext_.get());
|
|
682
|
+
TORCH_CHECK(
|
|
683
|
+
status == AVSUCCESS,
|
|
684
|
+
"Error in av_write_trailer: ",
|
|
685
|
+
getFFMPEGErrorStringFromErrorCode(status));
|
|
686
|
+
}
|
|
687
|
+
|
|
688
|
+
// Converts one planar-RGB uint8 tensor into an AVFrame in the encoder's
// output pixel format (outPixelFormat_), scaling/converting via swscale.
//
// frame:      input tensor; assumed shape (3, inHeight_, inWidth_) in planar
//             R,G,B order — TODO confirm against caller (see NHWC TODO below).
// frameIndex: used directly as the frame's pts, in the codec time base of
//             {1, inFrameRate_}.
// Returns an owned AVFrame holding the converted pixel data.
UniqueAVFrame VideoEncoder::convertTensorToAVFrame(
    const torch::Tensor& frame,
    int frameIndex) {
  // The raw pointer arithmetic below (plane offsets of inHeight_ * inWidth_
  // bytes, linesize of inWidth_ bytes) is only valid for a contiguous,
  // one-byte-per-sample tensor. Validate up front instead of silently
  // producing garbage frames for other layouts/dtypes.
  TORCH_CHECK(
      frame.is_contiguous(), "Input frame tensor must be contiguous.");
  TORCH_CHECK(
      frame.scalar_type() == torch::kUInt8,
      "Input frame tensor must be uint8.");

  // Initialize and cache scaling context if it does not exist. The input and
  // output geometry/formats are fixed for this encoder, so one context can
  // serve every frame.
  if (!swsContext_) {
    swsContext_.reset(sws_getContext(
        inWidth_,
        inHeight_,
        inPixelFormat_,
        outWidth_,
        outHeight_,
        outPixelFormat_,
        SWS_BICUBIC, // Used by FFmpeg CLI
        nullptr,
        nullptr,
        nullptr));
    TORCH_CHECK(swsContext_ != nullptr, "Failed to create scaling context");
  }

  UniqueAVFrame avFrame(av_frame_alloc());
  TORCH_CHECK(avFrame != nullptr, "Failed to allocate AVFrame");

  // Set output frame properties
  avFrame->format = outPixelFormat_;
  avFrame->width = outWidth_;
  avFrame->height = outHeight_;
  avFrame->pts = frameIndex;

  int status = av_frame_get_buffer(avFrame.get(), 0);
  TORCH_CHECK(status >= 0, "Failed to allocate frame buffer");

  // Need to convert/scale the frame.
  // Create a temporary frame describing the input layout. No buffer is
  // allocated for it: its data pointers alias the tensor's storage, which
  // stays alive for the duration of this call while sws_scale copies out.
  UniqueAVFrame inputFrame(av_frame_alloc());
  TORCH_CHECK(inputFrame != nullptr, "Failed to allocate input AVFrame");

  inputFrame->format = inPixelFormat_;
  inputFrame->width = inWidth_;
  inputFrame->height = inHeight_;

  uint8_t* tensorData = static_cast<uint8_t*>(frame.data_ptr());

  // TODO-VideoEncoder: Reorder tensor if in NHWC format
  int channelSize = inHeight_ * inWidth_;
  // Reorder RGB -> GBR for AV_PIX_FMT_GBRP format: plane 0 carries G,
  // plane 1 carries B, plane 2 carries R.
  // TODO-VideoEncoder: Determine if FFmpeg supports planar RGB input format
  inputFrame->data[0] = tensorData + channelSize;
  inputFrame->data[1] = tensorData + (2 * channelSize);
  inputFrame->data[2] = tensorData;

  // One byte per sample and no row padding in a contiguous uint8 tensor.
  inputFrame->linesize[0] = inWidth_;
  inputFrame->linesize[1] = inWidth_;
  inputFrame->linesize[2] = inWidth_;

  status = sws_scale(
      swsContext_.get(),
      inputFrame->data,
      inputFrame->linesize,
      0,
      inputFrame->height,
      avFrame->data,
      avFrame->linesize);
  // sws_scale returns the number of output rows written; anything short of
  // the full output height means the conversion failed.
  TORCH_CHECK(status == outHeight_, "sws_scale failed");
  return avFrame;
}
|
|
753
|
+
|
|
754
|
+
// Sends one frame to the encoder and writes every packet it produces to the
// muxer. Passing a null avFrame puts the encoder into drain mode (used by
// flushBuffers() to emit packets buffered at end of stream).
//
// autoAVPacket: reusable packet storage; wrapped in a ReferenceAVPacket per
//               iteration so each received packet is unreferenced on scope
//               exit.
// avFrame:      frame to encode, or null to flush.
// Raises (via TORCH_CHECK) on any FFmpeg error status.
void VideoEncoder::encodeFrame(
    AutoAVPacket& autoAVPacket,
    const UniqueAVFrame& avFrame) {
  auto status = avcodec_send_frame(avCodecContext_.get(), avFrame.get());
  TORCH_CHECK(
      status == AVSUCCESS,
      "Error while sending frame: ",
      getFFMPEGErrorStringFromErrorCode(status));

  // Standard FFmpeg send/receive loop: keep pulling packets until the
  // encoder asks for more input (EAGAIN) or reports end of stream (EOF).
  while (status >= 0) {
    ReferenceAVPacket packet(autoAVPacket);
    status = avcodec_receive_packet(avCodecContext_.get(), packet.get());
    if (status == AVERROR(EAGAIN) || status == AVERROR_EOF) {
      if (status == AVERROR_EOF) {
        // Flush remaining buffered packets: a null packet tells
        // av_interleaved_write_frame to drain the muxer's interleaving queue.
        status = av_interleaved_write_frame(avFormatContext_.get(), nullptr);
        TORCH_CHECK(
            status == AVSUCCESS,
            "Failed to flush packet: ",
            getFFMPEGErrorStringFromErrorCode(status));
      }
      return;
    }
    // Any other negative status is a real encoding error.
    TORCH_CHECK(
        status >= 0,
        "Error receiving packet: ",
        getFFMPEGErrorStringFromErrorCode(status));

    // The code below is borrowed from torchaudio:
    // https://github.com/pytorch/audio/blob/b6a3368a45aaafe05f1a6a9f10c68adc5e944d9e/src/libtorio/ffmpeg/stream_writer/encoder.cpp#L46
    // Setting packet->duration to 1 allows the last frame to be properly
    // encoded, and needs to be set before calling av_packet_rescale_ts.
    if (packet->duration == 0) {
      packet->duration = 1;
    }
    // Packet timestamps come out in the codec time base; rescale them to the
    // stream time base the muxer expects.
    av_packet_rescale_ts(
        packet.get(), avCodecContext_->time_base, avStream_->time_base);
    packet->stream_index = avStream_->index;

    status = av_interleaved_write_frame(avFormatContext_.get(), packet.get());
    TORCH_CHECK(
        status == AVSUCCESS,
        "Error in av_interleaved_write_frame: ",
        getFFMPEGErrorStringFromErrorCode(status));
  }
}
|
|
800
|
+
|
|
801
|
+
void VideoEncoder::flushBuffers() {
|
|
802
|
+
AutoAVPacket autoAVPacket;
|
|
803
|
+
// Send null frame to signal end of input
|
|
804
|
+
encodeFrame(autoAVPacket, UniqueAVFrame(nullptr));
|
|
805
|
+
}
|
|
806
|
+
|
|
807
|
+
} // namespace facebook::torchcodec
|