torchcodec-0.3.0-cp39-cp39-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of torchcodec has been flagged by the registry.
- torchcodec/.dylibs/libc++.1.0.dylib +0 -0
- torchcodec/.dylibs/libpython3.9.dylib +0 -0
- torchcodec/__init__.py +16 -0
- torchcodec/_core/AVIOBytesContext.cpp +70 -0
- torchcodec/_core/AVIOBytesContext.h +32 -0
- torchcodec/_core/AVIOContextHolder.cpp +50 -0
- torchcodec/_core/AVIOContextHolder.h +65 -0
- torchcodec/_core/AVIOFileLikeContext.cpp +80 -0
- torchcodec/_core/AVIOFileLikeContext.h +54 -0
- torchcodec/_core/CMakeLists.txt +237 -0
- torchcodec/_core/CudaDeviceInterface.cpp +289 -0
- torchcodec/_core/CudaDeviceInterface.h +34 -0
- torchcodec/_core/DeviceInterface.cpp +88 -0
- torchcodec/_core/DeviceInterface.h +66 -0
- torchcodec/_core/Encoder.cpp +319 -0
- torchcodec/_core/Encoder.h +39 -0
- torchcodec/_core/FFMPEGCommon.cpp +264 -0
- torchcodec/_core/FFMPEGCommon.h +180 -0
- torchcodec/_core/Frame.h +47 -0
- torchcodec/_core/Metadata.h +70 -0
- torchcodec/_core/SingleStreamDecoder.cpp +1947 -0
- torchcodec/_core/SingleStreamDecoder.h +462 -0
- torchcodec/_core/StreamOptions.h +49 -0
- torchcodec/_core/__init__.py +39 -0
- torchcodec/_core/_metadata.py +277 -0
- torchcodec/_core/custom_ops.cpp +681 -0
- torchcodec/_core/fetch_and_expose_non_gpl_ffmpeg_libs.cmake +226 -0
- torchcodec/_core/ops.py +381 -0
- torchcodec/_core/pybind_ops.cpp +45 -0
- torchcodec/_frame.py +145 -0
- torchcodec/_internally_replaced_utils.py +53 -0
- torchcodec/_samplers/__init__.py +7 -0
- torchcodec/_samplers/video_clip_sampler.py +430 -0
- torchcodec/decoders/__init__.py +11 -0
- torchcodec/decoders/_audio_decoder.py +168 -0
- torchcodec/decoders/_decoder_utils.py +52 -0
- torchcodec/decoders/_video_decoder.py +399 -0
- torchcodec/libtorchcodec_custom_ops4.dylib +0 -0
- torchcodec/libtorchcodec_custom_ops5.dylib +0 -0
- torchcodec/libtorchcodec_custom_ops6.dylib +0 -0
- torchcodec/libtorchcodec_custom_ops7.dylib +0 -0
- torchcodec/libtorchcodec_decoder4.dylib +0 -0
- torchcodec/libtorchcodec_decoder5.dylib +0 -0
- torchcodec/libtorchcodec_decoder6.dylib +0 -0
- torchcodec/libtorchcodec_decoder7.dylib +0 -0
- torchcodec/libtorchcodec_pybind_ops4.so +0 -0
- torchcodec/libtorchcodec_pybind_ops5.so +0 -0
- torchcodec/libtorchcodec_pybind_ops6.so +0 -0
- torchcodec/libtorchcodec_pybind_ops7.so +0 -0
- torchcodec/samplers/__init__.py +2 -0
- torchcodec/samplers/_common.py +84 -0
- torchcodec/samplers/_index_based.py +285 -0
- torchcodec/samplers/_time_based.py +348 -0
- torchcodec/version.py +2 -0
- torchcodec-0.3.0.dist-info/LICENSE +28 -0
- torchcodec-0.3.0.dist-info/METADATA +280 -0
- torchcodec-0.3.0.dist-info/RECORD +59 -0
- torchcodec-0.3.0.dist-info/WHEEL +5 -0
- torchcodec-0.3.0.dist-info/top_level.txt +2 -0
torchcodec/_core/CudaDeviceInterface.cpp
@@ -0,0 +1,289 @@
#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>
#include <npp.h>
#include <torch/types.h>
#include <mutex>

#include "src/torchcodec/_core/CudaDeviceInterface.h"
#include "src/torchcodec/_core/FFMPEGCommon.h"
#include "src/torchcodec/_core/SingleStreamDecoder.h"

extern "C" {
#include <libavutil/hwcontext_cuda.h>
#include <libavutil/pixdesc.h>
}

namespace facebook::torchcodec {
namespace {

bool g_cuda =
    registerDeviceInterface(torch::kCUDA, [](const torch::Device& device) {
      return new CudaDeviceInterface(device);
    });

// We reuse cuda contexts across SingleStreamDecoder instances. This is because
// creating a cuda context is expensive. The cache mechanism is as follows:
// 1. There is a cache of size MAX_CONTEXTS_PER_GPU_IN_CACHE cuda contexts for
//    each GPU.
// 2. When we destroy a SingleStreamDecoder instance we release the cuda
//    context to the cache if the cache is not full.
// 3. When we create a SingleStreamDecoder instance we try to get a cuda
//    context from the cache. If the cache is empty we create a new cuda
//    context.

// PyTorch can only handle up to 128 GPUs.
// https://github.com/pytorch/pytorch/blob/e30c55ee527b40d67555464b9e402b4b7ce03737/c10/cuda/CUDAMacros.h#L44
const int MAX_CUDA_GPUS = 128;
// Set to -1 to have an infinitely sized cache. Set it to 0 to disable caching.
// Set to a positive number to have a cache of that size.
const int MAX_CONTEXTS_PER_GPU_IN_CACHE = -1;
std::vector<AVBufferRef*> g_cached_hw_device_ctxs[MAX_CUDA_GPUS];
std::mutex g_cached_hw_device_mutexes[MAX_CUDA_GPUS];

torch::DeviceIndex getFFMPEGCompatibleDeviceIndex(const torch::Device& device) {
  torch::DeviceIndex deviceIndex = device.index();
  deviceIndex = std::max<at::DeviceIndex>(deviceIndex, 0);
  TORCH_CHECK(deviceIndex >= 0, "Device index out of range");
  // FFmpeg cannot handle negative device indices.
  // For single-GPU machines libtorch returns -1 for the device index, so in
  // that case we set the device index to 0.
  // TODO: Double-check that this works correctly for multi-GPU machines.
  return deviceIndex;
}

void addToCacheIfCacheHasCapacity(
    const torch::Device& device,
    AVBufferRef* hwContext) {
  torch::DeviceIndex deviceIndex = getFFMPEGCompatibleDeviceIndex(device);
  if (static_cast<int>(deviceIndex) >= MAX_CUDA_GPUS) {
    return;
  }
  std::scoped_lock lock(g_cached_hw_device_mutexes[deviceIndex]);
  if (MAX_CONTEXTS_PER_GPU_IN_CACHE >= 0 &&
      g_cached_hw_device_ctxs[deviceIndex].size() >=
          MAX_CONTEXTS_PER_GPU_IN_CACHE) {
    return;
  }
  g_cached_hw_device_ctxs[deviceIndex].push_back(av_buffer_ref(hwContext));
}

AVBufferRef* getFromCache(const torch::Device& device) {
  torch::DeviceIndex deviceIndex = getFFMPEGCompatibleDeviceIndex(device);
  if (static_cast<int>(deviceIndex) >= MAX_CUDA_GPUS) {
    return nullptr;
  }
  std::scoped_lock lock(g_cached_hw_device_mutexes[deviceIndex]);
  if (g_cached_hw_device_ctxs[deviceIndex].size() > 0) {
    AVBufferRef* hw_device_ctx = g_cached_hw_device_ctxs[deviceIndex].back();
    g_cached_hw_device_ctxs[deviceIndex].pop_back();
    return hw_device_ctx;
  }
  return nullptr;
}

#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 26, 100)

AVBufferRef* getFFMPEGContextFromExistingCudaContext(
    const torch::Device& device,
    torch::DeviceIndex nonNegativeDeviceIndex,
    enum AVHWDeviceType type) {
  c10::cuda::CUDAGuard deviceGuard(device);
  // Valid values for the argument to cudaSetDevice are 0 to maxDevices - 1:
  // https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html#group__CUDART__DEVICE_1g159587909ffa0791bbe4b40187a4c6bb
  // So we ensure the deviceIndex is not negative.
  // We set the device because we may be called from a different thread than
  // the one that initialized the cuda context.
  cudaSetDevice(nonNegativeDeviceIndex);
  AVBufferRef* hw_device_ctx = nullptr;
  std::string deviceOrdinal = std::to_string(nonNegativeDeviceIndex);
  int err = av_hwdevice_ctx_create(
      &hw_device_ctx,
      type,
      deviceOrdinal.c_str(),
      nullptr,
      AV_CUDA_USE_CURRENT_CONTEXT);
  if (err < 0) {
    /* clang-format off */
    TORCH_CHECK(
        false,
        "Failed to create specified HW device. This typically happens when ",
        "your installed FFmpeg doesn't support CUDA (see ",
        "https://github.com/pytorch/torchcodec#installing-cuda-enabled-torchcodec",
        "). FFmpeg error: ", getFFMPEGErrorStringFromErrorCode(err));
    /* clang-format on */
  }
  return hw_device_ctx;
}

#else

AVBufferRef* getFFMPEGContextFromNewCudaContext(
    [[maybe_unused]] const torch::Device& device,
    torch::DeviceIndex nonNegativeDeviceIndex,
    enum AVHWDeviceType type) {
  AVBufferRef* hw_device_ctx = nullptr;
  std::string deviceOrdinal = std::to_string(nonNegativeDeviceIndex);
  int err = av_hwdevice_ctx_create(
      &hw_device_ctx, type, deviceOrdinal.c_str(), nullptr, 0);
  if (err < 0) {
    TORCH_CHECK(
        false,
        "Failed to create specified HW device: ",
        getFFMPEGErrorStringFromErrorCode(err));
  }
  return hw_device_ctx;
}

#endif

AVBufferRef* getCudaContext(const torch::Device& device) {
  enum AVHWDeviceType type = av_hwdevice_find_type_by_name("cuda");
  TORCH_CHECK(type != AV_HWDEVICE_TYPE_NONE, "Failed to find cuda device");
  torch::DeviceIndex nonNegativeDeviceIndex =
      getFFMPEGCompatibleDeviceIndex(device);

  AVBufferRef* hw_device_ctx = getFromCache(device);
  if (hw_device_ctx != nullptr) {
    return hw_device_ctx;
  }

  // 58.26.100 introduced the concept of reusing the existing cuda context,
  // which is much faster and lower memory than creating a new cuda context,
  // so we use that when it is available.
  // FFmpeg 6.1.2 appears to be the earliest release that contains version
  // 58.26.100 of avutil.
  // https://github.com/FFmpeg/FFmpeg/blob/4acb9b7d1046944345ae506165fb55883d04d8a6/doc/APIchanges#L265
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(58, 26, 100)
  return getFFMPEGContextFromExistingCudaContext(
      device, nonNegativeDeviceIndex, type);
#else
  return getFFMPEGContextFromNewCudaContext(
      device, nonNegativeDeviceIndex, type);
#endif
}
} // namespace

CudaDeviceInterface::CudaDeviceInterface(const torch::Device& device)
    : DeviceInterface(device) {
  if (device_.type() != torch::kCUDA) {
    throw std::runtime_error("Unsupported device: " + device_.str());
  }
}

CudaDeviceInterface::~CudaDeviceInterface() {
  if (ctx_) {
    addToCacheIfCacheHasCapacity(device_, ctx_);
    av_buffer_unref(&ctx_);
  }
}

void CudaDeviceInterface::initializeContext(AVCodecContext* codecContext) {
  TORCH_CHECK(!ctx_, "FFmpeg HW device context already initialized");

  // It is important for PyTorch itself to create the cuda context. If FFmpeg
  // creates the context it may not be compatible with PyTorch.
  // This is a dummy tensor to initialize the cuda context.
  torch::Tensor dummyTensorForCudaInitialization = torch::empty(
      {1}, torch::TensorOptions().dtype(torch::kUInt8).device(device_));
  ctx_ = getCudaContext(device_);
  codecContext->hw_device_ctx = av_buffer_ref(ctx_);
  return;
}

void CudaDeviceInterface::convertAVFrameToFrameOutput(
    const VideoStreamOptions& videoStreamOptions,
    UniqueAVFrame& avFrame,
    FrameOutput& frameOutput,
    std::optional<torch::Tensor> preAllocatedOutputTensor) {
  TORCH_CHECK(
      avFrame->format == AV_PIX_FMT_CUDA,
      "Expected format to be AV_PIX_FMT_CUDA, got " +
          std::string(av_get_pix_fmt_name((AVPixelFormat)avFrame->format)));
  auto frameDims =
      getHeightAndWidthFromOptionsOrAVFrame(videoStreamOptions, avFrame);
  int height = frameDims.height;
  int width = frameDims.width;
  torch::Tensor& dst = frameOutput.data;
  if (preAllocatedOutputTensor.has_value()) {
    dst = preAllocatedOutputTensor.value();
    auto shape = dst.sizes();
    TORCH_CHECK(
        (shape.size() == 3) && (shape[0] == height) && (shape[1] == width) &&
            (shape[2] == 3),
        "Expected tensor of shape ",
        height,
        "x",
        width,
        "x3, got ",
        shape);
  } else {
    dst = allocateEmptyHWCTensor(height, width, device_);
  }

  // Use the user-requested GPU for running the NPP kernel.
  c10::cuda::CUDAGuard deviceGuard(device_);

  NppiSize oSizeROI = {width, height};
  Npp8u* input[2] = {avFrame->data[0], avFrame->data[1]};

  auto start = std::chrono::high_resolution_clock::now();
  NppStatus status;
  if (avFrame->colorspace == AVColorSpace::AVCOL_SPC_BT709) {
    status = nppiNV12ToRGB_709CSC_8u_P2C3R(
        input,
        avFrame->linesize[0],
        static_cast<Npp8u*>(dst.data_ptr()),
        dst.stride(0),
        oSizeROI);
  } else {
    status = nppiNV12ToRGB_8u_P2C3R(
        input,
        avFrame->linesize[0],
        static_cast<Npp8u*>(dst.data_ptr()),
        dst.stride(0),
        oSizeROI);
  }
  TORCH_CHECK(status == NPP_SUCCESS, "Failed to convert NV12 frame.");

  // Make the pytorch stream wait for the npp kernel to finish before using
  // the output.
  at::cuda::CUDAEvent nppDoneEvent;
  at::cuda::CUDAStream nppStreamWrapper =
      c10::cuda::getStreamFromExternal(nppGetStream(), device_.index());
  nppDoneEvent.record(nppStreamWrapper);
  nppDoneEvent.block(at::cuda::getCurrentCUDAStream());

  auto end = std::chrono::high_resolution_clock::now();

  std::chrono::duration<double, std::micro> duration = end - start;
  VLOG(9) << "NPP Conversion of frame height=" << height << " width=" << width
          << " took: " << duration.count() << "us" << std::endl;
}

// Inspired by https://github.com/FFmpeg/FFmpeg/commit/ad67ea9
// We have to do this because of an FFmpeg bug where hardware decoding is not
// appropriately set, so we just go off and find the matching codec for the
// CUDA device.
std::optional<const AVCodec*> CudaDeviceInterface::findCodec(
    const AVCodecID& codecId) {
  void* i = nullptr;
  const AVCodec* codec = nullptr;
  while ((codec = av_codec_iterate(&i)) != nullptr) {
    if (codec->id != codecId || !av_codec_is_decoder(codec)) {
      continue;
    }

    const AVCodecHWConfig* config = nullptr;
    for (int j = 0; (config = avcodec_get_hw_config(codec, j)) != nullptr;
         ++j) {
      if (config->device_type == AV_HWDEVICE_TYPE_CUDA) {
        return codec;
      }
    }
  }

  return std::nullopt;
}

} // namespace facebook::torchcodec
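To make the caching comments at the top of CudaDeviceInterface.cpp concrete, here is a minimal self-contained sketch of the same per-GPU cache pattern with the FFmpeg and CUDA specifics stubbed out. Handle, makeHandle, release, and acquire are illustrative stand-ins for AVBufferRef*, av_hwdevice_ctx_create, addToCacheIfCacheHasCapacity, and getFromCache; they are not part of torchcodec.

// Standalone sketch of the per-GPU context cache pattern above.
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

namespace {

constexpr int kMaxGpus = 128;       // mirrors MAX_CUDA_GPUS
constexpr int kMaxCachePerGpu = -1; // -1: unbounded, 0: disabled, >0: bounded

using Handle = std::shared_ptr<std::string>; // stand-in for AVBufferRef*

std::vector<Handle> g_cache[kMaxGpus];
std::mutex g_cacheMutexes[kMaxGpus];

Handle makeHandle(int deviceIndex) {
  // Stands in for the expensive context creation.
  return std::make_shared<std::string>("ctx:" + std::to_string(deviceIndex));
}

// Called on decoder destruction: return the handle to the cache if it fits.
void release(int deviceIndex, Handle h) {
  if (deviceIndex < 0 || deviceIndex >= kMaxGpus) {
    return;
  }
  std::scoped_lock lock(g_cacheMutexes[deviceIndex]);
  if (kMaxCachePerGpu >= 0 &&
      g_cache[deviceIndex].size() >= static_cast<size_t>(kMaxCachePerGpu)) {
    return; // cache full: drop the handle instead of caching it
  }
  g_cache[deviceIndex].push_back(std::move(h));
}

// Called on decoder construction: reuse a cached handle or create a new one.
Handle acquire(int deviceIndex) {
  if (deviceIndex >= 0 && deviceIndex < kMaxGpus) {
    std::scoped_lock lock(g_cacheMutexes[deviceIndex]);
    if (!g_cache[deviceIndex].empty()) {
      Handle h = g_cache[deviceIndex].back();
      g_cache[deviceIndex].pop_back();
      return h;
    }
  }
  return makeHandle(deviceIndex);
}

} // namespace

int main() {
  Handle a = acquire(0); // cache miss: creates a context
  release(0, a);         // returned to the cache
  Handle b = acquire(0); // cache hit: same underlying context
  std::cout << (a.get() == b.get() ? "reused" : "fresh") << '\n'; // "reused"
}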
torchcodec/_core/CudaDeviceInterface.h
@@ -0,0 +1,34 @@
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include "src/torchcodec/_core/DeviceInterface.h"

namespace facebook::torchcodec {

class CudaDeviceInterface : public DeviceInterface {
 public:
  CudaDeviceInterface(const torch::Device& device);

  virtual ~CudaDeviceInterface();

  std::optional<const AVCodec*> findCodec(const AVCodecID& codecId) override;

  void initializeContext(AVCodecContext* codecContext) override;

  void convertAVFrameToFrameOutput(
      const VideoStreamOptions& videoStreamOptions,
      UniqueAVFrame& avFrame,
      FrameOutput& frameOutput,
      std::optional<torch::Tensor> preAllocatedOutputTensor =
          std::nullopt) override;

 private:
  AVBufferRef* ctx_ = nullptr;
};

} // namespace facebook::torchcodec
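The convertAVFrameToFrameOutput override declared above is implemented in CudaDeviceInterface.cpp with NPP's nppiNV12ToRGB_8u_P2C3R. NV12 stores a full-resolution Y plane plus a half-resolution interleaved UV plane, which is exactly the two pointers passed to NPP (avFrame->data[0] and data[1]). Below is a rough CPU reference of that conversion, not torchcodec code: the limited-range BT.601 coefficients are illustrative (the 709CSC variant in the source uses BT.709 coefficients instead), and NPP's exact rounding may differ.

// CPU reference for NV12 -> RGB (HWC layout, like the output tensor above).
#include <algorithm>
#include <cstdint>
#include <iostream>

static uint8_t clamp8(float v) {
  return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, v)));
}

// yPlane: h*w bytes; uvPlane: (h/2)*(w/2) interleaved U,V byte pairs;
// rgb: h*w*3 bytes. Assumes even width and height.
void nv12ToRgbBT601(const uint8_t* yPlane, const uint8_t* uvPlane,
                    int width, int height, uint8_t* rgb) {
  for (int r = 0; r < height; ++r) {
    for (int c = 0; c < width; ++c) {
      float y = 1.164f * (yPlane[r * width + c] - 16);
      // Each UV pair covers a 2x2 block of luma samples.
      const uint8_t* uv = &uvPlane[(r / 2) * width + (c / 2) * 2];
      float u = uv[0] - 128.0f;
      float v = uv[1] - 128.0f;
      uint8_t* out = &rgb[(r * width + c) * 3];
      out[0] = clamp8(y + 1.596f * v);              // R
      out[1] = clamp8(y - 0.392f * u - 0.813f * v); // G
      out[2] = clamp8(y + 2.017f * u);              // B
    }
  }
}

int main() {
  // One 2x2 frame: mid-gray luma, neutral chroma -> gray RGB.
  uint8_t y[4] = {126, 126, 126, 126};
  uint8_t uv[2] = {128, 128};
  uint8_t rgb[12];
  nv12ToRgbBT601(y, uv, 2, 2, rgb);
  std::cout << int(rgb[0]) << " " << int(rgb[1]) << " " << int(rgb[2]) << '\n';
}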
@@ -0,0 +1,88 @@
|
|
|
1
|
+
// Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
+
// All rights reserved.
|
|
3
|
+
//
|
|
4
|
+
// This source code is licensed under the BSD-style license found in the
|
|
5
|
+
// LICENSE file in the root directory of this source tree.
|
|
6
|
+
|
|
7
|
+
#include "src/torchcodec/_core/DeviceInterface.h"
|
|
8
|
+
#include <map>
|
|
9
|
+
#include <mutex>
|
|
10
|
+
|
|
11
|
+
namespace facebook::torchcodec {
|
|
12
|
+
|
|
13
|
+
namespace {
|
|
14
|
+
using DeviceInterfaceMap = std::map<torch::DeviceType, CreateDeviceInterfaceFn>;
|
|
15
|
+
std::mutex g_interface_mutex;
|
|
16
|
+
std::unique_ptr<DeviceInterfaceMap> g_interface_map;
|
|
17
|
+
|
|
18
|
+
std::string getDeviceType(const std::string& device) {
|
|
19
|
+
size_t pos = device.find(':');
|
|
20
|
+
if (pos == std::string::npos) {
|
|
21
|
+
return device;
|
|
22
|
+
}
|
|
23
|
+
return device.substr(0, pos);
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
} // namespace
|
|
27
|
+
|
|
28
|
+
bool registerDeviceInterface(
|
|
29
|
+
torch::DeviceType deviceType,
|
|
30
|
+
CreateDeviceInterfaceFn createInterface) {
|
|
31
|
+
std::scoped_lock lock(g_interface_mutex);
|
|
32
|
+
if (!g_interface_map) {
|
|
33
|
+
// We delay this initialization until runtime to avoid the Static
|
|
34
|
+
// Initialization Order Fiasco:
|
|
35
|
+
//
|
|
36
|
+
// https://en.cppreference.com/w/cpp/language/siof
|
|
37
|
+
g_interface_map = std::make_unique<DeviceInterfaceMap>();
|
|
38
|
+
}
|
|
39
|
+
TORCH_CHECK(
|
|
40
|
+
g_interface_map->find(deviceType) == g_interface_map->end(),
|
|
41
|
+
"Device interface already registered for ",
|
|
42
|
+
deviceType);
|
|
43
|
+
g_interface_map->insert({deviceType, createInterface});
|
|
44
|
+
return true;
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
torch::Device createTorchDevice(const std::string device) {
|
|
48
|
+
// TODO: remove once DeviceInterface for CPU is implemented
|
|
49
|
+
if (device == "cpu") {
|
|
50
|
+
return torch::kCPU;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
std::scoped_lock lock(g_interface_mutex);
|
|
54
|
+
std::string deviceType = getDeviceType(device);
|
|
55
|
+
auto deviceInterface = std::find_if(
|
|
56
|
+
g_interface_map->begin(),
|
|
57
|
+
g_interface_map->end(),
|
|
58
|
+
[&](const std::pair<torch::DeviceType, CreateDeviceInterfaceFn>& arg) {
|
|
59
|
+
return device.rfind(
|
|
60
|
+
torch::DeviceTypeName(arg.first, /*lcase*/ true), 0) == 0;
|
|
61
|
+
});
|
|
62
|
+
TORCH_CHECK(
|
|
63
|
+
deviceInterface != g_interface_map->end(),
|
|
64
|
+
"Unsupported device: ",
|
|
65
|
+
device);
|
|
66
|
+
|
|
67
|
+
return torch::Device(device);
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
std::unique_ptr<DeviceInterface> createDeviceInterface(
|
|
71
|
+
const torch::Device& device) {
|
|
72
|
+
auto deviceType = device.type();
|
|
73
|
+
// TODO: remove once DeviceInterface for CPU is implemented
|
|
74
|
+
if (deviceType == torch::kCPU) {
|
|
75
|
+
return nullptr;
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
std::scoped_lock lock(g_interface_mutex);
|
|
79
|
+
TORCH_CHECK(
|
|
80
|
+
g_interface_map->find(deviceType) != g_interface_map->end(),
|
|
81
|
+
"Unsupported device: ",
|
|
82
|
+
device);
|
|
83
|
+
|
|
84
|
+
return std::unique_ptr<DeviceInterface>(
|
|
85
|
+
(*g_interface_map)[deviceType](device));
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
} // namespace facebook::torchcodec
|
|
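registerDeviceInterface is designed to be callable from a static initializer, which is how CudaDeviceInterface.cpp uses it (`bool g_cuda = registerDeviceInterface(torch::kCUDA, ...)` runs before main). That is also why the registry map above is lazily heap-allocated under a mutex rather than being a plain global. Here is a self-contained sketch of that self-registration pattern; all names (Interface, registerFactory, CudaLike) are illustrative, not torchcodec's.

// Standalone sketch of the static self-registration pattern above.
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Interface {
  virtual ~Interface() = default;
  virtual std::string name() const = 0;
};

using Factory = std::function<Interface*()>;
using Registry = std::map<std::string, Factory>;

std::mutex g_mutex;
std::unique_ptr<Registry> g_registry;

bool registerFactory(const std::string& key, Factory factory) {
  std::scoped_lock lock(g_mutex);
  if (!g_registry) {
    g_registry = std::make_unique<Registry>(); // lazy init sidesteps SIOF
  }
  g_registry->emplace(key, std::move(factory));
  return true; // the bool return lets a static initializer invoke this
}

struct CudaLike : Interface {
  std::string name() const override { return "cuda-like"; }
};

// Runs during static initialization, before main(): the same trick as the
// g_cuda registration line in CudaDeviceInterface.cpp.
bool g_registered = registerFactory("cuda", [] { return new CudaLike(); });

std::unique_ptr<Interface> create(const std::string& key) {
  std::scoped_lock lock(g_mutex);
  if (!g_registry) {
    return nullptr;
  }
  auto it = g_registry->find(key);
  if (it == g_registry->end()) {
    return nullptr;
  }
  return std::unique_ptr<Interface>(it->second());
}

int main() {
  auto iface = create("cuda");
  std::cout << (iface ? iface->name() : "unsupported") << '\n'; // "cuda-like"
}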
torchcodec/_core/DeviceInterface.h
@@ -0,0 +1,66 @@
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <torch/types.h>
#include <functional>
#include <memory>
#include <stdexcept>
#include <string>
#include "FFMPEGCommon.h"
#include "src/torchcodec/_core/Frame.h"
#include "src/torchcodec/_core/StreamOptions.h"

namespace facebook::torchcodec {

// Note that all these device functions should only be called if the device is
// not a CPU device. CPU device functions are already implemented in the
// SingleStreamDecoder implementation.
// These functions should only be called from within an if block like this:
// if (device.type() != torch::kCPU) {
//   deviceFunction(device, ...);
// }

class DeviceInterface {
 public:
  DeviceInterface(const torch::Device& device) : device_(device) {}

  virtual ~DeviceInterface(){};

  torch::Device& device() {
    return device_;
  };

  virtual std::optional<const AVCodec*> findCodec(const AVCodecID& codecId) = 0;

  // Initialize the hardware device that is specified in `device`. Some builds
  // support CUDA and others only support CPU.
  virtual void initializeContext(AVCodecContext* codecContext) = 0;

  virtual void convertAVFrameToFrameOutput(
      const VideoStreamOptions& videoStreamOptions,
      UniqueAVFrame& avFrame,
      FrameOutput& frameOutput,
      std::optional<torch::Tensor> preAllocatedOutputTensor = std::nullopt) = 0;

 protected:
  torch::Device device_;
};

using CreateDeviceInterfaceFn =
    std::function<DeviceInterface*(const torch::Device& device)>;

bool registerDeviceInterface(
    torch::DeviceType deviceType,
    const CreateDeviceInterfaceFn createInterface);

torch::Device createTorchDevice(const std::string device);

std::unique_ptr<DeviceInterface> createDeviceInterface(
    const torch::Device& device);

} // namespace facebook::torchcodec
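createTorchDevice, declared above, accepts device strings in torch's "<type>" or "<type>:<index>" form (e.g. "cuda" or "cuda:1"); getDeviceType in DeviceInterface.cpp strips the ordinal before the registry lookup. The standalone parser below demonstrates the same convention; ParsedDevice and parseDevice are hypothetical names, not part of torchcodec.

// Standalone sketch of the "<type>[:<index>]" device-string convention.
#include <iostream>
#include <string>

struct ParsedDevice {
  std::string type; // e.g. "cuda"
  int index = -1;   // -1 when no ordinal is given, as with torch::Device
};

ParsedDevice parseDevice(const std::string& device) {
  ParsedDevice out;
  size_t pos = device.find(':');
  if (pos == std::string::npos) {
    out.type = device; // mirrors getDeviceType() returning the whole string
  } else {
    out.type = device.substr(0, pos);
    out.index = std::stoi(device.substr(pos + 1));
  }
  return out;
}

int main() {
  ParsedDevice d = parseDevice("cuda:1");
  std::cout << d.type << " " << d.index << '\n'; // prints "cuda 1"
}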