torchcodec 0.7.0-cp313-cp313-win_amd64.whl → 0.8.1-cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (66)
  1. torchcodec/_core/AVIOTensorContext.cpp +23 -16
  2. torchcodec/_core/AVIOTensorContext.h +2 -1
  3. torchcodec/_core/BetaCudaDeviceInterface.cpp +718 -0
  4. torchcodec/_core/BetaCudaDeviceInterface.h +193 -0
  5. torchcodec/_core/CMakeLists.txt +18 -3
  6. torchcodec/_core/CUDACommon.cpp +330 -0
  7. torchcodec/_core/CUDACommon.h +51 -0
  8. torchcodec/_core/Cache.h +6 -20
  9. torchcodec/_core/CpuDeviceInterface.cpp +195 -108
  10. torchcodec/_core/CpuDeviceInterface.h +84 -19
  11. torchcodec/_core/CudaDeviceInterface.cpp +227 -376
  12. torchcodec/_core/CudaDeviceInterface.h +38 -6
  13. torchcodec/_core/DeviceInterface.cpp +57 -19
  14. torchcodec/_core/DeviceInterface.h +97 -16
  15. torchcodec/_core/Encoder.cpp +346 -9
  16. torchcodec/_core/Encoder.h +62 -1
  17. torchcodec/_core/FFMPEGCommon.cpp +190 -3
  18. torchcodec/_core/FFMPEGCommon.h +27 -1
  19. torchcodec/_core/FilterGraph.cpp +30 -22
  20. torchcodec/_core/FilterGraph.h +15 -1
  21. torchcodec/_core/Frame.cpp +22 -7
  22. torchcodec/_core/Frame.h +15 -61
  23. torchcodec/_core/Metadata.h +2 -2
  24. torchcodec/_core/NVCUVIDRuntimeLoader.cpp +320 -0
  25. torchcodec/_core/NVCUVIDRuntimeLoader.h +14 -0
  26. torchcodec/_core/NVDECCache.cpp +60 -0
  27. torchcodec/_core/NVDECCache.h +102 -0
  28. torchcodec/_core/SingleStreamDecoder.cpp +196 -201
  29. torchcodec/_core/SingleStreamDecoder.h +42 -15
  30. torchcodec/_core/StreamOptions.h +16 -6
  31. torchcodec/_core/Transform.cpp +87 -0
  32. torchcodec/_core/Transform.h +84 -0
  33. torchcodec/_core/__init__.py +4 -0
  34. torchcodec/_core/custom_ops.cpp +257 -32
  35. torchcodec/_core/fetch_and_expose_non_gpl_ffmpeg_libs.cmake +61 -1
  36. torchcodec/_core/nvcuvid_include/cuviddec.h +1374 -0
  37. torchcodec/_core/nvcuvid_include/nvcuvid.h +610 -0
  38. torchcodec/_core/ops.py +147 -44
  39. torchcodec/_core/pybind_ops.cpp +22 -59
  40. torchcodec/_samplers/video_clip_sampler.py +7 -19
  41. torchcodec/decoders/__init__.py +1 -0
  42. torchcodec/decoders/_decoder_utils.py +61 -1
  43. torchcodec/decoders/_video_decoder.py +46 -20
  44. torchcodec/libtorchcodec_core4.dll +0 -0
  45. torchcodec/libtorchcodec_core5.dll +0 -0
  46. torchcodec/libtorchcodec_core6.dll +0 -0
  47. torchcodec/libtorchcodec_core7.dll +0 -0
  48. torchcodec/libtorchcodec_core8.dll +0 -0
  49. torchcodec/libtorchcodec_custom_ops4.dll +0 -0
  50. torchcodec/libtorchcodec_custom_ops5.dll +0 -0
  51. torchcodec/libtorchcodec_custom_ops6.dll +0 -0
  52. torchcodec/libtorchcodec_custom_ops7.dll +0 -0
  53. torchcodec/libtorchcodec_custom_ops8.dll +0 -0
  54. torchcodec/libtorchcodec_pybind_ops4.pyd +0 -0
  55. torchcodec/libtorchcodec_pybind_ops5.pyd +0 -0
  56. torchcodec/libtorchcodec_pybind_ops6.pyd +0 -0
  57. torchcodec/libtorchcodec_pybind_ops7.pyd +0 -0
  58. torchcodec/libtorchcodec_pybind_ops8.pyd +0 -0
  59. torchcodec/samplers/_time_based.py +8 -0
  60. torchcodec/version.py +1 -1
  61. {torchcodec-0.7.0.dist-info → torchcodec-0.8.1.dist-info}/METADATA +29 -16
  62. torchcodec-0.8.1.dist-info/RECORD +82 -0
  63. {torchcodec-0.7.0.dist-info → torchcodec-0.8.1.dist-info}/WHEEL +1 -1
  64. torchcodec-0.7.0.dist-info/RECORD +0 -67
  65. {torchcodec-0.7.0.dist-info → torchcodec-0.8.1.dist-info}/licenses/LICENSE +0 -0
  66. {torchcodec-0.7.0.dist-info → torchcodec-0.8.1.dist-info}/top_level.txt +0 -0
torchcodec/_core/ops.py CHANGED
@@ -41,7 +41,7 @@ def load_torchcodec_shared_libraries():
     # libraries do not meet those conditions.

     exceptions = []
-    for ffmpeg_major_version in (7, 6, 5, 4):
+    for ffmpeg_major_version in (8, 7, 6, 5, 4):
         pybind_ops_module_name = _get_pybind_ops_module_name(ffmpeg_major_version)
         decoder_library_name = f"libtorchcodec_core{ffmpeg_major_version}"
         custom_ops_library_name = f"libtorchcodec_custom_ops{ffmpeg_major_version}"
@@ -69,7 +69,7 @@ def load_torchcodec_shared_libraries():
     raise RuntimeError(
         f"""Could not load libtorchcodec. Likely causes:
           1. FFmpeg is not properly installed in your environment. We support
-             versions 4, 5, 6 and 7.
+             versions 4, 5, 6, and 7 on all platforms, and 8 on Mac and Linux.
          2. The PyTorch version ({torch.__version__}) is not compatible with
             this version of TorchCodec. Refer to the version compatibility
             table:
@@ -95,11 +95,23 @@ encode_audio_to_file = torch._dynamo.disallow_in_graph(
 encode_audio_to_tensor = torch._dynamo.disallow_in_graph(
     torch.ops.torchcodec_ns.encode_audio_to_tensor.default
 )
+_encode_audio_to_file_like = torch._dynamo.disallow_in_graph(
+    torch.ops.torchcodec_ns._encode_audio_to_file_like.default
+)
+encode_video_to_file = torch._dynamo.disallow_in_graph(
+    torch.ops.torchcodec_ns.encode_video_to_file.default
+)
+encode_video_to_tensor = torch._dynamo.disallow_in_graph(
+    torch.ops.torchcodec_ns.encode_video_to_tensor.default
+)
+_encode_video_to_file_like = torch._dynamo.disallow_in_graph(
+    torch.ops.torchcodec_ns._encode_video_to_file_like.default
+)
 create_from_tensor = torch._dynamo.disallow_in_graph(
     torch.ops.torchcodec_ns.create_from_tensor.default
 )
-_convert_to_tensor = torch._dynamo.disallow_in_graph(
-    torch.ops.torchcodec_ns._convert_to_tensor.default
+_create_from_file_like = torch._dynamo.disallow_in_graph(
+    torch.ops.torchcodec_ns._create_from_file_like.default
 )
 add_video_stream = torch.ops.torchcodec_ns.add_video_stream.default
 _add_video_stream = torch.ops.torchcodec_ns._add_video_stream.default
@@ -108,8 +120,10 @@ seek_to_pts = torch.ops.torchcodec_ns.seek_to_pts.default
 get_next_frame = torch.ops.torchcodec_ns.get_next_frame.default
 get_frame_at_pts = torch.ops.torchcodec_ns.get_frame_at_pts.default
 get_frame_at_index = torch.ops.torchcodec_ns.get_frame_at_index.default
-get_frames_at_indices = torch.ops.torchcodec_ns.get_frames_at_indices.default
-get_frames_by_pts = torch.ops.torchcodec_ns.get_frames_by_pts.default
+_get_frames_at_indices_tensor_input = (
+    torch.ops.torchcodec_ns.get_frames_at_indices.default
+)
+_get_frames_by_pts_tensor_input = torch.ops.torchcodec_ns.get_frames_by_pts.default
 get_frames_in_range = torch.ops.torchcodec_ns.get_frames_in_range.default
 get_frames_by_pts_in_range = torch.ops.torchcodec_ns.get_frames_by_pts_in_range.default
 get_frames_by_pts_in_range_audio = (
@@ -128,6 +142,7 @@ _get_stream_json_metadata = torch.ops.torchcodec_ns.get_stream_json_metadata.def
 _get_json_ffmpeg_library_versions = (
     torch.ops.torchcodec_ns._get_json_ffmpeg_library_versions.default
 )
+_get_backend_details = torch.ops.torchcodec_ns._get_backend_details.default


 # =============================
@@ -148,7 +163,12 @@ def create_from_file_like(
     file_like: Union[io.RawIOBase, io.BufferedReader], seek_mode: Optional[str] = None
 ) -> torch.Tensor:
     assert _pybind_ops is not None
-    return _convert_to_tensor(_pybind_ops.create_from_file_like(file_like, seek_mode))
+    return _create_from_file_like(
+        _pybind_ops.create_file_like_context(
+            file_like, False  # False means not for writing
+        ),
+        seek_mode,
+    )


 def encode_audio_to_file_like(
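
Reviewer note: the new file-like path above is what the public API ends up calling. A minimal sketch of decoding from an in-memory file-like object, assuming a local "video.mp4" (any object with read() and seek() methods works):

    import io
    from torchcodec.decoders import VideoDecoder

    # VideoDecoder routes file-like inputs through create_from_file_like,
    # which now allocates the AVIO context via create_file_like_context.
    with open("video.mp4", "rb") as f:
        file_like = io.BytesIO(f.read())

    decoder = VideoDecoder(file_like)
    frame = decoder.get_frame_at(0)
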
@@ -176,35 +196,69 @@ def encode_audio_to_file_like(
     if samples.dtype != torch.float32:
         raise ValueError(f"samples must have dtype torch.float32, got {samples.dtype}")

-    # We're having the same problem as with the decoder's create_from_file_like:
-    # We should be able to pass a tensor directly, but this leads to a pybind
-    # error. In order to work around this, we pass the pointer to the tensor's
-    # data, and its shape, in order to re-construct it in C++. For this to work:
-    # - the tensor must be float32
-    # - the tensor must be contiguous, which is why we call contiguous().
-    #   In theory we could avoid this restriction by also passing the strides?
-    # - IMPORTANT: the input samples tensor and its underlying data must be
-    #   alive during the call.
-    #
-    # A more elegant solution would be to cast the tensor into a py::object, but
-    # casting the py::object backk to a tensor in C++ seems to lead to the same
-    # pybing error.
-
-    samples = samples.contiguous()
-    _pybind_ops.encode_audio_to_file_like(
-        samples.data_ptr(),
-        list(samples.shape),
+    _encode_audio_to_file_like(
+        samples,
         sample_rate,
         format,
-        file_like,
+        _pybind_ops.create_file_like_context(file_like, True),  # True means for writing
         bit_rate,
         num_channels,
         desired_sample_rate,
     )

-    # This check is useless but it's critical to keep it to ensures that samples
-    # is still alive during the call to encode_audio_to_file_like.
-    assert samples.is_contiguous()
+
+def encode_video_to_file_like(
+    frames: torch.Tensor,
+    frame_rate: int,
+    format: str,
+    file_like: Union[io.RawIOBase, io.BufferedIOBase],
+    crf: Optional[int] = None,
+) -> None:
+    """Encode video frames to a file-like object.
+
+    Args:
+        frames: Video frames tensor
+        frame_rate: Frame rate in frames per second
+        format: Video format (e.g., "mp4", "mov", "mkv")
+        file_like: File-like object that supports write() and seek() methods
+        crf: Optional constant rate factor for encoding quality
+    """
+    assert _pybind_ops is not None
+
+    _encode_video_to_file_like(
+        frames,
+        frame_rate,
+        format,
+        _pybind_ops.create_file_like_context(file_like, True),  # True means for writing
+        crf,
+    )
+
+
+def get_frames_at_indices(
+    decoder: torch.Tensor, *, frame_indices: Union[torch.Tensor, list[int]]
+) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+    if isinstance(frame_indices, torch.Tensor):
+        # Ensure indices is the correct dtype (int64)
+        frame_indices = frame_indices.to(torch.int64)
+    else:
+        # Convert list to tensor for dispatch
+        frame_indices = torch.tensor(frame_indices)
+    return _get_frames_at_indices_tensor_input(decoder, frame_indices=frame_indices)
+
+
+def get_frames_by_pts(
+    decoder: torch.Tensor, *, timestamps: Union[torch.Tensor, list[float]]
+) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+    if isinstance(timestamps, torch.Tensor):
+        # Ensure timestamps is the correct dtype (float64)
+        timestamps = timestamps.to(torch.float64)
+    else:
+        # Convert list to tensor for dispatch
+        try:
+            timestamps = torch.tensor(timestamps, dtype=torch.float64)
+        except Exception as e:
+            raise ValueError("Couldn't convert timestamps input to a tensor") from e
+    return _get_frames_by_pts_tensor_input(decoder, timestamps=timestamps)


 # ==============================
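
The two wrappers above normalize list inputs into tensors before dispatching to the renamed tensor-input custom ops. A rough usage sketch at the private _core ops level, assuming a local "video.mp4" (these are internal APIs and may change):

    import torch
    from torchcodec._core import ops

    decoder = ops.create_from_file("video.mp4")
    ops.add_video_stream(decoder)

    # Lists and tensors are both accepted; both paths hit the same op.
    frames, pts, durations = ops.get_frames_at_indices(
        decoder, frame_indices=torch.tensor([0, 10, 20])
    )
    frames, pts, durations = ops.get_frames_by_pts(decoder, timestamps=[0.0, 1.5])
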
@@ -215,6 +269,13 @@ def create_from_file_abstract(filename: str, seek_mode: Optional[str]) -> torch.
     return torch.empty([], dtype=torch.long)


+@register_fake("torchcodec_ns::_create_from_file_like")
+def _create_from_file_like_abstract(
+    file_like: int, seek_mode: Optional[str]
+) -> torch.Tensor:
+    return torch.empty([], dtype=torch.long)
+
+
 @register_fake("torchcodec_ns::encode_audio_to_file")
 def encode_audio_to_file_abstract(
     samples: torch.Tensor,
@@ -239,15 +300,54 @@ def encode_audio_to_tensor_abstract(
     return torch.empty([], dtype=torch.long)


-@register_fake("torchcodec_ns::create_from_tensor")
-def create_from_tensor_abstract(
-    video_tensor: torch.Tensor, seek_mode: Optional[str]
+@register_fake("torchcodec_ns::_encode_audio_to_file_like")
+def _encode_audio_to_file_like_abstract(
+    samples: torch.Tensor,
+    sample_rate: int,
+    format: str,
+    file_like_context: int,
+    bit_rate: Optional[int] = None,
+    num_channels: Optional[int] = None,
+    desired_sample_rate: Optional[int] = None,
+) -> None:
+    return
+
+
+@register_fake("torchcodec_ns::encode_video_to_file")
+def encode_video_to_file_abstract(
+    frames: torch.Tensor,
+    frame_rate: int,
+    filename: str,
+    crf: Optional[int],
+) -> None:
+    return
+
+
+@register_fake("torchcodec_ns::encode_video_to_tensor")
+def encode_video_to_tensor_abstract(
+    frames: torch.Tensor,
+    frame_rate: int,
+    format: str,
+    crf: Optional[int],
 ) -> torch.Tensor:
     return torch.empty([], dtype=torch.long)


-@register_fake("torchcodec_ns::_convert_to_tensor")
-def _convert_to_tensor_abstract(decoder_ptr: int) -> torch.Tensor:
+@register_fake("torchcodec_ns::_encode_video_to_file_like")
+def _encode_video_to_file_like_abstract(
+    frames: torch.Tensor,
+    frame_rate: int,
+    format: str,
+    file_like_context: int,
+    crf: Optional[int] = None,
+) -> None:
+    return
+
+
+@register_fake("torchcodec_ns::create_from_tensor")
+def create_from_tensor_abstract(
+    video_tensor: torch.Tensor, seek_mode: Optional[str]
+) -> torch.Tensor:
     return torch.empty([], dtype=torch.long)


@@ -255,12 +355,12 @@ def _convert_to_tensor_abstract(decoder_ptr: int) -> torch.Tensor:
 def _add_video_stream_abstract(
     decoder: torch.Tensor,
     *,
-    width: Optional[int] = None,
-    height: Optional[int] = None,
     num_threads: Optional[int] = None,
     dimension_order: Optional[str] = None,
     stream_index: Optional[int] = None,
-    device: Optional[str] = None,
+    device: str = "cpu",
+    device_variant: str = "ffmpeg",
+    transform_specs: str = "",
     custom_frame_mappings: Optional[
         tuple[torch.Tensor, torch.Tensor, torch.Tensor]
     ] = None,
@@ -273,12 +373,12 @@
 def add_video_stream_abstract(
     decoder: torch.Tensor,
     *,
-    width: Optional[int] = None,
-    height: Optional[int] = None,
     num_threads: Optional[int] = None,
     dimension_order: Optional[str] = None,
     stream_index: Optional[int] = None,
-    device: Optional[str] = None,
+    device: str = "cpu",
+    device_variant: str = "ffmpeg",
+    transform_specs: str = "",
     custom_frame_mappings: Optional[
         tuple[torch.Tensor, torch.Tensor, torch.Tensor]
     ] = None,
@@ -332,7 +432,7 @@ def get_frame_at_pts_abstract(
 def get_frames_by_pts_abstract(
     decoder: torch.Tensor,
     *,
-    timestamps: List[float],
+    timestamps: Union[torch.Tensor, List[float]],
 ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
     image_size = [get_ctx().new_dynamic_size() for _ in range(4)]
     return (
@@ -356,9 +456,7 @@ def get_frame_at_index_abstract(

 @register_fake("torchcodec_ns::get_frames_at_indices")
 def get_frames_at_indices_abstract(
-    decoder: torch.Tensor,
-    *,
-    frame_indices: List[int],
+    decoder: torch.Tensor, *, frame_indices: Union[torch.Tensor, List[int]]
 ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
     image_size = [get_ctx().new_dynamic_size() for _ in range(4)]
     return (
@@ -453,3 +551,8 @@ def scan_all_streams_to_update_metadata_abstract(decoder: torch.Tensor) -> None:
 def get_ffmpeg_library_versions():
     versions_json = _get_json_ffmpeg_library_versions()
     return json.loads(versions_json)
+
+
+@register_fake("torchcodec_ns::_get_backend_details")
+def _get_backend_details_abstract(decoder: torch.Tensor) -> str:
+    return ""
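
All of the _abstract functions registered in this file follow the same register_fake pattern: they give torch.compile a shape- and dtype-only stand-in for each custom op, without running any real decoding or encoding. A self-contained sketch of that pattern, using a hypothetical mylib::double op that is not part of torchcodec:

    import torch
    from torch.library import custom_op, register_fake

    # Hypothetical custom op, shown only to illustrate the registration pattern.
    @custom_op("mylib::double", mutates_args=())
    def double(x: torch.Tensor) -> torch.Tensor:
        return x * 2

    @register_fake("mylib::double")
    def double_abstract(x: torch.Tensor) -> torch.Tensor:
        # Fake kernel: produce metadata (shape, dtype) only, never real data.
        return torch.empty_like(x)
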
torchcodec/_core/pybind_ops.cpp CHANGED
@@ -7,72 +7,36 @@
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
 #include <cstdint>
-#include <string>

 #include "src/torchcodec/_core/AVIOFileLikeContext.h"
-#include "src/torchcodec/_core/Encoder.h"
-#include "src/torchcodec/_core/SingleStreamDecoder.h"
-#include "src/torchcodec/_core/StreamOptions.h"
-#include "src/torchcodec/_core/ValidationUtils.h"

 namespace py = pybind11;

 namespace facebook::torchcodec {

-// In principle, this should be able to return a tensor. But when we try that,
-// we run into the bug reported here:
+// Note: It's not immediately obvious why we need both custom_ops.cpp and
+// pybind_ops.cpp. We do all other Python to C++ bridging in
+// custom_ops.cpp, and that even depends on pybind11, so why have an
+// explicit pybind-only file?
 //
-// https://github.com/pytorch/pytorch/issues/136664
+// The reason is that we want to accept OWNERSHIP of a file-like object
+// from the Python side. In order to do that, we need a proper
+// py::object. For raw bytes, we can launder that through a tensor on the
+// custom_ops.cpp side, but we can't launder a proper Python object
+// through a tensor. Custom ops can't accept a proper Python object
+// through py::object, so we have to do direct pybind11 here.
 //
-// So we instead launder the pointer through an int, and then use a conversion
-// function on the custom ops side to launder that int into a tensor.
-int64_t create_from_file_like(
-    py::object file_like,
-    std::optional<std::string_view> seek_mode) {
-  SingleStreamDecoder::SeekMode realSeek = SingleStreamDecoder::SeekMode::exact;
-  if (seek_mode.has_value()) {
-    realSeek = seekModeFromString(seek_mode.value());
-  }
-
-  auto avioContextHolder =
-      std::make_unique<AVIOFileLikeContext>(file_like, /*isForWriting=*/false);
-
-  SingleStreamDecoder* decoder =
-      new SingleStreamDecoder(std::move(avioContextHolder), realSeek);
-  return reinterpret_cast<int64_t>(decoder);
-}
-
-void encode_audio_to_file_like(
-    int64_t data_ptr,
-    const std::vector<int64_t>& shape,
-    int64_t sample_rate,
-    std::string_view format,
-    py::object file_like,
-    std::optional<int64_t> bit_rate = std::nullopt,
-    std::optional<int64_t> num_channels = std::nullopt,
-    std::optional<int64_t> desired_sample_rate = std::nullopt) {
-  // We assume float32 *and* contiguity, this must be enforced by the caller.
-  auto tensor_options = torch::TensorOptions().dtype(torch::kFloat32);
-  auto samples = torch::from_blob(
-      reinterpret_cast<void*>(data_ptr), shape, tensor_options);
-
-  AudioStreamOptions audioStreamOptions;
-  audioStreamOptions.bitRate = validateOptionalInt64ToInt(bit_rate, "bit_rate");
-  audioStreamOptions.numChannels =
-      validateOptionalInt64ToInt(num_channels, "num_channels");
-  audioStreamOptions.sampleRate =
-      validateOptionalInt64ToInt(desired_sample_rate, "desired_sample_rate");
-
-  auto avioContextHolder =
-      std::make_unique<AVIOFileLikeContext>(file_like, /*isForWriting=*/true);
-
-  AudioEncoder encoder(
-      samples,
-      validateInt64ToInt(sample_rate, "sample_rate"),
-      format,
-      std::move(avioContextHolder),
-      audioStreamOptions);
-  encoder.encode();
+// TODO: Investigate if we can do something better here. See:
+// https://github.com/pytorch/torchcodec/issues/896
+// Short version is that we're laundering a pointer through an int, the
+// Python side forwards that to decoder creation functions in
+// custom_ops.cpp and we do another cast on that side to get a pointer
+// again. We want to investigate if we can do something cleaner by
+// defining proper pybind objects.
+int64_t create_file_like_context(py::object file_like, bool is_for_writing) {
+  AVIOFileLikeContext* context =
+      new AVIOFileLikeContext(file_like, is_for_writing);
+  return reinterpret_cast<int64_t>(context);
 }

 #ifndef PYBIND_OPS_MODULE_NAME
@@ -80,8 +44,7 @@ void encode_audio_to_file_like(
 #endif

 PYBIND11_MODULE(PYBIND_OPS_MODULE_NAME, m) {
-  m.def("create_from_file_like", &create_from_file_like);
-  m.def("encode_audio_to_file_like", &encode_audio_to_file_like);
+  m.def("create_file_like_context", &create_file_like_context);
 }

 } // namespace facebook::torchcodec
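
Taken together with the ops.py changes above, the handoff now looks like this on the Python side (names taken from this diff; a sketch, not a complete program):

    # pybind11 allocates an AVIOFileLikeContext that owns the Python
    # file-like object, and returns its address as a plain int64:
    context_ptr = _pybind_ops.create_file_like_context(file_like, False)

    # The custom op receives the int64 and reinterpret_casts it back into
    # a pointer on the C++ side (see issue #896 for the cleanup plan):
    decoder = _create_from_file_like(context_ptr, seek_mode)
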
torchcodec/_samplers/video_clip_sampler.py CHANGED
@@ -105,25 +105,12 @@ class IndexBasedSamplerArgs(SamplerArgs):
     sample_step: int = 1


-class VideoClipSampler(nn.Module):
+class DEPRECATED_VideoClipSampler(nn.Module):
     """
-    VideoClipSampler will do video clip sampling with given video args and sampler args.
-    The video args contains video related information, frames_per_clip, dimensions etc.
-    The sampler args can be either time-based or index-based, it will be used to decide clip start time pts or index.
-    ClipSampling support, random, uniform, periodic, target, keyframe sampling etc.
+    DEPRECATED: Do not use. The supported samplers are in `torchcodec.samplers`. See:

-    Args:
-        video_args (`VideoArgs`): The video args
-        sampler_args (`SamplerArgs`): The sampler args. Can be TimeBasedSamplerArgs or IndexBasedSamplerArgs
-        decoder_args (`DecoderArgs`): Decoder args contain value needs for decoder, for example, thread count
-
-    Example:
-        >>> video_args = VideoArgs(desired_width=224, desired_height=224)
-        >>> time_based_sampler_args = TimeBasedSamplerArgs(sampler_type="random", clips_per_video=1, frames_per_clip=4)
-        >>> video_decoder_args = DecoderArgs(num_threads=1)
-        >>> video_clip_sampler = VideoClipSampler(video_args, time_based_sampler_args, decoder_args)
-        >>> clips = video_clip_sampler(video_data)
-        clips now contains a list of clip, where clip is a list of frame tensors, each tensor represents a frame image.
+    * https://docs.pytorch.org/torchcodec/stable/api_ref_torchcodec.html
+    * https://docs.pytorch.org/torchcodec/stable/generated_examples/decoding/sampling.html
     """

     def __init__(
@@ -160,8 +147,7 @@ class VideoClipSampler(nn.Module):
         scan_all_streams_to_update_metadata(video_decoder)
         add_video_stream(
             video_decoder,
-            width=target_width,
-            height=target_height,
+            transform_specs=f"resize, {target_height}, {target_width}",
             num_threads=self.decoder_args.num_threads,
         )

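The width=/height= arguments are gone from add_video_stream; resizing is now expressed through the new transform_specs string. Based on the replacement line above, a resize spec appears to be spelled "resize, <height>, <width>", so a hypothetical direct call would look like:

    # Hypothetical usage, mirroring the f-string in the hunk above:
    add_video_stream(video_decoder, transform_specs="resize, 224, 224")
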
@@ -240,6 +226,8 @@ class VideoClipSampler(nn.Module):
             clip_start_idx + i * index_based_sampler_args.video_frame_dilation
             for i in range(index_based_sampler_args.frames_per_clip)
         ]
+        # Need torch.stack to convert List[Tensor[int]] into 1D Tensor[int]
+        batch_indexes = torch.stack(batch_indexes)
         frames, *_ = get_frames_at_indices(
             video_decoder,
             frame_indices=batch_indexes,
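
The added torch.stack call matters because get_frames_at_indices now converts its input to a tensor, and the comprehension above produces a list of 0-d index tensors. torch.stack turns that list into the 1-D int64 tensor the op expects:

    import torch

    # A list of 0-d tensors becomes one 1-D tensor:
    indexes = [torch.tensor(0), torch.tensor(4), torch.tensor(8)]
    print(torch.stack(indexes))  # tensor([0, 4, 8])
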
torchcodec/decoders/__init__.py CHANGED
@@ -6,6 +6,7 @@

 from .._core import AudioStreamMetadata, VideoStreamMetadata
 from ._audio_decoder import AudioDecoder  # noqa
+from ._decoder_utils import set_cuda_backend  # noqa
 from ._video_decoder import VideoDecoder  # noqa

 SimpleVideoDecoder = VideoDecoder
torchcodec/decoders/_decoder_utils.py CHANGED
@@ -4,10 +4,12 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

+import contextvars
 import io
+from contextlib import contextmanager
 from pathlib import Path

-from typing import Union
+from typing import Generator, Union

 from torch import Tensor
 from torchcodec import _core as core
@@ -50,3 +52,61 @@ def create_decoder(
         "read(self, size: int) -> bytes and "
         "seek(self, offset: int, whence: int) -> int methods."
     )
+
+
+# Thread-local and async-safe storage for the current CUDA backend
+_CUDA_BACKEND: contextvars.ContextVar[str] = contextvars.ContextVar(
+    "_CUDA_BACKEND", default="ffmpeg"
+)
+
+
+@contextmanager
+def set_cuda_backend(backend: str) -> Generator[None, None, None]:
+    """Context manager to set the CUDA backend for :class:`~torchcodec.decoders.VideoDecoder`.
+
+    This context manager allows you to specify which CUDA backend implementation
+    to use when creating :class:`~torchcodec.decoders.VideoDecoder` instances
+    with CUDA devices.
+
+    .. note::
+        **We recommend trying the "beta" backend instead of the default "ffmpeg"
+        backend!** The beta backend is faster, and will eventually become the
+        default in future versions. It may have rough edges that we'll polish
+        over time, but it's already quite stable and ready for adoption. Let us
+        know what you think!
+
+    Only the creation of the decoder needs to be inside the context manager; the
+    decoding methods can be called outside of it. You still need to pass
+    ``device="cuda"`` when creating the
+    :class:`~torchcodec.decoders.VideoDecoder` instance. If a CUDA device isn't
+    specified, this context manager will have no effect. See example below.
+
+    This is thread-safe and async-safe.
+
+    Args:
+        backend (str): The CUDA backend to use. Can be "ffmpeg" (default) or
+            "beta". We recommend trying "beta" as it's faster!
+
+    Example:
+        >>> with set_cuda_backend("beta"):
+        ...     decoder = VideoDecoder("video.mp4", device="cuda")
+        ...
+        ... # Only the decoder creation needs to be part of the context manager.
+        ... # The decoder will now use the beta CUDA implementation:
+        ... decoder.get_frame_at(0)
+    """
+    backend = backend.lower()
+    if backend not in ("ffmpeg", "beta"):
+        raise ValueError(
+            f"Invalid CUDA backend ({backend}). Supported values are 'ffmpeg' and 'beta'."
+        )
+
+    previous_state = _CUDA_BACKEND.set(backend)
+    try:
+        yield
+    finally:
+        _CUDA_BACKEND.reset(previous_state)
+
+
+def _get_cuda_backend() -> str:
+    return _CUDA_BACKEND.get()
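
Because the backend lives in a ContextVar rather than a module-level global, each thread (and each asyncio task) sees its own value, which is what makes the "thread-safe and async-safe" claim above hold. A usage sketch, assuming a local "video.mp4" and a CUDA-enabled build:

    import threading
    from torchcodec.decoders import VideoDecoder, set_cuda_backend

    def decode_with(backend: str) -> None:
        # Only decoder creation needs to be inside the context manager.
        with set_cuda_backend(backend):
            decoder = VideoDecoder("video.mp4", device="cuda")
        decoder.get_frame_at(0)  # decoding can happen outside the context

    # Concurrent workers can use different backends without interfering,
    # since each thread starts with its own copy of the context variable.
    threads = [
        threading.Thread(target=decode_with, args=(b,)) for b in ("ffmpeg", "beta")
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
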