torchcodec-0.7.0-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of torchcodec might be problematic.

Files changed (67)
  1. torchcodec/__init__.py +16 -0
  2. torchcodec/_core/AVIOContextHolder.cpp +60 -0
  3. torchcodec/_core/AVIOContextHolder.h +64 -0
  4. torchcodec/_core/AVIOFileLikeContext.cpp +98 -0
  5. torchcodec/_core/AVIOFileLikeContext.h +55 -0
  6. torchcodec/_core/AVIOTensorContext.cpp +123 -0
  7. torchcodec/_core/AVIOTensorContext.h +43 -0
  8. torchcodec/_core/CMakeLists.txt +292 -0
  9. torchcodec/_core/Cache.h +138 -0
  10. torchcodec/_core/CpuDeviceInterface.cpp +266 -0
  11. torchcodec/_core/CpuDeviceInterface.h +70 -0
  12. torchcodec/_core/CudaDeviceInterface.cpp +514 -0
  13. torchcodec/_core/CudaDeviceInterface.h +37 -0
  14. torchcodec/_core/DeviceInterface.cpp +79 -0
  15. torchcodec/_core/DeviceInterface.h +67 -0
  16. torchcodec/_core/Encoder.cpp +514 -0
  17. torchcodec/_core/Encoder.h +123 -0
  18. torchcodec/_core/FFMPEGCommon.cpp +421 -0
  19. torchcodec/_core/FFMPEGCommon.h +227 -0
  20. torchcodec/_core/FilterGraph.cpp +142 -0
  21. torchcodec/_core/FilterGraph.h +45 -0
  22. torchcodec/_core/Frame.cpp +32 -0
  23. torchcodec/_core/Frame.h +118 -0
  24. torchcodec/_core/Metadata.h +72 -0
  25. torchcodec/_core/SingleStreamDecoder.cpp +1715 -0
  26. torchcodec/_core/SingleStreamDecoder.h +380 -0
  27. torchcodec/_core/StreamOptions.h +53 -0
  28. torchcodec/_core/ValidationUtils.cpp +35 -0
  29. torchcodec/_core/ValidationUtils.h +21 -0
  30. torchcodec/_core/__init__.py +40 -0
  31. torchcodec/_core/_metadata.py +317 -0
  32. torchcodec/_core/custom_ops.cpp +727 -0
  33. torchcodec/_core/fetch_and_expose_non_gpl_ffmpeg_libs.cmake +300 -0
  34. torchcodec/_core/ops.py +455 -0
  35. torchcodec/_core/pybind_ops.cpp +87 -0
  36. torchcodec/_frame.py +145 -0
  37. torchcodec/_internally_replaced_utils.py +67 -0
  38. torchcodec/_samplers/__init__.py +7 -0
  39. torchcodec/_samplers/video_clip_sampler.py +430 -0
  40. torchcodec/decoders/__init__.py +11 -0
  41. torchcodec/decoders/_audio_decoder.py +177 -0
  42. torchcodec/decoders/_decoder_utils.py +52 -0
  43. torchcodec/decoders/_video_decoder.py +464 -0
  44. torchcodec/encoders/__init__.py +1 -0
  45. torchcodec/encoders/_audio_encoder.py +150 -0
  46. torchcodec/libtorchcodec_core4.dll +0 -0
  47. torchcodec/libtorchcodec_core5.dll +0 -0
  48. torchcodec/libtorchcodec_core6.dll +0 -0
  49. torchcodec/libtorchcodec_core7.dll +0 -0
  50. torchcodec/libtorchcodec_custom_ops4.dll +0 -0
  51. torchcodec/libtorchcodec_custom_ops5.dll +0 -0
  52. torchcodec/libtorchcodec_custom_ops6.dll +0 -0
  53. torchcodec/libtorchcodec_custom_ops7.dll +0 -0
  54. torchcodec/libtorchcodec_pybind_ops4.pyd +0 -0
  55. torchcodec/libtorchcodec_pybind_ops5.pyd +0 -0
  56. torchcodec/libtorchcodec_pybind_ops6.pyd +0 -0
  57. torchcodec/libtorchcodec_pybind_ops7.pyd +0 -0
  58. torchcodec/samplers/__init__.py +2 -0
  59. torchcodec/samplers/_common.py +84 -0
  60. torchcodec/samplers/_index_based.py +287 -0
  61. torchcodec/samplers/_time_based.py +350 -0
  62. torchcodec/version.py +2 -0
  63. torchcodec-0.7.0.dist-info/METADATA +242 -0
  64. torchcodec-0.7.0.dist-info/RECORD +67 -0
  65. torchcodec-0.7.0.dist-info/WHEEL +5 -0
  66. torchcodec-0.7.0.dist-info/licenses/LICENSE +28 -0
  67. torchcodec-0.7.0.dist-info/top_level.txt +2 -0
torchcodec/_core/ops.py ADDED
@@ -0,0 +1,455 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+ #
+ # This source code is licensed under the BSD-style license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import io
+ import json
+ import warnings
+ from types import ModuleType
+ from typing import List, Optional, Tuple, Union
+
+ import torch
+ from torch.library import get_ctx, register_fake
+
+ from torchcodec._internally_replaced_utils import (  # @manual=//pytorch/torchcodec/src:internally_replaced_utils
+     _get_extension_path,
+     _get_pybind_ops_module_name,
+     _load_pybind11_module,
+ )
+
+ _pybind_ops: Optional[ModuleType] = None
+
+
+ def load_torchcodec_shared_libraries():
+     # Successively try to load the shared libraries for each version of FFmpeg
+     # that we support. We always start with the highest version, working our way
+     # down to the lowest version. Once we can load ALL shared libraries for a
+     # version of FFmpeg, we have succeeded and we stop.
+     #
+     # Note that we use two different methods for loading shared libraries:
+     #
+     # 1. torch.ops.load_library(): For PyTorch custom ops and the C++ only
+     #    libraries the custom ops depend on. Loading libraries through PyTorch
+     #    registers the custom ops with PyTorch's runtime and the ops can be
+     #    accessed through torch.ops after loading.
+     #
+     # 2. importlib: For pybind11 modules. We load them dynamically, rather
+     #    than using a plain import statement. A plain import statement only
+     #    works when the module name and file name match exactly. Our shared
+     #    libraries do not meet those conditions.
+
+     exceptions = []
+     for ffmpeg_major_version in (7, 6, 5, 4):
+         pybind_ops_module_name = _get_pybind_ops_module_name(ffmpeg_major_version)
+         decoder_library_name = f"libtorchcodec_core{ffmpeg_major_version}"
+         custom_ops_library_name = f"libtorchcodec_custom_ops{ffmpeg_major_version}"
+         pybind_ops_library_name = f"libtorchcodec_pybind_ops{ffmpeg_major_version}"
+         try:
+             torch.ops.load_library(_get_extension_path(decoder_library_name))
+             torch.ops.load_library(_get_extension_path(custom_ops_library_name))
+
+             pybind_ops_library_path = _get_extension_path(pybind_ops_library_name)
+             global _pybind_ops
+             _pybind_ops = _load_pybind11_module(
+                 pybind_ops_module_name, pybind_ops_library_path
+             )
+             return
+         except Exception as e:
+             # TODO: recording and reporting exceptions this way is OK for now as it's just for debugging,
+             # but we should probably handle that via a proper logging mechanism.
+             exceptions.append((ffmpeg_major_version, e))
+
+     traceback = (
+         "\n[start of libtorchcodec loading traceback]\n"
+         + "\n".join(f"FFmpeg version {v}: {str(e)}" for v, e in exceptions)
+         + "\n[end of libtorchcodec loading traceback]."
+     )
+     raise RuntimeError(
+         f"""Could not load libtorchcodec. Likely causes:
+           1. FFmpeg is not properly installed in your environment. We support
+              versions 4, 5, 6 and 7.
+           2. The PyTorch version ({torch.__version__}) is not compatible with
+              this version of TorchCodec. Refer to the version compatibility
+              table:
+              https://github.com/pytorch/torchcodec?tab=readme-ov-file#installing-torchcodec.
+           3. Another runtime dependency; see exceptions below.
+         The following exceptions were raised as we tried to load libtorchcodec:
+         """
+         f"{traceback}"
+     )
+
+
+ load_torchcodec_shared_libraries()
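For readers unfamiliar with the importlib technique mentioned in the comment above: a pybind11 extension can be imported under a module name that differs from its file name by building a module spec by hand. The following is a minimal sketch of that general technique; the helper name and error handling are illustrative assumptions, not the shipped `_load_pybind11_module` implementation:

    import importlib.util

    def _load_pybind11_module_sketch(module_name: str, library_path: str):
        # Point a spec with the desired module name at the shared library on
        # disk; importlib picks the extension-module loader from the file
        # suffix (.pyd on Windows, .so elsewhere).
        spec = importlib.util.spec_from_file_location(module_name, library_path)
        if spec is None or spec.loader is None:
            raise ImportError(f"cannot load {module_name} from {library_path}")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module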
+
+
+ # Note: We use disallow_in_graph because PyTorch does constant propagation of
+ # factory functions.
+ create_from_file = torch._dynamo.disallow_in_graph(
+     torch.ops.torchcodec_ns.create_from_file.default
+ )
+ encode_audio_to_file = torch._dynamo.disallow_in_graph(
+     torch.ops.torchcodec_ns.encode_audio_to_file.default
+ )
+ encode_audio_to_tensor = torch._dynamo.disallow_in_graph(
+     torch.ops.torchcodec_ns.encode_audio_to_tensor.default
+ )
+ create_from_tensor = torch._dynamo.disallow_in_graph(
+     torch.ops.torchcodec_ns.create_from_tensor.default
+ )
+ _convert_to_tensor = torch._dynamo.disallow_in_graph(
+     torch.ops.torchcodec_ns._convert_to_tensor.default
+ )
+ add_video_stream = torch.ops.torchcodec_ns.add_video_stream.default
+ _add_video_stream = torch.ops.torchcodec_ns._add_video_stream.default
+ add_audio_stream = torch.ops.torchcodec_ns.add_audio_stream.default
+ seek_to_pts = torch.ops.torchcodec_ns.seek_to_pts.default
+ get_next_frame = torch.ops.torchcodec_ns.get_next_frame.default
+ get_frame_at_pts = torch.ops.torchcodec_ns.get_frame_at_pts.default
+ get_frame_at_index = torch.ops.torchcodec_ns.get_frame_at_index.default
+ get_frames_at_indices = torch.ops.torchcodec_ns.get_frames_at_indices.default
+ get_frames_by_pts = torch.ops.torchcodec_ns.get_frames_by_pts.default
+ get_frames_in_range = torch.ops.torchcodec_ns.get_frames_in_range.default
+ get_frames_by_pts_in_range = torch.ops.torchcodec_ns.get_frames_by_pts_in_range.default
+ get_frames_by_pts_in_range_audio = (
+     torch.ops.torchcodec_ns.get_frames_by_pts_in_range_audio.default
+ )
+ get_json_metadata = torch.ops.torchcodec_ns.get_json_metadata.default
+ _test_frame_pts_equality = torch.ops.torchcodec_ns._test_frame_pts_equality.default
+ _get_container_json_metadata = (
+     torch.ops.torchcodec_ns.get_container_json_metadata.default
+ )
+ _get_key_frame_indices = torch.ops.torchcodec_ns._get_key_frame_indices.default
+ scan_all_streams_to_update_metadata = (
+     torch.ops.torchcodec_ns.scan_all_streams_to_update_metadata.default
+ )
+ _get_stream_json_metadata = torch.ops.torchcodec_ns.get_stream_json_metadata.default
+ _get_json_ffmpeg_library_versions = (
+     torch.ops.torchcodec_ns._get_json_ffmpeg_library_versions.default
+ )
+
+
+ # =============================
+ # Functions not related to custom ops, but with implementations similar to the C++ ops
+ # =============================
+ def create_from_bytes(
+     video_bytes: bytes, seek_mode: Optional[str] = None
+ ) -> torch.Tensor:
+     with warnings.catch_warnings():
+         # Ignore warning stating that the underlying video_bytes buffer is
+         # non-writable.
+         warnings.filterwarnings("ignore", category=UserWarning)
+         buffer = torch.frombuffer(video_bytes, dtype=torch.uint8)
+     return create_from_tensor(buffer, seek_mode)
+
+
+ def create_from_file_like(
+     file_like: Union[io.RawIOBase, io.BufferedReader], seek_mode: Optional[str] = None
+ ) -> torch.Tensor:
+     assert _pybind_ops is not None
+     return _convert_to_tensor(_pybind_ops.create_from_file_like(file_like, seek_mode))
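A usage sketch with a hypothetical path. Based on the AVIOFileLikeContext design, the object must support read() and seek() for decoding, and since decoding is lazy it should stay alive (and open) for the lifetime of the decoder, which is why an in-memory buffer is used here rather than a file object closed by a with-block:

    import io

    with open("video.mp4", "rb") as f:          # hypothetical file
        buffer = io.BytesIO(f.read())
    decoder = create_from_file_like(buffer, seek_mode="approximate")
    add_video_stream(decoder)
    frame, pts, duration = get_next_frame(decoder)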
+
+
+ def encode_audio_to_file_like(
+     samples: torch.Tensor,
+     sample_rate: int,
+     format: str,
+     file_like: Union[io.RawIOBase, io.BufferedIOBase],
+     bit_rate: Optional[int] = None,
+     num_channels: Optional[int] = None,
+     desired_sample_rate: Optional[int] = None,
+ ) -> None:
+     """Encode audio samples to a file-like object.
+
+     Args:
+         samples: Audio samples tensor.
+         sample_rate: Sample rate of the input, in Hz.
+         format: Audio format (e.g., "wav", "mp3", "flac").
+         file_like: File-like object that supports write() and seek() methods.
+         bit_rate: Optional bit rate for encoding.
+         num_channels: Optional number of output channels.
+         desired_sample_rate: Optional desired sample rate for the output.
+     """
+     assert _pybind_ops is not None
+
+     if samples.dtype != torch.float32:
+         raise ValueError(f"samples must have dtype torch.float32, got {samples.dtype}")
+
+     # We're having the same problem as with the decoder's create_from_file_like:
+     # we should be able to pass a tensor directly, but this leads to a pybind
+     # error. To work around this, we pass the pointer to the tensor's data and
+     # its shape, and re-construct the tensor in C++. For this to work:
+     # - the tensor must be float32;
+     # - the tensor must be contiguous, which is why we call contiguous().
+     #   In theory we could avoid this restriction by also passing the strides;
+     # - IMPORTANT: the input samples tensor and its underlying data must stay
+     #   alive during the call.
+     #
+     # A more elegant solution would be to cast the tensor into a py::object, but
+     # casting the py::object back to a tensor in C++ seems to lead to the same
+     # pybind error.
+
+     samples = samples.contiguous()
+     _pybind_ops.encode_audio_to_file_like(
+         samples.data_ptr(),
+         list(samples.shape),
+         sample_rate,
+         format,
+         file_like,
+         bit_rate,
+         num_channels,
+         desired_sample_rate,
+     )
+
+     # This check is functionally useless, but it's critical to keep it: it
+     # ensures that samples is still alive during the call to
+     # encode_audio_to_file_like.
+     assert samples.is_contiguous()
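For reference, a usage sketch with illustrative values (a real call needs FFmpeg available at runtime):

    import io
    import torch

    # One second of random mono audio in [-1, 1], float32, shaped
    # (num_channels, num_samples) as the encoder expects.
    samples = torch.rand(1, 16_000, dtype=torch.float32) * 2 - 1
    out = io.BytesIO()
    encode_audio_to_file_like(samples, sample_rate=16_000, format="wav", file_like=out)
    wav_bytes = out.getvalue()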
+
+
+ # ==============================
+ # Abstract impls for the operators. Needed by torch.compile.
+ # ==============================
+ @register_fake("torchcodec_ns::create_from_file")
+ def create_from_file_abstract(filename: str, seek_mode: Optional[str]) -> torch.Tensor:
+     return torch.empty([], dtype=torch.long)
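For readers unfamiliar with register_fake: a fake (or "meta") implementation tells torch.compile the output shapes and dtypes of a custom op without running its real kernel, which is exactly what the registrations in this file provide. A self-contained toy sketch, independent of torchcodec (the toy_ns::double_it op is invented for illustration):

    import torch
    from torch.library import custom_op, register_fake

    @custom_op("toy_ns::double_it", mutates_args=())
    def double_it(x: torch.Tensor) -> torch.Tensor:
        return x * 2

    @register_fake("toy_ns::double_it")
    def double_it_fake(x: torch.Tensor) -> torch.Tensor:
        # Only metadata matters at trace time; no real data is touched.
        return torch.empty_like(x)

    compiled = torch.compile(lambda x: double_it(x) + 1)
    # compiled(torch.ones(3)) -> tensor([3., 3., 3.])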
+
+
+ @register_fake("torchcodec_ns::encode_audio_to_file")
+ def encode_audio_to_file_abstract(
+     samples: torch.Tensor,
+     sample_rate: int,
+     filename: str,
+     bit_rate: Optional[int] = None,
+     num_channels: Optional[int] = None,
+     desired_sample_rate: Optional[int] = None,
+ ) -> None:
+     return
+
+
+ @register_fake("torchcodec_ns::encode_audio_to_tensor")
+ def encode_audio_to_tensor_abstract(
+     samples: torch.Tensor,
+     sample_rate: int,
+     format: str,
+     bit_rate: Optional[int] = None,
+     num_channels: Optional[int] = None,
+     desired_sample_rate: Optional[int] = None,
+ ) -> torch.Tensor:
+     return torch.empty([], dtype=torch.long)
+
+
+ @register_fake("torchcodec_ns::create_from_tensor")
+ def create_from_tensor_abstract(
+     video_tensor: torch.Tensor, seek_mode: Optional[str]
+ ) -> torch.Tensor:
+     return torch.empty([], dtype=torch.long)
+
+
+ @register_fake("torchcodec_ns::_convert_to_tensor")
+ def _convert_to_tensor_abstract(decoder_ptr: int) -> torch.Tensor:
+     return torch.empty([], dtype=torch.long)
+
+
+ @register_fake("torchcodec_ns::_add_video_stream")
+ def _add_video_stream_abstract(
+     decoder: torch.Tensor,
+     *,
+     width: Optional[int] = None,
+     height: Optional[int] = None,
+     num_threads: Optional[int] = None,
+     dimension_order: Optional[str] = None,
+     stream_index: Optional[int] = None,
+     device: Optional[str] = None,
+     custom_frame_mappings: Optional[
+         tuple[torch.Tensor, torch.Tensor, torch.Tensor]
+     ] = None,
+     color_conversion_library: Optional[str] = None,
+ ) -> None:
+     return
+
+
+ @register_fake("torchcodec_ns::add_video_stream")
+ def add_video_stream_abstract(
+     decoder: torch.Tensor,
+     *,
+     width: Optional[int] = None,
+     height: Optional[int] = None,
+     num_threads: Optional[int] = None,
+     dimension_order: Optional[str] = None,
+     stream_index: Optional[int] = None,
+     device: Optional[str] = None,
+     custom_frame_mappings: Optional[
+         tuple[torch.Tensor, torch.Tensor, torch.Tensor]
+     ] = None,
+ ) -> None:
+     return
+
+
+ @register_fake("torchcodec_ns::add_audio_stream")
+ def add_audio_stream_abstract(
+     decoder: torch.Tensor,
+     *,
+     stream_index: Optional[int] = None,
+     sample_rate: Optional[int] = None,
+     num_channels: Optional[int] = None,
+ ) -> None:
+     return
+
+
+ @register_fake("torchcodec_ns::seek_to_pts")
+ def seek_abstract(decoder: torch.Tensor, seconds: float) -> None:
+     return
+
+
+ @register_fake("torchcodec_ns::get_next_frame")
+ def get_next_frame_abstract(
+     decoder: torch.Tensor,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     # Images are 3 dimensions: height, width, channels.
+     # The exact permutation depends on the constructor options passed in.
+     image_size = [get_ctx().new_dynamic_size() for _ in range(3)]
+     return (
+         torch.empty(image_size),
+         torch.empty([], dtype=torch.float),
+         torch.empty([], dtype=torch.float),
+     )
+
+
+ @register_fake("torchcodec_ns::get_frame_at_pts")
+ def get_frame_at_pts_abstract(
+     decoder: torch.Tensor, seconds: float
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     image_size = [get_ctx().new_dynamic_size() for _ in range(3)]
+     return (
+         torch.empty(image_size),
+         torch.empty([], dtype=torch.float),
+         torch.empty([], dtype=torch.float),
+     )
+
+
+ @register_fake("torchcodec_ns::get_frames_by_pts")
+ def get_frames_by_pts_abstract(
+     decoder: torch.Tensor,
+     *,
+     timestamps: List[float],
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     image_size = [get_ctx().new_dynamic_size() for _ in range(4)]
+     return (
+         torch.empty(image_size),
+         torch.empty([], dtype=torch.float),
+         torch.empty([], dtype=torch.float),
+     )
+
+
+ @register_fake("torchcodec_ns::get_frame_at_index")
+ def get_frame_at_index_abstract(
+     decoder: torch.Tensor, *, frame_index: int
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     image_size = [get_ctx().new_dynamic_size() for _ in range(3)]
+     return (
+         torch.empty(image_size),
+         torch.empty([], dtype=torch.float),
+         torch.empty([], dtype=torch.float),
+     )
+
+
+ @register_fake("torchcodec_ns::get_frames_at_indices")
+ def get_frames_at_indices_abstract(
+     decoder: torch.Tensor,
+     *,
+     frame_indices: List[int],
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     image_size = [get_ctx().new_dynamic_size() for _ in range(4)]
+     return (
+         torch.empty(image_size),
+         torch.empty([], dtype=torch.float),
+         torch.empty([], dtype=torch.float),
+     )
+
+
+ @register_fake("torchcodec_ns::get_frames_in_range")
+ def get_frames_in_range_abstract(
+     decoder: torch.Tensor,
+     *,
+     start: int,
+     stop: int,
+     step: Optional[int] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     image_size = [get_ctx().new_dynamic_size() for _ in range(4)]
+     return (
+         torch.empty(image_size),
+         torch.empty([], dtype=torch.float),
+         torch.empty([], dtype=torch.float),
+     )
+
+
+ @register_fake("torchcodec_ns::get_frames_by_pts_in_range")
+ def get_frames_by_pts_in_range_abstract(
+     decoder: torch.Tensor,
+     *,
+     start_seconds: float,
+     stop_seconds: float,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+     image_size = [get_ctx().new_dynamic_size() for _ in range(4)]
+     return (
+         torch.empty(image_size),
+         torch.empty([], dtype=torch.float),
+         torch.empty([], dtype=torch.float),
+     )
+
+
+ @register_fake("torchcodec_ns::get_frames_by_pts_in_range_audio")
+ def get_frames_by_pts_in_range_audio_abstract(
+     decoder: torch.Tensor,
+     *,
+     start_seconds: float,
+     stop_seconds: Optional[float] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+     image_size = [get_ctx().new_dynamic_size() for _ in range(4)]
+     return (torch.empty(image_size), torch.empty([], dtype=torch.float))
+
+
+ @register_fake("torchcodec_ns::_get_key_frame_indices")
+ def get_key_frame_indices_abstract(decoder: torch.Tensor) -> torch.Tensor:
+     return torch.empty([], dtype=torch.int)
+
+
+ @register_fake("torchcodec_ns::get_json_metadata")
+ def get_json_metadata_abstract(decoder: torch.Tensor) -> str:
+     return ""
+
+
+ @register_fake("torchcodec_ns::get_container_json_metadata")
+ def get_container_json_metadata_abstract(decoder: torch.Tensor) -> str:
+     return ""
+
+
+ @register_fake("torchcodec_ns::get_stream_json_metadata")
+ def get_stream_json_metadata_abstract(decoder: torch.Tensor, stream_idx: int) -> str:
+     return ""
+
+
+ @register_fake("torchcodec_ns::_test_frame_pts_equality")
+ def _test_frame_pts_equality_abstract(
+     decoder: torch.Tensor,
+     *,
+     frame_index: int,
+     pts_seconds_to_test: float,
+ ) -> bool:
+     return False
+
+
+ @register_fake("torchcodec_ns::_get_json_ffmpeg_library_versions")
+ def _get_json_ffmpeg_library_versions_abstract() -> str:
+     return ""
+
+
+ @register_fake("torchcodec_ns::scan_all_streams_to_update_metadata")
+ def scan_all_streams_to_update_metadata_abstract(decoder: torch.Tensor) -> None:
+     return
+
+
+ def get_ffmpeg_library_versions():
+     versions_json = _get_json_ffmpeg_library_versions()
+     return json.loads(versions_json)
torchcodec/_core/pybind_ops.cpp ADDED
@@ -0,0 +1,87 @@
+ // Copyright (c) Meta Platforms, Inc. and affiliates.
+ // All rights reserved.
+ //
+ // This source code is licensed under the BSD-style license found in the
+ // LICENSE file in the root directory of this source tree.
+
+ #include <pybind11/pybind11.h>
+ #include <pybind11/stl.h>
+ #include <cstdint>
+ #include <string>
+
+ #include "src/torchcodec/_core/AVIOFileLikeContext.h"
+ #include "src/torchcodec/_core/Encoder.h"
+ #include "src/torchcodec/_core/SingleStreamDecoder.h"
+ #include "src/torchcodec/_core/StreamOptions.h"
+ #include "src/torchcodec/_core/ValidationUtils.h"
+
+ namespace py = pybind11;
+
+ namespace facebook::torchcodec {
+
+ // In principle, this should be able to return a tensor. But when we try that,
+ // we run into the bug reported here:
+ //
+ // https://github.com/pytorch/pytorch/issues/136664
+ //
+ // So we instead launder the pointer through an int, and then use a conversion
+ // function on the custom ops side to launder that int into a tensor.
+ int64_t create_from_file_like(
+     py::object file_like,
+     std::optional<std::string_view> seek_mode) {
+   SingleStreamDecoder::SeekMode realSeek = SingleStreamDecoder::SeekMode::exact;
+   if (seek_mode.has_value()) {
+     realSeek = seekModeFromString(seek_mode.value());
+   }
+
+   auto avioContextHolder =
+       std::make_unique<AVIOFileLikeContext>(file_like, /*isForWriting=*/false);
+
+   SingleStreamDecoder* decoder =
+       new SingleStreamDecoder(std::move(avioContextHolder), realSeek);
+   return reinterpret_cast<int64_t>(decoder);
+ }
+
+ void encode_audio_to_file_like(
+     int64_t data_ptr,
+     const std::vector<int64_t>& shape,
+     int64_t sample_rate,
+     std::string_view format,
+     py::object file_like,
+     std::optional<int64_t> bit_rate = std::nullopt,
+     std::optional<int64_t> num_channels = std::nullopt,
+     std::optional<int64_t> desired_sample_rate = std::nullopt) {
+   // We assume float32 *and* contiguity; this must be enforced by the caller.
+   auto tensor_options = torch::TensorOptions().dtype(torch::kFloat32);
+   auto samples = torch::from_blob(
+       reinterpret_cast<void*>(data_ptr), shape, tensor_options);
+
+   AudioStreamOptions audioStreamOptions;
+   audioStreamOptions.bitRate = validateOptionalInt64ToInt(bit_rate, "bit_rate");
+   audioStreamOptions.numChannels =
+       validateOptionalInt64ToInt(num_channels, "num_channels");
+   audioStreamOptions.sampleRate =
+       validateOptionalInt64ToInt(desired_sample_rate, "desired_sample_rate");
+
+   auto avioContextHolder =
+       std::make_unique<AVIOFileLikeContext>(file_like, /*isForWriting=*/true);
+
+   AudioEncoder encoder(
+       samples,
+       validateInt64ToInt(sample_rate, "sample_rate"),
+       format,
+       std::move(avioContextHolder),
+       audioStreamOptions);
+   encoder.encode();
+ }
+
+ #ifndef PYBIND_OPS_MODULE_NAME
+ #error PYBIND_OPS_MODULE_NAME must be defined!
+ #endif
+
+ PYBIND11_MODULE(PYBIND_OPS_MODULE_NAME, m) {
+   m.def("create_from_file_like", &create_from_file_like);
+   m.def("encode_audio_to_file_like", &encode_audio_to_file_like);
+ }
+
+ } // namespace facebook::torchcodec
torchcodec/_frame.py ADDED
@@ -0,0 +1,145 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+ #
+ # This source code is licensed under the BSD-style license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import dataclasses
+ from dataclasses import dataclass
+ from typing import Iterable, Iterator, Union
+
+ from torch import Tensor
+
+
+ def _frame_repr(self):
+     # Utility to replace __repr__ method of dataclasses below. This prints the
+     # shape of the .data tensor rather than printing the (potentially very long)
+     # data tensor itself.
+     s = self.__class__.__name__ + ":\n"
+     spaces = " "
+     for field in dataclasses.fields(self):
+         field_name = field.name
+         field_val = getattr(self, field_name)
+         if field_name == "data":
+             field_name = "data (shape)"
+             field_val = field_val.shape
+         s += f"{spaces}{field_name}: {field_val}\n"
+     return s
+
+
+ @dataclass
+ class Frame(Iterable):
+     """A single video frame with associated metadata."""
+
+     data: Tensor
+     """The frame data (3-D ``torch.Tensor``)."""
+     pts_seconds: float
+     """The :term:`pts` of the frame, in seconds (float)."""
+     duration_seconds: float
+     """The duration of the frame, in seconds (float)."""
+
+     def __post_init__(self):
+         # This is called after __init__() when a Frame is created. We can run
+         # input validation checks here.
+         if not self.data.ndim == 3:
+             raise ValueError(f"data must be 3-dimensional, got {self.data.shape = }")
+         self.pts_seconds = float(self.pts_seconds)
+         self.duration_seconds = float(self.duration_seconds)
+
+     def __iter__(self) -> Iterator[Union[Tensor, float]]:
+         for field in dataclasses.fields(self):
+             yield getattr(self, field.name)
+
+     def __repr__(self):
+         return _frame_repr(self)
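Because the dataclass is iterable over its fields, a Frame can be unpacked like a tuple. A small sketch with dummy data (the shape is illustrative):

    import torch

    frame = Frame(
        data=torch.zeros(3, 270, 480, dtype=torch.uint8),  # e.g. C, H, W
        pts_seconds=0.0,
        duration_seconds=1 / 30,
    )
    # Fields are yielded in declaration order: data, pts, duration.
    data, pts_seconds, duration_seconds = frame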
+
+
+ @dataclass
+ class FrameBatch(Iterable):
+     """Multiple video frames with associated metadata.
+
+     The ``data`` tensor is typically 4D for sequences of frames (NHWC or NCHW),
+     or 5D for sequences of clips, as returned by the :ref:`samplers
+     <sphx_glr_generated_examples_decoding_sampling.py>`. When ``data`` is 4D (resp. 5D)
+     the ``pts_seconds`` and ``duration_seconds`` tensors are 1D (resp. 2D).
+
+     .. note::
+         The ``pts_seconds`` and ``duration_seconds`` Tensors are always returned
+         on CPU, even if ``data`` is on GPU.
+     """
+
+     data: Tensor
+     """The frames data (``torch.Tensor`` of uint8)."""
+     pts_seconds: Tensor
+     """The :term:`pts` of the frame, in seconds (``torch.Tensor`` of floats)."""
+     duration_seconds: Tensor
+     """The duration of the frame, in seconds (``torch.Tensor`` of floats)."""
+
+     def __post_init__(self):
+         # This is called after __init__() when a FrameBatch is created. We can
+         # run input validation checks here.
+         if self.data.ndim < 3:
+             raise ValueError(
+                 f"data must be at least 3-dimensional, got {self.data.shape = }"
+             )
+
+         leading_dims = self.data.shape[:-3]
+         if not (leading_dims == self.pts_seconds.shape == self.duration_seconds.shape):
+             raise ValueError(
+                 "Tried to create a FrameBatch but the leading dimensions of the inputs do not match. "
+                 f"Got {self.data.shape = } so we expected the shape of pts_seconds and "
+                 f"duration_seconds to be {leading_dims = }, but got "
+                 f"{self.pts_seconds.shape = } and {self.duration_seconds.shape = }."
+             )
+
+     def __iter__(self) -> Iterator["FrameBatch"]:
+         for data, pts_seconds, duration_seconds in zip(
+             self.data, self.pts_seconds, self.duration_seconds
+         ):
+             yield FrameBatch(
+                 data=data,
+                 pts_seconds=pts_seconds,
+                 duration_seconds=duration_seconds,
+             )
+
+     def __getitem__(self, key) -> "FrameBatch":
+         return FrameBatch(
+             data=self.data[key],
+             pts_seconds=self.pts_seconds[key],
+             duration_seconds=self.duration_seconds[key],
+         )
+
+     def __len__(self):
+         return len(self.data)
+
+     def __repr__(self):
+         return _frame_repr(self)
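Indexing and iteration both preserve the invariant checked in __post_init__: the leading dimensions of ``data`` must match the shapes of ``pts_seconds`` and ``duration_seconds``, so indexing a 4D batch with an int yields a FrameBatch holding a single 3D frame with scalar pts and duration. A sketch with dummy data:

    import torch

    batch = FrameBatch(
        data=torch.zeros(5, 3, 270, 480, dtype=torch.uint8),  # N, C, H, W
        pts_seconds=torch.arange(5, dtype=torch.float) / 30,
        duration_seconds=torch.full((5,), 1 / 30),
    )
    first = batch[0]     # FrameBatch with 3D data and scalar pts/duration
    middle = batch[1:4]  # FrameBatch of 3 frames
    assert len(middle) == 3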
+
+
+ @dataclass
+ class AudioSamples(Iterable):
+     """Audio samples with associated metadata."""
+
+     data: Tensor
+     """The sample data (``torch.Tensor`` of float in [-1, 1], shape is ``(num_channels, num_samples)``)."""
+     pts_seconds: float
+     """The :term:`pts` of the first sample, in seconds."""
+     duration_seconds: float
+     """The duration of the samples, in seconds."""
+     sample_rate: int
+     """The sample rate of the samples, in Hz."""
+
+     def __post_init__(self):
+         # This is called after __init__() when an AudioSamples is created. We
+         # can run input validation checks here.
+         if not self.data.ndim == 2:
+             raise ValueError(f"data must be 2-dimensional, got {self.data.shape = }")
+         self.pts_seconds = float(self.pts_seconds)
+         self.sample_rate = int(self.sample_rate)
+
+     def __iter__(self) -> Iterator[Union[Tensor, float]]:
+         for field in dataclasses.fields(self):
+             yield getattr(self, field.name)
+
+     def __repr__(self):
+         return _frame_repr(self)
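A construction sketch with a synthetic sine wave (all values illustrative), showing the 2D ``(num_channels, num_samples)`` layout the validation enforces:

    import math
    import torch

    sample_rate = 16_000
    t = torch.arange(sample_rate, dtype=torch.float32) / sample_rate
    sine = torch.sin(2 * math.pi * 440 * t).unsqueeze(0)  # (1 channel, N samples)
    samples = AudioSamples(
        data=sine,
        pts_seconds=0.0,
        duration_seconds=1.0,
        sample_rate=sample_rate,
    )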