torchcodec 0.8.0__cp313-cp313-macosx_12_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of torchcodec might be problematic. Click here for more details.

Files changed (82) hide show
  1. torchcodec/.dylibs/libc++.1.0.dylib +0 -0
  2. torchcodec/.dylibs/libpython3.13.dylib +0 -0
  3. torchcodec/__init__.py +16 -0
  4. torchcodec/_core/AVIOContextHolder.cpp +60 -0
  5. torchcodec/_core/AVIOContextHolder.h +64 -0
  6. torchcodec/_core/AVIOFileLikeContext.cpp +98 -0
  7. torchcodec/_core/AVIOFileLikeContext.h +55 -0
  8. torchcodec/_core/AVIOTensorContext.cpp +123 -0
  9. torchcodec/_core/AVIOTensorContext.h +43 -0
  10. torchcodec/_core/BetaCudaDeviceInterface.cpp +636 -0
  11. torchcodec/_core/BetaCudaDeviceInterface.h +191 -0
  12. torchcodec/_core/CMakeLists.txt +325 -0
  13. torchcodec/_core/CUDACommon.cpp +315 -0
  14. torchcodec/_core/CUDACommon.h +46 -0
  15. torchcodec/_core/Cache.h +138 -0
  16. torchcodec/_core/CpuDeviceInterface.cpp +347 -0
  17. torchcodec/_core/CpuDeviceInterface.h +132 -0
  18. torchcodec/_core/CudaDeviceInterface.cpp +357 -0
  19. torchcodec/_core/CudaDeviceInterface.h +64 -0
  20. torchcodec/_core/DeviceInterface.cpp +117 -0
  21. torchcodec/_core/DeviceInterface.h +148 -0
  22. torchcodec/_core/Encoder.cpp +807 -0
  23. torchcodec/_core/Encoder.h +173 -0
  24. torchcodec/_core/FFMPEGCommon.cpp +608 -0
  25. torchcodec/_core/FFMPEGCommon.h +245 -0
  26. torchcodec/_core/FilterGraph.cpp +149 -0
  27. torchcodec/_core/FilterGraph.h +59 -0
  28. torchcodec/_core/Frame.cpp +42 -0
  29. torchcodec/_core/Frame.h +72 -0
  30. torchcodec/_core/Metadata.h +72 -0
  31. torchcodec/_core/NVDECCache.cpp +70 -0
  32. torchcodec/_core/NVDECCache.h +104 -0
  33. torchcodec/_core/SingleStreamDecoder.cpp +1719 -0
  34. torchcodec/_core/SingleStreamDecoder.h +405 -0
  35. torchcodec/_core/StreamOptions.h +63 -0
  36. torchcodec/_core/Transform.cpp +60 -0
  37. torchcodec/_core/Transform.h +59 -0
  38. torchcodec/_core/ValidationUtils.cpp +35 -0
  39. torchcodec/_core/ValidationUtils.h +21 -0
  40. torchcodec/_core/__init__.py +41 -0
  41. torchcodec/_core/_metadata.py +317 -0
  42. torchcodec/_core/custom_ops.cpp +875 -0
  43. torchcodec/_core/fetch_and_expose_non_gpl_ffmpeg_libs.cmake +360 -0
  44. torchcodec/_core/nvcuvid_include/cuviddec.h +1374 -0
  45. torchcodec/_core/nvcuvid_include/nvcuvid.h +610 -0
  46. torchcodec/_core/ops.py +498 -0
  47. torchcodec/_core/pybind_ops.cpp +50 -0
  48. torchcodec/_frame.py +145 -0
  49. torchcodec/_internally_replaced_utils.py +67 -0
  50. torchcodec/_samplers/__init__.py +7 -0
  51. torchcodec/_samplers/video_clip_sampler.py +418 -0
  52. torchcodec/decoders/__init__.py +12 -0
  53. torchcodec/decoders/_audio_decoder.py +177 -0
  54. torchcodec/decoders/_decoder_utils.py +112 -0
  55. torchcodec/decoders/_video_decoder.py +500 -0
  56. torchcodec/encoders/__init__.py +1 -0
  57. torchcodec/encoders/_audio_encoder.py +150 -0
  58. torchcodec/libtorchcodec_core4.dylib +0 -0
  59. torchcodec/libtorchcodec_core5.dylib +0 -0
  60. torchcodec/libtorchcodec_core6.dylib +0 -0
  61. torchcodec/libtorchcodec_core7.dylib +0 -0
  62. torchcodec/libtorchcodec_core8.dylib +0 -0
  63. torchcodec/libtorchcodec_custom_ops4.dylib +0 -0
  64. torchcodec/libtorchcodec_custom_ops5.dylib +0 -0
  65. torchcodec/libtorchcodec_custom_ops6.dylib +0 -0
  66. torchcodec/libtorchcodec_custom_ops7.dylib +0 -0
  67. torchcodec/libtorchcodec_custom_ops8.dylib +0 -0
  68. torchcodec/libtorchcodec_pybind_ops4.so +0 -0
  69. torchcodec/libtorchcodec_pybind_ops5.so +0 -0
  70. torchcodec/libtorchcodec_pybind_ops6.so +0 -0
  71. torchcodec/libtorchcodec_pybind_ops7.so +0 -0
  72. torchcodec/libtorchcodec_pybind_ops8.so +0 -0
  73. torchcodec/samplers/__init__.py +2 -0
  74. torchcodec/samplers/_common.py +84 -0
  75. torchcodec/samplers/_index_based.py +287 -0
  76. torchcodec/samplers/_time_based.py +358 -0
  77. torchcodec/version.py +2 -0
  78. torchcodec-0.8.0.dist-info/METADATA +253 -0
  79. torchcodec-0.8.0.dist-info/RECORD +82 -0
  80. torchcodec-0.8.0.dist-info/WHEEL +5 -0
  81. torchcodec-0.8.0.dist-info/licenses/LICENSE +28 -0
  82. torchcodec-0.8.0.dist-info/top_level.txt +2 -0
@@ -0,0 +1,405 @@
1
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <torch/types.h>
#include <cstdint>
#include <memory>
#include <ostream>
#include <string_view>

#include "src/torchcodec/_core/AVIOContextHolder.h"
#include "src/torchcodec/_core/DeviceInterface.h"
#include "src/torchcodec/_core/FFMPEGCommon.h"
#include "src/torchcodec/_core/Frame.h"
#include "src/torchcodec/_core/StreamOptions.h"
#include "src/torchcodec/_core/Transform.h"

namespace facebook::torchcodec {

// The SingleStreamDecoder class can be used to decode video frames to Tensors.
// Note that SingleStreamDecoder is not thread-safe.
// Do not call non-const APIs concurrently on the same object.
class SingleStreamDecoder {
 public:
  // --------------------------------------------------------------------------
  // CONSTRUCTION API
  // --------------------------------------------------------------------------

  // How frame lookups by time/index are resolved:
  // - exact: requires a full file scan to build an accurate index.
  // - approximate: relies on container metadata (fast, may be imprecise).
  // - custom_frame_mappings: caller supplies the index via FrameMappings.
  enum class SeekMode { exact, approximate, custom_frame_mappings };

  // Creates a SingleStreamDecoder from the video at videoFilePath.
  explicit SingleStreamDecoder(
      const std::string& videoFilePath,
      SeekMode seekMode = SeekMode::exact);

  // Creates a SingleStreamDecoder using the provided AVIOContext inside the
  // AVIOContextHolder. The AVIOContextHolder is the base class, and the
  // derived class will have specialized how the custom read, seek and writes
  // work.
  explicit SingleStreamDecoder(
      std::unique_ptr<AVIOContextHolder> context,
      SeekMode seekMode = SeekMode::exact);

  // --------------------------------------------------------------------------
  // VIDEO METADATA QUERY API
  // --------------------------------------------------------------------------

  // Updates the metadata of the video to accurate values obtained by scanning
  // the contents of the video file. Also updates each StreamInfo's index, i.e.
  // the allFrames and keyFrames vectors.
  void scanFileAndUpdateMetadataAndIndex();

  // Sorts the keyFrames and allFrames vectors in each StreamInfo by pts.
  void sortAllFrames();

  // Returns the metadata for the container.
  ContainerMetadata getContainerMetadata() const;

  // Returns the key frame indices as a tensor. The tensor is 1D and contains
  // int64 values, where each value is the frame index for a key frame.
  torch::Tensor getKeyFrameIndices();

  // --------------------------------------------------------------------------
  // ADDING STREAMS API
  // --------------------------------------------------------------------------

  // FrameMappings is used for the custom_frame_mappings seek mode to store
  // metadata of frames in a stream. The size of all tensors in this struct
  // must match.
  struct FrameMappings {
    // 1D tensor of int64, each value is the PTS of a frame in timebase units.
    torch::Tensor all_frames;
    // 1D tensor of bool, each value indicates if the corresponding frame in
    // all_frames is a key frame.
    torch::Tensor is_key_frame;
    // 1D tensor of int64, each value is the duration of the corresponding
    // frame in all_frames in timebase units.
    torch::Tensor duration;
  };

  void addVideoStream(
      int streamIndex,
      std::vector<Transform*>& transforms,
      const VideoStreamOptions& videoStreamOptions = VideoStreamOptions(),
      std::optional<FrameMappings> customFrameMappings = std::nullopt);
  void addAudioStream(
      int streamIndex,
      const AudioStreamOptions& audioStreamOptions = AudioStreamOptions());

  // --------------------------------------------------------------------------
  // DECODING AND SEEKING APIs
  // --------------------------------------------------------------------------

  // Places the cursor at the first frame on or after the position in seconds.
  // Calling getNextFrame() will return the first frame at
  // or after this position.
  void setCursorPtsInSeconds(double seconds);

  // Decodes the frame where the current cursor position is. It also advances
  // the cursor to the next frame.
  FrameOutput getNextFrame();

  // Decodes and returns the frame at the given index in the active stream.
  FrameOutput getFrameAtIndex(int64_t frameIndex);

  // Returns frames at the given indices for a given stream as a single stacked
  // Tensor.
  FrameBatchOutput getFramesAtIndices(const torch::Tensor& frameIndices);

  // Returns frames within a given range. The range is defined by [start,
  // stop). The values retrieved from the range are: [start, start+step,
  // start+(2*step), start+(3*step), ..., stop). The default for step is 1.
  FrameBatchOutput getFramesInRange(int64_t start, int64_t stop, int64_t step);

  // Decodes the first frame in any added stream that is visible at a given
  // timestamp. Frames in the video have a presentation timestamp and a
  // duration. For example, if a frame has presentation timestamp of 5.0s and a
  // duration of 1.0s, it will be visible in the timestamp range [5.0, 6.0).
  // i.e. it will be returned when this function is called with seconds=5.0 or
  // seconds=5.999, etc.
  FrameOutput getFramePlayedAt(double seconds);

  // Batched variant of getFramePlayedAt: one frame per requested timestamp.
  FrameBatchOutput getFramesPlayedAt(const torch::Tensor& timestamps);

  // Returns frames within a given pts range. The range is defined by
  // [startSeconds, stopSeconds) with respect to the pts values for frames. The
  // returned frames are in pts order.
  //
  // Note that while stopSeconds is excluded in the half open range, this
  // really only makes a difference when stopSeconds is exactly the pts value
  // for a frame. Otherwise, the moment in time immediately before stopSeconds
  // is in the range, and that time maps to the same frame as stopSeconds.
  //
  // The frames returned are the frames that would be played by our abstract
  // player. Our abstract player displays frames based on pts only. It displays
  // frame i starting at the pts for frame i, and stops at the pts for frame
  // i+1. This model ignores a frame's reported duration.
  //
  // Valid values for startSeconds and stopSeconds are:
  //
  // [beginStreamPtsSecondsFromContent, endStreamPtsSecondsFromContent)
  FrameBatchOutput getFramesPlayedInRange(
      double startSeconds,
      double stopSeconds);

  // Audio analogue of getFramesPlayedInRange. An empty stopSecondsOptional
  // means "decode until the end of the stream".
  AudioFramesOutput getFramesPlayedInRangeAudio(
      double startSeconds,
      std::optional<double> stopSecondsOptional = std::nullopt);

  // Thrown by the decoding APIs when the end of the stream/file is reached.
  class EndOfFileException : public std::runtime_error {
   public:
    explicit EndOfFileException(const std::string& msg)
        : std::runtime_error(msg) {}
  };

  // --------------------------------------------------------------------------
  // MORALLY PRIVATE APIS
  // --------------------------------------------------------------------------
  // These are APIs that should be private, but that are effectively exposed
  // for practical reasons, typically for testing purposes.

  // Once getFrameAtIndex supports the preAllocatedOutputTensor parameter, we
  // can move it back to private.
  FrameOutput getFrameAtIndexInternal(
      int64_t frameIndex,
      std::optional<torch::Tensor> preAllocatedOutputTensor = std::nullopt);

  // Exposed for _test_frame_pts_equality, which is used to test non-regression
  // of pts resolution (64 to 32 bit floats)
  double getPtsSecondsForFrame(int64_t frameIndex);

  // Exposed for performance testing.
  struct DecodeStats {
    int64_t numSeeksAttempted = 0;
    int64_t numSeeksDone = 0;
    int64_t numSeeksSkipped = 0;
    int64_t numPacketsRead = 0;
    int64_t numPacketsSentToDecoder = 0;
    int64_t numFramesReceivedByDecoder = 0;
    int64_t numFlushes = 0;
  };

  DecodeStats getDecodeStats() const;
  void resetDecodeStats();

 private:
  // --------------------------------------------------------------------------
  // STREAMINFO AND ASSOCIATED STRUCTS
  // --------------------------------------------------------------------------

  // Per-frame index entry built during the file scan.
  struct FrameInfo {
    int64_t pts = 0;

    // The value of the nextPts default is important: the last frame's nextPts
    // will be INT64_MAX, which ensures that the allFrames vec contains
    // FrameInfo structs with *increasing* nextPts values. That's a necessary
    // condition for the binary searches on those values to work properly (as
    // typically done during pts -> index conversions).
    // TODO: This field is unset (left to the default) for entries in the
    // keyFrames vec!
    int64_t nextPts = INT64_MAX;

    // Note that frameIndex is ALWAYS the index into all of the frames in that
    // stream, even when the FrameInfo is part of the key frame index. Given a
    // FrameInfo for a key frame, the frameIndex allows us to know which frame
    // that is in the stream.
    int64_t frameIndex = 0;

    // Indicates whether a frame is a key frame. It may appear redundant as
    // it's only true for FrameInfos in the keyFrames index, but it is needed
    // to correctly map frames between allFrames and keyFrames during the scan.
    bool isKeyFrame = false;
  };

  // Per-stream decoding state and frame indices.
  struct StreamInfo {
    int streamIndex = -1;
    AVStream* stream = nullptr;
    AVMediaType avMediaType = AVMEDIA_TYPE_UNKNOWN;

    AVRational timeBase = {};
    UniqueAVCodecContext codecContext;

    // The FrameInfo indices we built when scanFileAndUpdateMetadataAndIndex
    // was called.
    std::vector<FrameInfo> keyFrames;
    std::vector<FrameInfo> allFrames;

    VideoStreamOptions videoStreamOptions;
    AudioStreamOptions audioStreamOptions;
  };

  // --------------------------------------------------------------------------
  // INITIALIZERS
  // --------------------------------------------------------------------------

  void initializeDecoder();

  // Reads the user provided frame index and updates each StreamInfo's index,
  // i.e. the allFrames and keyFrames vectors, and
  // endStreamPtsSecondsFromContent
  void readCustomFrameMappingsUpdateMetadataAndIndex(
      int streamIndex,
      FrameMappings customFrameMappings);

  // --------------------------------------------------------------------------
  // DECODING APIS AND RELATED UTILS
  // --------------------------------------------------------------------------

  void setCursor(int64_t pts);
  void setCursor(double) = delete; // prevent calls with doubles and floats
  bool canWeAvoidSeeking() const;

  void maybeSeekToBeforeDesiredPts();

  UniqueAVFrame decodeAVFrame(
      std::function<bool(const UniqueAVFrame&)> filterFunction);

  FrameOutput getNextFrameInternal(
      std::optional<torch::Tensor> preAllocatedOutputTensor = std::nullopt);

  torch::Tensor maybePermuteHWC2CHW(torch::Tensor& hwcTensor);

  FrameOutput convertAVFrameToFrameOutput(
      UniqueAVFrame& avFrame,
      std::optional<torch::Tensor> preAllocatedOutputTensor = std::nullopt);

  void convertAVFrameToFrameOutputOnCPU(
      UniqueAVFrame& avFrame,
      FrameOutput& frameOutput,
      std::optional<torch::Tensor> preAllocatedOutputTensor = std::nullopt);

  void convertAudioAVFrameToFrameOutputOnCPU(
      UniqueAVFrame& srcAVFrame,
      FrameOutput& frameOutput);

  torch::Tensor convertAVFrameToTensorUsingFilterGraph(
      const UniqueAVFrame& avFrame);

  int convertAVFrameToTensorUsingSwsScale(
      const UniqueAVFrame& avFrame,
      torch::Tensor& outputTensor);

  std::optional<torch::Tensor> maybeFlushSwrBuffers();

  // --------------------------------------------------------------------------
  // PTS <-> INDEX CONVERSIONS
  // --------------------------------------------------------------------------

  int getKeyFrameIndexForPts(int64_t pts) const;

  // Returns the key frame index of the presentation timestamp using our index.
  // We build this index by scanning the file in
  // scanFileAndUpdateMetadataAndIndex
  int getKeyFrameIndexForPtsUsingScannedIndex(
      const std::vector<SingleStreamDecoder::FrameInfo>& keyFrames,
      int64_t pts) const;

  int64_t secondsToIndexLowerBound(double seconds);

  int64_t secondsToIndexUpperBound(double seconds);

  int64_t getPts(int64_t frameIndex);

  // --------------------------------------------------------------------------
  // STREAM AND METADATA APIS
  // --------------------------------------------------------------------------

  void addStream(
      int streamIndex,
      AVMediaType mediaType,
      const torch::Device& device = torch::kCPU,
      const std::string_view deviceVariant = "default",
      std::optional<int> ffmpegThreadCount = std::nullopt);

  // Returns the "best" stream index for a given media type. The "best" is
  // determined by various heuristics in FFMPEG.
  // See
  // https://ffmpeg.org/doxygen/trunk/group__lavf__decoding.html#ga757780d38f482deb4d809c6c521fbcc2
  // for more details about the heuristics.
  int getBestStreamIndex(AVMediaType mediaType);

  std::optional<int64_t> getNumFrames(const StreamMetadata& streamMetadata);
  double getMinSeconds(const StreamMetadata& streamMetadata);
  std::optional<double> getMaxSeconds(const StreamMetadata& streamMetadata);

  // --------------------------------------------------------------------------
  // VALIDATION UTILS
  // --------------------------------------------------------------------------

  void validateActiveStream(
      std::optional<AVMediaType> avMediaType = std::nullopt);
  void validateScannedAllStreams(const std::string& msg);
  void validateFrameIndex(
      const StreamMetadata& streamMetadata,
      int64_t frameIndex);

  // --------------------------------------------------------------------------
  // ATTRIBUTES
  // --------------------------------------------------------------------------

  SeekMode seekMode_;
  ContainerMetadata containerMetadata_;
  UniqueDecodingAVFormatContext formatContext_;
  std::unique_ptr<DeviceInterface> deviceInterface_;
  std::map<int, StreamInfo> streamInfos_;
  const int NO_ACTIVE_STREAM = -2;
  int activeStreamIndex_ = NO_ACTIVE_STREAM;

  // The desired position of the cursor in the stream. We send frames >= this
  // pts to the user when they request a frame.
  int64_t cursor_ = INT64_MIN;
  bool cursorWasJustSet_ = false;
  int64_t lastDecodedAvFramePts_ = 0;
  int64_t lastDecodedAvFrameDuration_ = 0;

  // Audio only. We cache it for performance. The video equivalents live in
  // deviceInterface_. We store swrContext_ here because we only handle audio
  // on the CPU.
  UniqueSwrContext swrContext_;

  // Stores various internal decoding stats.
  DecodeStats decodeStats_;

  // Stores the AVIOContext for the input buffer.
  std::unique_ptr<AVIOContextHolder> avioContextHolder_;

  // We will receive a vector of transforms upon adding a stream and store it
  // here. However, we need to know if any of those operations change the
  // dimensions of the output frame. If they do, we need to figure out what are
  // the final dimensions of the output frame after ALL transformations. We
  // figure this out as soon as we receive the transforms. If any of the
  // transforms change the final output frame dimensions, we store that in
  // resizedOutputDims_. If resizedOutputDims_ has no value, that means there
  // are no transforms that change the output frame dimensions.
  //
  // The priority order for output frame dimension is:
  //
  // 1. resizedOutputDims_; the resize requested by the user always takes
  // priority.
  // 2. The dimensions of the actual decoded AVFrame. This can change
  // per-decoded frame, and is unknown in SingleStreamDecoder. Only the
  // DeviceInterface learns it immediately after decoding a raw frame but
  // before the color transformation.
  // 3. metadataDims_; the dimensions we learned from the metadata.
  std::vector<std::unique_ptr<Transform>> transforms_;
  std::optional<FrameDims> resizedOutputDims_;
  FrameDims metadataDims_;

  // Whether or not we have already scanned all streams to update the metadata.
  bool scannedAllStreams_ = false;

  // Tracks that we've already been initialized.
  bool initialized_ = false;
};

// Prints the SingleStreamDecoder::DecodeStats to the ostream.
std::ostream& operator<<(
    std::ostream& os,
    const SingleStreamDecoder::DecodeStats& stats);

} // namespace facebook::torchcodec
@@ -0,0 +1,63 @@
1
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <torch/types.h>
#include <optional>
#include <string>
#include <string_view>

namespace facebook::torchcodec {

// Selects which library performs pixel-format/color conversion.
enum ColorConversionLibrary {
  // Use the libavfilter library for color conversion.
  FILTERGRAPH,
  // Use the libswscale library for color conversion.
  SWSCALE
};

// Per-stream options controlling how video frames are decoded and returned.
struct VideoStreamOptions {
  VideoStreamOptions() {}

  // Number of threads we pass to FFMPEG for decoding.
  // 0 means FFMPEG will choose the number of threads automatically to fully
  // utilize all cores. If not set, it will be the default FFMPEG behavior for
  // the given codec.
  std::optional<int> ffmpegThreadCount;

  // Currently the dimension order can be either NHWC or NCHW.
  // H=height, W=width, C=channel.
  std::string dimensionOrder = "NCHW";

  // By default we have to use filtergraph, as it is more general. We can only
  // use swscale when we have met strict requirements. See
  // CpuDeviceInterface::initialize() for the logic.
  ColorConversionLibrary colorConversionLibrary =
      ColorConversionLibrary::FILTERGRAPH;

  // By default we use CPU for decoding for both C++ and python users.
  torch::Device device = torch::kCPU;
  // Device variant (e.g., "default", "beta", etc.)
  std::string_view deviceVariant = "default";

  // Encoding options
  // TODO-VideoEncoder: Consider adding other optional fields here
  // (bit rate, gop size, max b frames, preset)
  std::optional<int> crf;
};

// Per-stream options controlling audio decoding and encoding.
struct AudioStreamOptions {
  AudioStreamOptions() {}

  // Encoding only
  std::optional<int> bitRate;
  // Decoding and encoding:
  std::optional<int> numChannels;
  std::optional<int> sampleRate;
};

} // namespace facebook::torchcodec
@@ -0,0 +1,60 @@
1
+ // Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #include "src/torchcodec/_core/Transform.h"
8
+ #include <torch/types.h>
9
+ #include "src/torchcodec/_core/FFMPEGCommon.h"
10
+
11
+ namespace facebook::torchcodec {
12
+
13
+ namespace {
14
+
15
+ std::string toFilterGraphInterpolation(
16
+ ResizeTransform::InterpolationMode mode) {
17
+ switch (mode) {
18
+ case ResizeTransform::InterpolationMode::BILINEAR:
19
+ return "bilinear";
20
+ default:
21
+ TORCH_CHECK(
22
+ false,
23
+ "Unknown interpolation mode: " +
24
+ std::to_string(static_cast<int>(mode)));
25
+ }
26
+ }
27
+
28
+ int toSwsInterpolation(ResizeTransform::InterpolationMode mode) {
29
+ switch (mode) {
30
+ case ResizeTransform::InterpolationMode::BILINEAR:
31
+ return SWS_BILINEAR;
32
+ default:
33
+ TORCH_CHECK(
34
+ false,
35
+ "Unknown interpolation mode: " +
36
+ std::to_string(static_cast<int>(mode)));
37
+ }
38
+ }
39
+
40
+ } // namespace
41
+
42
+ std::string ResizeTransform::getFilterGraphCpu() const {
43
+ return "scale=" + std::to_string(outputDims_.width) + ":" +
44
+ std::to_string(outputDims_.height) +
45
+ ":sws_flags=" + toFilterGraphInterpolation(interpolationMode_);
46
+ }
47
+
48
+ std::optional<FrameDims> ResizeTransform::getOutputFrameDims() const {
49
+ return outputDims_;
50
+ }
51
+
52
+ bool ResizeTransform::isResize() const {
53
+ return true;
54
+ }
55
+
56
+ int ResizeTransform::getSwsFlags() const {
57
+ return toSwsInterpolation(interpolationMode_);
58
+ }
59
+
60
+ } // namespace facebook::torchcodec
@@ -0,0 +1,59 @@
1
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <optional>
#include <string>
#include "src/torchcodec/_core/Frame.h"

namespace facebook::torchcodec {

// Abstract base class for frame transformations applied during decoding.
class Transform {
 public:
  // Returns the filtergraph expression implementing this transform on CPU.
  virtual std::string getFilterGraphCpu() const = 0;
  virtual ~Transform() = default;

  // If the transformation does not change the output frame dimensions, then
  // there is no need to override this member function. The default
  // implementation returns an empty optional, indicating that the output
  // frame has the same dimensions as the input frame.
  //
  // If the transformation does change the output frame dimensions, then it
  // must override this member function and return the output frame
  // dimensions.
  virtual std::optional<FrameDims> getOutputFrameDims() const {
    return std::nullopt;
  }

  // The ResizeTransform is special, because it is the only transform that
  // swscale can handle.
  virtual bool isResize() const {
    return false;
  }
};

// Resizes frames to fixed output dimensions using the configured
// interpolation mode.
class ResizeTransform : public Transform {
 public:
  enum class InterpolationMode { BILINEAR };

  // Defaults to bilinear interpolation.
  // NOTE(review): intentionally non-explicit? Allows implicit conversion from
  // FrameDims — confirm this is desired.
  ResizeTransform(const FrameDims& dims)
      : outputDims_(dims), interpolationMode_(InterpolationMode::BILINEAR) {}

  ResizeTransform(const FrameDims& dims, InterpolationMode interpolationMode)
      : outputDims_(dims), interpolationMode_(interpolationMode) {}

  std::string getFilterGraphCpu() const override;
  std::optional<FrameDims> getOutputFrameDims() const override;
  bool isResize() const override;

  // Returns the libswscale SWS_* flag for the interpolation mode; used when
  // the swscale color-conversion path is selected.
  int getSwsFlags() const;

 private:
  FrameDims outputDims_;
  InterpolationMode interpolationMode_;
};

} // namespace facebook::torchcodec
@@ -0,0 +1,35 @@
1
+ // Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ // All rights reserved.
3
+ //
4
+ // This source code is licensed under the BSD-style license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ #include "src/torchcodec/_core/ValidationUtils.h"
8
+ #include <limits>
9
+ #include "c10/util/Exception.h"
10
+
11
+ namespace facebook::torchcodec {
12
+
13
+ int validateInt64ToInt(int64_t value, const std::string& parameterName) {
14
+ TORCH_CHECK(
15
+ value >= std::numeric_limits<int>::min() &&
16
+ value <= std::numeric_limits<int>::max(),
17
+ parameterName,
18
+ "=",
19
+ value,
20
+ " is out of range for int type.");
21
+
22
+ return static_cast<int>(value);
23
+ }
24
+
25
+ std::optional<int> validateOptionalInt64ToInt(
26
+ const std::optional<int64_t>& value,
27
+ const std::string& parameterName) {
28
+ if (value.has_value()) {
29
+ return validateInt64ToInt(value.value(), parameterName);
30
+ } else {
31
+ return std::nullopt;
32
+ }
33
+ }
34
+
35
+ } // namespace facebook::torchcodec
@@ -0,0 +1,21 @@
1
// Copyright (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <cstdint>
#include <optional>
#include <string>

namespace facebook::torchcodec {

// Narrows an int64_t to int. Raises an error naming parameterName if the
// value is out of range for int.
int validateInt64ToInt(int64_t value, const std::string& parameterName);

// Optional-propagating variant of validateInt64ToInt: returns std::nullopt
// for an empty input, otherwise validates and narrows the wrapped value.
std::optional<int> validateOptionalInt64ToInt(
    const std::optional<int64_t>& value,
    const std::string& parameterName);

} // namespace facebook::torchcodec
@@ -0,0 +1,41 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+
8
+ from ._metadata import (
9
+ AudioStreamMetadata,
10
+ ContainerMetadata,
11
+ get_container_metadata,
12
+ get_container_metadata_from_header,
13
+ VideoStreamMetadata,
14
+ )
15
+ from .ops import (
16
+ _add_video_stream,
17
+ _get_key_frame_indices,
18
+ _test_frame_pts_equality,
19
+ add_audio_stream,
20
+ add_video_stream,
21
+ create_from_bytes,
22
+ create_from_file,
23
+ create_from_file_like,
24
+ create_from_tensor,
25
+ encode_audio_to_file,
26
+ encode_audio_to_file_like,
27
+ encode_audio_to_tensor,
28
+ encode_video_to_file,
29
+ get_ffmpeg_library_versions,
30
+ get_frame_at_index,
31
+ get_frame_at_pts,
32
+ get_frames_at_indices,
33
+ get_frames_by_pts,
34
+ get_frames_by_pts_in_range,
35
+ get_frames_by_pts_in_range_audio,
36
+ get_frames_in_range,
37
+ get_json_metadata,
38
+ get_next_frame,
39
+ scan_all_streams_to_update_metadata,
40
+ seek_to_pts,
41
+ )