pixeltable 0.4.0rc3__py3-none-any.whl → 0.4.20__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.

Files changed (202)
  1. pixeltable/__init__.py +23 -5
  2. pixeltable/_version.py +1 -0
  3. pixeltable/catalog/__init__.py +5 -3
  4. pixeltable/catalog/catalog.py +1318 -404
  5. pixeltable/catalog/column.py +186 -115
  6. pixeltable/catalog/dir.py +1 -2
  7. pixeltable/catalog/globals.py +11 -43
  8. pixeltable/catalog/insertable_table.py +167 -79
  9. pixeltable/catalog/path.py +61 -23
  10. pixeltable/catalog/schema_object.py +9 -10
  11. pixeltable/catalog/table.py +626 -308
  12. pixeltable/catalog/table_metadata.py +101 -0
  13. pixeltable/catalog/table_version.py +713 -569
  14. pixeltable/catalog/table_version_handle.py +37 -6
  15. pixeltable/catalog/table_version_path.py +42 -29
  16. pixeltable/catalog/tbl_ops.py +50 -0
  17. pixeltable/catalog/update_status.py +191 -0
  18. pixeltable/catalog/view.py +108 -94
  19. pixeltable/config.py +128 -22
  20. pixeltable/dataframe.py +188 -100
  21. pixeltable/env.py +407 -136
  22. pixeltable/exceptions.py +6 -0
  23. pixeltable/exec/__init__.py +3 -0
  24. pixeltable/exec/aggregation_node.py +7 -8
  25. pixeltable/exec/cache_prefetch_node.py +83 -110
  26. pixeltable/exec/cell_materialization_node.py +231 -0
  27. pixeltable/exec/cell_reconstruction_node.py +135 -0
  28. pixeltable/exec/component_iteration_node.py +4 -3
  29. pixeltable/exec/data_row_batch.py +8 -65
  30. pixeltable/exec/exec_context.py +16 -4
  31. pixeltable/exec/exec_node.py +13 -36
  32. pixeltable/exec/expr_eval/evaluators.py +7 -6
  33. pixeltable/exec/expr_eval/expr_eval_node.py +27 -12
  34. pixeltable/exec/expr_eval/globals.py +8 -5
  35. pixeltable/exec/expr_eval/row_buffer.py +1 -2
  36. pixeltable/exec/expr_eval/schedulers.py +190 -30
  37. pixeltable/exec/globals.py +32 -0
  38. pixeltable/exec/in_memory_data_node.py +18 -18
  39. pixeltable/exec/object_store_save_node.py +293 -0
  40. pixeltable/exec/row_update_node.py +16 -9
  41. pixeltable/exec/sql_node.py +206 -101
  42. pixeltable/exprs/__init__.py +1 -1
  43. pixeltable/exprs/arithmetic_expr.py +27 -22
  44. pixeltable/exprs/array_slice.py +3 -3
  45. pixeltable/exprs/column_property_ref.py +34 -30
  46. pixeltable/exprs/column_ref.py +92 -96
  47. pixeltable/exprs/comparison.py +5 -5
  48. pixeltable/exprs/compound_predicate.py +5 -4
  49. pixeltable/exprs/data_row.py +152 -55
  50. pixeltable/exprs/expr.py +62 -43
  51. pixeltable/exprs/expr_dict.py +3 -3
  52. pixeltable/exprs/expr_set.py +17 -10
  53. pixeltable/exprs/function_call.py +75 -37
  54. pixeltable/exprs/globals.py +1 -2
  55. pixeltable/exprs/in_predicate.py +4 -4
  56. pixeltable/exprs/inline_expr.py +10 -27
  57. pixeltable/exprs/is_null.py +1 -3
  58. pixeltable/exprs/json_mapper.py +8 -8
  59. pixeltable/exprs/json_path.py +56 -22
  60. pixeltable/exprs/literal.py +5 -5
  61. pixeltable/exprs/method_ref.py +2 -2
  62. pixeltable/exprs/object_ref.py +2 -2
  63. pixeltable/exprs/row_builder.py +127 -53
  64. pixeltable/exprs/rowid_ref.py +8 -12
  65. pixeltable/exprs/similarity_expr.py +50 -25
  66. pixeltable/exprs/sql_element_cache.py +4 -4
  67. pixeltable/exprs/string_op.py +5 -5
  68. pixeltable/exprs/type_cast.py +3 -5
  69. pixeltable/func/__init__.py +1 -0
  70. pixeltable/func/aggregate_function.py +8 -8
  71. pixeltable/func/callable_function.py +9 -9
  72. pixeltable/func/expr_template_function.py +10 -10
  73. pixeltable/func/function.py +18 -20
  74. pixeltable/func/function_registry.py +6 -7
  75. pixeltable/func/globals.py +2 -3
  76. pixeltable/func/mcp.py +74 -0
  77. pixeltable/func/query_template_function.py +20 -18
  78. pixeltable/func/signature.py +43 -16
  79. pixeltable/func/tools.py +23 -13
  80. pixeltable/func/udf.py +18 -20
  81. pixeltable/functions/__init__.py +6 -0
  82. pixeltable/functions/anthropic.py +93 -33
  83. pixeltable/functions/audio.py +114 -10
  84. pixeltable/functions/bedrock.py +13 -6
  85. pixeltable/functions/date.py +1 -1
  86. pixeltable/functions/deepseek.py +20 -9
  87. pixeltable/functions/fireworks.py +2 -2
  88. pixeltable/functions/gemini.py +28 -11
  89. pixeltable/functions/globals.py +13 -13
  90. pixeltable/functions/groq.py +108 -0
  91. pixeltable/functions/huggingface.py +1046 -23
  92. pixeltable/functions/image.py +9 -18
  93. pixeltable/functions/llama_cpp.py +23 -8
  94. pixeltable/functions/math.py +3 -4
  95. pixeltable/functions/mistralai.py +4 -15
  96. pixeltable/functions/ollama.py +16 -9
  97. pixeltable/functions/openai.py +104 -82
  98. pixeltable/functions/openrouter.py +143 -0
  99. pixeltable/functions/replicate.py +2 -2
  100. pixeltable/functions/reve.py +250 -0
  101. pixeltable/functions/string.py +21 -28
  102. pixeltable/functions/timestamp.py +13 -14
  103. pixeltable/functions/together.py +4 -6
  104. pixeltable/functions/twelvelabs.py +92 -0
  105. pixeltable/functions/util.py +6 -1
  106. pixeltable/functions/video.py +1388 -106
  107. pixeltable/functions/vision.py +7 -7
  108. pixeltable/functions/whisper.py +15 -7
  109. pixeltable/functions/whisperx.py +179 -0
  110. pixeltable/{ext/functions → functions}/yolox.py +2 -4
  111. pixeltable/globals.py +332 -105
  112. pixeltable/index/base.py +13 -22
  113. pixeltable/index/btree.py +23 -22
  114. pixeltable/index/embedding_index.py +32 -44
  115. pixeltable/io/__init__.py +4 -2
  116. pixeltable/io/datarows.py +7 -6
  117. pixeltable/io/external_store.py +49 -77
  118. pixeltable/io/fiftyone.py +11 -11
  119. pixeltable/io/globals.py +29 -28
  120. pixeltable/io/hf_datasets.py +17 -9
  121. pixeltable/io/label_studio.py +70 -66
  122. pixeltable/io/lancedb.py +3 -0
  123. pixeltable/io/pandas.py +12 -11
  124. pixeltable/io/parquet.py +13 -93
  125. pixeltable/io/table_data_conduit.py +71 -47
  126. pixeltable/io/utils.py +3 -3
  127. pixeltable/iterators/__init__.py +2 -1
  128. pixeltable/iterators/audio.py +21 -11
  129. pixeltable/iterators/document.py +116 -55
  130. pixeltable/iterators/image.py +5 -2
  131. pixeltable/iterators/video.py +293 -13
  132. pixeltable/metadata/__init__.py +4 -2
  133. pixeltable/metadata/converters/convert_18.py +2 -2
  134. pixeltable/metadata/converters/convert_19.py +2 -2
  135. pixeltable/metadata/converters/convert_20.py +2 -2
  136. pixeltable/metadata/converters/convert_21.py +2 -2
  137. pixeltable/metadata/converters/convert_22.py +2 -2
  138. pixeltable/metadata/converters/convert_24.py +2 -2
  139. pixeltable/metadata/converters/convert_25.py +2 -2
  140. pixeltable/metadata/converters/convert_26.py +2 -2
  141. pixeltable/metadata/converters/convert_29.py +4 -4
  142. pixeltable/metadata/converters/convert_34.py +2 -2
  143. pixeltable/metadata/converters/convert_36.py +2 -2
  144. pixeltable/metadata/converters/convert_37.py +15 -0
  145. pixeltable/metadata/converters/convert_38.py +39 -0
  146. pixeltable/metadata/converters/convert_39.py +124 -0
  147. pixeltable/metadata/converters/convert_40.py +73 -0
  148. pixeltable/metadata/converters/util.py +13 -12
  149. pixeltable/metadata/notes.py +4 -0
  150. pixeltable/metadata/schema.py +79 -42
  151. pixeltable/metadata/utils.py +74 -0
  152. pixeltable/mypy/__init__.py +3 -0
  153. pixeltable/mypy/mypy_plugin.py +123 -0
  154. pixeltable/plan.py +274 -223
  155. pixeltable/share/__init__.py +1 -1
  156. pixeltable/share/packager.py +259 -129
  157. pixeltable/share/protocol/__init__.py +34 -0
  158. pixeltable/share/protocol/common.py +170 -0
  159. pixeltable/share/protocol/operation_types.py +33 -0
  160. pixeltable/share/protocol/replica.py +109 -0
  161. pixeltable/share/publish.py +213 -57
  162. pixeltable/store.py +238 -175
  163. pixeltable/type_system.py +104 -63
  164. pixeltable/utils/__init__.py +2 -3
  165. pixeltable/utils/arrow.py +108 -13
  166. pixeltable/utils/av.py +298 -0
  167. pixeltable/utils/azure_store.py +305 -0
  168. pixeltable/utils/code.py +3 -3
  169. pixeltable/utils/console_output.py +4 -1
  170. pixeltable/utils/coroutine.py +6 -23
  171. pixeltable/utils/dbms.py +31 -5
  172. pixeltable/utils/description_helper.py +4 -5
  173. pixeltable/utils/documents.py +5 -6
  174. pixeltable/utils/exception_handler.py +7 -30
  175. pixeltable/utils/filecache.py +6 -6
  176. pixeltable/utils/formatter.py +4 -6
  177. pixeltable/utils/gcs_store.py +283 -0
  178. pixeltable/utils/http_server.py +2 -3
  179. pixeltable/utils/iceberg.py +1 -2
  180. pixeltable/utils/image.py +17 -0
  181. pixeltable/utils/lancedb.py +88 -0
  182. pixeltable/utils/local_store.py +316 -0
  183. pixeltable/utils/misc.py +5 -0
  184. pixeltable/utils/object_stores.py +528 -0
  185. pixeltable/utils/pydantic.py +60 -0
  186. pixeltable/utils/pytorch.py +5 -6
  187. pixeltable/utils/s3_store.py +392 -0
  188. pixeltable-0.4.20.dist-info/METADATA +587 -0
  189. pixeltable-0.4.20.dist-info/RECORD +218 -0
  190. {pixeltable-0.4.0rc3.dist-info → pixeltable-0.4.20.dist-info}/WHEEL +1 -1
  191. pixeltable-0.4.20.dist-info/entry_points.txt +2 -0
  192. pixeltable/__version__.py +0 -3
  193. pixeltable/ext/__init__.py +0 -17
  194. pixeltable/ext/functions/__init__.py +0 -11
  195. pixeltable/ext/functions/whisperx.py +0 -77
  196. pixeltable/utils/media_store.py +0 -77
  197. pixeltable/utils/s3.py +0 -17
  198. pixeltable/utils/sample.py +0 -25
  199. pixeltable-0.4.0rc3.dist-info/METADATA +0 -435
  200. pixeltable-0.4.0rc3.dist-info/RECORD +0 -189
  201. pixeltable-0.4.0rc3.dist-info/entry_points.txt +0 -3
  202. {pixeltable-0.4.0rc3.dist-info → pixeltable-0.4.20.dist-info/licenses}/LICENSE +0 -0
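
The headline change is `pixeltable/functions/video.py` (+1388/−106), shown in the diff below: `extract_audio`, `get_metadata`, and `make_video` are reworked on top of the new `pixeltable.utils.av` and `TempStore` helpers, and a set of new video UDFs is added (`get_duration`, `extract_frame`, `clip`, `segment_video`, `concat_videos`, `with_audio`, `overlay_text`, plus the PySceneDetect-based `scene_detect_*` functions). A minimal sketch of how the new UDFs are called, based only on the signatures visible in the diff below; the table and column names are hypothetical:

```python
import pixeltable as pxt

# Hypothetical table with a video column; names are illustrative only.
tbl = pxt.get_table('demo.videos')

# The new UDFs are registered as methods on video expressions (is_method=True).
tbl.select(
    tbl.video.get_duration(),                      # duration in seconds, or None
    tbl.video.extract_frame(timestamp=1.0),        # PIL image, or None past end of video
    tbl.video.clip(start_time=0.0, duration=5.0),  # frame-accurate re-encoded clip by default
).collect()

# Scene detection requires `pip install scenedetect`; results can be stored
# as a computed column.
tbl.add_computed_column(scene_cuts=tbl.video.scene_detect_content(threshold=27.0))
```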
@@ -1,65 +1,88 @@
 """
 Pixeltable [UDFs](https://pixeltable.readme.io/docs/user-defined-functions-udfs) for `VideoType`.
-
-Example:
-```python
-import pixeltable as pxt
-import pixeltable.functions as pxtf
-
-t = pxt.get_table(...)
-t.select(pxtf.video.extract_audio(t.video_col)).collect()
-```
 """

-import tempfile
-import uuid
-from pathlib import Path
-from typing import Any, Optional
+import glob
+import logging
+import pathlib
+import subprocess
+from typing import TYPE_CHECKING, Any, Literal, NamedTuple, NoReturn

 import av
+import av.stream
 import numpy as np
 import PIL.Image

 import pixeltable as pxt
-from pixeltable import env
+import pixeltable.utils.av as av_utils
+from pixeltable.env import Env
 from pixeltable.utils.code import local_public_names
+from pixeltable.utils.local_store import TempStore

-_format_defaults = {  # format -> (codec, ext)
-    'wav': ('pcm_s16le', 'wav'),
-    'mp3': ('libmp3lame', 'mp3'),
-    'flac': ('flac', 'flac'),
-    # 'mp4': ('aac', 'm4a'),
-}
-
-# for mp4:
-# - extract_audio() fails with
-#   "Application provided invalid, non monotonically increasing dts to muxer in stream 0: 1146 >= 290"
-# - chatgpt suggests this can be fixed in the following manner
-#     for packet in container.demux(audio_stream):
-#         packet.pts = None  # Reset the PTS and DTS to allow FFmpeg to set them automatically
-#         packet.dts = None
-#         for frame in packet.decode():
-#             frame.pts = None
-#             for packet in output_stream.encode(frame):
-#                 output_container.mux(packet)
-#
-#     # Flush remaining packets
-#     for packet in output_stream.encode():
-#         output_container.mux(packet)
+if TYPE_CHECKING:
+    from scenedetect.detectors import SceneDetector  # type: ignore[import-untyped]
+
+_logger = logging.getLogger('pixeltable')


 @pxt.uda(requires_order_by=True)
 class make_video(pxt.Aggregator):
     """
-    Aggregator that creates a video from a sequence of images.
+    Aggregator that creates a video from a sequence of images, using the default video encoder and yuv420p pixel format.
+
+    Follows https://pyav.org/docs/develop/cookbook/numpy.html#generating-video
+
+    TODO: provide parameters for video_encoder and pix_fmt
+
+    Args:
+        fps: Frames per second for the output video.
+
+    Returns:
+
+        - The created video.
+
+    Examples:
+        Create a video from frames extracted using `FrameIterator`:
+
+        >>> import pixeltable as pxt
+        >>> from pixeltable.functions.video import make_video
+        >>> from pixeltable.iterators import FrameIterator
+        >>>
+        >>> # Create base table for videos
+        >>> videos_table = pxt.create_table('videos', {'video': pxt.Video})
+        >>>
+        >>> # Create view to extract frames
+        >>> frames_view = pxt.create_view(
+        ...     'video_frames',
+        ...     videos_table,
+        ...     iterator=FrameIterator.create(video=videos_table.video, fps=1)
+        ... )
+        >>>
+        >>> # Reconstruct video from frames
+        >>> frames_view.group_by(videos_table).select(
+        ...     make_video(frames_view.pos, frames_view.frame)
+        ... ).show()
+
+        Apply transformations to frames before creating a video:
+
+        >>> # Create video from transformed frames
+        >>> frames_view.group_by(videos_table).select(
+        ...     make_video(frames_view.pos, frames_view.frame.rotate(30))
+        ... ).show()
+
+        Compare multiple processed versions side-by-side:
+
+        >>> frames_view.group_by(videos_table).select(
+        ...     make_video(frames_view.pos, frames_view.frame),
+        ...     make_video(frames_view.pos, frames_view.frame.rotate(30))
+        ... ).show()
     """

-    container: Optional[av.container.OutputContainer]
-    stream: Optional[av.video.stream.VideoStream]
+    container: av.container.OutputContainer | None
+    stream: av.video.stream.VideoStream | None
     fps: int

     def __init__(self, fps: int = 25):
-        """follows https://pyav.org/docs/develop/cookbook/numpy.html#generating-video"""
         self.container = None
         self.stream = None
         self.fps = fps
@@ -68,8 +91,7 @@ class make_video(pxt.Aggregator):
         if frame is None:
             return
         if self.container is None:
-            (_, output_filename) = tempfile.mkstemp(suffix='.mp4', dir=str(env.Env.get().tmp_dir))
-            self.out_file = Path(output_filename)
+            self.out_file = TempStore.create_path(extension='.mp4')
             self.container = av.open(str(self.out_file), mode='w')
             self.stream = self.container.add_stream('h264', rate=self.fps)
             self.stream.pix_fmt = 'yuv420p'
@@ -89,102 +111,1362 @@ class make_video(pxt.Aggregator):
 
 @pxt.udf(is_method=True)
 def extract_audio(
-    video_path: pxt.Video, stream_idx: int = 0, format: str = 'wav', codec: Optional[str] = None
+    video_path: pxt.Video, stream_idx: int = 0, format: str = 'wav', codec: str | None = None
 ) -> pxt.Audio:
     """
-    Extract an audio stream from a video file, save it as a media file and return its path.
+    Extract an audio stream from a video.

     Args:
         stream_idx: Index of the audio stream to extract.
         format: The target audio format. (`'wav'`, `'mp3'`, `'flac'`).
         codec: The codec to use for the audio stream. If not provided, a default codec will be used.
+
+    Returns:
+        The extracted audio.
+
+    Examples:
+        Add a computed column to a table `tbl` that extracts audio from an existing column `video_col`:
+
+        >>> tbl.add_computed_column(
+        ...     extracted_audio=tbl.video_col.extract_audio(format='flac')
+        ... )
     """
-    if format not in _format_defaults:
+    if format not in av_utils.AUDIO_FORMATS:
         raise ValueError(f'extract_audio(): unsupported audio format: {format}')
-    default_codec, ext = _format_defaults[format]
+    default_codec, ext = av_utils.AUDIO_FORMATS[format]

     with av.open(video_path) as container:
         if len(container.streams.audio) <= stream_idx:
             return None
         audio_stream = container.streams.audio[stream_idx]
         # create this in our tmp directory, so it'll get cleaned up if it's being generated as part of a query
-        output_filename = str(env.Env.get().tmp_dir / f'{uuid.uuid4()}.{ext}')
+        output_path = str(TempStore.create_path(extension=f'.{ext}'))

-        with av.open(output_filename, 'w', format=format) as output_container:
+        with av.open(output_path, 'w', format=format) as output_container:
             output_stream = output_container.add_stream(codec or default_codec)
             assert isinstance(output_stream, av.audio.stream.AudioStream)
             for packet in container.demux(audio_stream):
                 for frame in packet.decode():
                     output_container.mux(output_stream.encode(frame))  # type: ignore[arg-type]

-    return output_filename
+    return output_path


 @pxt.udf(is_method=True)
 def get_metadata(video: pxt.Video) -> dict:
     """
     Gets various metadata associated with a video file and returns it as a dictionary.
+
+    Args:
+        video: The video for which to get metadata.
+
+    Returns:
+        A `dict` such as the following:
+
+        ```json
+        {
+            'bit_exact': False,
+            'bit_rate': 967260,
+            'size': 2234371,
+            'metadata': {
+                'encoder': 'Lavf60.16.100',
+                'major_brand': 'isom',
+                'minor_version': '512',
+                'compatible_brands': 'isomiso2avc1mp41',
+            },
+            'streams': [
+                {
+                    'type': 'video',
+                    'width': 640,
+                    'height': 360,
+                    'frames': 462,
+                    'time_base': 1.0 / 12800,
+                    'duration': 236544,
+                    'duration_seconds': 236544.0 / 12800,
+                    'average_rate': 25.0,
+                    'base_rate': 25.0,
+                    'guessed_rate': 25.0,
+                    'metadata': {
+                        'language': 'und',
+                        'handler_name': 'L-SMASH Video Handler',
+                        'vendor_id': '[0][0][0][0]',
+                        'encoder': 'Lavc60.31.102 libx264',
+                    },
+                    'codec_context': {'name': 'h264', 'codec_tag': 'avc1', 'profile': 'High', 'pix_fmt': 'yuv420p'},
+                }
+            ],
+        }
+        ```
+
+    Examples:
+        Extract metadata for files in the `video_col` column of the table `tbl`:
+
+        >>> tbl.select(tbl.video_col.get_metadata()).collect()
+    """
+    return av_utils.get_metadata(video)
+
+
+@pxt.udf(is_method=True)
+def get_duration(video: pxt.Video) -> float | None:
+    """
+    Get video duration in seconds.
+
+    Args:
+        video: The video for which to get the duration.
+
+    Returns:
+        The duration in seconds, or None if the duration cannot be determined.
+    """
+    return av_utils.get_video_duration(video)
+
+
+@pxt.udf(is_method=True)
+def extract_frame(video: pxt.Video, *, timestamp: float) -> PIL.Image.Image | None:
+    """
+    Extract a single frame from a video at a specific timestamp.
+
+    Args:
+        video: The video from which to extract the frame.
+        timestamp: Extract frame at this timestamp (in seconds).
+
+    Returns:
+        The extracted frame as a PIL Image, or None if the timestamp is beyond the video duration.
+
+    Examples:
+        Extract the first frame from each video in the `video` column of the table `tbl`:
+
+        >>> tbl.select(tbl.video.extract_frame(0.0)).collect()
+
+        Extract a frame close to the end of each video in the `video` column of the table `tbl`:
+
+        >>> tbl.select(tbl.video.extract_frame(tbl.video.get_metadata().streams[0].duration_seconds - 0.1)).collect()
     """
-    return _get_metadata(video)
-
-
-def _get_metadata(path: str) -> dict:
-    with av.open(path) as container:
-        assert isinstance(container, av.container.InputContainer)
-        streams_info = [__get_stream_metadata(stream) for stream in container.streams]
-        result = {
-            'bit_exact': getattr(container, 'bit_exact', False),
-            'bit_rate': container.bit_rate,
-            'size': container.size,
-            'metadata': container.metadata,
-            'streams': streams_info,
-        }
-        return result
-
-
-def __get_stream_metadata(stream: av.stream.Stream) -> dict:
-    if stream.type not in ('audio', 'video'):
-        return {'type': stream.type}  # Currently unsupported
-
-    codec_context = stream.codec_context
-    codec_context_md: dict[str, Any] = {
-        'name': codec_context.name,
-        'codec_tag': codec_context.codec_tag.encode('unicode-escape').decode('utf-8'),
-        'profile': codec_context.profile,
-    }
-    metadata = {
-        'type': stream.type,
-        'duration': stream.duration,
-        'time_base': float(stream.time_base) if stream.time_base is not None else None,
-        'duration_seconds': float(stream.duration * stream.time_base)
-        if stream.duration is not None and stream.time_base is not None
-        else None,
-        'frames': stream.frames,
-        'metadata': stream.metadata,
-        'codec_context': codec_context_md,
-    }
-
-    if stream.type == 'audio':
-        # Additional metadata for audio
-        channels = getattr(stream.codec_context, 'channels', None)
-        codec_context_md['channels'] = int(channels) if channels is not None else None
+    if timestamp < 0:
+        raise ValueError("'timestamp' must be non-negative")
+
+    try:
+        with av.open(str(video)) as container:
+            video_stream = container.streams.video[0]
+            time_base = float(video_stream.time_base)
+            start_time = video_stream.start_time or 0
+            duration = video_stream.duration
+
+            # Check if timestamp is beyond video duration
+            if duration is not None:
+                duration_seconds = float(duration * time_base)
+                if timestamp > duration_seconds:
+                    return None
+
+            # Convert timestamp to stream time base units
+            target_pts = int(timestamp / time_base) + start_time
+
+            # Seek to the nearest keyframe *before* our target timestamp
+            container.seek(target_pts, backward=True, stream=video_stream)
+
+            # Decode frames until we reach or pass the target timestamp
+            for frame in container.decode(video=0):
+                frame_pts = frame.pts
+                if frame_pts is None:
+                    continue
+                frame_timestamp = (frame_pts - start_time) * time_base
+                if frame_timestamp >= timestamp:
+                    return frame.to_image()
+
+            return None
+
+    except Exception as e:
+        raise pxt.Error(f'extract_frame(): failed to extract frame: {e}') from e
+
+
+def _handle_ffmpeg_error(e: subprocess.CalledProcessError) -> NoReturn:
+    error_msg = f'ffmpeg failed with return code {e.returncode}'
+    if e.stderr is not None:
+        error_msg += f':\n{e.stderr.strip()}'
+    raise pxt.Error(error_msg) from e
+
+
+@pxt.udf(is_method=True)
+def clip(
+    video: pxt.Video,
+    *,
+    start_time: float,
+    end_time: float | None = None,
+    duration: float | None = None,
+    mode: Literal['fast', 'accurate'] = 'accurate',
+    video_encoder: str | None = None,
+    video_encoder_args: dict[str, Any] | None = None,
+) -> pxt.Video | None:
+    """
+    Extract a clip from a video, specified by `start_time` and either `end_time` or `duration` (in seconds).
+
+    If `start_time` is beyond the end of the video, returns None. Can only specify one of `end_time` and `duration`.
+    If both `end_time` and `duration` are None, the clip goes to the end of the video.
+
+    __Requirements:__
+
+    - `ffmpeg` needs to be installed and in PATH
+
+    Args:
+        video: Input video file
+        start_time: Start time in seconds
+        end_time: End time in seconds
+        duration: Duration of the clip in seconds
+        mode:
+
+            - `'fast'`: avoids re-encoding but starts the clip at the nearest keyframes and as a result, the clip
+              duration will be slightly longer than requested
+            - `'accurate'`: extracts a frame-accurate clip, but requires re-encoding
+        video_encoder: Video encoder to use. If not specified, uses the default encoder for the current platform.
+            Only available for `mode='accurate'`.
+        video_encoder_args: Additional arguments to pass to the video encoder. Only available for `mode='accurate'`.
+
+    Returns:
+        New video containing only the specified time range or None if start_time is beyond the end of the video.
+    """
+    Env.get().require_binary('ffmpeg')
+    if start_time < 0:
+        raise pxt.Error(f'start_time must be non-negative, got {start_time}')
+    if end_time is not None and end_time <= start_time:
+        raise pxt.Error(f'end_time ({end_time}) must be greater than start_time ({start_time})')
+    if duration is not None and duration <= 0:
+        raise pxt.Error(f'duration must be positive, got {duration}')
+    if end_time is not None and duration is not None:
+        raise pxt.Error('end_time and duration cannot both be specified')
+    if mode == 'fast':
+        if video_encoder is not None:
+            raise pxt.Error("video_encoder is not supported for mode='fast'")
+        if video_encoder_args is not None:
+            raise pxt.Error("video_encoder_args is not supported for mode='fast'")
+
+    video_duration = av_utils.get_video_duration(video)
+    if video_duration is not None and start_time > video_duration:
+        return None
+
+    output_path = str(TempStore.create_path(extension='.mp4'))
+
+    if end_time is not None:
+        duration = end_time - start_time
+    cmd = av_utils.ffmpeg_clip_cmd(
+        str(video),
+        output_path,
+        start_time,
+        duration,
+        fast=(mode == 'fast'),
+        video_encoder=video_encoder,
+        video_encoder_args=video_encoder_args,
+    )
+
+    try:
+        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
+        output_file = pathlib.Path(output_path)
+        if not output_file.exists() or output_file.stat().st_size == 0:
+            stderr_output = result.stderr.strip() if result.stderr is not None else ''
+            raise pxt.Error(f'ffmpeg failed to create output file for commandline: {" ".join(cmd)}\n{stderr_output}')
+        return output_path
+    except subprocess.CalledProcessError as e:
+        _handle_ffmpeg_error(e)
+
+
+@pxt.udf(is_method=True)
+def segment_video(
+    video: pxt.Video,
+    *,
+    duration: float | None = None,
+    segment_times: list[float] | None = None,
+    mode: Literal['fast', 'accurate'] = 'accurate',
+    video_encoder: str | None = None,
+    video_encoder_args: dict[str, Any] | None = None,
+) -> list[str]:
+    """
+    Split a video into segments.
+
+    __Requirements:__
+
+    - `ffmpeg` needs to be installed and in PATH
+
+    Args:
+        video: Input video file to segment
+        duration: Duration of each segment (in seconds). For `mode='fast'`, this is approximate;
+            for `mode='accurate'`, segments will have exact durations. Cannot be specified together with
+            `segment_times`.
+        segment_times: List of timestamps (in seconds) in video where segments should be split. Note that these are not
+            segment durations. If all segment times are less than the duration of the video, produces exactly
+            `len(segment_times) + 1` segments. Cannot be empty or be specified together with `duration`.
+        mode: Segmentation mode:
+
+            - `'fast'`: Quick segmentation using stream copy (splits only at keyframes, approximate durations)
+            - `'accurate'`: Precise segmentation with re-encoding (exact durations, slower)
+        video_encoder: Video encoder to use. If not specified, uses the default encoder for the current platform.
+            Only available for `mode='accurate'`.
+        video_encoder_args: Additional arguments to pass to the video encoder. Only available for `mode='accurate'`.
+
+    Returns:
+        List of file paths for the generated video segments.
+
+    Raises:
+        pxt.Error: If the video is missing timing information.
+
+    Examples:
+        Split a video at 1 minute intervals using fast mode:
+
+        >>> tbl.select(segment_paths=tbl.video.segment_video(duration=60, mode='fast')).collect()
+
+        Split video into exact 10-second segments with default accurate mode, using the libx264 encoder with a CRF of 23
+        and slow preset (for smaller output files):
+
+        >>> tbl.select(
+        ...     segment_paths=tbl.video.segment_video(
+        ...         duration=10,
+        ...         video_encoder='libx264',
+        ...         video_encoder_args={'crf': 23, 'preset': 'slow'}
+        ...     )
+        ... ).collect()
+
+        Split video into two parts at the midpoint:
+
+        >>> duration = tbl.video.get_duration()
+        >>> tbl.select(segment_paths=tbl.video.segment_video(segment_times=[duration / 2])).collect()
+    """
+    Env.get().require_binary('ffmpeg')
+    if duration is not None and segment_times is not None:
+        raise pxt.Error('duration and segment_times cannot both be specified')
+    if duration is not None and duration <= 0:
+        raise pxt.Error(f'duration must be positive, got {duration}')
+    if segment_times is not None and len(segment_times) == 0:
+        raise pxt.Error('segment_times cannot be empty')
+    if mode == 'fast':
+        if video_encoder is not None:
+            raise pxt.Error("video_encoder is not supported for mode='fast'")
+        if video_encoder_args is not None:
+            raise pxt.Error("video_encoder_args is not supported for mode='fast'")
+
+    base_path = TempStore.create_path(extension='')
+
+    output_paths: list[str] = []
+    if mode == 'accurate':
+        # Use ffmpeg -f segment for accurate segmentation with re-encoding
+        output_pattern = f'{base_path}_segment_%04d.mp4'
+        cmd = av_utils.ffmpeg_segment_cmd(
+            str(video),
+            output_pattern,
+            segment_duration=duration,
+            segment_times=segment_times,
+            video_encoder=video_encoder,
+            video_encoder_args=video_encoder_args,
+        )
+
+        try:
+            _ = subprocess.run(cmd, capture_output=True, text=True, check=True)
+            output_paths = sorted(glob.glob(f'{base_path}_segment_*.mp4'))
+            # TODO: is this actually an error?
+            # if len(output_paths) == 0:
+            #     stderr_output = result.stderr.strip() if result.stderr is not None else ''
+            #     raise pxt.Error(
+            #         f'ffmpeg failed to create output files for commandline: {" ".join(cmd)}\n{stderr_output}'
+            #     )
+            return output_paths
+
+        except subprocess.CalledProcessError as e:
+            _handle_ffmpeg_error(e)
+
     else:
-        assert stream.type == 'video'
-        assert isinstance(stream, av.video.stream.VideoStream)
-        # Additional metadata for video
-        codec_context_md['pix_fmt'] = getattr(stream.codec_context, 'pix_fmt', None)
-        metadata.update(
-            **{
-                'width': stream.width,
-                'height': stream.height,
-                'frames': stream.frames,
-                'average_rate': float(stream.average_rate) if stream.average_rate is not None else None,
-                'base_rate': float(stream.base_rate) if stream.base_rate is not None else None,
-                'guessed_rate': float(stream.guessed_rate) if stream.guessed_rate is not None else None,
-            }
473
+ # Fast mode: extract consecutive clips using stream copy (no re-encoding)
474
+ # This is faster but can only split at keyframes, leading to approximate durations
475
+ start_time = 0.0
476
+ segment_idx = 0
477
+ try:
478
+ while True:
479
+ target_duration: float | None
480
+ if duration is not None:
481
+ target_duration = duration
482
+ elif segment_idx < len(segment_times):
483
+ target_duration = segment_times[segment_idx] - start_time
484
+ else:
485
+ target_duration = None # the rest
486
+ segment_path = f'{base_path}_segment_{len(output_paths)}.mp4'
487
+ cmd = av_utils.ffmpeg_clip_cmd(str(video), segment_path, start_time, target_duration)
488
+
489
+ _ = subprocess.run(cmd, capture_output=True, text=True, check=True)
490
+ segment_duration = av_utils.get_video_duration(segment_path)
491
+ if segment_duration == 0.0:
492
+ # we're done
493
+ pathlib.Path(segment_path).unlink()
494
+ return output_paths
495
+ output_paths.append(segment_path)
496
+ start_time += segment_duration # use the actual segment duration here, it won't match duration exactly
497
+
498
+ segment_idx += 1
499
+ if segment_times is not None and segment_idx > len(segment_times):
500
+ break
501
+
502
+ return output_paths
503
+
504
+ except subprocess.CalledProcessError as e:
505
+ # clean up partial results
506
+ for segment_path in output_paths:
507
+ pathlib.Path(segment_path).unlink()
508
+ _handle_ffmpeg_error(e)
509
+
510
+
511
+ @pxt.udf(is_method=True)
512
+ def concat_videos(videos: list[pxt.Video]) -> pxt.Video:
513
+ """
514
+ Merge multiple videos into a single video.
515
+
516
+ __Requirements:__
517
+
518
+ - `ffmpeg` needs to be installed and in PATH
519
+
520
+ Args:
521
+ videos: List of videos to merge.
522
+
523
+ Returns:
524
+ A new video containing the merged videos.
525
+ """
526
+ Env.get().require_binary('ffmpeg')
527
+ if len(videos) == 0:
528
+ raise pxt.Error('concat_videos(): empty argument list')
529
+
530
+ # Check that all videos have the same resolution
531
+ resolutions: list[tuple[int, int]] = []
532
+ for video in videos:
533
+ metadata = av_utils.get_metadata(str(video))
534
+ video_stream = next((stream for stream in metadata['streams'] if stream['type'] == 'video'), None)
535
+ if video_stream is None:
536
+ raise pxt.Error(f'concat_videos(): file {video!r} has no video stream')
537
+ resolutions.append((video_stream['width'], video_stream['height']))
538
+
539
+ # check for divergence
540
+ x0, y0 = resolutions[0]
541
+ for i, (x, y) in enumerate(resolutions[1:], start=1):
542
+ if (x0, y0) != (x, y):
543
+ raise pxt.Error(
544
+ f'concat_videos(): requires that all videos have the same resolution, but:'
545
+ f'\n video 0 ({videos[0]!r}): {x0}x{y0}'
546
+ f'\n video {i} ({videos[i]!r}): {x}x{y}.'
547
+ )
548
+
549
+ # ffmpeg -f concat needs an input file list
550
+ filelist_path = TempStore.create_path(extension='.txt')
551
+ with filelist_path.open('w', encoding='utf-8') as f:
552
+ for video in videos:
553
+ f.write(f'file {video!r}\n')
554
+
555
+ output_path = TempStore.create_path(extension='.mp4')
556
+
557
+ try:
558
+ # First attempt: fast copy without re-encoding (works for compatible formats)
559
+ cmd = ['ffmpeg', '-f', 'concat', '-safe', '0', '-i', str(filelist_path), '-c', 'copy', '-y', str(output_path)]
560
+ _logger.debug(f'concat_videos(): {" ".join(cmd)}')
561
+ try:
562
+ _ = subprocess.run(cmd, capture_output=True, text=True, check=True)
563
+ return str(output_path)
564
+ except subprocess.CalledProcessError:
565
+ # Expected for mixed formats - continue to fallback
566
+ pass
567
+
568
+ # we might have some corrupted output
569
+ if output_path.exists():
570
+ output_path.unlink()
571
+
572
+ # general approach: re-encode with -f filter_complex
573
+ #
574
+ # example: 2 videos with audio:
575
+ # ffmpeg -i video1.mp4 -i video2.mp4
576
+ # -filter_complex "[0:v:0][1:v:0]concat=n=2:v=1:a=0[outv];[0:a:0][1:a:0]concat=n=2:v=0:a=1[outa]"
577
+ # -map "[outv]" -map "[outa]"
578
+ # ...
579
+ # breakdown:
580
+ # - [0:v:0][1:v:0] - video stream 0 from inputs 0 and 1
581
+ # - concat=n=2:v=1:a=0[outv] - concat 2 inputs, 1 video stream, 0 audio, output to [outv]
582
+ # - [0:a:0][1:a:0] - audio stream 0 from inputs 0 and 1
583
+ # - concat=n=2:v=0:a=1[outa] - concat 2 inputs, 0 video, 1 audio stream, output to [outa]
584
+
585
+ cmd = ['ffmpeg']
586
+ for video in videos:
587
+ cmd.extend(['-i', video])
588
+
589
+ all_have_audio = all(av_utils.has_audio_stream(str(video)) for video in videos)
590
+ video_inputs = ''.join([f'[{i}:v:0]' for i in range(len(videos))])
591
+ # concat video streams
592
+ filter_str = f'{video_inputs}concat=n={len(videos)}:v=1:a=0[outv]'
593
+ if all_have_audio:
594
+ # also concat audio streams
595
+ audio_inputs = ''.join([f'[{i}:a:0]' for i in range(len(videos))])
596
+ filter_str += f';{audio_inputs}concat=n={len(videos)}:v=0:a=1[outa]'
597
+ cmd.extend(['-filter_complex', filter_str, '-map', '[outv]'])
598
+ if all_have_audio:
599
+ cmd.extend(['-map', '[outa]'])
600
+
601
+ video_encoder = Env.get().default_video_encoder
602
+ if video_encoder is not None:
603
+ cmd.extend(['-c:v', video_encoder])
604
+ if all_have_audio:
605
+ cmd.extend(['-c:a', 'aac'])
606
+ cmd.extend(['-pix_fmt', 'yuv420p', str(output_path)])
607
+
608
+ _ = subprocess.run(cmd, capture_output=True, text=True, check=True)
609
+ return str(output_path)
610
+
611
+ except subprocess.CalledProcessError as e:
612
+ _handle_ffmpeg_error(e)
613
+ finally:
614
+ filelist_path.unlink()
615
+
616
+
617
+ @pxt.udf
618
+ def with_audio(
619
+ video: pxt.Video,
620
+ audio: pxt.Audio,
621
+ *,
622
+ video_start_time: float = 0.0,
623
+ video_duration: float | None = None,
624
+ audio_start_time: float = 0.0,
625
+ audio_duration: float | None = None,
626
+ ) -> pxt.Video:
627
+ """
628
+ Creates a new video that combines the video stream from `video` and the audio stream from `audio`.
629
+ The `start_time` and `duration` parameters can be used to select a specific time range from each input.
630
+ If the audio input (or selected time range) is longer than the video, the audio will be truncated.
631
+
632
+
633
+ __Requirements:__
634
+
635
+ - `ffmpeg` needs to be installed and in PATH
636
+
637
+ Args:
638
+ video: Input video.
639
+ audio: Input audio.
640
+ video_start_time: Start time in the video input (in seconds).
641
+ video_duration: Duration of video segment (in seconds). If None, uses the remainder of the video after
642
+ `video_start_time`. `video_duration` determines the duration of the output video.
643
+ audio_start_time: Start time in the audio input (in seconds).
644
+ audio_duration: Duration of audio segment (in seconds). If None, uses the remainder of the audio after
645
+ `audio_start_time`. If the audio is longer than the output video, it will be truncated.
646
+
647
+ Returns:
648
+ A new video file with the audio track added.
649
+
650
+ Examples:
651
+ Add background music to a video:
652
+
653
+ >>> tbl.select(tbl.video.with_audio(tbl.music_track)).collect()
654
+
655
+ Add audio starting 5 seconds into both files:
656
+
657
+ >>> tbl.select(
658
+ ... tbl.video.with_audio(
659
+ ... tbl.music_track,
660
+ ... video_start_time=5.0,
661
+ ... audio_start_time=5.0
662
+ ... )
663
+ ... ).collect()
664
+
665
+ Use a 10-second clip from the middle of both files:
666
+
667
+ >>> tbl.select(
668
+ ... tbl.video.with_audio(
669
+ ... tbl.music_track,
670
+ ... video_start_time=30.0,
671
+ ... video_duration=10.0,
672
+ ... audio_start_time=15.0,
673
+ ... audio_duration=10.0
674
+ ... )
675
+ ... ).collect()
676
+ """
677
+ Env.get().require_binary('ffmpeg')
678
+ if video_start_time < 0:
679
+ raise pxt.Error(f'video_offset must be non-negative, got {video_start_time}')
680
+ if audio_start_time < 0:
681
+ raise pxt.Error(f'audio_offset must be non-negative, got {audio_start_time}')
682
+ if video_duration is not None and video_duration <= 0:
683
+ raise pxt.Error(f'video_duration must be positive, got {video_duration}')
684
+ if audio_duration is not None and audio_duration <= 0:
685
+ raise pxt.Error(f'audio_duration must be positive, got {audio_duration}')
686
+
687
+ output_path = str(TempStore.create_path(extension='.mp4'))
688
+
689
+ cmd = ['ffmpeg']
690
+ if video_start_time > 0:
691
+ # fast seek, must precede -i
692
+ cmd.extend(['-ss', str(video_start_time)])
693
+ if video_duration is not None:
694
+ cmd.extend(['-t', str(video_duration)])
695
+ else:
696
+ video_duration = av_utils.get_video_duration(video)
697
+ cmd.extend(['-i', str(video)])
698
+
699
+ if audio_start_time > 0:
700
+ cmd.extend(['-ss', str(audio_start_time)])
701
+ if audio_duration is not None:
702
+ cmd.extend(['-t', str(audio_duration)])
703
+ cmd.extend(['-i', str(audio)])
704
+
705
+ cmd.extend(
706
+ [
707
+ '-map',
708
+ '0:v:0', # video from first input
709
+ '-map',
710
+ '1:a:0', # audio from second input
711
+ '-c:v',
712
+ 'copy', # avoid re-encoding
713
+ '-c:a',
714
+ 'copy', # avoid re-encoding
715
+ '-t',
716
+ str(video_duration), # limit output duration to video duration
717
+ '-loglevel',
718
+ 'error', # only show errors
719
+ output_path,
720
+ ]
721
+ )
722
+
723
+ _logger.debug(f'with_audio(): {" ".join(cmd)}')
724
+
725
+ try:
726
+ result = subprocess.run(cmd, capture_output=True, text=True, check=True)
727
+ output_file = pathlib.Path(output_path)
728
+ if not output_file.exists() or output_file.stat().st_size == 0:
729
+ stderr_output = result.stderr.strip() if result.stderr is not None else ''
730
+ raise pxt.Error(f'ffmpeg failed to create output file for commandline: {" ".join(cmd)}\n{stderr_output}')
731
+ return output_path
732
+ except subprocess.CalledProcessError as e:
733
+ _handle_ffmpeg_error(e)
734
+
735
+
736
+ @pxt.udf(is_method=True)
737
+ def overlay_text(
738
+ video: pxt.Video,
739
+ text: str,
740
+ *,
741
+ font: str | None = None,
742
+ font_size: int = 24,
743
+ color: str = 'white',
744
+ opacity: float = 1.0,
745
+ horizontal_align: Literal['left', 'center', 'right'] = 'center',
746
+ horizontal_margin: int = 0,
747
+ vertical_align: Literal['top', 'center', 'bottom'] = 'center',
748
+ vertical_margin: int = 0,
749
+ box: bool = False,
750
+ box_color: str = 'black',
751
+ box_opacity: float = 1.0,
752
+ box_border: list[int] | None = None,
753
+ ) -> pxt.Video:
754
+ """
755
+ Overlay text on a video with customizable positioning and styling.
756
+
757
+ __Requirements:__
758
+
759
+ - `ffmpeg` needs to be installed and in PATH
760
+
761
+ Args:
762
+ video: Input video to overlay text on.
763
+ text: The text string to overlay on the video.
764
+ font: Font family or path to font file. If None, uses the system default.
765
+ font_size: Size of the text in points.
766
+ color: Text color (e.g., `'white'`, `'red'`, `'#FF0000'`).
767
+ opacity: Text opacity from 0.0 (transparent) to 1.0 (opaque).
768
+ horizontal_align: Horizontal text alignment (`'left'`, `'center'`, `'right'`).
769
+ horizontal_margin: Horizontal margin in pixels from the alignment edge.
770
+ vertical_align: Vertical text alignment (`'top'`, `'center'`, `'bottom'`).
771
+ vertical_margin: Vertical margin in pixels from the alignment edge.
772
+ box: Whether to draw a background box behind the text.
773
+ box_color: Background box color as a string.
774
+ box_opacity: Background box opacity from 0.0 to 1.0.
775
+ box_border: Padding around text in the box in pixels.
776
+
777
+ - `[10]`: 10 pixels on all sides
778
+ - `[10, 20]`: 10 pixels on top/bottom, 20 on left/right
779
+ - `[10, 20, 30]`: 10 pixels on top, 20 on left/right, 30 on bottom
780
+ - `[10, 20, 30, 40]`: 10 pixels on top, 20 on right, 30 on bottom, 40 on left
781
+
782
+ Returns:
783
+ A new video with the text overlay applied.
784
+
785
+ Examples:
786
+ Add a simple text overlay to videos in a table:
787
+
788
+ >>> tbl.select(tbl.video.overlay_text('Sample Text')).collect()
789
+
790
+ Add a YouTube-style caption:
791
+
792
+ >>> tbl.select(
793
+ ... tbl.video.overlay_text(
794
+ ... 'Caption text',
795
+ ... font_size=32,
796
+ ... color='white',
797
+ ... opacity=1.0,
798
+ ... box=True,
799
+ ... box_color='black',
800
+ ... box_opacity=0.8,
801
+ ... box_border=[6, 14],
802
+ ... horizontal_margin=10,
803
+ ... vertical_align='bottom',
804
+ ... vertical_margin=70
805
+ ... )
806
+ ... ).collect()
807
+
808
+ Add text with a semi-transparent background box:
809
+
810
+ >>> tbl.select(
811
+ ... tbl.video.overlay_text(
812
+ ... 'Important Message',
813
+ ... font_size=32,
814
+ ... color='yellow',
815
+ ... box=True,
816
+ ... box_color='black',
817
+ ... box_opacity=0.6,
818
+ ... box_border=[20, 10]
819
+ ... )
820
+ ... ).collect()
821
+ """
822
+ Env.get().require_binary('ffmpeg')
823
+ if font_size <= 0:
824
+ raise pxt.Error(f'font_size must be positive, got {font_size}')
825
+ if opacity < 0.0 or opacity > 1.0:
826
+ raise pxt.Error(f'opacity must be between 0.0 and 1.0, got {opacity}')
827
+ if horizontal_margin < 0:
828
+ raise pxt.Error(f'horizontal_margin must be non-negative, got {horizontal_margin}')
829
+ if vertical_margin < 0:
830
+ raise pxt.Error(f'vertical_margin must be non-negative, got {vertical_margin}')
831
+ if box_opacity < 0.0 or box_opacity > 1.0:
832
+ raise pxt.Error(f'box_opacity must be between 0.0 and 1.0, got {box_opacity}')
833
+ if box_border is not None and not (
834
+ isinstance(box_border, (list, tuple))
835
+ and len(box_border) >= 1
836
+ and len(box_border) <= 4
837
+ and all(isinstance(x, int) for x in box_border)
838
+ and all(x >= 0 for x in box_border)
839
+ ):
840
+ raise pxt.Error(f'box_border must be a list or tuple of 1-4 non-negative ints, got {box_border!s} instead')
841
+
842
+ output_path = str(TempStore.create_path(extension='.mp4'))
843
+
844
+ drawtext_params = _create_drawtext_params(
845
+ text,
846
+ font,
847
+ font_size,
848
+ color,
849
+ opacity,
850
+ horizontal_align,
851
+ horizontal_margin,
852
+ vertical_align,
853
+ vertical_margin,
854
+ box,
855
+ box_color,
856
+ box_opacity,
857
+ box_border,
858
+ )
859
+
860
+ cmd = [
861
+ 'ffmpeg',
862
+ '-i',
863
+ str(video),
864
+ '-vf',
865
+ 'drawtext=' + ':'.join(drawtext_params),
866
+ '-c:a',
867
+ 'copy', # Copy audio stream unchanged
868
+ '-loglevel',
869
+ 'error', # Only show errors
870
+ output_path,
871
+ ]
872
+ _logger.debug(f'overlay_text(): {" ".join(cmd)}')
873
+
874
+ try:
875
+ result = subprocess.run(cmd, capture_output=True, text=True, check=True)
876
+ output_file = pathlib.Path(output_path)
877
+ if not output_file.exists() or output_file.stat().st_size == 0:
878
+ stderr_output = result.stderr.strip() if result.stderr is not None else ''
879
+ raise pxt.Error(f'ffmpeg failed to create output file for commandline: {" ".join(cmd)}\n{stderr_output}')
880
+ return output_path
881
+ except subprocess.CalledProcessError as e:
882
+ _handle_ffmpeg_error(e)
883
+
884
+
885
+ def _create_drawtext_params(
886
+ text: str,
887
+ font: str | None,
888
+ font_size: int,
889
+ color: str,
890
+ opacity: float,
891
+ horizontal_align: str,
892
+ horizontal_margin: int,
893
+ vertical_align: str,
894
+ vertical_margin: int,
895
+ box: bool,
896
+ box_color: str,
897
+ box_opacity: float,
898
+ box_border: list[int] | None,
899
+ ) -> list[str]:
900
+ """Construct parameters for the ffmpeg drawtext filter"""
901
+ drawtext_params: list[str] = []
902
+ escaped_text = text.replace('\\', '\\\\').replace(':', '\\:').replace("'", "\\'")
903
+ drawtext_params.append(f"text='{escaped_text}'")
904
+ drawtext_params.append(f'fontsize={font_size}')
905
+
906
+ if font is not None:
907
+ if pathlib.Path(font).exists():
908
+ drawtext_params.append(f"fontfile='{font}'")
909
+ else:
910
+ drawtext_params.append(f"font='{font}'")
911
+ if opacity < 1.0:
912
+ drawtext_params.append(f'fontcolor={color}@{opacity}')
913
+ else:
914
+ drawtext_params.append(f'fontcolor={color}')
915
+
916
+ if horizontal_align == 'left':
917
+ x_expr = str(horizontal_margin)
918
+ elif horizontal_align == 'center':
919
+ x_expr = '(w-text_w)/2'
920
+ else: # right
921
+ x_expr = f'w-text_w-{horizontal_margin}' if horizontal_margin != 0 else 'w-text_w'
922
+ if vertical_align == 'top':
923
+ y_expr = str(vertical_margin)
924
+ elif vertical_align == 'center':
925
+ y_expr = '(h-text_h)/2'
926
+ else: # bottom
927
+ y_expr = f'h-text_h-{vertical_margin}' if vertical_margin != 0 else 'h-text_h'
928
+ drawtext_params.extend([f'x={x_expr}', f'y={y_expr}'])
929
+
930
+ if box:
931
+ drawtext_params.append('box=1')
932
+ if box_opacity < 1.0:
933
+ drawtext_params.append(f'boxcolor={box_color}@{box_opacity}')
934
+ else:
935
+ drawtext_params.append(f'boxcolor={box_color}')
936
+ if box_border is not None:
937
+ drawtext_params.append(f'boxborderw={"|".join(map(str, box_border))}')
938
+
939
+ return drawtext_params
940
+
941
+
942
+ @pxt.udf(is_method=True)
943
+ def scene_detect_adaptive(
944
+ video: pxt.Video,
945
+ *,
946
+ fps: float | None = None,
947
+ adaptive_threshold: float = 3.0,
948
+ min_scene_len: int = 15,
949
+ window_width: int = 2,
950
+ min_content_val: float = 15.0,
951
+ delta_hue: float = 1.0,
952
+ delta_sat: float = 1.0,
953
+ delta_lum: float = 1.0,
954
+ delta_edges: float = 0.0,
955
+ luma_only: bool = False,
956
+ kernel_size: int | None = None,
957
+ ) -> list[dict]:
958
+ """
959
+ Detect scene cuts in a video using PySceneDetect's
960
+ [AdaptiveDetector](https://www.scenedetect.com/docs/latest/api/detectors.html#scenedetect.detectors.adaptive_detector.AdaptiveDetector).
961
+
962
+ __Requirements:__
963
+
964
+ - `pip install scenedetect`
965
+
966
+ Args:
967
+ video: The video to analyze for scene cuts.
968
+ fps: Number of frames to extract per second for analysis. If None or 0, analyzes all frames.
969
+ Lower values process faster but may miss exact scene cuts.
970
+ adaptive_threshold: Threshold that the score ratio must exceed to trigger a new scene cut.
971
+ Lower values will detect more scenes (more sensitive), higher values will detect fewer scenes.
972
+ min_scene_len: Once a cut is detected, this many frames must pass before a new one can be added to the scene
973
+ list.
974
+ window_width: Size of window (number of frames) before and after each frame to average together in order to
975
+ detect deviations from the mean. Must be at least 1.
976
+ min_content_val: Minimum threshold (float) that the content_val must exceed in order to register as a new scene.
977
+ This is calculated the same way that `scene_detect_content()` calculates frame
978
+ score based on weights/luma_only/kernel_size.
979
+ delta_hue: Weight for hue component changes. Higher values make hue changes more important.
980
+ delta_sat: Weight for saturation component changes. Higher values make saturation changes more important.
981
+ delta_lum: Weight for luminance component changes. Higher values make brightness changes more important.
982
+ delta_edges: Weight for edge detection changes. Higher values make edge changes more important.
983
+ Edge detection can help detect cuts in scenes with similar colors but different content.
984
+ luma_only: If True, only analyzes changes in the luminance (brightness) channel of the video,
985
+ ignoring color information. This can be faster and may work better for grayscale content.
986
+ kernel_size: Size of kernel to use for post edge detection filtering. If None, automatically set based on video
987
+ resolution.
988
+
989
+ Returns:
990
+ A list of dictionaries, one for each detected scene, with the following keys:
991
+
992
+ - `start_time` (float): The start time of the scene in seconds.
993
+ - `start_pts` (int): The pts of the start of the scene.
994
+ - `duration` (float): The duration of the scene in seconds.
995
+
996
+ The list is ordered chronologically. Returns the full duration of the video if no scenes are detected.
997
+
998
+ Examples:
999
+ Detect scene cuts with default parameters:
1000
+
1001
+ >>> tbl.select(tbl.video.scene_detect_adaptive()).collect()
1002
+
1003
+ Detect more scenes by lowering the threshold:
1004
+
1005
+ >>> tbl.select(tbl.video.scene_detect_adaptive(adaptive_threshold=1.5)).collect()
1006
+
1007
+ Use luminance-only detection with a longer minimum scene length:
1008
+
1009
+ >>> tbl.select(
1010
+ ... tbl.video.scene_detect_adaptive(
1011
+ ... luma_only=True,
1012
+ ... min_scene_len=30
1013
+ ... )
1014
+ ... ).collect()
1015
+
1016
+ Add scene cuts as a computed column:
1017
+
1018
+ >>> tbl.add_computed_column(
1019
+ ... scene_cuts=tbl.video.scene_detect_adaptive(adaptive_threshold=2.0)
1020
+ ... )
1021
+
1022
+ Analyze at a lower frame rate for faster processing:
1023
+
1024
+ >>> tbl.select(tbl.video.scene_detect_adaptive(fps=2.0)).collect()
1025
+ """
1026
+ Env.get().require_package('scenedetect')
1027
+ from scenedetect.detectors import AdaptiveDetector, ContentDetector
1028
+
1029
+ weights = ContentDetector.Components(
1030
+ delta_hue=delta_hue, delta_sat=delta_sat, delta_lum=delta_lum, delta_edges=delta_edges
1031
+ )
1032
+ try:
1033
+ detector = AdaptiveDetector(
1034
+ adaptive_threshold=adaptive_threshold,
1035
+ min_scene_len=min_scene_len,
1036
+ window_width=window_width,
1037
+ min_content_val=min_content_val,
1038
+ weights=weights,
1039
+ luma_only=luma_only,
1040
+ kernel_size=kernel_size,
1041
+ )
1042
+ return _scene_detect(video, fps, detector)
1043
+ except Exception as e:
1044
+ raise pxt.Error(f'scene_detect_adaptive(): failed to detect scenes: {e}') from e
1045
+
1046
+
1047
+ @pxt.udf(is_method=True)
1048
+ def scene_detect_content(
1049
+ video: pxt.Video,
1050
+ *,
1051
+ fps: float | None = None,
1052
+ threshold: float = 27.0,
1053
+ min_scene_len: int = 15,
1054
+ delta_hue: float = 1.0,
1055
+ delta_sat: float = 1.0,
1056
+ delta_lum: float = 1.0,
1057
+ delta_edges: float = 0.0,
1058
+ luma_only: bool = False,
1059
+ kernel_size: int | None = None,
1060
+ filter_mode: Literal['merge', 'suppress'] = 'merge',
1061
+ ) -> list[dict]:
1062
+ """
1063
+ Detect scene cuts in a video using PySceneDetect's
1064
+ [ContentDetector](https://www.scenedetect.com/docs/latest/api/detectors.html#scenedetect.detectors.content_detector.ContentDetector).
1065
+
1066
+ __Requirements:__
1067
+
1068
+ - `pip install scenedetect`
1069
+
1070
+ Args:
1071
+ video: The video to analyze for scene cuts.
1072
+ fps: Number of frames to extract per second for analysis. If None, analyzes all frames.
1073
+ Lower values process faster but may miss exact scene cuts.
1074
+ threshold: Threshold that the weighted sum of component changes must exceed to trigger a scene cut.
1075
+ Lower values detect more scenes (more sensitive), higher values detect fewer scenes.
1076
+ min_scene_len: Once a cut is detected, this many frames must pass before a new one can be added to the scene
1077
+ list.
1078
+ delta_hue: Weight for hue component changes. Higher values make hue changes more important.
1079
+ delta_sat: Weight for saturation component changes. Higher values make saturation changes more important.
1080
+ delta_lum: Weight for luminance component changes. Higher values make brightness changes more important.
1081
+ delta_edges: Weight for edge detection changes. Higher values make edge changes more important.
1082
+ Edge detection can help detect cuts in scenes with similar colors but different content.
1083
+ luma_only: If True, only analyzes changes in the luminance (brightness) channel,
1084
+ ignoring color information. This can be faster and may work better for grayscale content.
1085
+ kernel_size: Size of kernel for expanding detected edges. Must be odd integer greater than or equal to 3. If
1086
+ None, automatically set using video resolution.
1087
+ filter_mode: How to handle fast cuts/flashes. 'merge' combines quick cuts, 'suppress' filters them out.
1088
+
1089
+ Returns:
1090
+ A list of dictionaries, one for each detected scene, with the following keys:
1091
+
1092
+ - `start_time` (float): The start time of the scene in seconds.
1093
+ - `start_pts` (int): The pts of the start of the scene.
1094
+ - `duration` (float): The duration of the scene in seconds.
1095
+
1096
+ The list is ordered chronologically. Returns the full duration of the video if no scenes are detected.
1097
+
1098
+ Examples:
1099
+ Detect scene cuts with default parameters:
1100
+
1101
+ >>> tbl.select(tbl.video.scene_detect_content()).collect()
1102
+
1103
+ Detect more scenes by lowering the threshold:
1104
+
1105
+ >>> tbl.select(tbl.video.scene_detect_content(threshold=15.0)).collect()
1106
+
1107
+ Use luminance-only detection:
1108
+
1109
+ >>> tbl.select(tbl.video.scene_detect_content(luma_only=True)).collect()
1110
+
1111
+ Emphasize edge detection for scenes with similar colors:
1112
+
1113
+ >>> tbl.select(
1114
+ ... tbl.video.scene_detect_content(
1115
+ ... delta_edges=1.0,
1116
+ ... delta_hue=0.5,
1117
+ ... delta_sat=0.5
1118
+ ... )
1119
+ ... ).collect()
1120
+
1121
+ Add scene cuts as a computed column:
1122
+
1123
+ >>> tbl.add_computed_column(
1124
+ ... scene_cuts=tbl.video.scene_detect_content(threshold=20.0)
1125
+ ... )
1126
+ """
1127
+ Env.get().require_package('scenedetect')
1128
+ from scenedetect.detectors import ContentDetector
1129
+ from scenedetect.detectors.content_detector import FlashFilter # type: ignore[import-untyped]
1130
+
+     weights = ContentDetector.Components(
+         delta_hue=delta_hue, delta_sat=delta_sat, delta_lum=delta_lum, delta_edges=delta_edges
+     )
+     filter_mode_enum = FlashFilter.Mode.MERGE if filter_mode == 'merge' else FlashFilter.Mode.SUPPRESS
+
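+     # any PySceneDetect failure is surfaced as a pxt.Error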
+     try:
+         detector = ContentDetector(
+             threshold=threshold,
+             min_scene_len=min_scene_len,
+             weights=weights,
+             luma_only=luma_only,
+             kernel_size=kernel_size,
+             filter_mode=filter_mode_enum,
+         )
+         return _scene_detect(video, fps, detector)
+     except Exception as e:
+         raise pxt.Error(f'scene_detect_content(): failed to detect scenes: {e}') from e
+
+
+ @pxt.udf(is_method=True)
+ def scene_detect_threshold(
+     video: pxt.Video,
+     *,
+     fps: float | None = None,
+     threshold: float = 12.0,
+     min_scene_len: int = 15,
+     fade_bias: float = 0.0,
+     add_final_scene: bool = False,
+     method: Literal['ceiling', 'floor'] = 'floor',
+ ) -> list[dict]:
+     """
+     Detect fade-in and fade-out transitions in a video using PySceneDetect's
+     [ThresholdDetector](https://www.scenedetect.com/docs/latest/api/detectors.html#scenedetect.detectors.threshold_detector.ThresholdDetector).
+
+     ThresholdDetector identifies scenes by detecting when pixel brightness falls below or rises above
+     a threshold value, suitable for detecting fade-to-black, fade-to-white, and similar transitions.
+
+     __Requirements:__
+
+     - `pip install scenedetect`
+
+     Args:
+         video: The video to analyze for fade transitions.
+         fps: Number of frames to extract per second for analysis. If None or 0, analyzes all frames.
+             Lower values process faster but may miss exact transition points.
+         threshold: 8-bit intensity value that each pixel value (R, G, and B) must be less than or equal to
+             in order to trigger a fade in/out.
+         min_scene_len: Once a cut is detected, this many frames must pass before a new one can be added to the
+             scene list.
+         fade_bias: Float between -1.0 and +1.0 representing the percentage of timecode skew for the start of a
+             scene (-1.0 causing a cut at the fade-to-black, 0.0 in the middle, and +1.0 causing the cut to be
+             right at the position where the threshold is passed).
+         add_final_scene: If True and the video ends on a fade-out, generate an additional scene at that
+             timecode.
+         method: How to treat the threshold when detecting fade events:
+             - 'ceiling': Fade out happens when frame brightness rises above threshold.
+             - 'floor': Fade out happens when frame brightness falls below threshold.
+
+     Returns:
+         A list of dictionaries, one for each detected scene, with the following keys:
+
+         - `start_time` (float): The start time of the scene in seconds.
+         - `start_pts` (int): The presentation timestamp (pts) of the start of the scene.
+         - `duration` (float): The duration of the scene in seconds.
+
+         The list is ordered chronologically. If no cuts are detected, the entire video is returned as a single
+         scene.
+
+     Examples:
+         Detect fade-to-black transitions with default parameters:
+
+         >>> tbl.select(tbl.video.scene_detect_threshold()).collect()
+
+         Use a lower threshold to detect darker fades:
+
+         >>> tbl.select(tbl.video.scene_detect_threshold(threshold=8.0)).collect()
+
+         Detect fade-to-white transitions with the 'ceiling' method:
+
+         >>> tbl.select(tbl.video.scene_detect_threshold(method='ceiling')).collect()
+
+         Add final scene boundary:
+
+         >>> tbl.select(
+         ...     tbl.video.scene_detect_threshold(
+         ...         add_final_scene=True
+         ...     )
+         ... ).collect()
+
+         Add fade transitions as a computed column:
+
+         >>> tbl.add_computed_column(
+         ...     fade_cuts=tbl.video.scene_detect_threshold(threshold=15.0)
+         ... )
+     """
+     Env.get().require_package('scenedetect')
+     from scenedetect.detectors import ThresholdDetector
+
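+     # map the 'floor'/'ceiling' option onto ThresholdDetector's Method enum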
+     method_enum = ThresholdDetector.Method.FLOOR if method == 'floor' else ThresholdDetector.Method.CEILING
+     try:
+         detector = ThresholdDetector(
+             threshold=threshold,
+             min_scene_len=min_scene_len,
+             fade_bias=fade_bias,
+             add_final_scene=add_final_scene,
+             method=method_enum,
          )
+         return _scene_detect(video, fps, detector)
+     except Exception as e:
+         raise pxt.Error(f'scene_detect_threshold(): failed to detect scenes: {e}') from e
+
+
+ @pxt.udf(is_method=True)
+ def scene_detect_histogram(
+     video: pxt.Video, *, fps: float | None = None, threshold: float = 0.05, bins: int = 256, min_scene_len: int = 15
+ ) -> list[dict]:
+     """
+     Detect scene cuts in a video using PySceneDetect's
+     [HistogramDetector](https://www.scenedetect.com/docs/latest/api/detectors.html#scenedetect.detectors.histogram_detector.HistogramDetector).
+
+     HistogramDetector compares frame histograms on the Y (luminance) channel after YUV conversion.
+     It detects scenes based on relative histogram differences and is more robust to gradual lighting
+     changes than content-based detection.
+
+     __Requirements:__
+
+     - `pip install scenedetect`
+
+     Args:
+         video: The video to analyze for scene cuts.
+         fps: Number of frames to extract per second for analysis. If None or 0, analyzes all frames.
+             Lower values process faster but may miss exact scene cuts.
+         threshold: Maximum relative difference (between 0.0 and 1.0) allowed between adjacent frame histograms
+             before a cut is triggered. Histograms are calculated on the Y channel after converting the frame to
+             YUV, and normalized based on the number of bins. Higher differences imply greater change in content,
+             so lower values detect more scenes (more sensitive) and higher values detect fewer scenes.
+         bins: Number of bins to use for histogram calculation (typically 16-256). More bins provide
+             finer granularity but may be more sensitive to noise.
+         min_scene_len: Once a cut is detected, this many frames must pass before a new one can be added to the
+             scene list.
+
+     Returns:
+         A list of dictionaries, one for each detected scene, with the following keys:
+
+         - `start_time` (float): The start time of the scene in seconds.
+         - `start_pts` (int): The presentation timestamp (pts) of the start of the scene.
+         - `duration` (float): The duration of the scene in seconds.
+
+         The list is ordered chronologically. If no cuts are detected, the entire video is returned as a single
+         scene.
+
+     Examples:
+         Detect scene cuts with default parameters:
+
+         >>> tbl.select(tbl.video.scene_detect_histogram()).collect()
+
+         Detect more scenes by lowering the threshold:
+
+         >>> tbl.select(tbl.video.scene_detect_histogram(threshold=0.03)).collect()
+
+         Use fewer bins for faster processing:
+
+         >>> tbl.select(tbl.video.scene_detect_histogram(bins=64)).collect()
+
+         Use a longer minimum scene length:
+
+         >>> tbl.select(
+         ...     tbl.video.scene_detect_histogram(
+         ...         min_scene_len=30
+         ...     )
+         ... ).collect()
+
+         Add scene cuts as a computed column:
+
+         >>> tbl.add_computed_column(
+         ...     scene_cuts=tbl.video.scene_detect_histogram(threshold=0.04)
+         ... )
+     """
+     Env.get().require_package('scenedetect')
+     from scenedetect.detectors import HistogramDetector
+
+     try:
+         detector = HistogramDetector(threshold=threshold, bins=bins, min_scene_len=min_scene_len)
+         return _scene_detect(video, fps, detector)
+     except Exception as e:
+         raise pxt.Error(f'scene_detect_histogram(): failed to detect scenes: {e}') from e
+
+
+ @pxt.udf(is_method=True)
+ def scene_detect_hash(
+     video: pxt.Video,
+     *,
+     fps: float | None = None,
+     threshold: float = 0.395,
+     size: int = 16,
+     lowpass: int = 2,
+     min_scene_len: int = 15,
+ ) -> list[dict]:
+     """
+     Detect scene cuts in a video using PySceneDetect's
+     [HashDetector](https://www.scenedetect.com/docs/latest/api/detectors.html#scenedetect.detectors.hash_detector.HashDetector).
+
+     HashDetector uses perceptual hashing for very fast scene detection. It computes a hash of each
+     frame at reduced resolution and compares hash distances.
+
+     __Requirements:__
+
+     - `pip install scenedetect`
+
+     Args:
+         video: The video to analyze for scene cuts.
+         fps: Number of frames to extract per second for analysis. If None, analyzes all frames.
+             Lower values process faster but may miss exact scene cuts.
+         threshold: Value between 0.0 and 1.0 representing the relative Hamming distance between the perceptual
+             hashes of adjacent frames. A distance of 0 means the frames are identical, and 1 means no
+             correlation. The Hamming distance is divided by `size` x `size` for normalization before it is
+             compared to the threshold. Lower values require more correlation and thus detect more scenes (more
+             sensitive); higher values detect fewer scenes.
+         size: Size of the square of low-frequency DCT data to use. Larger values are more precise but slower.
+             Common values are 8, 16, or 32.
+         lowpass: How much high-frequency information to filter from the DCT. A value of 2 means keep the lower
+             1/2 of the frequency data, 4 means keep only 1/4, etc. Larger values make the detector less
+             sensitive to high-frequency details and noise.
+         min_scene_len: Once a cut is detected, this many frames must pass before a new one can be added to the
+             scene list.
+
+     Returns:
+         A list of dictionaries, one for each detected scene, with the following keys:
+
+         - `start_time` (float): The start time of the scene in seconds.
+         - `start_pts` (int): The presentation timestamp (pts) of the start of the scene.
+         - `duration` (float): The duration of the scene in seconds.
+
+         The list is ordered chronologically. If no cuts are detected, the entire video is returned as a single
+         scene.
+
+     Examples:
+         Detect scene cuts with default parameters:
+
+         >>> tbl.select(tbl.video.scene_detect_hash()).collect()
+
+         Detect more scenes by lowering the threshold:
+
+         >>> tbl.select(tbl.video.scene_detect_hash(threshold=0.3)).collect()
+
+         Use a larger hash size for more precision:
+
+         >>> tbl.select(tbl.video.scene_detect_hash(size=32)).collect()
+
+         Process quickly at a reduced frame rate:
+
+         >>> tbl.select(
+         ...     tbl.video.scene_detect_hash(
+         ...         fps=1.0,
+         ...         threshold=0.4
+         ...     )
+         ... ).collect()
+
+         Add scene cuts as a computed column:
+
+         >>> tbl.add_computed_column(
+         ...     scene_cuts=tbl.video.scene_detect_hash()
+         ... )
+     """
+     Env.get().require_package('scenedetect')
+     from scenedetect.detectors import HashDetector
+
+     try:
+         detector = HashDetector(threshold=threshold, size=size, lowpass=lowpass, min_scene_len=min_scene_len)
+         return _scene_detect(video, fps, detector)
+     except Exception as e:
+         raise pxt.Error(f'scene_detect_hash(): failed to detect scenes: {e}') from e
+
+
+ class _SceneDetectFrameInfo(NamedTuple):
+     frame_idx: int
+     frame_pts: int
+     frame_time: float
+
+
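+ # Shared driver for all four detectors: iterates over the video's frames (optionally subsampled to `fps`),
+ # feeds each frame to `detector`, and converts the reported cuts into
+ # {'start_time', 'start_pts', 'duration'} records.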
+ def _scene_detect(video: str, fps: float | None, detector: 'SceneDetector') -> list[dict[str, int | float]]:
+     from scenedetect import FrameTimecode  # type: ignore[import-untyped]
+
+     with av_utils.VideoFrames(pathlib.Path(video), fps=fps) as frame_iter:
+         video_fps = float(frame_iter.video_framerate)
+
+         scenes: list[dict[str, int | float]] = []
+         frame_idx: int | None = None
+         start_time: float | None = None  # of current scene
+         start_pts: int | None = None  # of current scene
+
+         # in order to determine the cut frame times, we need to record frame times (chronologically) and look
+         # them up by index; deriving frame times from frame indices isn't possible due to variable frame rates
+         frame_info: list[_SceneDetectFrameInfo] = []
+
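+         # converts cut timecodes reported by the detector into completed scenes: each cut closes the scene
+         # that began at (start_time, start_pts) and starts a new one at the cut frame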
+         def process_cuts(cuts: list[FrameTimecode]) -> None:
+             nonlocal frame_info, start_time, start_pts
+             for cut_timecode in cuts:
+                 cut_frame_idx = cut_timecode.get_frames()
+                 # we expect cuts to come back in chronological order
+                 assert cut_frame_idx >= frame_info[0].frame_idx
+                 info_offset = next((i for i, info in enumerate(frame_info) if info.frame_idx == cut_frame_idx), None)
+                 assert info_offset is not None  # the cut is at a previously reported frame idx
+                 info = frame_info[info_offset]
+                 scenes.append(
+                     {'start_time': start_time, 'start_pts': start_pts, 'duration': info.frame_time - start_time}
+                 )
+                 start_time = info.frame_time
+                 start_pts = info.frame_pts
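+                 # drop frame records up to and including the cut; later cuts can only occur at later frames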
+                 frame_info = frame_info[info_offset + 1 :]
+
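+         # single pass over the frames: hand each one to the detector and fold any reported cuts into scenes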
+         for item in frame_iter:
+             if start_time is None:
+                 start_time = item.time
+                 start_pts = item.pts
+             frame_info.append(_SceneDetectFrameInfo(item.frame_idx, item.pts, item.time))
+             frame_array = np.array(item.frame.convert('RGB'))
+             frame_idx = item.frame_idx
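+             # PySceneDetect addresses frames as (frame index, nominal frame rate) pairs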
+             timecode = FrameTimecode(item.frame_idx, video_fps)
+             cuts = detector.process_frame(timecode, frame_array)
+             process_cuts(cuts)
+
+         # Post-process to capture any final scene cuts
+         if frame_idx is not None:
+             final_timecode = FrameTimecode(frame_idx, video_fps)
+             final_cuts = detector.post_process(final_timecode)
+             process_cuts(final_cuts)
+
+         # if we didn't detect any cuts but the video has content, add the full video as a single scene
+         if len(scenes) == 0:
+             scenes.append(
+                 {
+                     'start_time': start_time,
+                     'start_pts': start_pts,
+                     'duration': frame_info[-1].frame_time - start_time,
+                 }
+             )
 
-     return metadata
+     return scenes
 
 
  __all__ = local_public_names(__name__)