singlebehaviorlab 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sam2/__init__.py +11 -0
- sam2/automatic_mask_generator.py +454 -0
- sam2/benchmark.py +92 -0
- sam2/build_sam.py +174 -0
- sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
- sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
- sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
- sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
- sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
- sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
- sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
- sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
- sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
- sam2/modeling/__init__.py +5 -0
- sam2/modeling/backbones/__init__.py +5 -0
- sam2/modeling/backbones/hieradet.py +317 -0
- sam2/modeling/backbones/image_encoder.py +134 -0
- sam2/modeling/backbones/utils.py +93 -0
- sam2/modeling/memory_attention.py +169 -0
- sam2/modeling/memory_encoder.py +181 -0
- sam2/modeling/position_encoding.py +239 -0
- sam2/modeling/sam/__init__.py +5 -0
- sam2/modeling/sam/mask_decoder.py +295 -0
- sam2/modeling/sam/prompt_encoder.py +202 -0
- sam2/modeling/sam/transformer.py +311 -0
- sam2/modeling/sam2_base.py +913 -0
- sam2/modeling/sam2_utils.py +323 -0
- sam2/sam2_hiera_b+.yaml +113 -0
- sam2/sam2_hiera_l.yaml +117 -0
- sam2/sam2_hiera_s.yaml +116 -0
- sam2/sam2_hiera_t.yaml +118 -0
- sam2/sam2_image_predictor.py +466 -0
- sam2/sam2_video_predictor.py +1388 -0
- sam2/sam2_video_predictor_legacy.py +1172 -0
- sam2/utils/__init__.py +5 -0
- sam2/utils/amg.py +348 -0
- sam2/utils/misc.py +349 -0
- sam2/utils/transforms.py +118 -0
- singlebehaviorlab/__init__.py +4 -0
- singlebehaviorlab/__main__.py +130 -0
- singlebehaviorlab/_paths.py +100 -0
- singlebehaviorlab/backend/__init__.py +2 -0
- singlebehaviorlab/backend/augmentations.py +320 -0
- singlebehaviorlab/backend/data_store.py +420 -0
- singlebehaviorlab/backend/model.py +1290 -0
- singlebehaviorlab/backend/train.py +4667 -0
- singlebehaviorlab/backend/uncertainty.py +578 -0
- singlebehaviorlab/backend/video_processor.py +688 -0
- singlebehaviorlab/backend/video_utils.py +139 -0
- singlebehaviorlab/data/config/config.yaml +85 -0
- singlebehaviorlab/data/training_profiles.json +334 -0
- singlebehaviorlab/gui/__init__.py +4 -0
- singlebehaviorlab/gui/analysis_widget.py +2291 -0
- singlebehaviorlab/gui/attention_export.py +311 -0
- singlebehaviorlab/gui/clip_extraction_widget.py +481 -0
- singlebehaviorlab/gui/clustering_widget.py +3187 -0
- singlebehaviorlab/gui/inference_popups.py +1138 -0
- singlebehaviorlab/gui/inference_widget.py +4550 -0
- singlebehaviorlab/gui/inference_worker.py +651 -0
- singlebehaviorlab/gui/labeling_widget.py +2324 -0
- singlebehaviorlab/gui/main_window.py +754 -0
- singlebehaviorlab/gui/metadata_management_widget.py +1119 -0
- singlebehaviorlab/gui/motion_tracking.py +764 -0
- singlebehaviorlab/gui/overlay_export.py +1234 -0
- singlebehaviorlab/gui/plot_integration.py +729 -0
- singlebehaviorlab/gui/qt_helpers.py +29 -0
- singlebehaviorlab/gui/registration_widget.py +1485 -0
- singlebehaviorlab/gui/review_widget.py +1330 -0
- singlebehaviorlab/gui/segmentation_tracking_widget.py +2752 -0
- singlebehaviorlab/gui/tab_tutorial_dialog.py +312 -0
- singlebehaviorlab/gui/timeline_themes.py +131 -0
- singlebehaviorlab/gui/training_profiles.py +418 -0
- singlebehaviorlab/gui/training_widget.py +3719 -0
- singlebehaviorlab/gui/video_utils.py +233 -0
- singlebehaviorlab/licenses/SAM2-LICENSE +201 -0
- singlebehaviorlab/licenses/VideoPrism-LICENSE +202 -0
- singlebehaviorlab-2.0.0.dist-info/METADATA +447 -0
- singlebehaviorlab-2.0.0.dist-info/RECORD +88 -0
- singlebehaviorlab-2.0.0.dist-info/WHEEL +5 -0
- singlebehaviorlab-2.0.0.dist-info/entry_points.txt +2 -0
- singlebehaviorlab-2.0.0.dist-info/licenses/LICENSE +21 -0
- singlebehaviorlab-2.0.0.dist-info/top_level.txt +3 -0
- videoprism/__init__.py +0 -0
- videoprism/encoders.py +910 -0
- videoprism/layers.py +1136 -0
- videoprism/models.py +407 -0
- videoprism/tokenizers.py +167 -0
- videoprism/utils.py +168 -0
|
@@ -0,0 +1,1234 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Overlay export and video preview for inference results.
|
|
3
|
+
Options dialog, export loop, and preview player live here to keep inference_widget lean.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import os
|
|
7
|
+
import shutil
|
|
8
|
+
import subprocess
|
|
9
|
+
import cv2
|
|
10
|
+
import numpy as np
|
|
11
|
+
from PyQt6.QtWidgets import (
|
|
12
|
+
QDialog, QVBoxLayout, QHBoxLayout, QCheckBox, QLabel, QGroupBox, QFormLayout,
|
|
13
|
+
QScrollArea, QWidget, QPushButton, QDialogButtonBox, QMessageBox, QProgressDialog,
|
|
14
|
+
QFileDialog, QApplication, QRadioButton, QSpinBox, QSlider, QListWidget, QListWidgetItem,
|
|
15
|
+
)
|
|
16
|
+
from PyQt6.QtCore import Qt, QTimer
|
|
17
|
+
from PyQt6.QtGui import QImage, QPixmap
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
_NVENC_AVAILABLE = None
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _ffmpeg_nvenc_available():
    """Return True when ffmpeg with NVIDIA NVENC encoding is usable.

    The probe runs at most once per process; the outcome is cached in the
    module-level ``_NVENC_AVAILABLE`` flag and reused on later calls.
    """
    global _NVENC_AVAILABLE
    if _NVENC_AVAILABLE is not None:
        return _NVENC_AVAILABLE

    ffmpeg_path = shutil.which("ffmpeg")
    if ffmpeg_path is None:
        _NVENC_AVAILABLE = False
        return False

    # Encode a single synthetic frame through h264_nvenc and discard the
    # output. NVENC rejects very small frame sizes, so use a normal-sized
    # tiny test clip.
    probe_cmd = [
        ffmpeg_path,
        "-hide_banner", "-loglevel", "error",
        "-f", "lavfi", "-i", "color=c=black:s=256x256:d=0.2",
        "-frames:v", "1",
        "-an",
        "-c:v", "h264_nvenc",
        "-f", "null", "-",
    ]
    try:
        completed = subprocess.run(
            probe_cmd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            timeout=20,
            check=False,
        )
    except Exception:
        # ffmpeg missing/hung/broken mid-probe: treat NVENC as unusable.
        _NVENC_AVAILABLE = False
    else:
        _NVENC_AVAILABLE = (completed.returncode == 0)
    return _NVENC_AVAILABLE
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _open_overlay_writer(output_path: str, fps: float, width: int, height: int):
    """Open a video writer, preferring ffmpeg NVENC and falling back to OpenCV.

    Returns a ``(kind, writer)`` pair: ``("ffmpeg_nvenc", Popen)`` where the
    process consumes raw BGR24 frames on stdin, or ``("opencv_mp4v", VideoWriter)``.
    Raises RuntimeError when neither writer can be created.
    """
    if _ffmpeg_nvenc_available():
        ffmpeg_path = shutil.which("ffmpeg")
        if ffmpeg_path:
            nvenc_cmd = [
                ffmpeg_path,
                "-y",
                "-hide_banner", "-loglevel", "error",
                # Input: raw BGR frames piped in on stdin at the source rate.
                "-f", "rawvideo",
                "-pix_fmt", "bgr24",
                "-s:v", f"{width}x{height}",
                "-r", f"{float(fps):.6f}",
                "-i", "-",
                "-an",
                # Output: H.264 via NVENC; pad to even dimensions for yuv420p.
                "-c:v", "h264_nvenc",
                "-preset", "p4",
                "-pix_fmt", "yuv420p",
                "-movflags", "+faststart",
                "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2",
                output_path,
            ]
            try:
                proc = subprocess.Popen(
                    nvenc_cmd,
                    stdin=subprocess.PIPE,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.PIPE,
                )
            except Exception:
                # Launch failed despite a positive probe; use the fallback.
                pass
            else:
                return "ffmpeg_nvenc", proc

    # Fallback: plain OpenCV mp4v writer.
    writer = cv2.VideoWriter(
        output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height)
    )
    if not writer.isOpened():
        raise RuntimeError(f"Could not create output video: {output_path}")
    return "opencv_mp4v", writer
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def _write_overlay_frame(writer_kind: str, writer_obj, frame: np.ndarray):
|
|
120
|
+
if writer_kind == "ffmpeg_nvenc":
|
|
121
|
+
try:
|
|
122
|
+
writer_obj.stdin.write(frame.tobytes())
|
|
123
|
+
except Exception as e:
|
|
124
|
+
stderr_text = ""
|
|
125
|
+
try:
|
|
126
|
+
if writer_obj.stderr is not None:
|
|
127
|
+
stderr_text = writer_obj.stderr.read().decode("utf-8", errors="ignore").strip()
|
|
128
|
+
except Exception:
|
|
129
|
+
pass
|
|
130
|
+
raise RuntimeError(stderr_text or f"ffmpeg NVENC write failed: {e}") from e
|
|
131
|
+
return
|
|
132
|
+
writer_obj.write(frame)
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def _close_overlay_writer(writer_kind: str, writer_obj, abort: bool = False):
|
|
136
|
+
"""Close writer and return a short encoder label."""
|
|
137
|
+
if writer_kind == "ffmpeg_nvenc":
|
|
138
|
+
stderr_text = ""
|
|
139
|
+
try:
|
|
140
|
+
if abort:
|
|
141
|
+
if writer_obj.stdin is not None:
|
|
142
|
+
writer_obj.stdin.close()
|
|
143
|
+
writer_obj.kill()
|
|
144
|
+
writer_obj.wait(timeout=5)
|
|
145
|
+
return "ffmpeg NVENC"
|
|
146
|
+
|
|
147
|
+
if writer_obj.stdin is not None:
|
|
148
|
+
writer_obj.stdin.close()
|
|
149
|
+
return_code = writer_obj.wait(timeout=30)
|
|
150
|
+
if writer_obj.stderr is not None:
|
|
151
|
+
stderr_text = writer_obj.stderr.read().decode("utf-8", errors="ignore").strip()
|
|
152
|
+
if return_code != 0:
|
|
153
|
+
raise RuntimeError(stderr_text or f"ffmpeg NVENC exited with code {return_code}")
|
|
154
|
+
return "ffmpeg NVENC"
|
|
155
|
+
finally:
|
|
156
|
+
try:
|
|
157
|
+
if writer_obj.stderr is not None:
|
|
158
|
+
writer_obj.stderr.close()
|
|
159
|
+
except Exception:
|
|
160
|
+
pass
|
|
161
|
+
|
|
162
|
+
writer_obj.release()
|
|
163
|
+
return "OpenCV mp4v"
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def ask_overlay_export_options(widget):
    """Ask user which behaviors to include and whether to use precise boundaries.

    Builds and runs a modal options dialog covering: which cached videos to
    export, precise vs. clip-level timeline, whether to render the ignore
    label, full-video vs. sampled export range, and which behavior classes
    to draw. Returns an options dict, or None when the user cancels or
    makes an empty selection.
    """
    dlg = QDialog(widget)
    dlg.setWindowTitle("Overlay export options")
    dlg.resize(760, 560)
    layout = QVBoxLayout(dlg)

    # Offer every video that has a cached result dict; fall back to the
    # currently loaded video when the cache holds nothing usable.
    available_videos = [
        vp for vp in getattr(widget, "results_cache", {}).keys()
        if isinstance(getattr(widget, "results_cache", {}).get(vp), dict)
    ]
    if not available_videos and getattr(widget, "video_path", None):
        available_videos = [widget.video_path]
    current_video = getattr(widget, "video_path", None)

    # --- Video selection list with select-all / current / clear shortcuts ---
    layout.addWidget(QLabel("Videos to export:"))
    video_button_row = QHBoxLayout()
    video_all_btn = QPushButton("Select all")
    video_current_btn = QPushButton("Current video")
    video_clear_btn = QPushButton("Clear")
    video_button_row.addWidget(video_all_btn)
    video_button_row.addWidget(video_current_btn)
    video_button_row.addWidget(video_clear_btn)
    video_button_row.addStretch()
    layout.addLayout(video_button_row)

    video_list = QListWidget()
    for vp in available_videos:
        item = QListWidgetItem(os.path.basename(vp))
        # Full path travels in UserRole; the visible text is just the basename.
        item.setData(Qt.ItemDataRole.UserRole, vp)
        item.setFlags(item.flags() | Qt.ItemFlag.ItemIsUserCheckable | Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable)
        # Pre-check the current video (or the only video available).
        default_checked = len(available_videos) == 1 or vp == current_video
        item.setCheckState(Qt.CheckState.Checked if default_checked else Qt.CheckState.Unchecked)
        video_list.addItem(item)
    video_list.setMaximumHeight(120)
    layout.addWidget(video_list)

    def _set_video_checks(state):
        # Apply the same check state to every entry in the video list.
        for i in range(video_list.count()):
            video_list.item(i).setCheckState(state)

    def _select_current_video():
        # Check only the entry matching the currently loaded video.
        for i in range(video_list.count()):
            item = video_list.item(i)
            is_current = item.data(Qt.ItemDataRole.UserRole) == current_video
            item.setCheckState(Qt.CheckState.Checked if is_current else Qt.CheckState.Unchecked)

    video_all_btn.clicked.connect(lambda: _set_video_checks(Qt.CheckState.Checked))
    video_clear_btn.clicked.connect(lambda: _set_video_checks(Qt.CheckState.Unchecked))
    video_current_btn.clicked.connect(_select_current_video)

    # Default "precise" on when aggregated segments already exist or the
    # frame-aggregation option is enabled on the widget.
    use_precise_cb = QCheckBox("Use precise boundary timeline when available")
    use_precise_cb.setChecked(
        bool(getattr(widget, "aggregated_segments", None))
        or (getattr(widget, "frame_aggregation_check", None) and widget.frame_aggregation_check.isChecked())
    )
    layout.addWidget(use_precise_cb)

    ignore_label = getattr(widget, "ignore_label_name", "Filtered")
    include_ignore_cb = QCheckBox(f"Include '{ignore_label}' overlays")
    include_ignore_cb.setChecked(False)
    layout.addWidget(include_ignore_cb)

    # Probe the current video for fps / duration to label the range options.
    # Best-effort only; defaults are kept when probing fails.
    video_duration_s = 0.0
    video_fps = 30.0
    video_path = getattr(widget, "video_path", None)
    if video_path:
        try:
            _cap = cv2.VideoCapture(video_path)
            _fps = _cap.get(cv2.CAP_PROP_FPS)
            if _fps > 0:
                video_fps = _fps
            _total = int(_cap.get(cv2.CAP_PROP_FRAME_COUNT))
            video_duration_s = _total / video_fps
            _cap.release()
        except Exception:
            pass

    # --- Export range: full video vs. evenly spaced sample clips ---
    range_group = QGroupBox("Export range")
    range_layout = QVBoxLayout(range_group)

    def _fmt_time(s):
        # Seconds -> "HH:MM:SS".
        m, sec = divmod(int(s), 60)
        h, m = divmod(m, 60)
        return f"{h:02d}:{m:02d}:{sec:02d}"

    rb_full = QRadioButton(f"Full video ({_fmt_time(video_duration_s)})")
    rb_samples = QRadioButton("Quick-check samples")
    rb_full.setChecked(True)
    range_layout.addWidget(rb_full)
    range_layout.addWidget(rb_samples)

    # Sample-clip controls, only shown while the "samples" radio is active.
    samples_widget = QWidget()
    samples_form = QFormLayout(samples_widget)
    samples_form.setContentsMargins(20, 4, 0, 4)
    sample_dur_spin = QSpinBox()
    sample_dur_spin.setRange(10, 300)
    sample_dur_spin.setValue(60)
    sample_dur_spin.setSuffix(" s")
    sample_dur_spin.setToolTip("Duration of each sample clip")
    num_samples_spin = QSpinBox()
    num_samples_spin.setRange(1, 50)
    # Default: roughly one sample per two minutes, clamped to 1..5.
    num_samples_spin.setValue(min(5, max(1, int(video_duration_s / 120))))
    num_samples_spin.setToolTip("Number of sample clips spread evenly across the video")
    samples_form.addRow("Clip duration:", sample_dur_spin)
    samples_form.addRow("Number of samples:", num_samples_spin)
    samples_info = QLabel()
    samples_form.addRow(samples_info)

    def _update_samples_info():
        # Refresh the summary label whenever either spin box changes.
        n = num_samples_spin.value()
        d = sample_dur_spin.value()
        total = n * d
        samples_info.setText(
            f"{n} × {d}s clips = {_fmt_time(total)} of {_fmt_time(video_duration_s)} total\n"
            f"Spread evenly across the video, saved to a folder"
        )

    num_samples_spin.valueChanged.connect(lambda: _update_samples_info())
    sample_dur_spin.valueChanged.connect(lambda: _update_samples_info())
    _update_samples_info()

    samples_widget.setVisible(False)
    range_layout.addWidget(samples_widget)
    rb_samples.toggled.connect(samples_widget.setVisible)
    layout.addWidget(range_group)

    # --- Behavior class checkboxes (all checked by default) ---
    layout.addWidget(QLabel("Behaviors to render in exported video:"))
    scroll = QScrollArea()
    scroll.setWidgetResizable(True)
    body = QWidget()
    body_layout = QVBoxLayout(body)
    behavior_checks = []
    for cls in getattr(widget, "classes", []):
        cb = QCheckBox(cls)
        cb.setChecked(True)
        behavior_checks.append(cb)
        body_layout.addWidget(cb)
    body_layout.addStretch()
    scroll.setWidget(body)
    layout.addWidget(scroll)

    btn_row = QHBoxLayout()
    sel_all = QPushButton("Select all")
    sel_none = QPushButton("Select none")
    btn_row.addWidget(sel_all)
    btn_row.addWidget(sel_none)
    btn_row.addStretch()
    layout.addLayout(btn_row)

    def _set_all(v: bool):
        # Toggle every behavior checkbox at once.
        for cb in behavior_checks:
            cb.setChecked(v)

    sel_all.clicked.connect(lambda: _set_all(True))
    sel_none.clicked.connect(lambda: _set_all(False))

    buttons = QDialogButtonBox(
        QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel
    )
    buttons.accepted.connect(dlg.accept)
    buttons.rejected.connect(dlg.reject)
    layout.addWidget(buttons)

    if dlg.exec() != QDialog.DialogCode.Accepted:
        return None

    # Collect the checked videos; bail out with a warning when none remain.
    selected_videos = []
    for i in range(video_list.count()):
        item = video_list.item(i)
        if item.checkState() == Qt.CheckState.Checked:
            selected_videos.append(item.data(Qt.ItemDataRole.UserRole))
    if not selected_videos:
        QMessageBox.warning(widget, "No videos selected", "Select at least one video to export.")
        return None

    # At least one behavior (or the ignore overlay) must be rendered.
    selected = {cb.text() for cb in behavior_checks if cb.isChecked()}
    if not selected and not include_ignore_cb.isChecked():
        QMessageBox.warning(
            widget, "Nothing selected",
            "Select at least one behavior or include ignore overlays.",
        )
        return None

    result = {
        "selected_videos": selected_videos,
        "selected_behaviors": selected,
        "include_ignore": bool(include_ignore_cb.isChecked()),
        "use_precise": bool(use_precise_cb.isChecked()),
        "mode": "full",
    }
    if rb_samples.isChecked():
        # Sample mode carries its clip parameters along in the options dict.
        result["mode"] = "samples"
        result["sample_duration_seconds"] = int(sample_dur_spin.value())
        result["sample_num_clips"] = int(num_samples_spin.value())
    return result
|
|
362
|
+
|
|
363
|
+
|
|
364
|
+
def run_export_video_with_overlay(widget):
    """Export video with configurable overlays (entry point from inference widget).

    Asks the user for options via ask_overlay_export_options, then exports
    either a full annotated video per selected video or a folder of sample
    clips, temporarily loading each video's cached results into the widget.
    The originally loaded video and its threshold settings are restored in a
    finally block even if an export fails partway.
    """
    if not getattr(widget, "video_path", None) or not getattr(widget, "predictions", None):
        QMessageBox.warning(widget, "Error", "No predictions available to export.")
        return

    # Save current in-widget state so switching videos below can't lose it.
    if hasattr(widget, "_persist_current_video_state"):
        widget._persist_current_video_state()

    opts = ask_overlay_export_options(widget)
    if not opts:
        return

    selected_videos = list(opts.get("selected_videos", []))
    if not selected_videos:
        return

    original_video_path = getattr(widget, "video_path", None)
    # Snapshot current thresholds: restored at the end, and reused as a
    # fallback for videos whose cache entry carries no settings of its own.
    original_threshold_settings = (
        widget._current_threshold_settings()
        if hasattr(widget, "_current_threshold_settings")
        else None
    )
    shared_threshold_settings = dict(original_threshold_settings or {})
    multi_video = len(selected_videos) > 1

    # Choose the destination: a folder for sample clips or multi-video
    # exports, a single file path otherwise. Cancelled dialogs abort.
    if opts.get("mode") == "samples":
        default_dir = os.path.dirname(original_video_path) if original_video_path else os.getcwd()
        folder = QFileDialog.getExistingDirectory(
            widget, "Select folder for sample clips", default_dir,
        )
        if not folder:
            return
    elif multi_video:
        default_dir = os.path.dirname(original_video_path) if original_video_path else os.getcwd()
        folder = QFileDialog.getExistingDirectory(
            widget, "Select folder for overlay videos", default_dir,
        )
        if not folder:
            return
    else:
        video_path = selected_videos[0]
        output_path, _ = QFileDialog.getSaveFileName(
            widget,
            "Save Video with Overlays",
            os.path.splitext(video_path)[0] + "_annotated.mp4",
            "Video Files (*.mp4);;All Files (*)",
        )
        if not output_path:
            return
        folder = None

    exported = []
    encoders_used = []
    try:
        for video_path in selected_videos:
            entry = getattr(widget, "results_cache", {}).get(video_path, {})
            # Fall back to the shared thresholds when the cache entry has none.
            threshold_override = None
            if not isinstance(entry.get("threshold_settings"), dict) and shared_threshold_settings:
                threshold_override = shared_threshold_settings
            if hasattr(widget, "_load_video_from_cache"):
                ok = widget._load_video_from_cache(
                    video_path,
                    refresh_display=False,
                    persist_current=False,
                    threshold_settings_override=threshold_override,
                    persist_loaded_thresholds=False,
                )
                if not ok:
                    # Skip videos whose cached state cannot be loaded.
                    continue

            if opts.get("mode") == "samples":
                # One sub-folder of sample clips per video.
                out_dir = os.path.join(
                    folder,
                    f"{os.path.splitext(os.path.basename(video_path))[0]}_overlay_samples",
                )
                os.makedirs(out_dir, exist_ok=True)
                sample_ranges, fps = _compute_sample_ranges_for_video(
                    video_path,
                    int(opts.get("sample_duration_seconds", 60)),
                    int(opts.get("sample_num_clips", 5)),
                )
                video_exported = 0
                for si, (sf, ef) in enumerate(sample_ranges):
                    start_s = sf / fps
                    end_s = ef / fps
                    sample_path = os.path.join(
                        out_dir, f"sample_{si+1:02d}_{start_s:.0f}s-{end_s:.0f}s.mp4"
                    )
                    # Each sample is exported as a frame-range export.
                    sample_opts = dict(opts)
                    sample_opts["mode"] = "range"
                    sample_opts["start_frame"] = sf
                    sample_opts["end_frame"] = ef
                    encoder_label = run_export_single_overlay(
                        widget,
                        sample_path,
                        sample_opts,
                        sample_label=f"{os.path.basename(video_path)} sample {si+1}/{len(sample_ranges)}",
                    )
                    if not encoder_label:
                        # Falsy label signals user cancel / failure: stop sampling.
                        break
                    exported.append(sample_path)
                    encoders_used.append(str(encoder_label))
                    video_exported += 1
                widget.log_text.append(
                    f"Exported {video_exported} sample overlay clip(s) for {os.path.basename(video_path)} to {out_dir}"
                )
            else:
                if multi_video:
                    # Derive a per-video output name inside the chosen folder.
                    output_path = os.path.join(
                        folder,
                        os.path.splitext(os.path.basename(video_path))[0] + "_annotated.mp4",
                    )
                export_opts = dict(opts)
                if multi_video:
                    # A combined popup is shown at the end instead of one per video.
                    export_opts["show_success_popup"] = False
                encoder_label = run_export_single_overlay(widget, output_path, export_opts)
                if not encoder_label:
                    break
                exported.append(output_path)
                encoders_used.append(str(encoder_label))
    finally:
        # Restore the originally loaded video, its thresholds, and the
        # filter combo selection regardless of how the export loop ended.
        if (
            original_video_path
            and hasattr(widget, "_load_video_from_cache")
            and original_video_path in getattr(widget, "results_cache", {})
        ):
            widget._load_video_from_cache(
                original_video_path,
                refresh_display=True,
                persist_current=False,
                threshold_settings_override=original_threshold_settings,
                persist_loaded_thresholds=False,
            )
            if getattr(widget, "filter_video_combo", None) is not None:
                idx = widget.filter_video_combo.findData(original_video_path)
                if idx >= 0:
                    # Re-select silently so no reload handlers fire.
                    widget.filter_video_combo.blockSignals(True)
                    widget.filter_video_combo.setCurrentIndex(idx)
                    widget.filter_video_combo.blockSignals(False)

    if not exported:
        return

    if opts.get("mode") == "samples":
        encoder_summary = ", ".join(sorted(set(encoders_used))) if encoders_used else "unknown"
        QMessageBox.information(
            widget,
            "Success",
            f"Exported {len(exported)} sample overlay clip(s) to:\n{folder}\n\n"
            f"Encoder: {encoder_summary}",
        )
    elif multi_video:
        encoder_summary = ", ".join(sorted(set(encoders_used))) if encoders_used else "unknown"
        QMessageBox.information(
            widget,
            "Success",
            f"Exported overlay videos for {len(exported)} video(s) to:\n{folder}\n\n"
            f"Encoder: {encoder_summary}",
        )
|
|
524
|
+
|
|
525
|
+
|
|
526
|
+
def _compute_sample_ranges_for_video(video_path: str, duration_seconds: int, num_samples: int):
    """Compute frame ranges for evenly spread sample clips of a video.

    Probes the video for fps (defaulting to 30 when unreadable) and frame
    count, then spaces the requested number of clips evenly across the
    timeline. Returns ``(ranges, fps)`` where ``ranges`` is a list of
    ``(start_frame, end_frame)`` tuples.
    """
    probe = cv2.VideoCapture(video_path)
    fps = probe.get(cv2.CAP_PROP_FPS)
    total_frames = int(probe.get(cv2.CAP_PROP_FRAME_COUNT))
    probe.release()
    if fps <= 0:
        fps = 30.0

    # Clip length in frames, clamped to at least one frame and at most the
    # whole video.
    clip_frames = max(1, int(round(max(1, duration_seconds) * fps)))
    clip_frames = max(1, min(clip_frames, max(1, total_frames)))

    # Latest frame a clip may start on and still fit inside the video.
    last_start = max(0, total_frames - clip_frames)
    count = max(1, int(num_samples))
    if count == 1:
        # Single sample: center it.
        start_frames = [last_start // 2]
    else:
        # Space starts evenly from frame 0 through last_start inclusive.
        start_frames = [int(round(i * last_start / (count - 1))) for i in range(count)]
    return [(s, min(s + clip_frames, total_frames)) for s in start_frames], fps
|
|
543
|
+
|
|
544
|
+
|
|
545
|
+
def run_export_single_overlay(widget, output_path, opts, sample_label=None):
    """Export a single overlay video file for the given options/range.

    Renders each frame of ``widget.video_path`` (optionally a sub-range given
    by ``opts['start_frame']``/``opts['end_frame']``) with behavior overlays:
    a localization trail + label near the animal when bounding boxes exist,
    otherwise an info panel, plus a per-behavior activity timeline strip
    composited above the video.

    Args:
        widget: The owning UI widget; supplies video path, predictions,
            aggregated segments, palette, and log/preview controls.
        output_path: Destination file for the rendered video.
        opts: Dict of export options ('use_precise', 'selected_behaviors',
            'include_ignore', optional 'start_frame'/'end_frame',
            optional 'show_success_popup').
        sample_label: Optional label prefixed to progress titles when this
            export is one of several sample clips.

    Returns:
        The encoder label string on success, or ``False`` when the user
        cancels or an error occurs (the partial output file is removed).
    """
    cap = None
    progress = None
    writer_kind = None
    out = None
    try:
        # Lazily build the aggregated timeline if precise mode needs it.
        if opts["use_precise"] and not getattr(widget, "aggregated_segments", None):
            widget._compute_aggregated_timeline()
        # OVR (one-vs-rest) precise mode additionally needs per-frame scores.
        if (
            opts["use_precise"]
            and getattr(widget, "_use_ovr", False)
            and not isinstance(getattr(widget, "_aggregated_frame_scores_norm", None), np.ndarray)
        ):
            widget._compute_aggregated_timeline()

        # Precise mode is only effective if segments actually exist.
        use_precise = bool(opts["use_precise"] and getattr(widget, "aggregated_segments", None))
        selected_behaviors = set(opts["selected_behaviors"])
        include_ignore = bool(opts["include_ignore"])
        palette = widget._get_timeline_palette()

        # --- open input video and read its geometry -------------------------
        video_path = widget.video_path
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise RuntimeError(f"Could not open input video: {video_path}")
        fps = cap.get(cv2.CAP_PROP_FPS)
        if fps <= 0:
            fps = 30.0
        raw_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        raw_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_interval = widget._get_saved_frame_interval(video_path, fps)

        # Upscale very small videos so overlay text stays legible.
        MIN_EXPORT_WIDTH = 480
        if raw_width < MIN_EXPORT_WIDTH:
            scale = MIN_EXPORT_WIDTH / raw_width
            width = MIN_EXPORT_WIDTH
            height = int(round(raw_height * scale))
        else:
            width = raw_width
            height = raw_height
        clip_length = widget.clip_length_spin.value()

        # --- resolve the export frame range ---------------------------------
        export_start_frame = int(opts.get("start_frame", 0))
        export_end_frame = int(opts.get("end_frame", total_frames))
        export_end_frame = min(export_end_frame, total_frames)
        export_frame_count = max(1, export_end_frame - export_start_frame)
        is_partial = export_start_frame > 0 or export_end_frame < total_frames

        # Output is the video plus a timeline panel strip on top.
        top_panel_h = 110
        out_h = height + top_panel_h
        writer_kind, out = _open_overlay_writer(output_path, fps, width, out_h)

        if export_start_frame > 0:
            cap.set(cv2.CAP_PROP_POS_FRAMES, export_start_frame)

        range_label = ""
        if is_partial:
            start_s = export_start_frame / fps
            end_s = export_end_frame / fps
            range_label = f" ({start_s:.1f}s – {end_s:.1f}s)"

        # --- modal progress dialog ------------------------------------------
        progress_title = f"{sample_label} — " if sample_label else ""
        progress = QProgressDialog(
            f"{progress_title}Exporting{range_label}...",
            "Cancel",
            0,
            max(1, export_frame_count),
            widget,
        )
        progress.setWindowTitle(f"{progress_title}Export progress")
        progress.setWindowModality(Qt.WindowModality.WindowModal)
        progress.setMinimumDuration(0)
        progress.setValue(0)
        progress.show()
        QApplication.processEvents()

        segments = getattr(widget, "aggregated_segments", None) or []
        frame_idx = export_start_frame
        clip_idx = 0
        seg_idx = 0
        # Update the progress dialog roughly 100 times over the export.
        update_interval = max(1, export_frame_count // 100)

        # Fast-forward clip/segment cursors when starting mid-video.
        if export_start_frame > 0 and getattr(widget, "clip_starts", None):
            for ci_tmp in range(len(widget.clip_starts)):
                if int(widget.clip_starts[ci_tmp]) <= export_start_frame:
                    clip_idx = ci_tmp
                else:
                    break
        if export_start_frame > 0 and segments:
            for si_tmp in range(len(segments)):
                if int(segments[si_tmp]["end"]) < export_start_frame:
                    seg_idx = si_tmp + 1
                else:
                    break

        # Per-frame drawing state for the localization trail + label anchor.
        has_loc = bool(
            getattr(widget, "localization_bboxes", None)
            and len(widget.localization_bboxes) > 0
        )
        trail = []           # recent smoothed center points (pixel coords)
        trail_max = 50       # cap on trail length
        label_anchor = None  # where the floating behavior label is pinned
        segment_start_frame = None
        last_cx_px, last_cy_px = None, None
        smooth_alpha = 0.35  # EMA weight for bbox-center smoothing
        prev_primary_idx = None
        max_rows = 6         # at most 6 behavior rows fit in the top panel
        # Sliding activity window shown in the panel (~8 s of video).
        timeline_window_frames = max(60, int(round(fps * 8.0)))
        selected_cls_idx = [
            i for i, name in enumerate(widget.classes) if name in selected_behaviors
        ][:max_rows]
        # Per-class 0/1 activity history for the timeline rows.
        row_history = {ci: [] for ci in selected_cls_idx}

        # --- main render loop ------------------------------------------------
        while True:
            if frame_idx >= export_end_frame:
                break
            ret, frame = cap.read()
            if not ret:
                break
            # Resize if needed; sharpen after an upscale (unsharp mask).
            if frame.shape[1] != width or frame.shape[0] != height:
                is_upscale = (frame.shape[1] < width) or (frame.shape[0] < height)
                interp = cv2.INTER_LANCZOS4 if is_upscale else cv2.INTER_AREA
                frame = cv2.resize(frame, (width, height), interpolation=interp)
                if is_upscale:
                    blurred = cv2.GaussianBlur(frame, (0, 0), sigmaX=1.0)
                    frame = cv2.addWeighted(frame, 1.5, blurred, -0.5, 0)

            # Throttled progress updates + cancellation check.
            frames_done = frame_idx - export_start_frame
            if frames_done % update_interval == 0 or frame_idx == export_end_frame - 1:
                progress.setValue(frames_done)
                progress.setLabelText(
                    f"{progress_title}Exporting{range_label}... frame {frames_done + 1} / {export_frame_count}"
                )
                QApplication.processEvents()
                if progress.wasCanceled():
                    break

            # Determine which behavior(s) are active on this frame.
            pred_idx = None
            conf = None
            mode_tag = "clip"
            seg_start_this_frame = False
            active_infos = []  # list of (class_idx, score) pairs

            if (
                use_precise
                and getattr(widget, "_use_ovr", False)
                and isinstance(getattr(widget, "_aggregated_frame_scores_norm", None), np.ndarray)
            ):
                # OVR precise mode: possibly several co-occurring behaviors.
                mode_tag = "precise-ovr"
                active_infos = widget._get_precise_active_for_frame(frame_idx)
            elif use_precise:
                # Segment-based precise mode: advance the segment cursor.
                mode_tag = "precise"
                while seg_idx < len(segments) and frame_idx > int(segments[seg_idx]["end"]):
                    seg_idx += 1
                if seg_idx < len(segments):
                    seg = segments[seg_idx]
                    s0, s1 = int(seg["start"]), int(seg["end"])
                    if s0 <= frame_idx <= s1:
                        pred_idx = int(seg["class"])
                        conf = float(seg.get("confidence", 0.0))
                        if frame_idx == s0:
                            seg_start_this_frame = True
                        active_infos = [(pred_idx, conf)]
            else:
                # Clip-based mode: advance the clip cursor and use the
                # effective (possibly user-overridden) clip prediction.
                clip_starts = widget.clip_starts
                while clip_idx + 1 < len(clip_starts) and frame_idx >= int(clip_starts[clip_idx + 1]):
                    clip_idx += 1
                if 0 <= clip_idx < len(clip_starts):
                    start_f = int(clip_starts[clip_idx])
                    if clip_idx + 1 < len(clip_starts):
                        end_exclusive = int(clip_starts[clip_idx + 1])
                    else:
                        end_exclusive = start_f + (clip_length * frame_interval)
                    if start_f <= frame_idx < end_exclusive:
                        pred_idx = int(widget._effective_prediction_for_clip(clip_idx))
                        conf = (
                            float(widget.confidences[clip_idx])
                            if clip_idx < len(widget.confidences)
                            else 0.0
                        )
                        if frame_idx == start_f:
                            seg_start_this_frame = True
                        active_infos = [(pred_idx, conf)]

            # Filter active behaviors by the user's selection; negative
            # class indices mean "filtered/ignored" predictions.
            ignore_label = getattr(widget, "ignore_label_name", "Filtered")
            filtered_infos = []
            for ci, sc in active_infos:
                if ci < 0:
                    if include_ignore:
                        filtered_infos.append((ci, sc, ignore_label))
                elif 0 <= ci < len(widget.classes):
                    lbl = widget.classes[ci]
                    if lbl in selected_behaviors:
                        filtered_infos.append((ci, sc, lbl))

            # Pick the primary behavior (first entry) for label and color;
            # up to three co-occurring behaviors are shown as extra text.
            draw_label = None
            draw_color = (160, 160, 160)  # BGR gray fallback
            extra_overlay_labels = []
            if filtered_infos:
                primary_idx, conf, primary_label = filtered_infos[0]
                draw_label = primary_label
                pred_idx = primary_idx
                if primary_idx >= 0:
                    # palette is RGB; OpenCV draws in BGR.
                    pr, pg, pb = palette[primary_idx % len(palette)]
                    draw_color = (int(pb), int(pg), int(pr))
                extra_overlay_labels = [
                    f"{lbl}:{float(sc):.0%}" for _, sc, lbl in filtered_infos[1:4]
                ]
                if len(filtered_infos) > 4:
                    extra_overlay_labels.append(f"+{len(filtered_infos) - 4} more")
                # A change of primary behavior re-anchors the floating label.
                if primary_idx != prev_primary_idx:
                    seg_start_this_frame = True
                prev_primary_idx = primary_idx
            else:
                prev_primary_idx = None

            if draw_label is not None and has_loc:
                # --- localization available: draw trail + floating label ----
                clip_for_bbox = clip_idx
                if use_precise and seg_idx < len(segments):
                    # In precise mode clip_idx is not maintained, so find
                    # the clip containing this frame by scanning clip_starts.
                    start_f = int(segments[seg_idx]["start"])
                    clip_for_bbox = 0
                    for ci in range(len(widget.clip_starts)):
                        cs = int(widget.clip_starts[ci])
                        end_c = (
                            cs + (clip_length * frame_interval)
                            if ci + 1 >= len(widget.clip_starts)
                            else int(widget.clip_starts[ci + 1])
                        )
                        if cs <= frame_idx < end_c:
                            clip_for_bbox = ci
                            break
                frame_within_clip = 0
                if clip_for_bbox < len(widget.clip_starts):
                    cs = int(widget.clip_starts[clip_for_bbox])
                    frame_within_clip = (frame_idx - cs) // max(1, frame_interval)
                    frame_within_clip = max(0, min(clip_length - 1, frame_within_clip))
                bbox = widget._get_localization_bbox_for_clip_frame(clip_for_bbox, frame_within_clip)
                if bbox is not None:
                    # bbox appears to be normalized [0, 1] coordinates
                    # (they are multiplied by width/height below).
                    x1, y1, x2, y2 = bbox
                    cx = (x1 + x2) / 2.0
                    cy = (y1 + y2) / 2.0
                    cx_px = int(round(cx * width))
                    cy_px = int(round(cy * height))
                    # Exponential smoothing to reduce jitter between frames.
                    if last_cx_px is not None and last_cy_px is not None:
                        cx_px = int(smooth_alpha * cx_px + (1 - smooth_alpha) * last_cx_px)
                        cy_px = int(smooth_alpha * cy_px + (1 - smooth_alpha) * last_cy_px)
                    last_cx_px, last_cy_px = cx_px, cy_px
                    cx_px = max(0, min(width - 1, cx_px))
                    cy_px = max(0, min(height - 1, cy_px))
                    trail.append((cx_px, cy_px))
                    if len(trail) > trail_max:
                        trail.pop(0)
                    # Pin the label at segment starts so it doesn't chase
                    # the animal on every frame.
                    if seg_start_this_frame or label_anchor is None:
                        label_anchor = (cx_px, cy_px)

                    # Semi-transparent motion trail.
                    if len(trail) >= 2:
                        overlay_trail = frame.copy()
                        pts = np.array(trail, dtype=np.int32)
                        cv2.polylines(overlay_trail, [pts], False, draw_color, 4, cv2.LINE_AA)
                        frame = cv2.addWeighted(overlay_trail, 0.4, frame, 0.6, 0.0)
                    # Current-position dot with a white outline.
                    dot_overlay = frame.copy()
                    cv2.circle(dot_overlay, (cx_px, cy_px), 10, draw_color, -1, cv2.LINE_AA)
                    cv2.circle(dot_overlay, (cx_px, cy_px), 10, (255, 255, 255), 1, cv2.LINE_AA)
                    frame = cv2.addWeighted(dot_overlay, 0.75, frame, 0.25, 0.0)
                    if label_anchor is not None:
                        # Boxed behavior label just above the anchor point.
                        lx, ly = label_anchor
                        ly = max(24, ly - 14)
                        (tw, th), _ = cv2.getTextSize(
                            draw_label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2
                        )
                        has_extra = bool(extra_overlay_labels)
                        box_h = th + 8 + (16 if has_extra else 0)
                        cv2.rectangle(
                            frame,
                            (lx - 4, ly - box_h + 4),
                            (lx + tw + 4, ly + 4),
                            (30, 30, 30),
                            -1,
                        )
                        cv2.rectangle(
                            frame,
                            (lx - 4, ly - box_h + 4),
                            (lx + tw + 4, ly + 4),
                            draw_color,
                            1,
                        )
                        cv2.putText(
                            frame, draw_label, (lx, ly),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, draw_color, 2, cv2.LINE_AA,
                        )
                        if has_extra:
                            cv2.putText(
                                frame,
                                ", ".join(extra_overlay_labels),
                                (lx, ly + 14),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                0.45,
                                (220, 220, 220),
                                1,
                                cv2.LINE_AA,
                            )
            elif draw_label is not None:
                # --- no localization: draw a fixed info panel top-left ------
                segment_start_frame = None
                label_anchor = None
                trail.clear()
                last_cx_px, last_cy_px = None, None
                panel_h = 88 + (22 if extra_overlay_labels else 0)
                x0, y0 = 12, 12
                x1 = min(width - 12, 540)
                y1 = min(height - 12, y0 + panel_h)
                overlay = frame.copy()
                cv2.rectangle(overlay, (x0, y0), (x1, y1), (20, 20, 20), -1)
                frame = cv2.addWeighted(overlay, 0.55, frame, 0.45, 0.0)
                cv2.rectangle(frame, (x0, y0), (x1, y1), draw_color, 2)
                conf_txt = f"{conf:.1%}" if conf is not None else "n/a"
                cv2.putText(
                    frame, draw_label, (x0 + 12, y0 + 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, draw_color, 2, cv2.LINE_AA,
                )
                cv2.putText(
                    frame,
                    f"conf: {conf_txt} | {mode_tag} | t={frame_idx / fps:.2f}s",
                    (x0 + 12, y0 + 62),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.55,
                    (230, 230, 230),
                    1,
                    cv2.LINE_AA,
                )
                if extra_overlay_labels:
                    cv2.putText(
                        frame,
                        "co-occur: " + ", ".join(extra_overlay_labels),
                        (x0 + 12, y0 + 82),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        (210, 210, 210),
                        1,
                        cv2.LINE_AA,
                    )
            else:
                # No active behavior on this frame: reset trail/anchor state.
                trail.clear()
                label_anchor = None
                segment_start_frame = None
                last_cx_px, last_cy_px = None, None

            # --- compose output frame: timeline panel on top of the video ---
            composed = np.zeros((out_h, width, 3), dtype=np.uint8)
            panel = composed[:top_panel_h]
            panel[:] = (22, 22, 22)
            mode_txt = "precise" if use_precise else "clip"
            cv2.putText(
                panel,
                f"Behavior sequencing ({mode_txt}) t={frame_idx / max(1e-6, fps):.2f}s",
                (10, 18),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.55,
                (220, 220, 220),
                1,
                cv2.LINE_AA,
            )
            # Right-aligned tool watermark.
            tool_title = "SingleBehavior Lab"
            (title_w, _), _ = cv2.getTextSize(tool_title, cv2.FONT_HERSHEY_SIMPLEX, 0.55, 1)
            title_x = max(10, width - title_w - 12)
            cv2.putText(
                panel, tool_title, (title_x, 18),
                cv2.FONT_HERSHEY_SIMPLEX, 0.55, (235, 235, 235), 1, cv2.LINE_AA,
            )
            if selected_cls_idx:
                # One scrolling activity row per selected behavior class.
                row_top = 28
                row_h = max(12, (top_panel_h - row_top - 8) // max_rows)
                x0, x1 = 130, width - 12
                active_set = {ci for ci, _sc, _lbl in filtered_infos if ci >= 0}
                # First pass: append this frame's 0/1 state to each history,
                # trimming each to the sliding window length.
                for ci in selected_cls_idx:
                    is_active = 1 if ci in active_set else 0
                    hist = row_history[ci]
                    hist.append(is_active)
                    if len(hist) > timeline_window_frames:
                        del hist[0 : len(hist) - timeline_window_frames]
                # Second pass: draw each row (label, frame, active runs).
                for ri, ci in enumerate(selected_cls_idx):
                    y = row_top + ri * row_h
                    name = widget.classes[ci]
                    pr, pg, pb = palette[ci % len(palette)]
                    bgr = (int(pb), int(pg), int(pr))
                    cv2.putText(
                        panel, name, (10, y + row_h - 3),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, bgr, 1, cv2.LINE_AA,
                    )
                    cv2.rectangle(
                        panel, (x0, y + 1), (x1, y + row_h - 2), (55, 55, 55), 1
                    )
                    hist = row_history[ci]
                    if len(hist) >= 1:
                        # Map history indices to x positions; xs has one extra
                        # entry so a run [a, b] spans xs[a] .. xs[b + 1].
                        xs = np.linspace(
                            x0 + 1, x1 - 1, num=len(hist) + 1, dtype=np.int32
                        )
                        # Run-length encode the 0/1 history and fill each
                        # active run as a solid colored bar.
                        run_start = None
                        for hi, hv in enumerate(hist):
                            on = bool(hv)
                            if on and run_start is None:
                                run_start = hi
                            if run_start is not None and (
                                (not on) or hi == len(hist) - 1
                            ):
                                run_end = (
                                    hi if (on and hi == len(hist) - 1) else (hi - 1)
                                )
                                if run_end >= run_start:
                                    xa = int(xs[run_start])
                                    xb = int(max(xa + 1, xs[run_end + 1]))
                                    cv2.rectangle(
                                        panel,
                                        (xa, y + 2),
                                        (xb, y + row_h - 3),
                                        bgr,
                                        -1,
                                    )
                                run_start = None
                    # "Now" cursor at the right edge of the row.
                    cv2.line(
                        panel,
                        (x1 - 1, y + 1),
                        (x1 - 1, y + row_h - 2),
                        (235, 235, 235),
                        1,
                        cv2.LINE_AA,
                    )

            composed[top_panel_h : top_panel_h + height, :, :] = frame
            _write_overlay_frame(writer_kind, out, composed)
            frame_idx += 1

        # --- finalize ---------------------------------------------------------
        cap.release()
        # abort=True makes the writer discard a cancelled partial encode.
        encoder_label = _close_overlay_writer(writer_kind, out, abort=progress.wasCanceled())
        user_canceled = progress.wasCanceled()
        progress.close()

        if user_canceled:
            # Best-effort cleanup of the partial output file.
            try:
                if os.path.exists(output_path):
                    os.remove(output_path)
            except Exception:
                pass
            return False

        widget.exported_video_path = output_path
        widget.preview_btn.setEnabled(True)

        mode_txt = "precise boundary" if use_precise else "clip-based"
        range_msg = f" | range: {range_label.strip()}" if is_partial else ""
        widget.log_text.append(
            f"Overlay export complete ({mode_txt}{range_msg}); "
            f"behaviors={sorted(selected_behaviors)}; include_ignore={include_ignore}; "
            f"encoder={encoder_label}"
        )
        # Sample-clip exports default to no popup; a batch summary is shown
        # by the caller instead.
        show_success_popup = bool(opts.get("show_success_popup", sample_label is None))
        if show_success_popup:
            QMessageBox.information(
                widget,
                "Success",
                f"Video exported to:\n{output_path}\n\nMode: {mode_txt}{range_msg}\n"
                f"Encoder: {encoder_label}\n"
                "Click 'Preview Video with Overlays' to watch it.",
            )
        return encoder_label

    except Exception as e:
        # Best-effort cleanup on any failure: release the capture, abort the
        # writer, close the progress dialog, delete the partial output.
        try:
            if cap is not None:
                cap.release()
        except Exception:
            pass
        try:
            if writer_kind is not None and out is not None:
                _close_overlay_writer(writer_kind, out, abort=True)
        except Exception:
            pass
        try:
            if progress is not None:
                progress.close()
        except Exception:
            pass
        try:
            if os.path.exists(output_path):
                os.remove(output_path)
        except Exception:
            pass
        QMessageBox.critical(widget, "Error", f"Failed to export video: {str(e)}")
        return False
|
|
1035
|
+
class VideoPreviewDialog(QDialog):
    """Modal player for an exported overlay video.

    Frames are decoded from disk on demand rather than preloaded, so long
    exports can be previewed without exhausting memory.
    """

    def __init__(self, video_path: str, parent=None):
        super().__init__(parent)
        self.video_path = video_path
        self.setWindowTitle("Video preview with overlays")
        self.setMinimumSize(900, 700)

        # Playback state; the capture itself is opened lazily.
        self._cap = None
        self._total_frames = 0
        self._fps = 30.0
        self._playing = False
        self._frame_idx = 0
        self._slider_dragging = False

        # Drives playback at the video's frame rate once started.
        self._timer = QTimer(self)
        self._timer.timeout.connect(self._update_frame)

        root = QVBoxLayout(self)

        # Frame display area.
        self.video_label = QLabel("Loading video...")
        self.video_label.setMinimumSize(800, 600)
        self.video_label.setAlignment(Qt.AlignmentFlag.AlignCenter)
        self.video_label.setStyleSheet("background-color: black; color: white;")
        root.addWidget(self.video_label)

        # Scrub bar with a "current / total" frame counter.
        scrub_row = QHBoxLayout()
        self._frame_label = QLabel("0 / 0")
        self._slider = QSlider(Qt.Orientation.Horizontal)
        self._slider.setMinimum(0)
        self._slider.setMaximum(0)
        self._slider.valueChanged.connect(self._on_slider_changed)
        scrub_row.addWidget(self._slider)
        scrub_row.addWidget(self._frame_label)
        root.addLayout(scrub_row)

        # Transport controls.
        button_row = QHBoxLayout()
        self.play_pause_btn = QPushButton("Play")
        self.play_pause_btn.clicked.connect(self._toggle_play)
        button_row.addWidget(self.play_pause_btn)

        btn_restart = QPushButton("Restart")
        btn_restart.clicked.connect(self._restart)
        button_row.addWidget(btn_restart)

        button_row.addStretch()
        button_row.addWidget(QLabel(f"Video: {os.path.basename(video_path)}"))

        btn_close = QPushButton("Close")
        btn_close.clicked.connect(self._close_and_stop)
        button_row.addWidget(btn_close)

        root.addLayout(button_row)

        # Defer the first decode until after the dialog is shown.
        QTimer.singleShot(100, self._load_and_show_first)

    def _open_video(self):
        """Lazily open the capture; return True when it is available."""
        if self._cap is not None:
            return True
        try:
            self._cap = cv2.VideoCapture(self.video_path)
            if not self._cap.isOpened():
                self.video_label.setText("Error: Could not open video")
                return False
            self._total_frames = int(self._cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = self._cap.get(cv2.CAP_PROP_FPS)
            # Some containers report no frame rate; default to 30.
            self._fps = fps if fps > 0 else 30.0
            self._timer.setInterval(int(1000 / self._fps))
            self._slider.setMaximum(max(0, self._total_frames - 1))
            return True
        except Exception as e:
            self.video_label.setText(f"Error: {e}")
            return False

    def _read_frame(self, idx):
        """Seek to frame *idx* and return it as an RGB array, or None."""
        if self._cap is None:
            return None
        if not (0 <= idx < self._total_frames):
            return None
        self._cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ok, bgr = self._cap.read()
        if not ok:
            return None
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        # QImage needs a contiguous buffer.
        return rgb if rgb.flags["C_CONTIGUOUS"] else np.ascontiguousarray(rgb)

    def _show_frame(self, frame):
        """Render *frame* (RGB ndarray) into the label, keeping aspect."""
        if frame is None:
            return
        h, w, channels = frame.shape
        image = QImage(frame.data, w, h, channels * w, QImage.Format.Format_RGB888)
        scaled = QPixmap.fromImage(image).scaled(
            self.video_label.size(),
            Qt.AspectRatioMode.KeepAspectRatio,
            Qt.TransformationMode.SmoothTransformation,
        )
        self.video_label.setPixmap(scaled)
        self._frame_label.setText(f"{self._frame_idx + 1} / {self._total_frames}")

    def _update_frame(self):
        """Decode and display the current frame, then advance (looping)."""
        if self._cap is None or self._total_frames == 0:
            return
        frame = self._read_frame(self._frame_idx)
        if frame is not None:
            self._show_frame(frame)
            # Keep the slider in step without re-triggering a seek.
            self._slider.blockSignals(True)
            self._slider.setValue(self._frame_idx)
            self._slider.blockSignals(False)
        self._frame_idx = (self._frame_idx + 1) % self._total_frames

    def _on_slider_changed(self, value):
        """Jump playback to the frame the user scrubbed to."""
        if self._cap is None:
            return
        self._frame_idx = value
        self._show_frame(self._read_frame(value))

    def _toggle_play(self):
        """Start or pause playback, opening the video on first use."""
        if self._cap is None:
            if not self._open_video():
                return
            self._update_frame()
        if self._playing:
            self._timer.stop()
            self.play_pause_btn.setText("Play")
            self._playing = False
        else:
            self._timer.start()
            self.play_pause_btn.setText("Pause")
            self._playing = True

    def _restart(self):
        """Rewind to frame zero; pause if currently playing."""
        self._frame_idx = 0
        if self._cap is not None:
            self._slider.setValue(0)
            self._update_frame()
        if self._playing:
            self._toggle_play()

    def _load_and_show_first(self):
        """Deferred initial load: open the file and paint frame zero."""
        if self._open_video():
            self._update_frame()

    def _close_and_stop(self):
        """Close-button handler: halt playback before closing."""
        if self._timer.isActive():
            self._timer.stop()
        self.close()

    def closeEvent(self, event):
        """Release the capture when the dialog closes for any reason."""
        if self._timer.isActive():
            self._timer.stop()
        if self._cap is not None:
            self._cap.release()
            self._cap = None
        super().closeEvent(event)
|
+
def run_preview_video_with_overlay(widget):
    """Open video player to preview the exported video with overlays.

    Shows the built-in :class:`VideoPreviewDialog`; if that fails, falls
    back to launching the operating system's default video player.
    """
    video_path = getattr(widget, "exported_video_path", None)
    if not video_path or not os.path.exists(video_path):
        QMessageBox.warning(
            widget,
            "Error",
            "No exported video found. Please export a video with overlays first.",
        )
        return

    try:
        VideoPreviewDialog(video_path, widget).exec()
        return
    except Exception as e:
        # Built-in player failed; warn and try the OS default instead.
        QMessageBox.warning(
            widget,
            "Video Player Error",
            f"Could not open built-in video player:\n{str(e)}\n\n"
            "Opening with system default player instead.",
        )

    import platform
    import subprocess

    try:
        system = platform.system()
        if system == "Darwin":
            subprocess.call(("open", video_path))
        elif system == "Windows":
            os.startfile(video_path)
        else:
            subprocess.call(("xdg-open", video_path))
    except Exception as e2:
        QMessageBox.critical(
            widget,
            "Error",
            f"Could not open video player:\n{str(e2)}\n\nVideo saved at:\n{video_path}",
        )