singlebehaviorlab 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. sam2/__init__.py +11 -0
  2. sam2/automatic_mask_generator.py +454 -0
  3. sam2/benchmark.py +92 -0
  4. sam2/build_sam.py +174 -0
  5. sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
  6. sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
  7. sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
  8. sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
  9. sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
  10. sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
  11. sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
  12. sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
  13. sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
  14. sam2/modeling/__init__.py +5 -0
  15. sam2/modeling/backbones/__init__.py +5 -0
  16. sam2/modeling/backbones/hieradet.py +317 -0
  17. sam2/modeling/backbones/image_encoder.py +134 -0
  18. sam2/modeling/backbones/utils.py +93 -0
  19. sam2/modeling/memory_attention.py +169 -0
  20. sam2/modeling/memory_encoder.py +181 -0
  21. sam2/modeling/position_encoding.py +239 -0
  22. sam2/modeling/sam/__init__.py +5 -0
  23. sam2/modeling/sam/mask_decoder.py +295 -0
  24. sam2/modeling/sam/prompt_encoder.py +202 -0
  25. sam2/modeling/sam/transformer.py +311 -0
  26. sam2/modeling/sam2_base.py +913 -0
  27. sam2/modeling/sam2_utils.py +323 -0
  28. sam2/sam2_hiera_b+.yaml +113 -0
  29. sam2/sam2_hiera_l.yaml +117 -0
  30. sam2/sam2_hiera_s.yaml +116 -0
  31. sam2/sam2_hiera_t.yaml +118 -0
  32. sam2/sam2_image_predictor.py +466 -0
  33. sam2/sam2_video_predictor.py +1388 -0
  34. sam2/sam2_video_predictor_legacy.py +1172 -0
  35. sam2/utils/__init__.py +5 -0
  36. sam2/utils/amg.py +348 -0
  37. sam2/utils/misc.py +349 -0
  38. sam2/utils/transforms.py +118 -0
  39. singlebehaviorlab/__init__.py +4 -0
  40. singlebehaviorlab/__main__.py +130 -0
  41. singlebehaviorlab/_paths.py +100 -0
  42. singlebehaviorlab/backend/__init__.py +2 -0
  43. singlebehaviorlab/backend/augmentations.py +320 -0
  44. singlebehaviorlab/backend/data_store.py +420 -0
  45. singlebehaviorlab/backend/model.py +1290 -0
  46. singlebehaviorlab/backend/train.py +4667 -0
  47. singlebehaviorlab/backend/uncertainty.py +578 -0
  48. singlebehaviorlab/backend/video_processor.py +688 -0
  49. singlebehaviorlab/backend/video_utils.py +139 -0
  50. singlebehaviorlab/data/config/config.yaml +85 -0
  51. singlebehaviorlab/data/training_profiles.json +334 -0
  52. singlebehaviorlab/gui/__init__.py +4 -0
  53. singlebehaviorlab/gui/analysis_widget.py +2291 -0
  54. singlebehaviorlab/gui/attention_export.py +311 -0
  55. singlebehaviorlab/gui/clip_extraction_widget.py +481 -0
  56. singlebehaviorlab/gui/clustering_widget.py +3187 -0
  57. singlebehaviorlab/gui/inference_popups.py +1138 -0
  58. singlebehaviorlab/gui/inference_widget.py +4550 -0
  59. singlebehaviorlab/gui/inference_worker.py +651 -0
  60. singlebehaviorlab/gui/labeling_widget.py +2324 -0
  61. singlebehaviorlab/gui/main_window.py +754 -0
  62. singlebehaviorlab/gui/metadata_management_widget.py +1119 -0
  63. singlebehaviorlab/gui/motion_tracking.py +764 -0
  64. singlebehaviorlab/gui/overlay_export.py +1234 -0
  65. singlebehaviorlab/gui/plot_integration.py +729 -0
  66. singlebehaviorlab/gui/qt_helpers.py +29 -0
  67. singlebehaviorlab/gui/registration_widget.py +1485 -0
  68. singlebehaviorlab/gui/review_widget.py +1330 -0
  69. singlebehaviorlab/gui/segmentation_tracking_widget.py +2752 -0
  70. singlebehaviorlab/gui/tab_tutorial_dialog.py +312 -0
  71. singlebehaviorlab/gui/timeline_themes.py +131 -0
  72. singlebehaviorlab/gui/training_profiles.py +418 -0
  73. singlebehaviorlab/gui/training_widget.py +3719 -0
  74. singlebehaviorlab/gui/video_utils.py +233 -0
  75. singlebehaviorlab/licenses/SAM2-LICENSE +201 -0
  76. singlebehaviorlab/licenses/VideoPrism-LICENSE +202 -0
  77. singlebehaviorlab-2.0.0.dist-info/METADATA +447 -0
  78. singlebehaviorlab-2.0.0.dist-info/RECORD +88 -0
  79. singlebehaviorlab-2.0.0.dist-info/WHEEL +5 -0
  80. singlebehaviorlab-2.0.0.dist-info/entry_points.txt +2 -0
  81. singlebehaviorlab-2.0.0.dist-info/licenses/LICENSE +21 -0
  82. singlebehaviorlab-2.0.0.dist-info/top_level.txt +3 -0
  83. videoprism/__init__.py +0 -0
  84. videoprism/encoders.py +910 -0
  85. videoprism/layers.py +1136 -0
  86. videoprism/models.py +407 -0
  87. videoprism/tokenizers.py +167 -0
  88. videoprism/utils.py +168 -0
@@ -0,0 +1,1330 @@
1
+ """
2
+ Active-learning review widget.
3
+
4
+ After inference, ranks clips by model uncertainty per class and lets the user
5
+ accept (correct label) or reassign clips. Accepted clips are extracted from
6
+ the original video and added to the experiment's annotations.json so they can
7
+ be included in the next training run.
8
+ """
9
+
10
+ import os
11
+ import json
12
+ import re
13
+ import cv2
14
+ import numpy as np
15
+
16
+ from PyQt6.QtWidgets import (
17
+ QWidget, QVBoxLayout, QHBoxLayout, QSplitter, QGroupBox, QLabel,
18
+ QPushButton, QListWidget, QListWidgetItem, QComboBox, QFileDialog,
19
+ QMessageBox, QProgressBar, QScrollArea, QSizePolicy, QFrame,
20
+ QSpinBox,
21
+ )
22
+ from PyQt6.QtCore import Qt, QTimer, pyqtSignal
23
+ from PyQt6.QtGui import QImage, QPixmap, QColor, QFont
24
+
25
+ from singlebehaviorlab.backend.uncertainty import (
26
+ save_uncertainty_report,
27
+ rank_clips_for_review,
28
+ rank_clips_per_video_for_review,
29
+ rank_confident_clips_for_review,
30
+ rank_confident_clips_per_video_for_review,
31
+ rank_transition_clips_for_review,
32
+ rank_transition_clips_per_video_for_review,
33
+ )
34
+ from singlebehaviorlab.backend.video_utils import save_clip
35
+ from singlebehaviorlab.backend.data_store import AnnotationManager
36
+
37
+
38
+ # Helpers.
39
+
40
def _frames_from_video(video_path: str, start_frame: int, n_frames: int,
                       frame_interval: int = 1) -> list:
    """Extract up to ``n_frames`` frames starting at ``start_frame`` (original-fps index).

    Frames are read sequentially from the video and subsampled so that
    consecutive returned frames are ``frame_interval`` apart.

    Args:
        video_path: Path to the source video file.
        start_frame: Index of the first frame to read, in the video's own fps.
        n_frames: Maximum number of frames to return.
        frame_interval: Stride between returned frames; values < 1 are treated as 1.

    Returns:
        A list of BGR frames (possibly shorter than ``n_frames`` if the video
        ends early); empty when the video cannot be opened or ``n_frames <= 0``.
    """
    if n_frames <= 0:
        return []
    # Guard against a nonsensical stride (the original computation would
    # otherwise divide by zero below).
    frame_interval = max(1, frame_interval)
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return []
    frames = []
    try:
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        # Only (n_frames - 1) * frame_interval + 1 reads are needed; reading
        # n_frames * frame_interval (as before) decoded a wasted tail chunk.
        for i in range((n_frames - 1) * frame_interval + 1):
            ret, frame = cap.read()
            if not ret:
                break
            if i % frame_interval == 0:
                frames.append(frame)
    finally:
        cap.release()  # always release the capture, even if decoding raises
    return frames
56
+
57
+
58
+ def _score_bar_html(scores: dict, predicted: str, width: int = 260) -> str:
59
+ """Build a minimal HTML table with per-class score bars."""
60
+ colours = {
61
+ predicted: "#2ecc71",
62
+ }
63
+ default_colour = "#5dade2"
64
+ rows = ""
65
+ for cls, score in sorted(scores.items(), key=lambda x: x[1], reverse=True):
66
+ col = colours.get(cls, default_colour)
67
+ bar_w = max(2, int(score * width))
68
+ rows += (
69
+ f"<tr>"
70
+ f"<td style='padding-right:6px;width:110px;'>{cls}</td>"
71
+ f"<td><div style='background:{col};width:{bar_w}px;height:10px;"
72
+ f"border-radius:3px;'></div></td>"
73
+ f"<td style='padding-left:6px;'>{score:.1%}</td>"
74
+ f"</tr>"
75
+ )
76
+ return f"<table cellspacing='2'>{rows}</table>"
77
+
78
+
79
+ # Mini video player.
80
+
81
class _ClipPlayer(QWidget):
    """Loops through a list of BGR frames displayed as a QLabel."""

    # Fixed pixel size of the preview area (width, height).
    DISPLAY_SIZE = (560, 400)

    def __init__(self, parent=None):
        super().__init__(parent)
        self._frames: list = []   # BGR frames currently loaded
        self._idx = 0             # index of the frame on screen
        self._timer = QTimer(self)
        self._timer.timeout.connect(self._next_frame)

        # Video surface.
        self._label = QLabel(self)
        self._label.setFixedSize(*self.DISPLAY_SIZE)
        self._label.setAlignment(Qt.AlignmentFlag.AlignCenter)
        self._label.setStyleSheet("background:#111;border-radius:4px;")

        # Caption under the video showing frame count and playback rate.
        self._fps_label = QLabel("–", self)
        self._fps_label.setAlignment(Qt.AlignmentFlag.AlignCenter)

        column = QVBoxLayout(self)
        column.setContentsMargins(0, 0, 0, 0)
        column.addWidget(self._label)
        column.addWidget(self._fps_label)

    def load(self, frames: list, playback_fps: float = 8.0):
        """Start looping *frames*; with an empty list show a placeholder."""
        self._timer.stop()
        self._frames = frames
        self._idx = 0
        self._fps_label.setText(f"{len(frames)} frames @ {playback_fps:.0f} fps")
        if not frames:
            self._label.setText("No preview")
            return
        self._show_frame(0)
        # Clamp the timer to ~30 fps max regardless of the requested rate.
        self._timer.start(max(33, int(1000 / playback_fps)))

    def stop(self):
        """Halt playback (the last frame stays on screen)."""
        self._timer.stop()

    def _next_frame(self):
        """Timer slot: advance to the next frame, wrapping at the end."""
        if not self._frames:
            return
        self._idx = (self._idx + 1) % len(self._frames)
        self._show_frame(self._idx)

    def _show_frame(self, idx: int):
        """Convert frame *idx* from BGR to RGB and paint it, letterboxed."""
        bgr = self._frames[idx]
        height, width = bgr.shape[:2]
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        image = QImage(rgb.data, width, height, width * 3,
                       QImage.Format.Format_RGB888)
        scaled = QPixmap.fromImage(image).scaled(
            *self.DISPLAY_SIZE,
            Qt.AspectRatioMode.KeepAspectRatio,
            Qt.TransformationMode.SmoothTransformation,
        )
        self._label.setPixmap(scaled)
138
+
139
+
140
+ # Main widget.
141
+
142
+ class ReviewWidget(QWidget):
143
+ """Tab for reviewing uncertain inference clips and adding them to the dataset."""
144
+
145
+ # Emitted when the user saves accepted clips so other tabs can refresh.
146
+ annotations_updated = pyqtSignal()
147
+
148
def __init__(self, config: dict, parent=None):
    """Build the review tab.

    Args:
        config: Application configuration dict, stored as-is (see update_config).
        parent: Optional Qt parent widget.
    """
    super().__init__(parent)
    self.config = config

    # State
    self._report: dict = {}  # loaded uncertainty report
    self._pending: list = []  # [(entry, assigned_label), ...]
    self._current_entry: dict = {}  # entry currently selected for review
    self._current_frames: list = []  # frames loaded for the current entry
    self._review_mode = "uncertain"  # "uncertain" | "confident" | "transition"
    self._review_scope = "overall"  # "overall" | "per_video"
    self._accepted_keys = set()  # _entry_key() of clips already accepted
    self._hard_negative_keys = set()  # _entry_class_key() of rejected clips
    self._pending_hard_negatives: list = []  # [(entry, target_class), ...]
    self._pending_transitions: list = []  # [(entry, primary_label, frame_labels), ...]
    self._transition_frame_combos = []  # per-frame combos in the transition editor

    self._setup_ui()
166
+
167
+ # Public API.
168
+
169
def update_config(self, config: dict):
    """Store the latest application configuration dict."""
    self.config = config
171
+
172
def _arrays_sidecar_path(self, json_path: str) -> str:
    """Return the companion ``.arrays.npz`` path for a results JSON file.

    ``foo.json`` maps to ``foo.arrays.npz``; any other extension simply has
    ``.arrays.npz`` appended.
    """
    root, extension = os.path.splitext(json_path)
    if extension.lower() != ".json":
        return json_path + ".arrays.npz"
    return root + ".arrays.npz"
177
+
178
def _restore_external_arrays(self, file_path: str, data: dict):
    """Re-attach arrays that were stored in a sidecar ``.npz`` next to ``file_path``.

    Entries in ``data["results"]`` may carry an ``_external_arrays`` mapping of
    field name -> key in the sidecar archive. Each referenced array is loaded
    back into its entry: ``aggregated_frame_probs`` is kept as a float32
    ndarray, all other fields are converted to plain Python lists. The whole
    operation is best-effort — a missing sidecar, missing key, or load error
    simply leaves the remaining entries untouched.
    """
    results = data.get("results")
    if not isinstance(results, dict):
        return
    # Prefer the sidecar filename recorded in the report itself; fall back to
    # the conventional "<file>.arrays.npz" next to the JSON.
    store_info = data.get("external_array_store", {})
    sidecar_file = store_info.get("file") if isinstance(store_info, dict) else None
    sidecar_path = (
        os.path.join(os.path.dirname(file_path), sidecar_file)
        if sidecar_file else self._arrays_sidecar_path(file_path)
    )
    if not os.path.exists(sidecar_path):
        return
    try:
        # allow_pickle=False: the archive is expected to hold plain arrays only.
        with np.load(sidecar_path, allow_pickle=False) as npz_file:
            for entry in results.values():
                if not isinstance(entry, dict):
                    continue
                # pop() so the placeholder reference never lingers in the entry.
                refs = entry.pop("_external_arrays", None)
                if not isinstance(refs, dict):
                    continue
                for field_name, store_key in refs.items():
                    if store_key not in npz_file:
                        continue
                    arr = np.asarray(npz_file[store_key])
                    if field_name == "aggregated_frame_probs":
                        entry[field_name] = arr.astype(np.float32, copy=False)
                    else:
                        entry[field_name] = arr.tolist()
    except Exception:
        # Deliberate best-effort: a corrupt sidecar must not prevent the JSON
        # report itself from loading.
        pass
208
+
209
def load_from_inference(
    self,
    results: dict,
    classes: list,
    is_ovr: bool,
    clip_length: int,
    target_fps: int,
):
    """Called directly from InferenceWidget when inference finishes.

    Ranks the raw inference results into the three review pools (uncertain,
    confident, transition), both overall and per video, then resets all
    review state and repopulates the UI.
    """
    self._report = {
        "classes": classes,
        "is_ovr": is_ovr,
        "clip_length": clip_length,
        "target_fps": target_fps,
        # Uncertain candidates, ranked across all videos and per video.
        "per_class": rank_clips_for_review(
            results, classes, n_per_class=25, is_ovr=is_ovr
        ),
        "per_class_per_video": rank_clips_per_video_for_review(
            results, classes, n_per_class=25, is_ovr=is_ovr
        ),
        # High-confidence enrichment suggestions.
        "confident_per_class": rank_confident_clips_for_review(
            results, classes, n_per_class=200, is_ovr=is_ovr, clip_length=clip_length
        ),
        "confident_per_class_per_video": rank_confident_clips_per_video_for_review(
            results, classes, n_per_class=200, is_ovr=is_ovr, clip_length=clip_length
        ),
        # Candidate class-transition windows.
        "transition_per_class": rank_transition_clips_for_review(
            results, classes, clip_length=clip_length, is_ovr=is_ovr, n_per_class=50
        ),
        "transition_per_class_per_video": rank_transition_clips_per_video_for_review(
            results, classes, clip_length=clip_length, is_ovr=is_ovr, n_per_class=50
        ),
    }
    # Drop any review state carried over from a previous report.
    for bucket in (
        self._pending,
        self._pending_hard_negatives,
        self._pending_transitions,
        self._accepted_keys,
        self._hard_negative_keys,
    ):
        bucket.clear()
    self._review_mode = "uncertain"
    self._mode_combo.setCurrentIndex(0)
    self._populate_class_combo()
    self._update_status_label("Loaded from inference")
    self._update_save_btn()
258
+
259
def _group_entries_by_video(self, per_class: dict) -> dict:
    """Regroup ``{class: [entries]}`` into ``{class: {video_path: [entries]}}``.

    Classes from the report that have no entries map to empty dicts. Used to
    migrate legacy reports that lack the *_per_video keys.
    """
    grouped = {cls: {} for cls in self._report.get("classes", [])}
    for cls, entries in (per_class or {}).items():
        video_map = {}
        for entry in entries:
            video_map.setdefault(entry.get("video", ""), []).append(entry)
        grouped[cls] = video_map
    return grouped

def load_from_file(self, path: str):
    """Load a previously saved _uncertainty.json file.

    Older report formats are migrated in place: a flat "confident_candidates"
    list is split per class, and missing per-video groupings are derived from
    the per-class lists. All review state is reset afterwards.
    """
    try:
        with open(path, "r", encoding="utf-8") as f:
            self._report = json.load(f)
    except Exception as exc:
        QMessageBox.critical(self, "Load error", str(exc))
        return
    classes = self._report.get("classes", [])
    if "confident_per_class" not in self._report:
        # Legacy format stored one flat candidate list; split it by class.
        confident_per_class = {cls: [] for cls in classes}
        for entry in (self._report.get("confident_candidates", []) or []):
            pred = entry.get("predicted_class")
            if pred in confident_per_class:
                confident_per_class[pred].append(entry)
        self._report["confident_per_class"] = confident_per_class
    if "per_class_per_video" not in self._report:
        self._report["per_class_per_video"] = self._group_entries_by_video(
            self._report.get("per_class", {})
        )
    if "confident_per_class_per_video" not in self._report:
        self._report["confident_per_class_per_video"] = self._group_entries_by_video(
            self._report.get("confident_per_class", {})
        )
    if "transition_per_class" not in self._report:
        self._report["transition_per_class"] = {cls: [] for cls in classes}
    if "transition_per_class_per_video" not in self._report:
        self._report["transition_per_class_per_video"] = self._group_entries_by_video(
            self._report.get("transition_per_class", {})
        )
    # Reset review state before showing the freshly loaded report.
    self._pending.clear()
    self._pending_hard_negatives.clear()
    self._pending_transitions.clear()
    self._accepted_keys.clear()
    self._hard_negative_keys.clear()
    self._review_mode = "uncertain"
    self._mode_combo.setCurrentIndex(0)
    self._populate_class_combo()
    self._update_status_label(f"Loaded {path}")
    self._update_save_btn()
313
+
314
+ # UI construction.
315
+
316
def _setup_ui(self):
    """Build the widget layout: a top bar over a left/right splitter.

    Left side: mode/class/scope selectors above the ranked clip list.
    Right side: clip player, info/score labels, and the labelling action box.
    All interactive widgets are wired to ``_on_*`` handler slots.
    """
    root = QVBoxLayout(self)
    root.setContentsMargins(8, 8, 8, 8)

    # Top bar: status text, load button, and the global save button.
    top = QHBoxLayout()
    self._status_label = QLabel("No inference results loaded.")
    self._status_label.setStyleSheet("color:#aaa;font-style:italic;")
    top.addWidget(self._status_label, 1)

    load_btn = QPushButton("Load results file…")
    load_btn.setToolTip("Load a saved _uncertainty.json or inference_results.json")
    load_btn.clicked.connect(self._on_load_file)
    top.addWidget(load_btn)

    # Button text (clip count) is kept up to date elsewhere via _update_save_btn.
    self._save_btn = QPushButton("Save 0 accepted clips to annotations")
    self._save_btn.setStyleSheet(
        "QPushButton{background:#27ae60;color:white;font-weight:bold;padding:4px 10px;}"
        "QPushButton:disabled{background:#555;}"
    )
    self._save_btn.setEnabled(False)
    self._save_btn.clicked.connect(self._on_save_to_annotations)
    top.addWidget(self._save_btn)

    root.addLayout(top)

    # Horizontal splitter.
    splitter = QSplitter(Qt.Orientation.Horizontal)
    root.addWidget(splitter, 1)

    # LEFT: class selector + clip list
    left_panel = QWidget()
    left_layout = QVBoxLayout(left_panel)
    left_layout.setContentsMargins(0, 0, 4, 0)

    # Selector row: review mode, target class, count/scope/video filters.
    class_row = QHBoxLayout()
    class_row.addWidget(QLabel("Mode:"))
    self._mode_combo = QComboBox()
    # userData strings match the _review_mode state values.
    self._mode_combo.addItem("Uncertain review", "uncertain")
    self._mode_combo.addItem("Confident enrichment", "confident")
    self._mode_combo.addItem("Transition mining", "transition")
    self._mode_combo.currentIndexChanged.connect(self._on_mode_changed)
    class_row.addWidget(self._mode_combo)

    class_row.addSpacing(10)
    class_row.addWidget(QLabel("Class:"))
    self._class_combo = QComboBox()
    self._class_combo.currentIndexChanged.connect(self._on_class_changed)
    class_row.addWidget(self._class_combo, 1)

    # Count spinner — only shown in confident mode (see _update_mode_controls).
    self._confident_count_label = QLabel("Top:")
    class_row.addWidget(self._confident_count_label)
    self._confident_count_spin = QSpinBox()
    self._confident_count_spin.setRange(1, 500)
    self._confident_count_spin.setValue(25)
    self._confident_count_spin.setToolTip("Number of high-confidence suggestions to show.")
    self._confident_count_spin.valueChanged.connect(self._refresh_clip_list)
    class_row.addWidget(self._confident_count_spin)

    # Scope selector: rank across all videos or within a single one.
    self._scope_label = QLabel("Scope:")
    class_row.addWidget(self._scope_label)
    self._confident_scope_combo = QComboBox()
    self._confident_scope_combo.addItem("Across videos", "overall")
    self._confident_scope_combo.addItem("Per video", "per_video")
    self._confident_scope_combo.currentIndexChanged.connect(self._on_confident_scope_changed)
    class_row.addWidget(self._confident_scope_combo)

    # Video selector — only shown when scope is "per_video".
    self._confident_video_label = QLabel("Video:")
    class_row.addWidget(self._confident_video_label)
    self._confident_video_combo = QComboBox()
    self._confident_video_combo.currentIndexChanged.connect(self._refresh_clip_list)
    class_row.addWidget(self._confident_video_combo, 1)
    left_layout.addLayout(class_row)

    self._clip_list = QListWidget()
    self._clip_list.currentRowChanged.connect(self._on_clip_selected)
    left_layout.addWidget(self._clip_list, 1)

    # pending queue summary
    self._pending_label = QLabel("Pending: 0 accepted")
    self._pending_label.setStyleSheet("color:#f39c12;font-size:11px;")
    left_layout.addWidget(self._pending_label)

    splitter.addWidget(left_panel)

    # RIGHT: preview + info + actions
    right_panel = QWidget()
    right_layout = QVBoxLayout(right_panel)
    right_layout.setContentsMargins(4, 0, 0, 0)

    # video player, horizontally centered between stretches
    player_row = QHBoxLayout()
    player_row.addStretch()
    self._player = _ClipPlayer()
    player_row.addWidget(self._player)
    player_row.addStretch()
    right_layout.addLayout(player_row)

    # clip info
    self._info_label = QLabel("Select a clip from the list.")
    self._info_label.setWordWrap(True)
    self._info_label.setAlignment(Qt.AlignmentFlag.AlignTop)
    self._info_label.setMinimumHeight(60)
    right_layout.addWidget(self._info_label)

    # score bars (rich-text HTML from _score_bar_html)
    self._score_label = QLabel("")
    self._score_label.setWordWrap(True)
    self._score_label.setAlignment(Qt.AlignmentFlag.AlignTop)
    right_layout.addWidget(self._score_label)

    # action box — single unified flow
    action_box = QGroupBox("Label & add to dataset")
    action_layout = QVBoxLayout(action_box)

    label_row = QHBoxLayout()
    label_row.addWidget(QLabel("Label this clip as:"))
    self._label_combo = QComboBox()
    self._label_combo.setMinimumWidth(160)
    self._label_combo.setToolTip(
        "Pre-filled with the model's prediction. Change it to reassign."
    )
    label_row.addWidget(self._label_combo, 1)
    action_layout.addLayout(label_row)

    # Per-frame label editor — only visible in transition mode.
    self._transition_box = QGroupBox("Transition frame labels")
    transition_layout = QVBoxLayout(self._transition_box)
    self._transition_help_label = QLabel(
        "Review the proposed per-frame labels for this transition window before saving."
    )
    self._transition_help_label.setWordWrap(True)
    transition_layout.addWidget(self._transition_help_label)
    self._transition_frames_widget = QWidget()
    self._transition_frames_layout = QHBoxLayout(self._transition_frames_widget)
    self._transition_frames_layout.setContentsMargins(0, 0, 0, 0)
    self._transition_frames_layout.setSpacing(4)
    transition_layout.addWidget(self._transition_frames_widget)
    action_layout.addWidget(self._transition_box)

    self._add_btn = QPushButton("Add to dataset with this label")
    self._add_btn.setStyleSheet(
        "QPushButton{background:#27ae60;color:white;font-weight:bold;"
        "padding:8px;font-size:13px;}"
        "QPushButton:disabled{background:#555;color:#999;}"
    )
    self._add_btn.setEnabled(False)
    self._add_btn.clicked.connect(self._on_add)
    action_layout.addWidget(self._add_btn)

    self._skip_btn = QPushButton("→ Reject and save as hard negative")
    self._skip_btn.setStyleSheet("padding:6px;color:#aaa;")
    self._skip_btn.clicked.connect(self._on_skip)
    action_layout.addWidget(self._skip_btn)

    # feedback line shown briefly after adding
    self._feedback_label = QLabel("")
    self._feedback_label.setAlignment(Qt.AlignmentFlag.AlignCenter)
    self._feedback_label.setStyleSheet("color:#2ecc71;font-weight:bold;font-size:12px;")
    action_layout.addWidget(self._feedback_label)
    self._feedback_timer = QTimer(self)
    self._feedback_timer.setSingleShot(True)
    self._feedback_timer.timeout.connect(lambda: self._feedback_label.setText(""))

    right_layout.addWidget(action_box)
    splitter.addWidget(right_panel)

    splitter.setSizes([360, 680])
483
+
484
+ # Population helpers.
485
+
486
def _populate_class_combo(self):
    """Rebuild the class selector with per-class candidate counts for the active mode.

    Also refreshes the label combo, video combo, mode-dependent controls and
    the clip list, restoring the previously selected class index when possible.
    """
    classes = self._report.get("classes", [])
    previous_index = self._class_combo.currentIndex()

    # All three modes render identical "<class> (<count>)" items; only the
    # report key differs, so dispatch on the mode instead of duplicating the
    # loop three times. `or {}` also makes the uncertain branch tolerate an
    # explicit None like the other two branches already did.
    report_key = {
        "confident": "confident_per_class",
        "transition": "transition_per_class",
    }.get(self._review_mode, "per_class")
    per_class = self._report.get(report_key, {}) or {}

    self._class_combo.blockSignals(True)
    self._class_combo.clear()
    for cls in classes:
        self._class_combo.addItem(f"{cls} ({len(per_class.get(cls, []))})")
    self._class_combo.blockSignals(False)

    self._label_combo.clear()
    self._label_combo.addItems(classes)
    self._populate_video_combo()

    if classes:
        # Clamp the restored selection into the valid index range.
        self._class_combo.setCurrentIndex(max(0, min(previous_index, len(classes) - 1)))
    self._update_mode_controls()
    self._refresh_clip_list()
516
+
517
def _populate_video_combo(self):
    """Fill the video selector with every video present in the active mode's report.

    Items are sorted by basename, display the basename, and carry the full
    path as userData. The previous selection is restored when still present.
    """
    previously_selected = self._confident_video_combo.currentData()
    key_by_mode = {
        "confident": "confident_per_class_per_video",
        "transition": "transition_per_class_per_video",
    }
    report_key = key_by_mode.get(self._review_mode, "per_class_per_video")
    per_class_per_video = self._report.get(report_key, {}) or {}
    video_paths = {
        video
        for per_video in per_class_per_video.values()
        if isinstance(per_video, dict)
        for video in per_video
    }
    ordered = sorted(video_paths, key=os.path.basename)
    self._confident_video_combo.blockSignals(True)
    self._confident_video_combo.clear()
    for path in ordered:
        self._confident_video_combo.addItem(os.path.basename(path), path)
    self._confident_video_combo.blockSignals(False)
    if ordered:
        # Re-select the previous video when still listed, else the first one.
        restored = self._confident_video_combo.findData(previously_selected)
        self._confident_video_combo.setCurrentIndex(restored if restored >= 0 else 0)
541
+
542
def _current_video(self):
    """Return the full video path stored as userData on the selected video item (or None)."""
    return self._confident_video_combo.currentData()
544
+
545
def _entry_key(self, entry: dict) -> tuple:
    """Identity key for a candidate clip: (video path, clip index)."""
    return (entry.get("video", ""), int(entry.get("clip_idx", -1)))
547
+
548
def _entry_class_key(self, entry: dict, class_name: str | None = None) -> tuple:
    """Identity key scoped to a target class: (video path, clip index, class).

    The class defaults to the entry's own "review_target_class", then to the
    class currently selected in the class combo, then to "".
    """
    target_class = class_name or entry.get("review_target_class") or self._current_target_class()
    return (entry.get("video", ""), int(entry.get("clip_idx", -1)), target_class or "")
551
+
552
def _current_target_class(self):
    """Name of the class selected in the class combo, or '' when none is selected."""
    classes = self._report.get("classes", [])
    idx = self._class_combo.currentIndex()
    return classes[idx] if 0 <= idx < len(classes) else ""
558
+
559
def _apply_item_review_state(self, item: QListWidgetItem, entry: dict):
    """Mark a list item whose clip was already reviewed.

    Accepted clips get a green "+" prefix; hard-negative rejections an
    orange "!" prefix. Unreviewed items are left untouched.
    """
    if self._entry_key(entry) in self._accepted_keys:
        marker, colour = "+ ", "#2ecc71"
    elif self._entry_class_key(entry) in self._hard_negative_keys:
        marker, colour = "! ", "#e67e22"
    else:
        return
    item.setText(marker + item.text())
    item.setForeground(QColor(colour))
567
+
568
def _update_mode_controls(self):
    """Show or hide the controls that depend on the active review mode and scope."""
    confident = self._review_mode == "confident"
    transition = self._review_mode == "transition"
    per_video = self._review_scope == "per_video"

    self._class_combo.setEnabled(True)
    visibility = (
        (self._confident_count_label, confident),
        (self._confident_count_spin, confident),
        (self._scope_label, True),
        (self._confident_scope_combo, True),
        (self._confident_video_label, per_video),
        (self._confident_video_combo, per_video),
        (self._transition_box, transition),
        (self._label_combo, not transition),
        (self._transition_help_label, transition),
    )
    for widget, visible in visibility:
        widget.setVisible(visible)
582
+
583
def _update_status_label(self, prefix: str = "Loaded"):
    """Summarize candidate counts and the active mode in the top status line."""
    classes = self._report.get("classes", [])
    uncertain_total = sum(
        len(v) for v in self._report.get("per_class", {}).values()
    )
    confident_total = sum(
        len(v) for v in (self._report.get("confident_per_class", {}) or {}).values()
    )
    transition_total = sum(
        len(v) for v in (self._report.get("transition_per_class", {}) or {}).values()
    )
    mode_txt = {
        "uncertain": "uncertain review",
        "confident": "confident enrichment",
    }.get(self._review_mode, "transition mining")
    if self._review_scope == "per_video":
        selected = self._current_video()
        if selected:
            mode_txt += f" / {os.path.basename(selected)}"
    self._status_label.setText(
        f"{prefix} – {len(classes)} classes, {uncertain_total} uncertain candidates, "
        f"{confident_total} confident candidates, {transition_total} transitions ({mode_txt})"
    )
605
+
606
def _clear_transition_editor(self):
    """Remove every per-frame combo cell from the transition editor row."""
    layout = self._transition_frames_layout
    while layout.count():
        child = layout.takeAt(0).widget()
        if child is not None:
            child.deleteLater()
    self._transition_frame_combos = []
613
+
614
def _populate_transition_editor(self, frame_labels: list):
    """Build one label+combo cell per frame, pre-selecting each proposed label."""
    self._clear_transition_editor()
    class_names = self._report.get("classes", [])
    for frame_no, proposed in enumerate(frame_labels, start=1):
        cell = QWidget()
        column = QVBoxLayout(cell)
        column.setContentsMargins(0, 0, 0, 0)
        column.setSpacing(2)

        header = QLabel(f"F{frame_no}")
        header.setAlignment(Qt.AlignmentFlag.AlignCenter)

        picker = QComboBox()
        picker.addItem("Ignore", None)  # None userData => frame excluded
        for name in class_names:
            picker.addItem(name, name)
        match = picker.findData(proposed)
        if match >= 0:
            picker.setCurrentIndex(match)

        column.addWidget(header)
        column.addWidget(picker)
        self._transition_frames_layout.addWidget(cell)
        self._transition_frame_combos.append(picker)
    self._transition_frames_layout.addStretch(1)
636
+
637
def _current_transition_frame_labels(self):
    """Return the class (or None for 'Ignore') chosen in each frame combo, in order."""
    return [combo.currentData() for combo in self._transition_frame_combos]
642
+
643
+ def _refresh_clip_list(self):
644
+ self._clip_list.clear()
645
+ self._current_entry = {}
646
+ self._current_frames = []
647
+ self._player.stop()
648
+ self._add_btn.setEnabled(False)
649
+ self._skip_btn.setEnabled(False)
650
+ self._info_label.setText("Select a clip from the list.")
651
+ self._score_label.setText("")
652
+
653
+ classes = self._report.get("classes", [])
654
+ index = self._class_combo.currentIndex()
655
+ if index < 0 or index >= len(classes):
656
+ self._update_status_label()
657
+ return
658
+ class_name = classes[index]
659
+
660
+ if self._review_mode == "confident":
661
+ if self._review_scope == "per_video":
662
+ confident_per_class_per_video = self._report.get("confident_per_class_per_video", {}) or {}
663
+ per_video = confident_per_class_per_video.get(class_name, {}) or {}
664
+ entries = list(per_video.get(self._current_video(), []) or [])
665
+ else:
666
+ confident_per_class = self._report.get("confident_per_class", {}) or {}
667
+ entries = list(confident_per_class.get(class_name, []) or [])
668
+ limit = int(self._confident_count_spin.value())
669
+ entries = entries[:limit]
670
+ for rank, e in enumerate(entries, start=1):
671
+ video_name = os.path.basename(e.get("video", ""))
672
+ pred = e.get("predicted_class", class_name)
673
+ conf = e.get("confidence_score", e.get("class_score", e.get("top_score", 0.0)))
674
+ text = (
675
+ f"#{rank} {video_name} frame {e.get('start_frame', 0)}\n"
676
+ f" class={pred} score={conf:.0%}"
677
+ )
678
+ item = QListWidgetItem(text)
679
+ if conf > 0.9:
680
+ item.setForeground(QColor("#2ecc71"))
681
+ elif conf > 0.75:
682
+ item.setForeground(QColor("#5dade2"))
683
+ item.setData(Qt.ItemDataRole.UserRole, e)
684
+ self._apply_item_review_state(item, e)
685
+ self._clip_list.addItem(item)
686
+ elif self._review_mode == "transition":
687
+ if self._review_scope == "per_video":
688
+ per_class_per_video = self._report.get("transition_per_class_per_video", {}) or {}
689
+ per_video = per_class_per_video.get(class_name, {}) or {}
690
+ entries = list(per_video.get(self._current_video(), []) or [])
691
+ else:
692
+ per_class = self._report.get("transition_per_class", {}) or {}
693
+ entries = list(per_class.get(class_name, []) or [])
694
+ for rank, e in enumerate(entries, start=1):
695
+ video_name = os.path.basename(e.get("video", ""))
696
+ text = (
697
+ f"#{rank} {video_name} frame {e.get('transition_frame', e.get('start_frame', 0))}\n"
698
+ f" {e.get('left_class', '?')} -> {e.get('right_class', '?')} score={e.get('transition_score', 0.0):.0%}"
699
+ )
700
+ item = QListWidgetItem(text)
701
+ item.setForeground(QColor("#9b59b6"))
702
+ item.setData(Qt.ItemDataRole.UserRole, e)
703
+ self._apply_item_review_state(item, e)
704
+ self._clip_list.addItem(item)
705
+ else:
706
+ if self._review_scope == "per_video":
707
+ per_class_per_video = self._report.get("per_class_per_video", {}) or {}
708
+ per_video = per_class_per_video.get(class_name, {}) or {}
709
+ entries = list(per_video.get(self._current_video(), []) or [])
710
+ else:
711
+ per_class = self._report.get("per_class", {})
712
+ entries = list(per_class.get(class_name, []) or [])
713
+ for e in entries:
714
+ video_name = os.path.basename(e.get("video", ""))
715
+ pred = e.get("predicted_class", "?")
716
+ top_s = e.get("top_score", 0.0)
717
+ margin = e.get("margin", 0.0)
718
+ u_score = e.get("uncertainty_score", 0.0)
719
+ text = (
720
+ f"{video_name} frame {e.get('start_frame', 0)}\n"
721
+ f" pred={pred} ({top_s:.0%}) margin={margin:.0%} "
722
+ f"uncertainty={u_score:.0%}"
723
+ )
724
+ item = QListWidgetItem(text)
725
+ if u_score > 0.7:
726
+ item.setForeground(QColor("#e74c3c"))
727
+ elif u_score > 0.4:
728
+ item.setForeground(QColor("#f39c12"))
729
+ item.setData(Qt.ItemDataRole.UserRole, e)
730
+ self._apply_item_review_state(item, e)
731
+ self._clip_list.addItem(item)
732
+
733
+ if self._clip_list.count():
734
+ self._clip_list.setCurrentRow(0)
735
+ self._update_status_label()
736
+
737
+ def _on_mode_changed(self, index: int):
738
+ mode = self._mode_combo.itemData(index)
739
+ self._review_mode = mode or "uncertain"
740
+ self._populate_class_combo()
741
+ self._update_mode_controls()
742
+
743
+ def _on_confident_scope_changed(self, index: int):
744
+ scope = self._confident_scope_combo.itemData(index)
745
+ self._review_scope = scope or "overall"
746
+ self._populate_class_combo()
747
+ self._update_mode_controls()
748
+
749
+ def _on_class_changed(self, index: int):
750
+ classes = self._report.get("classes", [])
751
+ if index < 0 or index >= len(classes):
752
+ self._clip_list.clear()
753
+ return
754
+ self._refresh_clip_list()
755
+
756
+ # Clip selection / preview.
757
+
758
    def _on_clip_selected(self, row: int):
        """Show details, score bars and a preview for the newly selected clip.

        Parameters
        ----------
        row : int
            Index of the selected row in the clip list; a negative value means
            the selection was cleared.
        """
        self._player.stop()
        if row < 0:
            # Selection cleared: reset the detail pane and disable actions.
            self._current_entry = {}
            self._add_btn.setEnabled(False)
            self._clear_transition_editor()
            self._info_label.setText("Select a clip from the list.")
            self._score_label.setText("")
            return

        item = self._clip_list.item(row)
        if item is None:
            return
        # The full report entry dict is stashed on the item under UserRole.
        entry = item.data(Qt.ItemDataRole.UserRole)
        if not entry:
            return
        self._current_entry = entry

        # Update info label
        pred = entry.get("predicted_class", "?")
        top_s = entry.get("top_score", 0.0)
        margin = entry.get("margin", 0.0)
        u_score = entry.get("uncertainty_score", 0.0)
        conf_score = entry.get("confidence_score", top_s)
        review_kind = entry.get("review_kind", "uncertain")
        video = entry.get("video", "")
        start = entry.get("start_frame", 0)
        if review_kind == "transition":
            self._info_label.setText(
                f"<b>Video:</b> {os.path.basename(video)}<br>"
                f"<b>Window start:</b> {start}<br>"
                f"<b>Transition:</b> {entry.get('left_class', '?')} -> {entry.get('right_class', '?')}<br>"
                f"<b>Boundary score:</b> {entry.get('transition_score', 0.0):.1%}"
            )
        elif review_kind == "confident":
            self._info_label.setText(
                f"<b>Video:</b> {os.path.basename(video)}<br>"
                f"<b>Start frame:</b> {start}<br>"
                f"<b>Predicted:</b> {pred} ({top_s:.1%})<br>"
                f"<b>Confidence:</b> {conf_score:.1%}"
            )
        else:
            self._info_label.setText(
                f"<b>Video:</b> {os.path.basename(video)}<br>"
                f"<b>Start frame:</b> {start}<br>"
                f"<b>Predicted:</b> {pred} ({top_s:.1%})<br>"
                f"<b>Margin:</b> {margin:.1%} | <b>Uncertainty:</b> {u_score:.1%}"
            )

        # Render per-class score bars for the entry.
        scores = entry.get("scores", {})
        self._score_label.setText(_score_bar_html(scores, pred))

        # Pre-fill the label combo with the model's prediction
        idx = self._label_combo.findText(pred)
        if idx >= 0:
            self._label_combo.setCurrentIndex(idx)
        if review_kind == "transition":
            # Seed the per-frame editor with the proposed labels, or blanks
            # sized to the report's clip length when none were proposed.
            self._populate_transition_editor(
                list(entry.get("proposed_frame_labels", []) or [None] * int(self._report.get("clip_length", 8)))
            )
        else:
            self._clear_transition_editor()

        key = self._entry_key(entry)
        class_key = self._entry_class_key(entry)
        # "Add" is only available while the clip has not been accepted yet.
        self._add_btn.setEnabled(key not in self._accepted_keys)
        if review_kind == "transition":
            # Transitions are never hard negatives; skip just moves on.
            self._skip_btn.setEnabled(True)
            self._skip_btn.setText("→ Skip transition candidate")
        else:
            # Skip = reject as a hard negative; disabled once accepted or
            # already queued as a hard negative for this class.
            self._skip_btn.setEnabled((key not in self._accepted_keys) and (class_key not in self._hard_negative_keys))
            target_class = self._current_target_class()
            if target_class:
                self._skip_btn.setText(f"→ Reject and save as hard negative for '{target_class}'")
            else:
                self._skip_btn.setText("→ Reject and save as hard negative")

        # Load frames for preview
        self._load_preview(entry)
+ def _load_preview(self, entry: dict):
839
+ video_path = entry.get("video", "")
840
+ start_frame = entry.get("start_frame", 0)
841
+ clip_length = self._report.get("clip_length", 8)
842
+ frame_interval = entry.get("frame_interval", 1)
843
+ target_fps = self._report.get("target_fps", 8)
844
+
845
+ if not os.path.exists(video_path):
846
+ self._player.load([])
847
+ self._info_label.setText(
848
+ self._info_label.text()
849
+ + "<br><span style='color:red;'>Video file not found.</span>"
850
+ )
851
+ return
852
+
853
+ frames = _frames_from_video(
854
+ video_path, start_frame, clip_length, frame_interval
855
+ )
856
+ playback_fps = max(4, min(target_fps, 16))
857
+ self._player.load(frames, playback_fps)
858
+ self._current_frames = frames
859
+
860
+ # Actions.
861
+
862
    def _on_add(self):
        """Register the current clip with the selected label and advance.

        Transition entries are queued into ``self._pending_transitions`` with
        their per-frame labels; all other entries are queued into
        ``self._pending`` under the label chosen in the label combo. In both
        cases the clip is marked accepted, any previously queued state for the
        same clip is dropped, the list item is re-styled, and the selection
        advances to the next row.
        """
        if not self._current_entry:
            return
        if self._current_entry.get("review_kind") == "transition":
            frame_labels = self._current_transition_frame_labels()
            valid_labels = [lbl for lbl in frame_labels if lbl]
            if not valid_labels:
                # No frame has a label yet — nothing to register.
                return
            label_counts = {}
            for lbl in valid_labels:
                label_counts[lbl] = label_counts.get(lbl, 0) + 1
            # Clip-level label = most frequent frame label; sorted() makes the
            # tie-break deterministic (alphabetically first among ties).
            label = max(sorted(label_counts), key=lambda lbl: label_counts[lbl])
            key = self._entry_key(self._current_entry)
            self._accepted_keys.add(key)
            # Remove any earlier queued versions of this clip before re-adding.
            self._pending = [
                (entry, entry_label)
                for entry, entry_label in self._pending
                if self._entry_key(entry) != key
            ]
            self._pending_transitions = [
                (entry, entry_label, entry_frame_labels)
                for entry, entry_label, entry_frame_labels in self._pending_transitions
                if self._entry_key(entry) != key
            ]
            transition_entry = dict(self._current_entry)
            transition_entry["proposed_frame_labels"] = list(frame_labels)
            self._pending_transitions.append((transition_entry, label, list(frame_labels)))
            self._update_save_btn()
            # Mark the list item as accepted ("+ " prefix, green).
            row = self._clip_list.currentRow()
            item = self._clip_list.item(row)
            if item is not None and not item.text().startswith("+ "):
                item.setText("+ " + item.text().lstrip("! ").strip())
                item.setForeground(QColor("#2ecc71"))
            self._feedback_label.setText("Added transition clip with frame labels")
            self._feedback_timer.start(1800)
            self._advance_list()
            return
        label = self._label_combo.currentText().strip()
        if not label:
            return
        key = self._entry_key(self._current_entry)
        self._accepted_keys.add(key)
        # Drop any earlier queued acceptance of this clip.
        self._pending = [
            (entry, entry_label)
            for entry, entry_label in self._pending
            if self._entry_key(entry) != key
        ]
        # Accepting a clip revokes any hard-negative queued for it.
        # NOTE(review): assumes a class key is a tuple whose first two elements
        # equal the clip key — confirm against _entry_class_key's definition.
        self._hard_negative_keys = {hn_key for hn_key in self._hard_negative_keys if hn_key[:2] != key}
        self._pending_hard_negatives = [
            (entry, target_class)
            for entry, target_class in self._pending_hard_negatives
            if self._entry_key(entry) != key
        ]
        self._pending.append((dict(self._current_entry), label))
        self._update_save_btn()

        # Mark the list item so it's clear it was registered
        row = self._clip_list.currentRow()
        item = self._clip_list.item(row)
        if item is not None:
            if not item.text().startswith("+ "):
                item.setText("+ " + item.text().lstrip("! ").strip())
                item.setForeground(QColor("#2ecc71"))

        self._feedback_label.setText(f"Added as '{label}'")
        self._feedback_timer.start(1800)

        self._advance_list()
    def _on_skip(self):
        """Queue the current clip as a hard negative for the currently viewed class.

        For transition entries this merely advances — transition candidates are
        never hard negatives. Otherwise the clip is queued in
        ``self._pending_hard_negatives`` keyed by (clip, target class), the list
        item is re-styled ("! " prefix, orange), and the selection advances.
        """
        if self._current_entry and self._current_entry.get("review_kind") == "transition":
            self._feedback_label.setText("Skipped transition candidate")
            self._feedback_timer.start(1200)
            self._advance_list()
            return
        if self._current_entry:
            clip_key = self._entry_key(self._current_entry)
            if clip_key in self._accepted_keys:
                # Accepted clips cannot simultaneously be hard negatives.
                return
            target_class = self._current_target_class()
            if not target_class:
                return
            class_key = self._entry_class_key(self._current_entry, target_class)
            self._hard_negative_keys.add(class_key)
            # Drop any previously queued hard negative for the same clip/class.
            self._pending_hard_negatives = [
                (entry, cls_name)
                for entry, cls_name in self._pending_hard_negatives
                if self._entry_class_key(entry, cls_name) != class_key
            ]
            hard_negative_entry = dict(self._current_entry)
            hard_negative_entry["review_target_class"] = target_class
            self._pending_hard_negatives.append((hard_negative_entry, target_class))
            self._update_save_btn()
            # Re-style the list item: strip an accepted "+ " prefix if present,
            # then mark it with "! " in orange.
            row = self._clip_list.currentRow()
            item = self._clip_list.item(row)
            if item is not None:
                if not item.text().startswith("! "):
                    base_text = item.text()
                    if base_text.startswith("+ "):
                        base_text = base_text[2:]
                    item.setText("! " + base_text)
                    item.setForeground(QColor("#e67e22"))
            self._feedback_label.setText("Saved as hard-negative candidate")
            self._feedback_timer.start(1800)
            self._advance_list()
+ def _advance_list(self):
971
+ self._add_btn.setEnabled(False)
972
+ row = self._clip_list.currentRow()
973
+ total = self._clip_list.count()
974
+ if total == 0:
975
+ return
976
+ if row < total - 1:
977
+ self._clip_list.setCurrentRow(row + 1)
978
+ else:
979
+ self._clip_list.setCurrentRow(-1)
980
+ self._player.stop()
981
+ self._feedback_label.setText("All clips in this view reviewed.")
982
+
983
+ def _update_save_btn(self):
984
+ n_accept = len(self._pending)
985
+ n_hn = len(self._pending_hard_negatives)
986
+ n_transitions = len(self._pending_transitions)
987
+ total = n_accept + n_hn + n_transitions
988
+ parts = []
989
+ if n_accept:
990
+ parts.append(f"{n_accept} accepted")
991
+ if n_hn:
992
+ parts.append(f"{n_hn} hard negative{'s' if n_hn != 1 else ''}")
993
+ if n_transitions:
994
+ parts.append(f"{n_transitions} transition clip{'s' if n_transitions != 1 else ''}")
995
+ detail = " + ".join(parts) if parts else "0 items"
996
+ self._save_btn.setText(f"Save {detail} to annotations")
997
+ self._save_btn.setEnabled(total > 0)
998
+ self._pending_label.setText(
999
+ f"Pending: {n_accept} accepted, {n_hn} hard negatives, {n_transitions} transitions | "
1000
+ f"{len(self._accepted_keys)} accepted, {len(self._hard_negative_keys)} hard negatives"
1001
+ )
1002
+
1003
+ # Load from file.
1004
+
1005
    def _on_load_file(self):
        """Load review candidates from a user-chosen JSON file.

        Accepts either a pre-computed uncertainty report (detected by the
        presence of a "per_class" key) or a full inference_results.json
        (detected by a "classes" key), in which case the uncertain / confident /
        transition rankings are computed on the fly. Missing per-video or
        per-mode sections of an older report are backfilled so the UI can rely
        on them. On success, all pending/accepted state is reset and the class
        combo, status label and save button are refreshed.
        """
        start_dir = self.config.get("experiment_path", "")
        path, _ = QFileDialog.getOpenFileName(
            self, "Load uncertainty / inference results",
            start_dir,
            "JSON files (*.json);;All files (*)"
        )
        if not path:
            return

        # If it's a full inference_results.json, compute uncertainty on the fly
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            # Re-attach any arrays stored alongside the JSON file.
            self._restore_external_arrays(path, data)
        except Exception as exc:
            QMessageBox.critical(self, "Load error", str(exc))
            return

        if "per_class" in data:
            # Already an uncertainty report
            self._report = data
            # Backfill the per-video grouping for older reports that lack it.
            if "per_class_per_video" not in self._report:
                per_video = {cls: {} for cls in self._report.get("classes", [])}
                for cls, entries in (self._report.get("per_class", {}) or {}).items():
                    video_map = {}
                    for entry in entries:
                        video_map.setdefault(entry.get("video", ""), []).append(entry)
                    per_video[cls] = video_map
                self._report["per_class_per_video"] = per_video
            # Backfill confident candidates from the legacy flat list, bucketed
            # by predicted class.
            if "confident_per_class" not in self._report:
                legacy_confident = self._report.get("confident_candidates", []) or []
                confident_per_class = {cls: [] for cls in self._report.get("classes", [])}
                for entry in legacy_confident:
                    pred = entry.get("predicted_class")
                    if pred in confident_per_class:
                        confident_per_class[pred].append(entry)
                self._report["confident_per_class"] = confident_per_class
            if "confident_per_class_per_video" not in self._report:
                per_video = {cls: {} for cls in self._report.get("classes", [])}
                for cls, entries in (self._report.get("confident_per_class", {}) or {}).items():
                    video_map = {}
                    for entry in entries:
                        video_map.setdefault(entry.get("video", ""), []).append(entry)
                    per_video[cls] = video_map
                self._report["confident_per_class_per_video"] = per_video
            # Older reports have no transition sections at all; default to empty.
            if "transition_per_class" not in self._report:
                self._report["transition_per_class"] = {
                    cls: [] for cls in self._report.get("classes", [])
                }
            if "transition_per_class_per_video" not in self._report:
                per_video = {cls: {} for cls in self._report.get("classes", [])}
                for cls, entries in (self._report.get("transition_per_class", {}) or {}).items():
                    video_map = {}
                    for entry in entries:
                        video_map.setdefault(entry.get("video", ""), []).append(entry)
                    per_video[cls] = video_map
                self._report["transition_per_class_per_video"] = per_video
        elif "classes" in data:
            # Full inference_results.json — compute uncertainty
            classes = data.get("classes", [])
            params = data.get("parameters", {})
            is_ovr = params.get("use_ovr", False)
            clip_length = params.get("clip_length", 8)
            target_fps = params.get("target_fps", 16)
            # results keyed by video path
            nested_results = data.get("results")
            if isinstance(nested_results, dict) and nested_results:
                results = nested_results
            else:
                # Legacy flat layout: every non-metadata key is a video entry.
                results = {
                    k: v for k, v in data.items()
                    if k not in ("classes", "parameters", "inference_time")
                }
            ranked = rank_clips_for_review(results, classes, n_per_class=25, is_ovr=is_ovr)
            ranked_per_video = rank_clips_per_video_for_review(
                results, classes, n_per_class=25, is_ovr=is_ovr
            )
            confident_per_class = rank_confident_clips_for_review(
                results, classes, n_per_class=200, is_ovr=is_ovr, clip_length=clip_length
            )
            confident_per_class_per_video = rank_confident_clips_per_video_for_review(
                results, classes, n_per_class=200, is_ovr=is_ovr, clip_length=clip_length
            )
            transition_per_class = rank_transition_clips_for_review(
                results, classes, clip_length=clip_length, is_ovr=is_ovr, n_per_class=50
            )
            transition_per_class_per_video = rank_transition_clips_per_video_for_review(
                results, classes, clip_length=clip_length, is_ovr=is_ovr, n_per_class=50
            )
            self._report = {
                "classes": classes,
                "is_ovr": is_ovr,
                "clip_length": clip_length,
                "target_fps": target_fps,
                "per_class": ranked,
                "per_class_per_video": ranked_per_video,
                "confident_per_class": confident_per_class,
                "confident_per_class_per_video": confident_per_class_per_video,
                "transition_per_class": transition_per_class,
                "transition_per_class_per_video": transition_per_class_per_video,
            }
        else:
            QMessageBox.warning(
                self, "Unknown format",
                "File doesn't look like an inference_results.json or uncertainty report."
            )
            return

        # Fresh file: drop all review state accumulated for the previous one.
        self._pending.clear()
        self._pending_hard_negatives.clear()
        self._pending_transitions.clear()
        self._accepted_keys.clear()
        self._hard_negative_keys.clear()
        self._populate_class_combo()
        self._update_status_label(f"Loaded: {path}")
        self._update_save_btn()
+ # Save accepted clips to annotations.
1124
+
1125
    def _on_save_to_annotations(self):
        """Persist all pending accepted, transition, and hard-negative clips.

        Each pending clip is extracted from its source video into the
        experiment's ``reviewed_clips`` directory (reusing an already-extracted
        file when present), then registered with the AnnotationManager:

        * accepted clips get the chosen label plus uniform frame labels;
        * transition clips go to ``reviewed_clips/transitions`` with their
          per-frame labels and transition metadata;
        * hard negatives go to ``reviewed_clips/hard_negatives/<class>`` under
          a synthetic ``near_negative_<class>`` label.

        Missing source videos and duplicate annotation IDs are skipped and
        counted. Finishes with a summary dialog and, when anything was added,
        emits ``annotations_updated``.
        """
        if not self._pending and not self._pending_hard_negatives and not self._pending_transitions:
            return

        annotation_file = self.config.get("annotation_file", "")
        clips_dir = self.config.get("clips_dir", "")

        if not annotation_file or not clips_dir:
            QMessageBox.warning(
                self, "No experiment",
                "Load an experiment first (annotation file and clips dir must be set)."
            )
            return

        reviewed_dir = os.path.join(clips_dir, "reviewed_clips")
        os.makedirs(reviewed_dir, exist_ok=True)

        am = AnnotationManager(annotation_file)
        added = 0
        hard_negatives_added = 0
        transitions_added = 0
        skipped = 0
        errors = []

        clip_length = self._report.get("clip_length", 8)
        target_fps = self._report.get("target_fps", 8)
        # Annotation IDs already on file; grown as we add, to dedupe within
        # this save pass as well.
        existing = {c.get("id") for c in am.get_all_clips()}

        def ensure_saved_clip(out_path: str, video_path: str, start_frame: int, frame_interval: int):
            # Extract and save the clip unless it already exists on disk.
            # Returns a list whose length matches the saved clip's frame count;
            # for a pre-existing file the entries are None placeholders (only
            # the length is used by callers). Raises RuntimeError when no
            # frames could be extracted.
            frames = []
            if not os.path.exists(out_path):
                frames = _frames_from_video(video_path, start_frame, clip_length, frame_interval)
                if not frames:
                    raise RuntimeError(
                        f"No frames extracted: {os.path.basename(video_path)} @ {start_frame}"
                    )
                save_clip(frames, out_path, fps=float(target_fps))
            else:
                cap = cv2.VideoCapture(out_path)
                n_existing = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0)
                cap.release()
                if n_existing <= 0:
                    # Frame count unreadable — assume the configured length.
                    n_existing = clip_length
                frames = [None] * n_existing
            return frames

        for entry, label in self._pending:
            video_path = entry.get("video", "")
            start_frame = entry.get("start_frame", 0)
            frame_interval = entry.get("frame_interval", 1)

            if not os.path.exists(video_path):
                errors.append(f"Not found: {video_path}")
                skipped += 1
                continue

            # Build a safe output filename
            video_stem = re.sub(r"[^\w\-]", "_", os.path.splitext(os.path.basename(video_path))[0])
            out_name = f"{video_stem}_f{start_frame:07d}.mp4"
            out_path = os.path.join(reviewed_dir, out_name)

            try:
                frames = ensure_saved_clip(out_path, video_path, start_frame, frame_interval)
            except Exception as exc:
                errors.append(str(exc))
                skipped += 1
                continue

            # Relative ID from clips_dir
            try:
                rel_id = os.path.relpath(out_path, clips_dir).replace("\\", "/")
            except ValueError:
                # relpath can fail across drives on Windows; fall back to name.
                rel_id = os.path.basename(out_path)

            # Skip if already annotated
            if rel_id in existing:
                skipped += 1
                continue

            actual_frames = len(frames)
            am.add_clip(rel_id, label, meta={
                "source_video": video_path,
                "start_frame": start_frame,
                "origin": "active_learning_review",
                "review_mode": entry.get("review_kind", "uncertain"),
            }, _defer_save=True)
            # Accepted clips are uniformly labelled across all frames.
            am.set_frame_labels(rel_id, [label] * actual_frames, _defer_save=True)
            am.add_class(label)
            added += 1
            existing.add(rel_id)

        transition_dir = os.path.join(reviewed_dir, "transitions")
        os.makedirs(transition_dir, exist_ok=True)
        for entry, label, frame_labels in self._pending_transitions:
            video_path = entry.get("video", "")
            start_frame = int(entry.get("start_frame", 0))
            frame_interval = int(entry.get("frame_interval", 1) or 1)
            if not os.path.exists(video_path):
                errors.append(f"Not found: {video_path}")
                skipped += 1
                continue

            # Filename encodes the transition: <video>_f<start>_transition_<a>_to_<b>.mp4
            video_stem = re.sub(r"[^\w\-]", "_", os.path.splitext(os.path.basename(video_path))[0])
            left_slug = re.sub(r"[^\w\-]", "_", str(entry.get("left_class", "left"))).strip("_") or "left"
            right_slug = re.sub(r"[^\w\-]", "_", str(entry.get("right_class", "right"))).strip("_") or "right"
            out_name = f"{video_stem}_f{start_frame:07d}_transition_{left_slug}_to_{right_slug}.mp4"
            out_path = os.path.join(transition_dir, out_name)
            try:
                frames = ensure_saved_clip(out_path, video_path, start_frame, frame_interval)
            except Exception as exc:
                errors.append(str(exc))
                skipped += 1
                continue

            try:
                rel_id = os.path.relpath(out_path, clips_dir).replace("\\", "/")
            except ValueError:
                rel_id = os.path.basename(out_path)
            if rel_id in existing:
                skipped += 1
                continue

            # Trim/pad the reviewer's per-frame labels to the saved frame count.
            actual_frames = len(frames)
            normalized_frame_labels = list(frame_labels[:actual_frames])
            if len(normalized_frame_labels) < actual_frames:
                normalized_frame_labels.extend([None] * (actual_frames - len(normalized_frame_labels)))
            am.add_clip(rel_id, label, meta={
                "source_video": video_path,
                "start_frame": start_frame,
                "origin": "active_learning_transition_review",
                "review_mode": "transition",
                "transition_frame": int(entry.get("transition_frame", start_frame)),
                "transition_left_class": entry.get("left_class"),
                "transition_right_class": entry.get("right_class"),
            }, _defer_save=True)
            am.set_frame_labels(rel_id, normalized_frame_labels, _defer_save=True)
            am.add_class(label)
            transitions_added += 1
            existing.add(rel_id)

        hard_negative_dir = os.path.join(reviewed_dir, "hard_negatives")
        os.makedirs(hard_negative_dir, exist_ok=True)
        for entry, target_class in self._pending_hard_negatives:
            video_path = entry.get("video", "")
            start_frame = entry.get("start_frame", 0)
            frame_interval = entry.get("frame_interval", 1)

            if not os.path.exists(video_path):
                errors.append(f"Not found: {video_path}")
                skipped += 1
                continue

            video_stem = re.sub(r"[^\w\-]", "_", os.path.splitext(os.path.basename(video_path))[0])
            class_slug = re.sub(r"[^\w\-]", "_", target_class.strip()).strip("_") or "unknown"
            # One subdirectory per target class.
            class_dir = os.path.join(hard_negative_dir, class_slug)
            os.makedirs(class_dir, exist_ok=True)
            out_name = f"{video_stem}_f{start_frame:07d}_hn_{class_slug}.mp4"
            out_path = os.path.join(class_dir, out_name)

            try:
                ensure_saved_clip(out_path, video_path, start_frame, frame_interval)
            except Exception as exc:
                errors.append(str(exc))
                skipped += 1
                continue

            try:
                rel_id = os.path.relpath(out_path, clips_dir).replace("\\", "/")
            except ValueError:
                rel_id = os.path.basename(out_path)

            if rel_id in existing:
                skipped += 1
                continue

            # Hard negatives get a synthetic per-class label.
            hn_label = f"near_negative_{class_slug}"
            am.add_clip(rel_id, hn_label, meta={
                "source_video": video_path,
                "start_frame": start_frame,
                "origin": "active_learning_review",
                "review_mode": entry.get("review_kind", "uncertain"),
                "review_target_class": target_class,
                "hard_negative_for_class": target_class,
            }, _defer_save=True)
            hard_negatives_added += 1
            existing.add(rel_id)

        am.save()  # flush any deferred writes
        self._pending.clear()
        self._pending_hard_negatives.clear()
        self._pending_transitions.clear()
        self._update_save_btn()

        msg = (
            f"Added {added} accepted clip{'s' if added != 1 else ''} and "
            f"{hard_negatives_added} hard negative{'s' if hard_negatives_added != 1 else ''} and "
            f"{transitions_added} transition clip{'s' if transitions_added != 1 else ''}."
        )
        if skipped:
            msg += f" {skipped} skipped (already present or file missing)."
        if errors:
            # Cap the dialog at the first five errors.
            msg += f"\n\nErrors:\n" + "\n".join(errors[:5])
        QMessageBox.information(self, "Saved", msg)

        if added or hard_negatives_added or transitions_added:
            self.annotations_updated.emit()