singlebehaviorlab 2.3.3__tar.gz → 2.3.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {singlebehaviorlab-2.3.3/singlebehaviorlab.egg-info → singlebehaviorlab-2.3.5}/PKG-INFO +3 -3
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/pyproject.toml +3 -3
- singlebehaviorlab-2.3.5/singlebehaviorlab/backend/embedding_refine.py +48 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/inference.py +4 -6
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/model.py +1 -4
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/train.py +2 -2
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/training_runner.py +1 -1
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/analysis_widget.py +1 -1
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/attention_export.py +1 -1
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/inference_popups.py +141 -1
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/inference_widget.py +238 -310
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/inference_worker.py +20 -32
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/main_window.py +1 -1
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/overlay_export.py +2 -2
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/review_widget.py +74 -7
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/training_widget.py +5 -10
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5/singlebehaviorlab.egg-info}/PKG-INFO +3 -3
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab.egg-info/requires.txt +2 -2
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/videoprism_backend/videoprism/models.py +3 -3
- singlebehaviorlab-2.3.3/singlebehaviorlab/backend/embedding_refine.py +0 -158
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/LICENSE +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/README.md +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/setup.cfg +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/__main__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/_paths.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/augmentations.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/clustering.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/contrastive.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/data_store.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/registration.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/segmentation.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/segments.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/uncertainty.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/video_processor.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/video_utils.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/cli.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/config.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/data/config/config.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/data/training_profiles.json +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/demo.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/clip_extraction_widget.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/clustering_widget.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/interactive_timeline.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/labeling_widget.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/metadata_management_widget.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/motion_tracking.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/plot_integration.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/qt_helpers.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/registration_widget.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/segmentation_tracking_widget.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/tab_tutorial_dialog.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/timeline_themes.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/training_profiles.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/video_utils.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/licenses/SAM2-LICENSE +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/licenses/VideoPrism-LICENSE +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab.egg-info/SOURCES.txt +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab.egg-info/dependency_links.txt +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab.egg-info/entry_points.txt +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab.egg-info/top_level.txt +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/tests/test_clustering_smoke.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/tests/test_config.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/tests/test_motion_tracking.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/tests/test_paths.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/tests/test_sam2_smoke.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/tests/test_segments.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/automatic_mask_generator.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/benchmark.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/build_sam.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/backbones/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/backbones/hieradet.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/backbones/image_encoder.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/backbones/utils.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/memory_attention.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/memory_encoder.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/position_encoding.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/sam/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/sam/mask_decoder.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/sam/prompt_encoder.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/sam/transformer.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/sam2_base.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/modeling/sam2_utils.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/sam2_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/sam2_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/sam2_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/sam2_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/sam2_image_predictor.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/sam2_video_predictor.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/sam2_video_predictor_legacy.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/utils/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/utils/amg.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/utils/misc.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/sam2_backend/sam2/utils/transforms.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/videoprism_backend/videoprism/__init__.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/videoprism_backend/videoprism/encoders.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/videoprism_backend/videoprism/layers.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/videoprism_backend/videoprism/tokenizers.py +0 -0
- {singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/third_party/videoprism_backend/videoprism/utils.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: singlebehaviorlab
|
|
3
|
-
Version: 2.3.
|
|
3
|
+
Version: 2.3.5
|
|
4
4
|
Summary: Behavioral sequencing and phenotyping with lightweight task specific adaptation
|
|
5
5
|
Author: Almir Aljovic
|
|
6
6
|
Maintainer: Almir Aljovic
|
|
@@ -34,11 +34,11 @@ License-File: LICENSE
|
|
|
34
34
|
Requires-Dist: PyQt6==6.11.0
|
|
35
35
|
Requires-Dist: PyQt6-WebEngine==6.11.0
|
|
36
36
|
Requires-Dist: PyYAML==6.0.3
|
|
37
|
-
Requires-Dist: numpy
|
|
37
|
+
Requires-Dist: numpy<3,>=2.1
|
|
38
38
|
Requires-Dist: h5py==3.14.0
|
|
39
39
|
Requires-Dist: opencv-python==4.13.0.92
|
|
40
40
|
Requires-Dist: Pillow==12.1.1
|
|
41
|
-
Requires-Dist: scipy
|
|
41
|
+
Requires-Dist: scipy<2,>=1.14
|
|
42
42
|
Requires-Dist: eva-decord==0.6.1
|
|
43
43
|
Requires-Dist: scikit-learn==1.7.2
|
|
44
44
|
Requires-Dist: pandas==2.3.3
|
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "singlebehaviorlab"
|
|
7
|
-
version = "2.3.
|
|
7
|
+
version = "2.3.5"
|
|
8
8
|
description = "Behavioral sequencing and phenotyping with lightweight task specific adaptation"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
license = { file = "LICENSE" }
|
|
@@ -16,11 +16,11 @@ dependencies = [
|
|
|
16
16
|
"PyQt6==6.11.0",
|
|
17
17
|
"PyQt6-WebEngine==6.11.0",
|
|
18
18
|
"PyYAML==6.0.3",
|
|
19
|
-
"numpy
|
|
19
|
+
"numpy>=2.1,<3",
|
|
20
20
|
"h5py==3.14.0",
|
|
21
21
|
"opencv-python==4.13.0.92",
|
|
22
22
|
"Pillow==12.1.1",
|
|
23
|
-
"scipy
|
|
23
|
+
"scipy>=1.14,<2",
|
|
24
24
|
"eva-decord==0.6.1",
|
|
25
25
|
"scikit-learn==1.7.2",
|
|
26
26
|
"pandas==2.3.3",
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
"""Clip-level embedding refinement via label propagation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import numpy as np
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
__all__ = ["refine_clip_predictions"]
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def refine_clip_predictions(
    clip_labels: np.ndarray,
    clip_embeddings: np.ndarray,
    clip_confidences: np.ndarray,
    confidence_threshold: float = 0.7,
    seed_labels: Optional[np.ndarray] = None,
) -> np.ndarray:
    """Refine per-clip predictions using label propagation on clip embeddings.

    Low-confidence (or unseeded) clips are treated as unlabeled nodes of a
    k-NN graph built over ``clip_embeddings`` and re-labeled with
    ``sklearn.semi_supervised.LabelSpreading``.

    Parameters
    ----------
    clip_labels:
        Integer class id per clip, shape ``(N,)``.
    clip_embeddings:
        Feature vector per clip, shape ``(N, D)``.
    clip_confidences:
        Prediction confidence per clip, shape ``(N,)``.
    confidence_threshold:
        Clips below this confidence are treated as unlabeled. Ignored when
        ``seed_labels`` is provided.
    seed_labels:
        Optional hard seeds: entries ``>= 0`` are trusted labels, ``-1`` marks
        unlabeled clips. When given (and of matching length), confidences are
        not consulted.

    Returns
    -------
    np.ndarray
        Refined labels with the same shape/dtype as ``clip_labels``. Falls
        back to an unmodified copy whenever propagation is not applicable
        (too few clips, mismatched lengths, or no labeled/unlabeled split).
    """
    # Imported lazily so merely importing this module never pulls in sklearn.
    from sklearn.semi_supervised import LabelSpreading

    N = len(clip_labels)
    # Degenerate inputs: too few clips or mismatched array lengths.
    if N < 4 or clip_embeddings.shape[0] != N or len(clip_confidences) != N:
        return clip_labels.copy()

    # Work on a signed int64 copy: assigning the -1 "unlabeled" marker into an
    # unsigned label array would silently wrap to a huge positive class id.
    if seed_labels is not None and len(seed_labels) == N:
        labels_for_propagation = np.array(seed_labels, dtype=np.int64)
    else:
        labels_for_propagation = np.array(clip_labels, dtype=np.int64)
        labels_for_propagation[clip_confidences < confidence_threshold] = -1

    # Propagation needs both labeled seeds and unlabeled targets; otherwise
    # there is nothing to learn from or nothing to refine.
    n_labeled = int(np.sum(labels_for_propagation >= 0))
    if n_labeled < 2 or n_labeled == N:
        return clip_labels.copy()

    n_neighbors = min(7, N - 1)
    lp = LabelSpreading(kernel="knn", n_neighbors=n_neighbors, max_iter=30, alpha=0.2)
    lp.fit(clip_embeddings, labels_for_propagation)
    propagated = lp.transduction_

    # Only overwrite clips that were unlabeled; seeded clips keep their labels.
    result = clip_labels.copy()
    unlabeled = labels_for_propagation < 0
    result[unlabeled] = propagated[unlabeled]

    return result
|
|
@@ -186,6 +186,8 @@ def run_inference_on_video(
|
|
|
186
186
|
multi_scale=multi_scale,
|
|
187
187
|
)
|
|
188
188
|
model.load_head(model_path)
|
|
189
|
+
if hasattr(model, "frame_head") and model.frame_head is not None:
|
|
190
|
+
model.frame_head.use_ovr = True
|
|
189
191
|
|
|
190
192
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
191
193
|
model.to(device)
|
|
@@ -225,13 +227,13 @@ def run_inference_on_video(
|
|
|
225
227
|
frame_output = getattr(model, "_frame_output", None)
|
|
226
228
|
if frame_output is not None:
|
|
227
229
|
f_logits = frame_output[0]
|
|
228
|
-
batch_frame_probs = torch.
|
|
230
|
+
batch_frame_probs = torch.sigmoid(f_logits).detach().cpu().numpy()
|
|
229
231
|
for b_i in range(batch_frame_probs.shape[0]):
|
|
230
232
|
clip_frame_probabilities.append(batch_frame_probs[b_i].tolist())
|
|
231
233
|
else:
|
|
232
234
|
clip_frame_probabilities.extend([] for _ in batch_clips)
|
|
233
235
|
|
|
234
|
-
probs = torch.
|
|
236
|
+
probs = torch.sigmoid(logits)
|
|
235
237
|
preds = torch.argmax(probs, dim=1)
|
|
236
238
|
confs = torch.max(probs, dim=1)[0]
|
|
237
239
|
predictions.extend(int(p) for p in preds.cpu().numpy().tolist())
|
|
@@ -269,10 +271,6 @@ def run_inference_on_video(
|
|
|
269
271
|
agg_probs[f_start:f_end] += probs_arr[t][np.newaxis, :] * w
|
|
270
272
|
agg_counts[f_start:f_end] += w
|
|
271
273
|
agg_probs = agg_probs / np.maximum(agg_counts, 1.0)
|
|
272
|
-
covered = agg_counts.squeeze(-1) > 0
|
|
273
|
-
row_sums = agg_probs[covered].sum(axis=1, keepdims=True)
|
|
274
|
-
safe_sums = np.maximum(row_sums, 1e-8)
|
|
275
|
-
agg_probs[covered] = agg_probs[covered] / safe_sums
|
|
276
274
|
aggregated_frame_probs = agg_probs
|
|
277
275
|
|
|
278
276
|
res_entry: dict[str, Any] = {
|
|
@@ -555,10 +555,7 @@ class DilatedTemporalHead(nn.Module):
|
|
|
555
555
|
x_conv = x.transpose(1, 2) # [B, tcn_in, T_pooled]
|
|
556
556
|
stage_logits = self.stage1(x_conv)
|
|
557
557
|
for refine in self.refine_stages:
|
|
558
|
-
|
|
559
|
-
refine_in = torch.sigmoid(stage_logits)
|
|
560
|
-
else:
|
|
561
|
-
refine_in = torch.softmax(stage_logits, dim=1)
|
|
558
|
+
refine_in = torch.sigmoid(stage_logits)
|
|
562
559
|
stage_logits = stage_logits + refine(refine_in)
|
|
563
560
|
|
|
564
561
|
stage_logits_pooled = stage_logits
|
|
@@ -1100,7 +1100,7 @@ def _run_augmentation_ablation_eval(
|
|
|
1100
1100
|
from .augmentations import ClipAugment
|
|
1101
1101
|
|
|
1102
1102
|
aug_opts = config.get("augmentation_options") or {}
|
|
1103
|
-
use_ovr = config.get("use_ovr",
|
|
1103
|
+
use_ovr = config.get("use_ovr", True)
|
|
1104
1104
|
|
|
1105
1105
|
# Map of augmentation toggle names → ClipAugment kwargs that isolate that aug
|
|
1106
1106
|
aug_toggles = {
|
|
@@ -1395,7 +1395,7 @@ def train_model(
|
|
|
1395
1395
|
log_fn(f"Creating data loaders (batch_size={config['batch_size']})...")
|
|
1396
1396
|
|
|
1397
1397
|
use_weighted_sampler = config.get("use_weighted_sampler", False)
|
|
1398
|
-
use_ovr = config.get("use_ovr",
|
|
1398
|
+
use_ovr = config.get("use_ovr", True)
|
|
1399
1399
|
_confusion_warmup_pct = float(config.get("confusion_sampler_warmup_pct", 0.2))
|
|
1400
1400
|
sampler = None
|
|
1401
1401
|
batch_sampler = None
|
{singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/backend/training_runner.py
RENAMED
|
@@ -120,7 +120,7 @@ def _build_backend_train_config(
|
|
|
120
120
|
"crop_jitter_strength": float(cfg.get("crop_jitter_strength", 0.15)),
|
|
121
121
|
"emb_aug_versions": int(cfg.get("emb_aug_versions", 1)),
|
|
122
122
|
"clip_length": int(cfg.get("clip_length", 16)),
|
|
123
|
-
"use_ovr": bool(cfg.get("use_ovr",
|
|
123
|
+
"use_ovr": bool(cfg.get("use_ovr", True)),
|
|
124
124
|
"ovr_label_smoothing": float(cfg.get("ovr_label_smoothing", 0.05)),
|
|
125
125
|
"use_asl": bool(cfg.get("use_asl", False)),
|
|
126
126
|
"asl_gamma_neg": float(cfg.get("asl_gamma_neg", 2.0)),
|
{singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/analysis_widget.py
RENAMED
|
@@ -842,7 +842,7 @@ class AnalysisWidget(QWidget):
|
|
|
842
842
|
if i < len(clip_starts):
|
|
843
843
|
end_frame_exclusive = int(clip_starts[i])
|
|
844
844
|
else:
|
|
845
|
-
end_frame_exclusive = start_frame + (clip_length * frame_interval
|
|
845
|
+
end_frame_exclusive = start_frame + (clip_length - 1) * frame_interval + 1
|
|
846
846
|
if total_frames > 0:
|
|
847
847
|
end_frame_exclusive = min(end_frame_exclusive, total_frames)
|
|
848
848
|
duration_frames = max(0, end_frame_exclusive - start_frame)
|
{singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/attention_export.py
RENAMED
|
@@ -305,7 +305,7 @@ def export_attention_heatmap_video(widget):
|
|
|
305
305
|
def _find_clip_for_frame(frame_idx, clip_starts, clip_length, frame_interval):
|
|
306
306
|
"""Find which clip index covers the given video frame."""
|
|
307
307
|
for i in range(len(clip_starts) - 1, -1, -1):
|
|
308
|
-
clip_end = clip_starts[i] + clip_length * frame_interval
|
|
308
|
+
clip_end = clip_starts[i] + (clip_length - 1) * frame_interval + 1
|
|
309
309
|
if clip_starts[i] <= frame_idx < clip_end:
|
|
310
310
|
return i
|
|
311
311
|
return None
|
{singlebehaviorlab-2.3.3 → singlebehaviorlab-2.3.5}/singlebehaviorlab/gui/inference_popups.py
RENAMED
|
@@ -2,7 +2,8 @@
|
|
|
2
2
|
|
|
3
3
|
from PyQt6.QtWidgets import (
|
|
4
4
|
QDialog, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QComboBox,
|
|
5
|
-
QGroupBox, QMessageBox, QSizePolicy, QSpinBox,
|
|
5
|
+
QGroupBox, QMessageBox, QSizePolicy, QSpinBox, QCheckBox,
|
|
6
|
+
QDoubleSpinBox, QFormLayout,
|
|
6
7
|
)
|
|
7
8
|
from PyQt6.QtCore import Qt, QTimer
|
|
8
9
|
from PyQt6.QtGui import QImage, QPixmap
|
|
@@ -949,3 +950,142 @@ class FrameSegmentPopupDialog(QDialog):
|
|
|
949
950
|
if hasattr(self, '_play_timer') and self._play_timer.isActive():
|
|
950
951
|
self._play_timer.stop()
|
|
951
952
|
super().closeEvent(event)
|
|
953
|
+
|
|
954
|
+
|
|
955
|
+
class PostprocessingDialog(QDialog):
    """Settings dialog for inference postprocessing controls."""

    def __init__(self, parent_widget):
        super().__init__(parent_widget)
        self.setWindowTitle("Postprocessing Settings")
        self.setMinimumWidth(420)
        # Owning inference widget; settings are mirrored from/into its controls.
        self._w = parent_widget
        root = QVBoxLayout(self)

        # Basic section: refinement toggle + display-mode switches.
        basic = QGroupBox("Basic")
        basic_box = QVBoxLayout()

        refine_row = QHBoxLayout()
        self._embed_refine = QCheckBox("Embedding refinement")
        self._embed_refine.setToolTip(
            "Use clip-level embeddings to correct low-confidence predictions\n"
            "via label propagation. Seeds the top N% most confident clips per class."
        )
        refine_row.addWidget(self._embed_refine)
        refine_row.addWidget(QLabel("Top %:"))
        self._embed_refine_pct = QSpinBox()
        self._embed_refine_pct.setRange(1, 50)
        self._embed_refine_pct.setValue(10)
        self._embed_refine_pct.setSuffix("%")
        self._embed_refine_pct.setToolTip("Percentage of most confident clips per class to use as seeds.")
        refine_row.addWidget(self._embed_refine_pct)
        basic_box.addLayout(refine_row)

        self._show_all = QCheckBox("Show all classes (overlapping)")
        self._show_all.setToolTip(
            "When enabled, every class above its threshold is shown independently\n"
            "(no mutual exclusivity). When disabled, only the top-1 class is shown."
        )
        basic_box.addWidget(self._show_all)

        self._show_per_clip = QCheckBox("Show per clip")
        self._show_per_clip.setToolTip(
            "Show raw per-clip predictions instead of frame-aggregated segments.\n"
            "Useful for visually verifying individual clip boundaries and predictions."
        )
        basic_box.addWidget(self._show_per_clip)

        basic.setLayout(basic_box)
        root.addWidget(basic)

        # Thresholds section: global ignore threshold + per-class overrides.
        thresh = QGroupBox("Thresholds")
        thresh_box = QVBoxLayout()
        threshold_row = QHBoxLayout()
        self._ignore_low = QCheckBox("Ignore low-confidence")
        threshold_row.addWidget(self._ignore_low)
        threshold_row.addWidget(QLabel("Default τ:"))
        self._ignore_spin = QDoubleSpinBox()
        self._ignore_spin.setDecimals(2)
        self._ignore_spin.setRange(0.0, 1.0)
        self._ignore_spin.setSingleStep(0.05)
        threshold_row.addWidget(self._ignore_spin)
        self._per_class_thresh_btn = QPushButton("Per-class τ…")
        self._per_class_thresh_btn.clicked.connect(
            lambda: self._w._open_per_class_thresholds_dialog()
        )
        threshold_row.addWidget(self._per_class_thresh_btn)
        thresh_box.addLayout(threshold_row)
        thresh.setLayout(thresh_box)
        root.addWidget(thresh)

        # Decoding section: optional Viterbi smoothing of frame probabilities.
        dec = QGroupBox("Decoding")
        decode_box = QVBoxLayout()
        viterbi_row = QHBoxLayout()
        self._viterbi = QCheckBox("Viterbi decode")
        self._viterbi.setToolTip(
            "Inference-only sequence decoding on merged frame probabilities.\n"
            "OvR models use binary per-class Viterbi."
        )
        viterbi_row.addWidget(self._viterbi)
        viterbi_row.addWidget(QLabel("Switch penalty:"))
        self._viterbi_penalty = QDoubleSpinBox()
        self._viterbi_penalty.setDecimals(2)
        self._viterbi_penalty.setRange(0.0, 5.0)
        self._viterbi_penalty.setSingleStep(0.05)
        viterbi_row.addWidget(self._viterbi_penalty)
        decode_box.addLayout(viterbi_row)
        dec.setLayout(decode_box)
        root.addWidget(dec)

        # Cleanup section: per-class segment post-filters live in a sub-dialog.
        cleanup = QGroupBox("Cleanup")
        cleanup_box = QVBoxLayout()
        self._per_class_seg_btn = QPushButton("Per-class segment rules…")
        self._per_class_seg_btn.setToolTip(
            "Set smooth window, gap fill, and minimum segment length per behavior class."
        )
        self._per_class_seg_btn.clicked.connect(
            lambda: self._w._open_per_class_segment_rules_dialog()
        )
        cleanup_box.addWidget(self._per_class_seg_btn)
        cleanup.setLayout(cleanup_box)
        root.addWidget(cleanup)

        # Bottom button row: Apply pushes values back; Close discards edits.
        buttons = QHBoxLayout()
        buttons.addStretch()
        btn_apply = QPushButton("Apply")
        btn_apply.clicked.connect(self._apply)
        buttons.addWidget(btn_apply)
        btn_close = QPushButton("Close")
        btn_close.clicked.connect(self.close)
        buttons.addWidget(btn_close)
        root.addLayout(buttons)

        self._load_from_widget()

    def _load_from_widget(self):
        """Mirror the current values of the parent widget's controls into this dialog."""
        widget = self._w
        self._embed_refine.setChecked(widget.embedding_refine_check.isChecked())
        self._embed_refine_pct.setValue(int(widget.embedding_refine_threshold.value()))
        self._show_all.setChecked(widget.ovr_show_all_check.isChecked())
        # "Show per clip" is the inverse of the widget's frame-aggregation flag.
        self._show_per_clip.setChecked(not widget.frame_aggregation_check.isChecked())
        self._ignore_low.setChecked(widget.use_ignore_threshold_check.isChecked())
        self._ignore_spin.setValue(widget.ignore_threshold_spin.value())
        self._viterbi.setChecked(widget.use_viterbi_check.isChecked())
        self._viterbi_penalty.setValue(widget.viterbi_switch_penalty_spin.value())

    def _apply(self):
        """Push this dialog's values back into the parent widget's controls."""
        widget = self._w
        widget.embedding_refine_check.setChecked(self._embed_refine.isChecked())
        widget.embedding_refine_threshold.setValue(float(self._embed_refine_pct.value()))
        widget.ovr_show_all_check.setChecked(self._show_all.isChecked())
        widget.frame_aggregation_check.setChecked(not self._show_per_clip.isChecked())
        widget.use_ignore_threshold_check.setChecked(self._ignore_low.isChecked())
        widget.ignore_threshold_spin.setValue(self._ignore_spin.value())
        widget.use_viterbi_check.setChecked(self._viterbi.isChecked())
        widget.viterbi_switch_penalty_spin.setValue(self._viterbi_penalty.value())
        # Let the widget react to the (possibly changed) refinement toggle.
        widget._on_embedding_refine_changed()