singlebehaviorlab 2.2.0__tar.gz → 2.3.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {singlebehaviorlab-2.2.0/singlebehaviorlab.egg-info → singlebehaviorlab-2.3.1}/PKG-INFO +1 -1
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/pyproject.toml +1 -1
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/__init__.py +1 -1
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/segmentation.py +35 -3
- singlebehaviorlab-2.3.1/singlebehaviorlab/backend/segments.py +257 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/inference_popups.py +0 -187
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/inference_widget.py +199 -540
- singlebehaviorlab-2.3.1/singlebehaviorlab/gui/interactive_timeline.py +549 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1/singlebehaviorlab.egg-info}/PKG-INFO +1 -1
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab.egg-info/SOURCES.txt +3 -0
- singlebehaviorlab-2.3.1/tests/test_segments.py +144 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/LICENSE +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/README.md +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/setup.cfg +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/__main__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/_paths.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/__init__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/augmentations.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/clustering.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/data_store.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/inference.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/model.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/registration.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/train.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/training_runner.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/uncertainty.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/video_processor.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/video_utils.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/cli.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/config.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/data/config/config.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/data/training_profiles.json +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/demo.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/__init__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/analysis_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/attention_export.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/clip_extraction_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/clustering_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/inference_worker.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/labeling_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/main_window.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/metadata_management_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/motion_tracking.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/overlay_export.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/plot_integration.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/qt_helpers.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/registration_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/review_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/segmentation_tracking_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/tab_tutorial_dialog.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/timeline_themes.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/training_profiles.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/training_widget.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/video_utils.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/licenses/SAM2-LICENSE +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/licenses/VideoPrism-LICENSE +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab.egg-info/dependency_links.txt +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab.egg-info/entry_points.txt +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab.egg-info/requires.txt +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab.egg-info/top_level.txt +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/tests/test_clustering_smoke.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/tests/test_config.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/tests/test_motion_tracking.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/tests/test_paths.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/tests/test_sam2_smoke.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/__init__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/automatic_mask_generator.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/benchmark.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/build_sam.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/__init__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/backbones/__init__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/backbones/hieradet.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/backbones/image_encoder.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/backbones/utils.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/memory_attention.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/memory_encoder.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/position_encoding.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/sam/__init__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/sam/mask_decoder.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/sam/prompt_encoder.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/sam/transformer.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/sam2_base.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/modeling/sam2_utils.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/sam2_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/sam2_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/sam2_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/sam2_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/sam2_image_predictor.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/sam2_video_predictor.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/sam2_video_predictor_legacy.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/utils/__init__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/utils/amg.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/utils/misc.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/sam2_backend/sam2/utils/transforms.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/videoprism_backend/videoprism/__init__.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/videoprism_backend/videoprism/encoders.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/videoprism_backend/videoprism/layers.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/videoprism_backend/videoprism/models.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/videoprism_backend/videoprism/tokenizers.py +0 -0
- {singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/third_party/videoprism_backend/videoprism/utils.py +0 -0
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "singlebehaviorlab"
|
|
7
|
-
version = "2.
|
|
7
|
+
version = "2.3.1"
|
|
8
8
|
description = "Semi-automated behavioral video annotation, training, and analysis tool"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
license = { file = "LICENSE" }
|
{singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/backend/segmentation.py
RENAMED
|
@@ -49,6 +49,13 @@ _CHECKPOINT_TO_CONFIG = {
|
|
|
49
49
|
"sam2.1_hiera_large.pt": "configs/sam2.1/sam2.1_hiera_l.yaml",
|
|
50
50
|
}
|
|
51
51
|
|
|
52
|
+
# Official download URLs for the SAM2.1 checkpoints, keyed by the same local
# filenames used in _CHECKPOINT_TO_CONFIG. _resolve_checkpoint falls back to
# these when no checkpoint file is found on disk.
_CHECKPOINT_URLS = {
    "sam2.1_hiera_tiny.pt": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_tiny.pt",
    "sam2.1_hiera_small.pt": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_small.pt",
    "sam2.1_hiera_base_plus.pt": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_base_plus.pt",
    "sam2.1_hiera_large.pt": "https://dl.fbaipublicfiles.com/segment_anything_2/092824/sam2.1_hiera_large.pt",
}
|
|
58
|
+
|
|
52
59
|
_CHUNK_SIZE = 200
|
|
53
60
|
|
|
54
61
|
|
|
@@ -109,10 +116,35 @@ def _resolve_checkpoint(model_name: str) -> tuple[str, str]:
|
|
|
109
116
|
for candidate in candidates:
|
|
110
117
|
if candidate.exists():
|
|
111
118
|
return str(candidate), config_name
|
|
119
|
+
|
|
120
|
+
url = _CHECKPOINT_URLS.get(model_name)
|
|
121
|
+
if url:
|
|
122
|
+
dest = checkpoints_root / "checkpoints" / model_name
|
|
123
|
+
dest.parent.mkdir(parents=True, exist_ok=True)
|
|
124
|
+
import urllib.request
|
|
125
|
+
try:
|
|
126
|
+
from tqdm.auto import tqdm as _tqdm
|
|
127
|
+
except Exception:
|
|
128
|
+
_tqdm = None
|
|
129
|
+
print(f"Downloading {model_name} from {url}")
|
|
130
|
+
if _tqdm is None:
|
|
131
|
+
urllib.request.urlretrieve(url, str(dest))
|
|
132
|
+
else:
|
|
133
|
+
with urllib.request.urlopen(url) as resp:
|
|
134
|
+
total = int(resp.headers.get("Content-Length") or 0) or None
|
|
135
|
+
with _tqdm(total=total, unit="B", unit_scale=True, desc=model_name) as bar:
|
|
136
|
+
with open(dest, "wb") as f:
|
|
137
|
+
while True:
|
|
138
|
+
chunk = resp.read(1024 * 256)
|
|
139
|
+
if not chunk:
|
|
140
|
+
break
|
|
141
|
+
f.write(chunk)
|
|
142
|
+
bar.update(len(chunk))
|
|
143
|
+
if dest.exists() and dest.stat().st_size > 0:
|
|
144
|
+
return str(dest), config_name
|
|
145
|
+
|
|
112
146
|
raise FileNotFoundError(
|
|
113
|
-
f"SAM2 checkpoint '{model_name}'
|
|
114
|
-
"Launch the GUI once to trigger the automatic download, or place the "
|
|
115
|
-
"checkpoint file manually in that directory."
|
|
147
|
+
f"SAM2 checkpoint '{model_name}' could not be downloaded or found in {checkpoints_root}."
|
|
116
148
|
)
|
|
117
149
|
|
|
118
150
|
|
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
"""Mutable segment list that drives the interactive timeline editor.
|
|
2
|
+
|
|
3
|
+
Segments are the single source of truth: per-frame label arrays, CSV rows,
|
|
4
|
+
and SVG rectangles are all derived from them. Editing operations enforce
|
|
5
|
+
non-overlap and boundary constraints, and every mutation is tracked by an
|
|
6
|
+
undo/redo stack.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import copy
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from typing import Literal, Optional
|
|
14
|
+
|
|
15
|
+
import numpy as np
|
|
16
|
+
|
|
17
|
+
__all__ = ["Segment", "SegmentsModel"]
|
|
18
|
+
|
|
19
|
+
UNDO_LIMIT = 50
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class Segment:
    """One labeled behavior interval spanning the half-open frame range [start, end)."""

    class_idx: int  # index into the owning model's class-name list
    start: int  # first frame (inclusive)
    end: int  # last frame (exclusive)
    confidence: float = 1.0  # mean model confidence over the span

    @property
    def length(self) -> int:
        """Number of frames covered; clamped so it is never negative."""
        span = self.end - self.start
        return span if span > 0 else 0

    def to_dict(self) -> dict:
        """Serialize to the plain-dict form used for persistence."""
        return {
            "class": self.class_idx,
            "start": self.start,
            "end": self.end,
            "confidence": self.confidence,
        }

    @classmethod
    def from_dict(cls, d: dict) -> "Segment":
        """Inverse of :meth:`to_dict`; a missing confidence defaults to 1.0."""
        return cls(
            int(d["class"]),
            int(d["start"]),
            int(d["end"]),
            float(d.get("confidence", 1.0)),
        )
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class SegmentsModel:
|
|
52
|
+
"""Ordered, non-overlapping segment list with undo/redo."""
|
|
53
|
+
|
|
54
|
+
def __init__(
|
|
55
|
+
self,
|
|
56
|
+
segments: list[dict] | list[Segment],
|
|
57
|
+
classes: list[str],
|
|
58
|
+
total_frames: int,
|
|
59
|
+
orig_fps: float = 30.0,
|
|
60
|
+
):
|
|
61
|
+
self.classes = list(classes)
|
|
62
|
+
self.total_frames = max(0, total_frames)
|
|
63
|
+
self.orig_fps = float(orig_fps)
|
|
64
|
+
|
|
65
|
+
raw = []
|
|
66
|
+
for s in segments:
|
|
67
|
+
raw.append(s if isinstance(s, Segment) else Segment.from_dict(s))
|
|
68
|
+
raw.sort(key=lambda s: s.start)
|
|
69
|
+
self._segments: list[Segment] = raw
|
|
70
|
+
self._undo: list[list[Segment]] = []
|
|
71
|
+
self._redo: list[list[Segment]] = []
|
|
72
|
+
|
|
73
|
+
@property
|
|
74
|
+
def segments(self) -> list[Segment]:
|
|
75
|
+
return self._segments
|
|
76
|
+
|
|
77
|
+
def __len__(self) -> int:
|
|
78
|
+
return len(self._segments)
|
|
79
|
+
|
|
80
|
+
def __getitem__(self, idx: int) -> Segment:
|
|
81
|
+
return self._segments[idx]
|
|
82
|
+
|
|
83
|
+
def _snapshot(self) -> None:
|
|
84
|
+
self._undo.append(copy.deepcopy(self._segments))
|
|
85
|
+
if len(self._undo) > UNDO_LIMIT:
|
|
86
|
+
self._undo.pop(0)
|
|
87
|
+
self._redo.clear()
|
|
88
|
+
|
|
89
|
+
def _clamp(self, val: int) -> int:
|
|
90
|
+
return max(0, min(self.total_frames, val))
|
|
91
|
+
|
|
92
|
+
def _prev_end(self, idx: int) -> int:
|
|
93
|
+
return self._segments[idx - 1].end if idx > 0 else 0
|
|
94
|
+
|
|
95
|
+
def _next_start(self, idx: int) -> int:
|
|
96
|
+
if idx < len(self._segments) - 1:
|
|
97
|
+
return self._segments[idx + 1].start
|
|
98
|
+
return self.total_frames
|
|
99
|
+
|
|
100
|
+
# ------------------------------------------------------------------ undo
|
|
101
|
+
|
|
102
|
+
@property
|
|
103
|
+
def can_undo(self) -> bool:
|
|
104
|
+
return len(self._undo) > 0
|
|
105
|
+
|
|
106
|
+
@property
|
|
107
|
+
def can_redo(self) -> bool:
|
|
108
|
+
return len(self._redo) > 0
|
|
109
|
+
|
|
110
|
+
def undo(self) -> bool:
|
|
111
|
+
if not self._undo:
|
|
112
|
+
return False
|
|
113
|
+
self._redo.append(copy.deepcopy(self._segments))
|
|
114
|
+
self._segments = self._undo.pop()
|
|
115
|
+
return True
|
|
116
|
+
|
|
117
|
+
def redo(self) -> bool:
|
|
118
|
+
if not self._redo:
|
|
119
|
+
return False
|
|
120
|
+
self._undo.append(copy.deepcopy(self._segments))
|
|
121
|
+
self._segments = self._redo.pop()
|
|
122
|
+
return True
|
|
123
|
+
|
|
124
|
+
# --------------------------------------------------------------- editing
|
|
125
|
+
|
|
126
|
+
def resize(self, idx: int, edge: Literal["left", "right"], delta: int) -> bool:
|
|
127
|
+
if idx < 0 or idx >= len(self._segments):
|
|
128
|
+
return False
|
|
129
|
+
seg = self._segments[idx]
|
|
130
|
+
self._snapshot()
|
|
131
|
+
|
|
132
|
+
if edge == "left":
|
|
133
|
+
new_start = self._clamp(seg.start + delta)
|
|
134
|
+
new_start = max(new_start, self._prev_end(idx))
|
|
135
|
+
if new_start >= seg.end:
|
|
136
|
+
new_start = seg.end - 1
|
|
137
|
+
seg.start = new_start
|
|
138
|
+
else:
|
|
139
|
+
new_end = self._clamp(seg.end + delta)
|
|
140
|
+
new_end = min(new_end, self._next_start(idx))
|
|
141
|
+
if new_end <= seg.start:
|
|
142
|
+
new_end = seg.start + 1
|
|
143
|
+
seg.end = new_end
|
|
144
|
+
return True
|
|
145
|
+
|
|
146
|
+
def move(self, idx: int, delta: int) -> bool:
|
|
147
|
+
if idx < 0 or idx >= len(self._segments):
|
|
148
|
+
return False
|
|
149
|
+
seg = self._segments[idx]
|
|
150
|
+
length = seg.length
|
|
151
|
+
lo = self._prev_end(idx)
|
|
152
|
+
hi = self._next_start(idx)
|
|
153
|
+
if hi - lo < length:
|
|
154
|
+
return False
|
|
155
|
+
self._snapshot()
|
|
156
|
+
new_start = self._clamp(seg.start + delta)
|
|
157
|
+
new_start = max(new_start, lo)
|
|
158
|
+
if new_start + length > hi:
|
|
159
|
+
new_start = hi - length
|
|
160
|
+
seg.start = new_start
|
|
161
|
+
seg.end = new_start + length
|
|
162
|
+
return True
|
|
163
|
+
|
|
164
|
+
def reclass(self, idx: int, new_class_idx: int) -> bool:
|
|
165
|
+
if idx < 0 or idx >= len(self._segments):
|
|
166
|
+
return False
|
|
167
|
+
if new_class_idx < 0 or new_class_idx >= len(self.classes):
|
|
168
|
+
return False
|
|
169
|
+
self._snapshot()
|
|
170
|
+
self._segments[idx].class_idx = new_class_idx
|
|
171
|
+
return True
|
|
172
|
+
|
|
173
|
+
def delete(self, idx: int) -> bool:
|
|
174
|
+
if idx < 0 or idx >= len(self._segments):
|
|
175
|
+
return False
|
|
176
|
+
self._snapshot()
|
|
177
|
+
self._segments.pop(idx)
|
|
178
|
+
return True
|
|
179
|
+
|
|
180
|
+
def split(self, idx: int, at_frame: int) -> bool:
|
|
181
|
+
if idx < 0 or idx >= len(self._segments):
|
|
182
|
+
return False
|
|
183
|
+
seg = self._segments[idx]
|
|
184
|
+
if at_frame <= seg.start or at_frame >= seg.end:
|
|
185
|
+
return False
|
|
186
|
+
self._snapshot()
|
|
187
|
+
left = Segment(seg.class_idx, seg.start, at_frame, seg.confidence)
|
|
188
|
+
right = Segment(seg.class_idx, at_frame, seg.end, seg.confidence)
|
|
189
|
+
self._segments[idx:idx + 1] = [left, right]
|
|
190
|
+
return True
|
|
191
|
+
|
|
192
|
+
def merge_with_next(self, idx: int) -> bool:
|
|
193
|
+
if idx < 0 or idx >= len(self._segments) - 1:
|
|
194
|
+
return False
|
|
195
|
+
self._snapshot()
|
|
196
|
+
left = self._segments[idx]
|
|
197
|
+
right = self._segments[idx + 1]
|
|
198
|
+
left.end = right.end
|
|
199
|
+
self._segments.pop(idx + 1)
|
|
200
|
+
return True
|
|
201
|
+
|
|
202
|
+
# -------------------------------------------------------- derived outputs
|
|
203
|
+
|
|
204
|
+
def to_frame_labels(self) -> np.ndarray:
|
|
205
|
+
labels = np.full(self.total_frames, -1, dtype=np.int32)
|
|
206
|
+
for seg in self._segments:
|
|
207
|
+
labels[seg.start:seg.end] = seg.class_idx
|
|
208
|
+
return labels
|
|
209
|
+
|
|
210
|
+
def to_dicts(self) -> list[dict]:
|
|
211
|
+
return [s.to_dict() for s in self._segments]
|
|
212
|
+
|
|
213
|
+
def to_csv_rows(self) -> list[dict]:
|
|
214
|
+
rows = []
|
|
215
|
+
for seg in self._segments:
|
|
216
|
+
if seg.class_idx < 0 or seg.class_idx >= len(self.classes):
|
|
217
|
+
name = f"class_{seg.class_idx}"
|
|
218
|
+
else:
|
|
219
|
+
name = self.classes[seg.class_idx]
|
|
220
|
+
fps = max(1e-6, self.orig_fps)
|
|
221
|
+
rows.append({
|
|
222
|
+
"Behavior": name,
|
|
223
|
+
"Start Time (s)": round(seg.start / fps, 4),
|
|
224
|
+
"End Time (s)": round(seg.end / fps, 4),
|
|
225
|
+
"Start Frame": seg.start,
|
|
226
|
+
"End Frame": seg.end,
|
|
227
|
+
"Duration (s)": round(seg.length / fps, 4),
|
|
228
|
+
"Confidence": round(seg.confidence, 4),
|
|
229
|
+
})
|
|
230
|
+
return rows
|
|
231
|
+
|
|
232
|
+
@classmethod
|
|
233
|
+
def from_frame_labels(
|
|
234
|
+
cls,
|
|
235
|
+
labels: np.ndarray,
|
|
236
|
+
classes: list[str],
|
|
237
|
+
total_frames: int,
|
|
238
|
+
orig_fps: float = 30.0,
|
|
239
|
+
confidences: Optional[np.ndarray] = None,
|
|
240
|
+
) -> "SegmentsModel":
|
|
241
|
+
"""Build from a per-frame label array (e.g. argmax output)."""
|
|
242
|
+
segments: list[Segment] = []
|
|
243
|
+
if len(labels) == 0:
|
|
244
|
+
return cls([], classes, total_frames, orig_fps)
|
|
245
|
+
current_class = int(labels[0])
|
|
246
|
+
start = 0
|
|
247
|
+
for i in range(1, len(labels)):
|
|
248
|
+
if int(labels[i]) != current_class:
|
|
249
|
+
if current_class >= 0:
|
|
250
|
+
conf = float(confidences[start:i].mean()) if confidences is not None else 1.0
|
|
251
|
+
segments.append(Segment(current_class, start, i, conf))
|
|
252
|
+
current_class = int(labels[i])
|
|
253
|
+
start = i
|
|
254
|
+
if current_class >= 0:
|
|
255
|
+
conf = float(confidences[start:].mean()) if confidences is not None else 1.0
|
|
256
|
+
segments.append(Segment(current_class, start, len(labels), conf))
|
|
257
|
+
return cls(segments, classes, total_frames, orig_fps)
|
{singlebehaviorlab-2.2.0 → singlebehaviorlab-2.3.1}/singlebehaviorlab/gui/inference_popups.py
RENAMED
|
@@ -821,193 +821,6 @@ class FrameSegmentPopupDialog(QDialog):
|
|
|
821
821
|
add_segment_btn.clicked.connect(add_segment_chunks_to_training)
|
|
822
822
|
training_layout.addWidget(add_segment_btn)
|
|
823
823
|
|
|
824
|
-
transition_len_row = QHBoxLayout()
|
|
825
|
-
transition_len_row.addWidget(QLabel("Transition clip frames:"))
|
|
826
|
-
transition_len_spin = QSpinBox()
|
|
827
|
-
transition_len_spin.setRange(2, 64)
|
|
828
|
-
transition_len_spin.setValue(int(self._widget.clip_length_spin.value()))
|
|
829
|
-
transition_len_spin.setToolTip("Number of sampled frames to save for this transition clip.")
|
|
830
|
-
transition_len_row.addWidget(transition_len_spin)
|
|
831
|
-
transition_len_row.addWidget(QLabel("Ignore ±frames:"))
|
|
832
|
-
transition_ignore_spin = QSpinBox()
|
|
833
|
-
transition_ignore_spin.setRange(0, 8)
|
|
834
|
-
transition_ignore_spin.setValue(1)
|
|
835
|
-
transition_ignore_spin.setToolTip(
|
|
836
|
-
"Frames around the exact boundary set to None (ignored during frame loss)."
|
|
837
|
-
)
|
|
838
|
-
transition_len_row.addWidget(transition_ignore_spin)
|
|
839
|
-
training_layout.addLayout(transition_len_row)
|
|
840
|
-
|
|
841
|
-
def _safe_label_token(label_text: str) -> str:
|
|
842
|
-
token = str(label_text or "").strip().replace(" ", "_").replace("/", "_").replace("\\", "_")
|
|
843
|
-
while "__" in token:
|
|
844
|
-
token = token.replace("__", "_")
|
|
845
|
-
return token.strip("_") or "class"
|
|
846
|
-
|
|
847
|
-
def _extract_transition_clip(left_seg: dict, right_seg: dict, boundary_name: str):
|
|
848
|
-
try:
|
|
849
|
-
left_idx = int(left_seg.get("class", -1))
|
|
850
|
-
right_idx = int(right_seg.get("class", -1))
|
|
851
|
-
if not (0 <= left_idx < len(self._widget.classes) and 0 <= right_idx < len(self._widget.classes)):
|
|
852
|
-
QMessageBox.warning(self, "Invalid class", "Could not resolve neighboring segment labels.")
|
|
853
|
-
return
|
|
854
|
-
|
|
855
|
-
left_label = self._widget.classes[left_idx]
|
|
856
|
-
right_label = self._widget.classes[right_idx]
|
|
857
|
-
clip_len = int(transition_len_spin.value())
|
|
858
|
-
if clip_len <= 1:
|
|
859
|
-
QMessageBox.warning(self, "Invalid clip length", "Transition clip length must be >= 2.")
|
|
860
|
-
return
|
|
861
|
-
ignore_half = int(max(0, transition_ignore_spin.value()))
|
|
862
|
-
|
|
863
|
-
clips_dir = self._widget._get_clips_dir()
|
|
864
|
-
annotation_manager = AnnotationManager(self._widget._get_annotation_file())
|
|
865
|
-
annotation_manager.add_class(left_label)
|
|
866
|
-
annotation_manager.add_class(right_label)
|
|
867
|
-
|
|
868
|
-
frame_interval = int(max(1, self._widget._get_saved_frame_interval(self._widget.video_path, orig_fps)))
|
|
869
|
-
boundary_frame = int((int(left_seg["end"]) + int(right_seg["start"])) // 2)
|
|
870
|
-
center_idx = clip_len // 2
|
|
871
|
-
clip_start_vid_frame = max(0, boundary_frame - center_idx * frame_interval)
|
|
872
|
-
|
|
873
|
-
cap_local = cv2.VideoCapture(self._widget.video_path)
|
|
874
|
-
if not cap_local.isOpened():
|
|
875
|
-
QMessageBox.warning(self, "Error", "Could not open video file.")
|
|
876
|
-
return
|
|
877
|
-
cap_local.set(cv2.CAP_PROP_POS_FRAMES, clip_start_vid_frame)
|
|
878
|
-
frames = []
|
|
879
|
-
sampled_video_frames = []
|
|
880
|
-
read_ctr = 0
|
|
881
|
-
while len(frames) < clip_len:
|
|
882
|
-
ret, frame = cap_local.read()
|
|
883
|
-
if not ret:
|
|
884
|
-
break
|
|
885
|
-
if read_ctr % frame_interval == 0:
|
|
886
|
-
frames.append(frame.copy())
|
|
887
|
-
sampled_video_frames.append(int(clip_start_vid_frame + read_ctr))
|
|
888
|
-
read_ctr += 1
|
|
889
|
-
cap_local.release()
|
|
890
|
-
|
|
891
|
-
if not frames:
|
|
892
|
-
QMessageBox.warning(self, "No frames", "Could not extract transition clip frames.")
|
|
893
|
-
return
|
|
894
|
-
|
|
895
|
-
if len(frames) < clip_len:
|
|
896
|
-
last_frame = frames[-1]
|
|
897
|
-
last_idx = sampled_video_frames[-1]
|
|
898
|
-
while len(frames) < clip_len:
|
|
899
|
-
frames.append(last_frame.copy())
|
|
900
|
-
last_idx += frame_interval
|
|
901
|
-
sampled_video_frames.append(int(last_idx))
|
|
902
|
-
|
|
903
|
-
left_end = int(left_seg["end"]) - ignore_half * frame_interval
|
|
904
|
-
right_start = int(right_seg["start"]) + ignore_half * frame_interval
|
|
905
|
-
frame_labels = []
|
|
906
|
-
for vf in sampled_video_frames:
|
|
907
|
-
if vf <= left_end:
|
|
908
|
-
frame_labels.append(left_label)
|
|
909
|
-
elif vf >= right_start:
|
|
910
|
-
frame_labels.append(right_label)
|
|
911
|
-
else:
|
|
912
|
-
frame_labels.append(None)
|
|
913
|
-
|
|
914
|
-
if left_label not in frame_labels:
|
|
915
|
-
frame_labels[0] = left_label
|
|
916
|
-
if right_label not in frame_labels:
|
|
917
|
-
frame_labels[-1] = right_label
|
|
918
|
-
|
|
919
|
-
non_none = [x for x in frame_labels if x is not None]
|
|
920
|
-
primary_label = left_label
|
|
921
|
-
if non_none:
|
|
922
|
-
left_count = sum(1 for x in non_none if x == left_label)
|
|
923
|
-
right_count = sum(1 for x in non_none if x == right_label)
|
|
924
|
-
primary_label = left_label if left_count >= right_count else right_label
|
|
925
|
-
|
|
926
|
-
video_basename = self._widget._video_basename()
|
|
927
|
-
left_tok = _safe_label_token(left_label)
|
|
928
|
-
right_tok = _safe_label_token(right_label)
|
|
929
|
-
clip_filename = (
|
|
930
|
-
f"{video_basename}_transition_{left_tok}_to_{right_tok}_"
|
|
931
|
-
f"frame_{clip_start_vid_frame}_len_{clip_len}.mp4"
|
|
932
|
-
)
|
|
933
|
-
clip_path = os.path.join(clips_dir, clip_filename)
|
|
934
|
-
clip_path = self._widget._unique_clip_path(clip_path)
|
|
935
|
-
|
|
936
|
-
target_fps = int(self._widget.target_fps_spin.value())
|
|
937
|
-
save_clip(frames, clip_path, target_fps)
|
|
938
|
-
if not os.path.exists(clip_path) or os.path.getsize(clip_path) == 0:
|
|
939
|
-
QMessageBox.warning(self, "Save failed", "Failed to save transition clip.")
|
|
940
|
-
return
|
|
941
|
-
|
|
942
|
-
clip_id = self._widget._clip_path_to_id(clip_path, clips_dir)
|
|
943
|
-
meta = {
|
|
944
|
-
"source_video": os.path.basename(self._widget.video_path),
|
|
945
|
-
"source_frame": int(clip_start_vid_frame),
|
|
946
|
-
"target_fps": int(target_fps),
|
|
947
|
-
"clip_length": int(clip_len),
|
|
948
|
-
"added_from_inference_transition": True,
|
|
949
|
-
"transition_direction": boundary_name,
|
|
950
|
-
"transition_from_label": left_label,
|
|
951
|
-
"transition_to_label": right_label,
|
|
952
|
-
"transition_boundary_frame": int(boundary_frame),
|
|
953
|
-
"transition_ignore_half_frames": int(ignore_half),
|
|
954
|
-
}
|
|
955
|
-
used_clip_id = annotation_manager.add_clip(clip_id, primary_label, meta=meta)
|
|
956
|
-
annotation_manager.set_frame_labels(used_clip_id, frame_labels)
|
|
957
|
-
|
|
958
|
-
n_left = sum(1 for x in frame_labels if x == left_label)
|
|
959
|
-
n_right = sum(1 for x in frame_labels if x == right_label)
|
|
960
|
-
n_ignored = sum(1 for x in frame_labels if x is None)
|
|
961
|
-
self._widget.log_text.append(
|
|
962
|
-
f"Added transition clip ({boundary_name}): {left_label}->{right_label}, "
|
|
963
|
-
f"frames={clip_len}, labels=({n_left}/{n_right}/ignored={n_ignored})"
|
|
964
|
-
)
|
|
965
|
-
QMessageBox.information(
|
|
966
|
-
self,
|
|
967
|
-
"Transition clip added",
|
|
968
|
-
f"Saved transition training clip.\n\n"
|
|
969
|
-
f"From: {left_label}\n"
|
|
970
|
-
f"To: {right_label}\n"
|
|
971
|
-
f"Direction: {boundary_name}\n"
|
|
972
|
-
f"Clip: {os.path.basename(clip_path)}\n"
|
|
973
|
-
f"Frame labels: {n_left} left, {n_right} right, {n_ignored} ignored",
|
|
974
|
-
)
|
|
975
|
-
except Exception as e:
|
|
976
|
-
QMessageBox.critical(self, "Error", f"Failed to add transition clip:\n{str(e)}")
|
|
977
|
-
|
|
978
|
-
transition_btn_row = QHBoxLayout()
|
|
979
|
-
prev_transition_btn = QPushButton("Add prev -> current transition clip")
|
|
980
|
-
next_transition_btn = QPushButton("Add current -> next transition clip")
|
|
981
|
-
prev_transition_btn.setStyleSheet("background-color: #4b7bec; color: white; font-weight: bold; padding: 5px;")
|
|
982
|
-
next_transition_btn.setStyleSheet("background-color: #4b7bec; color: white; font-weight: bold; padding: 5px;")
|
|
983
|
-
|
|
984
|
-
has_prev_seg = self._segment_idx is not None and self._segment_idx > 0
|
|
985
|
-
has_next_seg = self._segment_idx is not None and self._segment_idx < (len(self._widget.aggregated_segments) - 1)
|
|
986
|
-
prev_transition_btn.setEnabled(has_prev_seg)
|
|
987
|
-
next_transition_btn.setEnabled(has_next_seg)
|
|
988
|
-
prev_transition_btn.setToolTip("Create one fixed-length transition clip around the previous->current boundary.")
|
|
989
|
-
next_transition_btn.setToolTip("Create one fixed-length transition clip around the current->next boundary.")
|
|
990
|
-
|
|
991
|
-
def _add_prev_transition():
|
|
992
|
-
if not has_prev_seg:
|
|
993
|
-
return
|
|
994
|
-
prev_seg = self._widget.aggregated_segments[self._segment_idx - 1]
|
|
995
|
-
curr_seg = self._widget.aggregated_segments[self._segment_idx]
|
|
996
|
-
_extract_transition_clip(prev_seg, curr_seg, "prev_to_current")
|
|
997
|
-
|
|
998
|
-
def _add_next_transition():
|
|
999
|
-
if not has_next_seg:
|
|
1000
|
-
return
|
|
1001
|
-
curr_seg = self._widget.aggregated_segments[self._segment_idx]
|
|
1002
|
-
next_seg = self._widget.aggregated_segments[self._segment_idx + 1]
|
|
1003
|
-
_extract_transition_clip(curr_seg, next_seg, "current_to_next")
|
|
1004
|
-
|
|
1005
|
-
prev_transition_btn.clicked.connect(_add_prev_transition)
|
|
1006
|
-
next_transition_btn.clicked.connect(_add_next_transition)
|
|
1007
|
-
transition_btn_row.addWidget(prev_transition_btn)
|
|
1008
|
-
transition_btn_row.addWidget(next_transition_btn)
|
|
1009
|
-
training_layout.addLayout(transition_btn_row)
|
|
1010
|
-
|
|
1011
824
|
training_group.setLayout(training_layout)
|
|
1012
825
|
layout.addWidget(training_group)
|
|
1013
826
|
|