singlebehaviorlab 2.1.0__tar.gz → 2.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {singlebehaviorlab-2.1.0/singlebehaviorlab.egg-info → singlebehaviorlab-2.3.0}/PKG-INFO +3 -1
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/README.md +2 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/pyproject.toml +1 -1
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/__init__.py +3 -1
- singlebehaviorlab-2.3.0/singlebehaviorlab/backend/segments.py +257 -0
- singlebehaviorlab-2.3.0/singlebehaviorlab/demo.py +131 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/inference_popups.py +0 -187
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/inference_widget.py +199 -540
- singlebehaviorlab-2.3.0/singlebehaviorlab/gui/interactive_timeline.py +549 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0/singlebehaviorlab.egg-info}/PKG-INFO +3 -1
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab.egg-info/SOURCES.txt +4 -0
- singlebehaviorlab-2.3.0/tests/test_segments.py +144 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/LICENSE +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/setup.cfg +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/__main__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/_paths.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/__init__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/augmentations.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/clustering.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/data_store.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/inference.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/model.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/registration.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/segmentation.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/train.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/training_runner.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/uncertainty.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/video_processor.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/backend/video_utils.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/cli.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/config.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/data/config/config.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/data/training_profiles.json +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/__init__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/analysis_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/attention_export.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/clip_extraction_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/clustering_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/inference_worker.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/labeling_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/main_window.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/metadata_management_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/motion_tracking.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/overlay_export.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/plot_integration.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/qt_helpers.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/registration_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/review_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/segmentation_tracking_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/tab_tutorial_dialog.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/timeline_themes.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/training_profiles.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/training_widget.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/video_utils.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/licenses/SAM2-LICENSE +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/licenses/VideoPrism-LICENSE +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab.egg-info/dependency_links.txt +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab.egg-info/entry_points.txt +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab.egg-info/requires.txt +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab.egg-info/top_level.txt +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/tests/test_clustering_smoke.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/tests/test_config.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/tests/test_motion_tracking.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/tests/test_paths.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/tests/test_sam2_smoke.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/__init__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/automatic_mask_generator.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/benchmark.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/build_sam.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/__init__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/backbones/__init__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/backbones/hieradet.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/backbones/image_encoder.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/backbones/utils.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/memory_attention.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/memory_encoder.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/position_encoding.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/sam/__init__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/sam/mask_decoder.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/sam/prompt_encoder.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/sam/transformer.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/sam2_base.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/modeling/sam2_utils.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/sam2_hiera_b+.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/sam2_hiera_l.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/sam2_hiera_s.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/sam2_hiera_t.yaml +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/sam2_image_predictor.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/sam2_video_predictor.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/sam2_video_predictor_legacy.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/utils/__init__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/utils/amg.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/utils/misc.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/sam2_backend/sam2/utils/transforms.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/videoprism_backend/videoprism/__init__.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/videoprism_backend/videoprism/encoders.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/videoprism_backend/videoprism/layers.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/videoprism_backend/videoprism/models.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/videoprism_backend/videoprism/tokenizers.py +0 -0
- {singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/third_party/videoprism_backend/videoprism/utils.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: singlebehaviorlab
|
|
3
|
-
Version: 2.1.0
|
|
3
|
+
Version: 2.3.0
|
|
4
4
|
Summary: Semi-automated behavioral video annotation, training, and analysis tool
|
|
5
5
|
Author: Almir Aljovic
|
|
6
6
|
Maintainer: Almir Aljovic
|
|
@@ -200,6 +200,8 @@ Run `singlebehaviorlab <command> --help` for the full flag list on each subcomma
|
|
|
200
200
|
> **Full CLI reference:** [**CLI.md**](CLI.md) — detailed per-command docs, file-format reference, Python API, and troubleshooting.
|
|
201
201
|
>
|
|
202
202
|
> **Notebook demos:** [**demo/**](demo/) — two Jupyter notebooks walking through behavior sequencing and segmentation/clustering end-to-end. Drop your own demo video + prompts into `demo/data/` and step through the cells.
|
|
203
|
+
>
|
|
204
|
+
> **Try it in Colab — no install needed:** [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/alms93/SingleBehaviorLab/blob/main/demo/colab_segmentation_clustering.ipynb) Runs the full segmentation + clustering pipeline on a bundled demo video in a free Colab GPU runtime. Install takes ~5–10 minutes; the pipeline itself finishes in a few minutes.
|
|
203
205
|
|
|
204
206
|
---
|
|
205
207
|
|
|
@@ -133,6 +133,8 @@ Run `singlebehaviorlab <command> --help` for the full flag list on each subcomma
|
|
|
133
133
|
> **Full CLI reference:** [**CLI.md**](CLI.md) — detailed per-command docs, file-format reference, Python API, and troubleshooting.
|
|
134
134
|
>
|
|
135
135
|
> **Notebook demos:** [**demo/**](demo/) — two Jupyter notebooks walking through behavior sequencing and segmentation/clustering end-to-end. Drop your own demo video + prompts into `demo/data/` and step through the cells.
|
|
136
|
+
>
|
|
137
|
+
> **Try it in Colab — no install needed:** [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/alms93/SingleBehaviorLab/blob/main/demo/colab_segmentation_clustering.ipynb) Runs the full segmentation + clustering pipeline on a bundled demo video in a free Colab GPU runtime. Install takes ~5–10 minutes; the pipeline itself finishes in a few minutes.
|
|
136
138
|
|
|
137
139
|
---
|
|
138
140
|
|
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "singlebehaviorlab"
|
|
7
|
-
version = "2.1.0"
|
|
7
|
+
version = "2.3.0"
|
|
8
8
|
description = "Semi-automated behavioral video annotation, training, and analysis tool"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
license = { file = "LICENSE" }
|
|
@@ -19,7 +19,7 @@ or videoprism. Each symbol triggers its underlying backend module only on
|
|
|
19
19
|
first access.
|
|
20
20
|
"""
|
|
21
21
|
|
|
22
|
-
__version__ = "2.1.0"
|
|
22
|
+
__version__ = "2.3.0"
|
|
23
23
|
__author__ = "Almir Aljovic"
|
|
24
24
|
|
|
25
25
|
# Mapping of public name → (backend module, attribute name).
|
|
@@ -35,6 +35,8 @@ _PUBLIC_API = {
|
|
|
35
35
|
"infer": ("singlebehaviorlab.backend.inference", "run_inference_on_video"),
|
|
36
36
|
"train": ("singlebehaviorlab.backend.training_runner", "run_training_session"),
|
|
37
37
|
"load_config": ("singlebehaviorlab.config", "load_config"),
|
|
38
|
+
"load_demo": ("singlebehaviorlab.demo", "load_demo"),
|
|
39
|
+
"DEMOS": ("singlebehaviorlab.demo", "DEMOS"),
|
|
38
40
|
}
|
|
39
41
|
|
|
40
42
|
__all__ = ["__version__", "__author__", *sorted(_PUBLIC_API)]
|
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
"""Mutable segment list that drives the interactive timeline editor.
|
|
2
|
+
|
|
3
|
+
Segments are the single source of truth: per-frame label arrays, CSV rows,
|
|
4
|
+
and SVG rectangles are all derived from them. Editing operations enforce
|
|
5
|
+
non-overlap and boundary constraints, and every mutation is tracked by an
|
|
6
|
+
undo/redo stack.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import copy
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from typing import Literal, Optional
|
|
14
|
+
|
|
15
|
+
import numpy as np
|
|
16
|
+
|
|
17
|
+
__all__ = ["Segment", "SegmentsModel"]
|
|
18
|
+
|
|
19
|
+
UNDO_LIMIT = 50
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class Segment:
    """A half-open ``[start, end)`` run of frames carrying one behavior class."""

    class_idx: int  # index into the owning model's class-name list
    start: int  # first frame of the run (inclusive)
    end: int  # one past the last frame (exclusive)
    confidence: float = 1.0  # mean model confidence over the span

    @property
    def length(self) -> int:
        """Number of frames covered; never negative, even for inverted bounds."""
        span = self.end - self.start
        return span if span > 0 else 0

    def to_dict(self) -> dict:
        """Serialize to the plain-dict form used for persistence."""
        return dict(
            [
                ("class", self.class_idx),
                ("start", self.start),
                ("end", self.end),
                ("confidence", self.confidence),
            ]
        )

    @classmethod
    def from_dict(cls, d: dict) -> "Segment":
        """Rebuild a Segment from its dict form; confidence defaults to 1.0."""
        conf = d.get("confidence", 1.0)
        return cls(int(d["class"]), int(d["start"]), int(d["end"]), float(conf))
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class SegmentsModel:
    """Ordered, non-overlapping segment list with undo/redo.

    The model owns a list of :class:`Segment` objects kept sorted by
    ``start``.  Every editing operation enforces the non-overlap invariant
    against the neighbouring segments and pushes a deep-copied snapshot of
    the whole list onto the undo stack before mutating.  All editing
    methods return ``True`` on success and ``False`` when the request is
    rejected (bad index, impossible constraint, ...).
    """

    def __init__(
        self,
        segments: list[dict] | list[Segment],
        classes: list[str],
        total_frames: int,
        orig_fps: float = 30.0,
    ):
        # Class-name list; Segment.class_idx indexes into it.
        self.classes = list(classes)
        # Timeline length in frames; all segment bounds are clamped to it.
        self.total_frames = max(0, total_frames)
        # Original video frame rate, used to derive seconds for CSV export.
        self.orig_fps = float(orig_fps)

        # Accept either Segment instances or their dict form, then sort by
        # start so the neighbour helpers (_prev_end/_next_start) are valid.
        raw = []
        for s in segments:
            raw.append(s if isinstance(s, Segment) else Segment.from_dict(s))
        raw.sort(key=lambda s: s.start)
        self._segments: list[Segment] = raw
        # Undo/redo stacks hold deep-copied snapshots of the full list.
        self._undo: list[list[Segment]] = []
        self._redo: list[list[Segment]] = []

    @property
    def segments(self) -> list[Segment]:
        # NOTE(review): exposes the internal list — callers that mutate it
        # directly bypass the undo stack and the non-overlap checks.
        return self._segments

    def __len__(self) -> int:
        return len(self._segments)

    def __getitem__(self, idx: int) -> Segment:
        return self._segments[idx]

    def _snapshot(self) -> None:
        """Push a deep copy of the current state; cap the stack; clear redo."""
        self._undo.append(copy.deepcopy(self._segments))
        if len(self._undo) > UNDO_LIMIT:
            # Drop the oldest snapshot once the limit is exceeded.
            self._undo.pop(0)
        # Any new edit invalidates the redo history.
        self._redo.clear()

    def _clamp(self, val: int) -> int:
        """Clamp a frame index into [0, total_frames]."""
        return max(0, min(self.total_frames, val))

    def _prev_end(self, idx: int) -> int:
        """End frame of the segment before *idx*, or 0 at the timeline start."""
        return self._segments[idx - 1].end if idx > 0 else 0

    def _next_start(self, idx: int) -> int:
        """Start frame of the segment after *idx*, or total_frames at the end."""
        if idx < len(self._segments) - 1:
            return self._segments[idx + 1].start
        return self.total_frames

    # ------------------------------------------------------------------ undo

    @property
    def can_undo(self) -> bool:
        return len(self._undo) > 0

    @property
    def can_redo(self) -> bool:
        return len(self._redo) > 0

    def undo(self) -> bool:
        """Restore the previous snapshot; returns False when nothing to undo."""
        if not self._undo:
            return False
        # Current state becomes redo-able before we roll back.
        self._redo.append(copy.deepcopy(self._segments))
        self._segments = self._undo.pop()
        return True

    def redo(self) -> bool:
        """Re-apply the last undone snapshot; returns False when nothing to redo."""
        if not self._redo:
            return False
        self._undo.append(copy.deepcopy(self._segments))
        self._segments = self._redo.pop()
        return True

    # --------------------------------------------------------------- editing

    def resize(self, idx: int, edge: Literal["left", "right"], delta: int) -> bool:
        """Move one edge of segment *idx* by *delta* frames.

        The edge is clamped to the timeline, to the neighbouring segment,
        and so that at least one frame remains in the segment.
        """
        if idx < 0 or idx >= len(self._segments):
            return False
        seg = self._segments[idx]
        self._snapshot()

        if edge == "left":
            new_start = self._clamp(seg.start + delta)
            # Cannot cross into the previous segment.
            new_start = max(new_start, self._prev_end(idx))
            if new_start >= seg.end:
                # Keep at least one frame.
                new_start = seg.end - 1
            seg.start = new_start
        else:
            new_end = self._clamp(seg.end + delta)
            # Cannot cross into the next segment.
            new_end = min(new_end, self._next_start(idx))
            if new_end <= seg.start:
                new_end = seg.start + 1
            seg.end = new_end
        return True

    def move(self, idx: int, delta: int) -> bool:
        """Shift segment *idx* by *delta* frames, preserving its length.

        Fails (returns False, no snapshot) when the gap between the
        neighbouring segments is too small to hold the segment at all.
        """
        if idx < 0 or idx >= len(self._segments):
            return False
        seg = self._segments[idx]
        length = seg.length
        lo = self._prev_end(idx)
        hi = self._next_start(idx)
        if hi - lo < length:
            return False
        self._snapshot()
        new_start = self._clamp(seg.start + delta)
        new_start = max(new_start, lo)
        if new_start + length > hi:
            # Slide back so the segment still fits before the next one.
            new_start = hi - length
        seg.start = new_start
        seg.end = new_start + length
        return True

    def reclass(self, idx: int, new_class_idx: int) -> bool:
        """Change the behavior class of segment *idx*."""
        if idx < 0 or idx >= len(self._segments):
            return False
        if new_class_idx < 0 or new_class_idx >= len(self.classes):
            return False
        self._snapshot()
        self._segments[idx].class_idx = new_class_idx
        return True

    def delete(self, idx: int) -> bool:
        """Remove segment *idx* from the timeline."""
        if idx < 0 or idx >= len(self._segments):
            return False
        self._snapshot()
        self._segments.pop(idx)
        return True

    def split(self, idx: int, at_frame: int) -> bool:
        """Split segment *idx* into two at *at_frame*.

        *at_frame* must fall strictly inside the segment; both halves keep
        the original class and confidence.
        """
        if idx < 0 or idx >= len(self._segments):
            return False
        seg = self._segments[idx]
        if at_frame <= seg.start or at_frame >= seg.end:
            return False
        self._snapshot()
        left = Segment(seg.class_idx, seg.start, at_frame, seg.confidence)
        right = Segment(seg.class_idx, at_frame, seg.end, seg.confidence)
        self._segments[idx:idx + 1] = [left, right]
        return True

    def merge_with_next(self, idx: int) -> bool:
        """Absorb segment *idx + 1* into segment *idx*.

        The merged segment keeps the left segment's class and confidence
        and extends to the right segment's end.
        """
        if idx < 0 or idx >= len(self._segments) - 1:
            return False
        self._snapshot()
        left = self._segments[idx]
        right = self._segments[idx + 1]
        left.end = right.end
        self._segments.pop(idx + 1)
        return True

    # -------------------------------------------------------- derived outputs

    def to_frame_labels(self) -> np.ndarray:
        """Render the segments as a per-frame int32 array; -1 = unlabelled."""
        labels = np.full(self.total_frames, -1, dtype=np.int32)
        for seg in self._segments:
            labels[seg.start:seg.end] = seg.class_idx
        return labels

    def to_dicts(self) -> list[dict]:
        """Serialize every segment to its plain-dict form."""
        return [s.to_dict() for s in self._segments]

    def to_csv_rows(self) -> list[dict]:
        """Build one export row per segment with times derived from orig_fps."""
        rows = []
        for seg in self._segments:
            # Out-of-range class indices fall back to a synthetic name.
            if seg.class_idx < 0 or seg.class_idx >= len(self.classes):
                name = f"class_{seg.class_idx}"
            else:
                name = self.classes[seg.class_idx]
            # Guard against a zero/invalid fps to avoid division by zero.
            fps = max(1e-6, self.orig_fps)
            rows.append({
                "Behavior": name,
                "Start Time (s)": round(seg.start / fps, 4),
                "End Time (s)": round(seg.end / fps, 4),
                "Start Frame": seg.start,
                "End Frame": seg.end,
                "Duration (s)": round(seg.length / fps, 4),
                "Confidence": round(seg.confidence, 4),
            })
        return rows

    @classmethod
    def from_frame_labels(
        cls,
        labels: np.ndarray,
        classes: list[str],
        total_frames: int,
        orig_fps: float = 30.0,
        confidences: Optional[np.ndarray] = None,
    ) -> "SegmentsModel":
        """Build from a per-frame label array (e.g. argmax output).

        Consecutive runs of the same non-negative label become one segment;
        runs of -1 (unlabelled) produce gaps.  When *confidences* is given,
        each segment's confidence is the mean over its frames.
        """
        segments: list[Segment] = []
        if len(labels) == 0:
            return cls([], classes, total_frames, orig_fps)
        current_class = int(labels[0])
        start = 0
        for i in range(1, len(labels)):
            if int(labels[i]) != current_class:
                # Close the previous run (skip unlabelled runs).
                if current_class >= 0:
                    conf = float(confidences[start:i].mean()) if confidences is not None else 1.0
                    segments.append(Segment(current_class, start, i, conf))
                current_class = int(labels[i])
                start = i
        # Close the trailing run.
        if current_class >= 0:
            conf = float(confidences[start:].mean()) if confidences is not None else 1.0
            segments.append(Segment(current_class, start, len(labels), conf))
        return cls(segments, classes, total_frames, orig_fps)
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
"""Downloadable demo datasets for trying SingleBehaviorLab end-to-end.
|
|
2
|
+
|
|
3
|
+
Each entry in :data:`DEMOS` maps a short name to a list of ``(filename, url)``
|
|
4
|
+
pairs. :func:`load_demo` downloads the listed files into a local cache
|
|
5
|
+
directory and returns a dict mapping logical asset names to absolute paths,
|
|
6
|
+
so the pipeline functions can be called directly on the returned values::
|
|
7
|
+
|
|
8
|
+
import singlebehaviorlab as sbl
|
|
9
|
+
|
|
10
|
+
demo = sbl.load_demo("segmentation_clustering")
|
|
11
|
+
sbl.segment(demo["video"], demo["prompts"], "masks.h5")
|
|
12
|
+
|
|
13
|
+
The asset URLs are pinned to a released tag so existing notebooks keep
|
|
14
|
+
working when ``main`` moves forward.
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
import os
|
|
20
|
+
import shutil
|
|
21
|
+
import urllib.request
|
|
22
|
+
from dataclasses import dataclass
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from typing import Optional
|
|
25
|
+
|
|
26
|
+
__all__ = ["DEMOS", "DemoAsset", "load_demo"]
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
# Assets are pinned to a released tag so existing notebooks keep working
# even as `main` moves forward.
_TAG = "v2.1.0"
_RAW_BASE = f"https://raw.githubusercontent.com/alms93/SingleBehaviorLab/{_TAG}"


@dataclass(frozen=True)
class DemoAsset:
    """A single downloadable file belonging to one demo dataset."""

    key: str  # logical handle returned by load_demo(), e.g. "video"
    filename: str  # name the file is stored under in the local cache
    url: str  # fully-qualified download URL


# Registry of available demos.  Each entry maps a short demo name to the
# list of files that must be fetched for it.
DEMOS: dict[str, list[DemoAsset]] = {
    "segmentation_clustering": [
        DemoAsset(
            key=asset_key,
            filename=asset_file,
            url=f"{_RAW_BASE}/demo/data/segmentation_clustering/{asset_file}",
        )
        for asset_key, asset_file in (
            ("video", "Demo_video.mp4"),
            ("prompts", "sam2_prompts.json"),
        )
    ],
}
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def _default_cache_dir() -> Path:
    """Resolve the demo cache root.

    ``$SBL_DEMO_DIR`` wins when set (expanded and resolved); otherwise the
    conventional per-user cache location is used.
    """
    env_dir = os.environ.get("SBL_DEMO_DIR")
    if not env_dir:
        return Path.home() / ".cache" / "singlebehaviorlab" / "demos"
    return Path(env_dir).expanduser().resolve()
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _download_with_progress(url: str, dest: Path) -> None:
    """Download *url* to *dest*, showing a progress bar when tqdm is available.

    The payload is first streamed into ``<dest>.part`` and only renamed into
    place after a complete download, so an interrupted transfer never leaves
    a truncated file at *dest*.

    Args:
        url: Remote (or ``file://``) URL to fetch.
        dest: Final on-disk path; parent directories are created as needed.

    Raises:
        urllib.error.URLError / OSError: Propagated from the transfer; the
            partial ``.part`` file is removed before re-raising.
    """
    dest.parent.mkdir(parents=True, exist_ok=True)
    tmp_path = dest.with_suffix(dest.suffix + ".part")
    try:
        # tqdm is an optional nicety, not a hard dependency.
        from tqdm.auto import tqdm as _tqdm
    except Exception:
        _tqdm = None  # type: ignore[assignment]

    try:
        if _tqdm is None:
            # No tqdm: stream straight to disk without progress reporting.
            with urllib.request.urlopen(url) as response, open(tmp_path, "wb") as f:
                shutil.copyfileobj(response, f)
        else:
            with urllib.request.urlopen(url) as response:
                # Content-Length may be absent; tqdm accepts total=None.
                total = int(response.headers.get("Content-Length") or 0) or None
                with _tqdm(
                    total=total,
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    desc=dest.name,
                    leave=False,
                ) as bar:
                    with open(tmp_path, "wb") as f:
                        while True:
                            chunk = response.read(1024 * 64)
                            if not chunk:
                                break
                            f.write(chunk)
                            bar.update(len(chunk))
    except BaseException:
        # Fix: a failed/interrupted download previously left a stale .part
        # file behind; remove it so later "cached copy" checks stay honest.
        tmp_path.unlink(missing_ok=True)
        raise
    # Atomic publish: only a fully-downloaded file ever appears at dest.
    tmp_path.replace(dest)
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def load_demo(
    name: str = "segmentation_clustering",
    *,
    destination: Optional[str | os.PathLike[str]] = None,
    force: bool = False,
) -> dict[str, str]:
    """Fetch a demo dataset into the local cache and return its file paths.

    Args:
        name: Which registered demo to fetch (a key of :data:`DEMOS`).
        destination: Cache-directory override.  When omitted, falls back to
            ``$SBL_DEMO_DIR`` if set, otherwise
            ``~/.cache/singlebehaviorlab/demos/``.
        force: Download again even when a non-empty local copy exists.

    Returns:
        Mapping of each asset's logical ``key`` to its absolute on-disk
        path, e.g. ``{"video": "/root/.cache/.../Demo_video.mp4", ...}``.

    Raises:
        KeyError: If *name* is not a registered demo.
    """
    if name not in DEMOS:
        available = ", ".join(sorted(DEMOS.keys()))
        raise KeyError(f"Unknown demo '{name}'. Available demos: {available}")

    # Per-demo subdirectory under the cache root.
    if destination:
        cache_root = Path(destination).expanduser().resolve()
    else:
        cache_root = _default_cache_dir()
    demo_dir = cache_root / name
    demo_dir.mkdir(parents=True, exist_ok=True)

    resolved: dict[str, str] = {}
    for asset in DEMOS[name]:
        local_file = demo_dir / asset.filename
        # A zero-byte file counts as missing (e.g. a previously failed write).
        have_copy = local_file.exists() and local_file.stat().st_size > 0
        if force or not have_copy:
            _download_with_progress(asset.url, local_file)
        resolved[asset.key] = str(local_file)
    return resolved
|
{singlebehaviorlab-2.1.0 → singlebehaviorlab-2.3.0}/singlebehaviorlab/gui/inference_popups.py
RENAMED
|
@@ -821,193 +821,6 @@ class FrameSegmentPopupDialog(QDialog):
|
|
|
821
821
|
add_segment_btn.clicked.connect(add_segment_chunks_to_training)
|
|
822
822
|
training_layout.addWidget(add_segment_btn)
|
|
823
823
|
|
|
824
|
-
transition_len_row = QHBoxLayout()
|
|
825
|
-
transition_len_row.addWidget(QLabel("Transition clip frames:"))
|
|
826
|
-
transition_len_spin = QSpinBox()
|
|
827
|
-
transition_len_spin.setRange(2, 64)
|
|
828
|
-
transition_len_spin.setValue(int(self._widget.clip_length_spin.value()))
|
|
829
|
-
transition_len_spin.setToolTip("Number of sampled frames to save for this transition clip.")
|
|
830
|
-
transition_len_row.addWidget(transition_len_spin)
|
|
831
|
-
transition_len_row.addWidget(QLabel("Ignore ±frames:"))
|
|
832
|
-
transition_ignore_spin = QSpinBox()
|
|
833
|
-
transition_ignore_spin.setRange(0, 8)
|
|
834
|
-
transition_ignore_spin.setValue(1)
|
|
835
|
-
transition_ignore_spin.setToolTip(
|
|
836
|
-
"Frames around the exact boundary set to None (ignored during frame loss)."
|
|
837
|
-
)
|
|
838
|
-
transition_len_row.addWidget(transition_ignore_spin)
|
|
839
|
-
training_layout.addLayout(transition_len_row)
|
|
840
|
-
|
|
841
|
-
def _safe_label_token(label_text: str) -> str:
|
|
842
|
-
token = str(label_text or "").strip().replace(" ", "_").replace("/", "_").replace("\\", "_")
|
|
843
|
-
while "__" in token:
|
|
844
|
-
token = token.replace("__", "_")
|
|
845
|
-
return token.strip("_") or "class"
|
|
846
|
-
|
|
847
|
-
def _extract_transition_clip(left_seg: dict, right_seg: dict, boundary_name: str):
|
|
848
|
-
try:
|
|
849
|
-
left_idx = int(left_seg.get("class", -1))
|
|
850
|
-
right_idx = int(right_seg.get("class", -1))
|
|
851
|
-
if not (0 <= left_idx < len(self._widget.classes) and 0 <= right_idx < len(self._widget.classes)):
|
|
852
|
-
QMessageBox.warning(self, "Invalid class", "Could not resolve neighboring segment labels.")
|
|
853
|
-
return
|
|
854
|
-
|
|
855
|
-
left_label = self._widget.classes[left_idx]
|
|
856
|
-
right_label = self._widget.classes[right_idx]
|
|
857
|
-
clip_len = int(transition_len_spin.value())
|
|
858
|
-
if clip_len <= 1:
|
|
859
|
-
QMessageBox.warning(self, "Invalid clip length", "Transition clip length must be >= 2.")
|
|
860
|
-
return
|
|
861
|
-
ignore_half = int(max(0, transition_ignore_spin.value()))
|
|
862
|
-
|
|
863
|
-
clips_dir = self._widget._get_clips_dir()
|
|
864
|
-
annotation_manager = AnnotationManager(self._widget._get_annotation_file())
|
|
865
|
-
annotation_manager.add_class(left_label)
|
|
866
|
-
annotation_manager.add_class(right_label)
|
|
867
|
-
|
|
868
|
-
frame_interval = int(max(1, self._widget._get_saved_frame_interval(self._widget.video_path, orig_fps)))
|
|
869
|
-
boundary_frame = int((int(left_seg["end"]) + int(right_seg["start"])) // 2)
|
|
870
|
-
center_idx = clip_len // 2
|
|
871
|
-
clip_start_vid_frame = max(0, boundary_frame - center_idx * frame_interval)
|
|
872
|
-
|
|
873
|
-
cap_local = cv2.VideoCapture(self._widget.video_path)
|
|
874
|
-
if not cap_local.isOpened():
|
|
875
|
-
QMessageBox.warning(self, "Error", "Could not open video file.")
|
|
876
|
-
return
|
|
877
|
-
cap_local.set(cv2.CAP_PROP_POS_FRAMES, clip_start_vid_frame)
|
|
878
|
-
frames = []
|
|
879
|
-
sampled_video_frames = []
|
|
880
|
-
read_ctr = 0
|
|
881
|
-
while len(frames) < clip_len:
|
|
882
|
-
ret, frame = cap_local.read()
|
|
883
|
-
if not ret:
|
|
884
|
-
break
|
|
885
|
-
if read_ctr % frame_interval == 0:
|
|
886
|
-
frames.append(frame.copy())
|
|
887
|
-
sampled_video_frames.append(int(clip_start_vid_frame + read_ctr))
|
|
888
|
-
read_ctr += 1
|
|
889
|
-
cap_local.release()
|
|
890
|
-
|
|
891
|
-
if not frames:
|
|
892
|
-
QMessageBox.warning(self, "No frames", "Could not extract transition clip frames.")
|
|
893
|
-
return
|
|
894
|
-
|
|
895
|
-
if len(frames) < clip_len:
|
|
896
|
-
last_frame = frames[-1]
|
|
897
|
-
last_idx = sampled_video_frames[-1]
|
|
898
|
-
while len(frames) < clip_len:
|
|
899
|
-
frames.append(last_frame.copy())
|
|
900
|
-
last_idx += frame_interval
|
|
901
|
-
sampled_video_frames.append(int(last_idx))
|
|
902
|
-
|
|
903
|
-
left_end = int(left_seg["end"]) - ignore_half * frame_interval
|
|
904
|
-
right_start = int(right_seg["start"]) + ignore_half * frame_interval
|
|
905
|
-
frame_labels = []
|
|
906
|
-
for vf in sampled_video_frames:
|
|
907
|
-
if vf <= left_end:
|
|
908
|
-
frame_labels.append(left_label)
|
|
909
|
-
elif vf >= right_start:
|
|
910
|
-
frame_labels.append(right_label)
|
|
911
|
-
else:
|
|
912
|
-
frame_labels.append(None)
|
|
913
|
-
|
|
914
|
-
if left_label not in frame_labels:
|
|
915
|
-
frame_labels[0] = left_label
|
|
916
|
-
if right_label not in frame_labels:
|
|
917
|
-
frame_labels[-1] = right_label
|
|
918
|
-
|
|
919
|
-
non_none = [x for x in frame_labels if x is not None]
|
|
920
|
-
primary_label = left_label
|
|
921
|
-
if non_none:
|
|
922
|
-
left_count = sum(1 for x in non_none if x == left_label)
|
|
923
|
-
right_count = sum(1 for x in non_none if x == right_label)
|
|
924
|
-
primary_label = left_label if left_count >= right_count else right_label
|
|
925
|
-
|
|
926
|
-
video_basename = self._widget._video_basename()
|
|
927
|
-
left_tok = _safe_label_token(left_label)
|
|
928
|
-
right_tok = _safe_label_token(right_label)
|
|
929
|
-
clip_filename = (
|
|
930
|
-
f"{video_basename}_transition_{left_tok}_to_{right_tok}_"
|
|
931
|
-
f"frame_{clip_start_vid_frame}_len_{clip_len}.mp4"
|
|
932
|
-
)
|
|
933
|
-
clip_path = os.path.join(clips_dir, clip_filename)
|
|
934
|
-
clip_path = self._widget._unique_clip_path(clip_path)
|
|
935
|
-
|
|
936
|
-
target_fps = int(self._widget.target_fps_spin.value())
|
|
937
|
-
save_clip(frames, clip_path, target_fps)
|
|
938
|
-
if not os.path.exists(clip_path) or os.path.getsize(clip_path) == 0:
|
|
939
|
-
QMessageBox.warning(self, "Save failed", "Failed to save transition clip.")
|
|
940
|
-
return
|
|
941
|
-
|
|
942
|
-
clip_id = self._widget._clip_path_to_id(clip_path, clips_dir)
|
|
943
|
-
meta = {
|
|
944
|
-
"source_video": os.path.basename(self._widget.video_path),
|
|
945
|
-
"source_frame": int(clip_start_vid_frame),
|
|
946
|
-
"target_fps": int(target_fps),
|
|
947
|
-
"clip_length": int(clip_len),
|
|
948
|
-
"added_from_inference_transition": True,
|
|
949
|
-
"transition_direction": boundary_name,
|
|
950
|
-
"transition_from_label": left_label,
|
|
951
|
-
"transition_to_label": right_label,
|
|
952
|
-
"transition_boundary_frame": int(boundary_frame),
|
|
953
|
-
"transition_ignore_half_frames": int(ignore_half),
|
|
954
|
-
}
|
|
955
|
-
used_clip_id = annotation_manager.add_clip(clip_id, primary_label, meta=meta)
|
|
956
|
-
annotation_manager.set_frame_labels(used_clip_id, frame_labels)
|
|
957
|
-
|
|
958
|
-
n_left = sum(1 for x in frame_labels if x == left_label)
|
|
959
|
-
n_right = sum(1 for x in frame_labels if x == right_label)
|
|
960
|
-
n_ignored = sum(1 for x in frame_labels if x is None)
|
|
961
|
-
self._widget.log_text.append(
|
|
962
|
-
f"Added transition clip ({boundary_name}): {left_label}->{right_label}, "
|
|
963
|
-
f"frames={clip_len}, labels=({n_left}/{n_right}/ignored={n_ignored})"
|
|
964
|
-
)
|
|
965
|
-
QMessageBox.information(
|
|
966
|
-
self,
|
|
967
|
-
"Transition clip added",
|
|
968
|
-
f"Saved transition training clip.\n\n"
|
|
969
|
-
f"From: {left_label}\n"
|
|
970
|
-
f"To: {right_label}\n"
|
|
971
|
-
f"Direction: {boundary_name}\n"
|
|
972
|
-
f"Clip: {os.path.basename(clip_path)}\n"
|
|
973
|
-
f"Frame labels: {n_left} left, {n_right} right, {n_ignored} ignored",
|
|
974
|
-
)
|
|
975
|
-
except Exception as e:
|
|
976
|
-
QMessageBox.critical(self, "Error", f"Failed to add transition clip:\n{str(e)}")
|
|
977
|
-
|
|
978
|
-
transition_btn_row = QHBoxLayout()
|
|
979
|
-
prev_transition_btn = QPushButton("Add prev -> current transition clip")
|
|
980
|
-
next_transition_btn = QPushButton("Add current -> next transition clip")
|
|
981
|
-
prev_transition_btn.setStyleSheet("background-color: #4b7bec; color: white; font-weight: bold; padding: 5px;")
|
|
982
|
-
next_transition_btn.setStyleSheet("background-color: #4b7bec; color: white; font-weight: bold; padding: 5px;")
|
|
983
|
-
|
|
984
|
-
has_prev_seg = self._segment_idx is not None and self._segment_idx > 0
|
|
985
|
-
has_next_seg = self._segment_idx is not None and self._segment_idx < (len(self._widget.aggregated_segments) - 1)
|
|
986
|
-
prev_transition_btn.setEnabled(has_prev_seg)
|
|
987
|
-
next_transition_btn.setEnabled(has_next_seg)
|
|
988
|
-
prev_transition_btn.setToolTip("Create one fixed-length transition clip around the previous->current boundary.")
|
|
989
|
-
next_transition_btn.setToolTip("Create one fixed-length transition clip around the current->next boundary.")
|
|
990
|
-
|
|
991
|
-
def _add_prev_transition():
|
|
992
|
-
if not has_prev_seg:
|
|
993
|
-
return
|
|
994
|
-
prev_seg = self._widget.aggregated_segments[self._segment_idx - 1]
|
|
995
|
-
curr_seg = self._widget.aggregated_segments[self._segment_idx]
|
|
996
|
-
_extract_transition_clip(prev_seg, curr_seg, "prev_to_current")
|
|
997
|
-
|
|
998
|
-
def _add_next_transition():
|
|
999
|
-
if not has_next_seg:
|
|
1000
|
-
return
|
|
1001
|
-
curr_seg = self._widget.aggregated_segments[self._segment_idx]
|
|
1002
|
-
next_seg = self._widget.aggregated_segments[self._segment_idx + 1]
|
|
1003
|
-
_extract_transition_clip(curr_seg, next_seg, "current_to_next")
|
|
1004
|
-
|
|
1005
|
-
prev_transition_btn.clicked.connect(_add_prev_transition)
|
|
1006
|
-
next_transition_btn.clicked.connect(_add_next_transition)
|
|
1007
|
-
transition_btn_row.addWidget(prev_transition_btn)
|
|
1008
|
-
transition_btn_row.addWidget(next_transition_btn)
|
|
1009
|
-
training_layout.addLayout(transition_btn_row)
|
|
1010
|
-
|
|
1011
824
|
training_group.setLayout(training_layout)
|
|
1012
825
|
layout.addWidget(training_group)
|
|
1013
826
|
|