singlebehaviorlab 2.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- singlebehaviorlab-2.0.0/LICENSE +21 -0
- singlebehaviorlab-2.0.0/PKG-INFO +447 -0
- singlebehaviorlab-2.0.0/README.md +380 -0
- singlebehaviorlab-2.0.0/pyproject.toml +84 -0
- singlebehaviorlab-2.0.0/setup.cfg +4 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/__init__.py +4 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/__main__.py +130 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/_paths.py +100 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/backend/__init__.py +2 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/backend/augmentations.py +320 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/backend/data_store.py +420 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/backend/model.py +1290 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/backend/train.py +4667 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/backend/uncertainty.py +578 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/backend/video_processor.py +688 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/backend/video_utils.py +139 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/data/config/config.yaml +85 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/data/training_profiles.json +334 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/__init__.py +4 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/analysis_widget.py +2291 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/attention_export.py +311 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/clip_extraction_widget.py +481 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/clustering_widget.py +3187 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/inference_popups.py +1138 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/inference_widget.py +4550 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/inference_worker.py +651 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/labeling_widget.py +2324 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/main_window.py +754 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/metadata_management_widget.py +1119 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/motion_tracking.py +764 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/overlay_export.py +1234 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/plot_integration.py +729 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/qt_helpers.py +29 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/registration_widget.py +1485 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/review_widget.py +1330 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/segmentation_tracking_widget.py +2752 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/tab_tutorial_dialog.py +312 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/timeline_themes.py +131 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/training_profiles.py +418 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/training_widget.py +3719 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/gui/video_utils.py +233 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/licenses/SAM2-LICENSE +201 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab/licenses/VideoPrism-LICENSE +202 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab.egg-info/PKG-INFO +447 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab.egg-info/SOURCES.txt +96 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab.egg-info/dependency_links.txt +1 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab.egg-info/entry_points.txt +2 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab.egg-info/requires.txt +33 -0
- singlebehaviorlab-2.0.0/singlebehaviorlab.egg-info/top_level.txt +3 -0
- singlebehaviorlab-2.0.0/tests/test_clustering_smoke.py +164 -0
- singlebehaviorlab-2.0.0/tests/test_config.py +39 -0
- singlebehaviorlab-2.0.0/tests/test_motion_tracking.py +84 -0
- singlebehaviorlab-2.0.0/tests/test_paths.py +46 -0
- singlebehaviorlab-2.0.0/tests/test_sam2_smoke.py +58 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/__init__.py +11 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/automatic_mask_generator.py +454 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/benchmark.py +92 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/build_sam.py +174 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/__init__.py +5 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/backbones/__init__.py +5 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/backbones/hieradet.py +317 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/backbones/image_encoder.py +134 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/backbones/utils.py +93 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/memory_attention.py +169 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/memory_encoder.py +181 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/position_encoding.py +239 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/sam/__init__.py +5 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/sam/mask_decoder.py +295 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/sam/prompt_encoder.py +202 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/sam/transformer.py +311 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/sam2_base.py +913 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/modeling/sam2_utils.py +323 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/sam2_hiera_b+.yaml +113 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/sam2_hiera_l.yaml +117 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/sam2_hiera_s.yaml +116 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/sam2_hiera_t.yaml +118 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/sam2_image_predictor.py +466 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/sam2_video_predictor.py +1388 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/sam2_video_predictor_legacy.py +1172 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/utils/__init__.py +5 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/utils/amg.py +348 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/utils/misc.py +349 -0
- singlebehaviorlab-2.0.0/third_party/sam2_backend/sam2/utils/transforms.py +118 -0
- singlebehaviorlab-2.0.0/third_party/videoprism_backend/videoprism/__init__.py +0 -0
- singlebehaviorlab-2.0.0/third_party/videoprism_backend/videoprism/encoders.py +910 -0
- singlebehaviorlab-2.0.0/third_party/videoprism_backend/videoprism/layers.py +1136 -0
- singlebehaviorlab-2.0.0/third_party/videoprism_backend/videoprism/models.py +407 -0
- singlebehaviorlab-2.0.0/third_party/videoprism_backend/videoprism/tokenizers.py +167 -0
- singlebehaviorlab-2.0.0/third_party/videoprism_backend/videoprism/utils.py +168 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Almir Aljovic
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,447 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: singlebehaviorlab
|
|
3
|
+
Version: 2.0.0
|
|
4
|
+
Summary: Semi-automated behavioral video annotation, training, and analysis tool
|
|
5
|
+
Author: Almir Aljovic
|
|
6
|
+
Maintainer: Almir Aljovic
|
|
7
|
+
License: MIT License
|
|
8
|
+
|
|
9
|
+
Copyright (c) 2026 Almir Aljovic
|
|
10
|
+
|
|
11
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
12
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
13
|
+
in the Software without restriction, including without limitation the rights
|
|
14
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
15
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
16
|
+
furnished to do so, subject to the following conditions:
|
|
17
|
+
|
|
18
|
+
The above copyright notice and this permission notice shall be included in all
|
|
19
|
+
copies or substantial portions of the Software.
|
|
20
|
+
|
|
21
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
22
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
23
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
24
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
25
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
26
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
27
|
+
SOFTWARE.
|
|
28
|
+
|
|
29
|
+
Project-URL: Repository, https://github.com/alms93/SingleBehaviorLab
|
|
30
|
+
Project-URL: Issues, https://github.com/alms93/SingleBehaviorLab/issues
|
|
31
|
+
Requires-Python: >=3.10
|
|
32
|
+
Description-Content-Type: text/markdown
|
|
33
|
+
License-File: LICENSE
|
|
34
|
+
Requires-Dist: PyQt6==6.11.0
|
|
35
|
+
Requires-Dist: PyQt6-WebEngine==6.11.0
|
|
36
|
+
Requires-Dist: PyYAML==6.0.3
|
|
37
|
+
Requires-Dist: numpy==2.2.6
|
|
38
|
+
Requires-Dist: h5py==3.14.0
|
|
39
|
+
Requires-Dist: opencv-python==4.13.0.92
|
|
40
|
+
Requires-Dist: Pillow==12.1.1
|
|
41
|
+
Requires-Dist: scipy==1.15.3
|
|
42
|
+
Requires-Dist: eva-decord==0.6.1
|
|
43
|
+
Requires-Dist: scikit-learn==1.7.2
|
|
44
|
+
Requires-Dist: pandas==2.3.3
|
|
45
|
+
Requires-Dist: umap-learn==0.5.11
|
|
46
|
+
Requires-Dist: leidenalg==0.11.0
|
|
47
|
+
Requires-Dist: python-igraph==1.0.0
|
|
48
|
+
Requires-Dist: hdbscan==0.8.42
|
|
49
|
+
Requires-Dist: plotly==6.6.0
|
|
50
|
+
Requires-Dist: matplotlib==3.10.8
|
|
51
|
+
Requires-Dist: torch<2.12,>=2.8
|
|
52
|
+
Requires-Dist: torchvision<0.25,>=0.23
|
|
53
|
+
Requires-Dist: jax[cuda12]==0.6.2
|
|
54
|
+
Requires-Dist: flax==0.10.7
|
|
55
|
+
Requires-Dist: hydra-core>=1.3
|
|
56
|
+
Requires-Dist: iopath
|
|
57
|
+
Requires-Dist: einops
|
|
58
|
+
Requires-Dist: einshape
|
|
59
|
+
Requires-Dist: huggingface-hub
|
|
60
|
+
Requires-Dist: sentencepiece
|
|
61
|
+
Requires-Dist: absl-py
|
|
62
|
+
Requires-Dist: tensorflow-cpu
|
|
63
|
+
Provides-Extra: test
|
|
64
|
+
Requires-Dist: pytest; extra == "test"
|
|
65
|
+
Requires-Dist: pytest-cov; extra == "test"
|
|
66
|
+
Dynamic: license-file
|
|
67
|
+
|
|
68
|
+
# SingleBehaviorLab
|
|
69
|
+
|
|
70
|
+
**SingleBehaviorLab (SBL)** is a tool for behavior action localization in animal video. It supports lightweight few-shot training of behavior classifiers, referred to here as *behavior sequencing*, along with unsupervised behavior discovery for exploring unlabeled recordings, and a full GUI pipeline for downstream analysis: ethograms, behavior clustering, transition state analysis, and more comprehensive tools focused on postprocessing.
|
|
71
|
+
|
|
72
|
+
<table align="center">
|
|
73
|
+
<tr>
|
|
74
|
+
<td><img src="docs/behavior_seq.png" alt="Behavior Sequencing GUI" width="420"></td>
|
|
75
|
+
<td><img src="docs/behavior_clustering.png" alt="Unsupervised Discovery GUI" width="420"></td>
|
|
76
|
+
</tr>
|
|
77
|
+
<tr>
|
|
78
|
+
<td align="center"><em>Behavior Sequencing GUI</em></td>
|
|
79
|
+
<td align="center"><em>Unsupervised Discovery</em></td>
|
|
80
|
+
</tr>
|
|
81
|
+
</table>
|
|
82
|
+
<p align="center"><em>Figure 1 — The two main modules of SBL.</em></p>
|
|
83
|
+
|
|
84
|
+
**SBL demo videos:**
|
|
85
|
+
|
|
86
|
+
<table align="center">
|
|
87
|
+
<tr>
|
|
88
|
+
<td><a href="https://www.youtube.com/watch?v=Ov9rxxtYbXk"><img src="https://img.youtube.com/vi/Ov9rxxtYbXk/hqdefault.jpg" height="200" alt="SBL walkthrough demo"></a></td>
|
|
89
|
+
<td><a href="https://www.youtube.com/shorts/GEKvee0-Vvc"><img src="https://i.ytimg.com/vi/GEKvee0-Vvc/oar1.jpg" height="200" alt="SBL Shorts demo"></a></td>
|
|
90
|
+
<td><a href="https://www.youtube.com/shorts/2IZIfpOn6xo"><img src="https://i.ytimg.com/vi/2IZIfpOn6xo/oar1.jpg" height="200" alt="SBL Shorts demo"></a></td>
|
|
91
|
+
</tr>
|
|
92
|
+
</table>
|
|
93
|
+
|
|
94
|
+
---
|
|
95
|
+
|
|
96
|
+
## Table of Contents
|
|
97
|
+
|
|
98
|
+
1. [What You Need Before Starting](#1-what-you-need-before-starting)
|
|
99
|
+
2. [Installation](#2-installation)
|
|
100
|
+
3. [Launching the App](#3-launching-the-app)
|
|
101
|
+
4. [First Launch](#4-first-launch)
|
|
102
|
+
5. [How to Use](#5-how-to-use)
|
|
103
|
+
6. [Workflow Overview](#6-workflow-overview)
|
|
104
|
+
7. [Tabs Reference](#7-tabs-reference)
|
|
105
|
+
8. [SAM2 Models](#8-sam2-models)
|
|
106
|
+
9. [VideoPrism Backbone](#9-videoprism-backbone)
|
|
107
|
+
10. [GPU Memory Notes](#10-gpu-memory-notes)
|
|
108
|
+
11. [Keyboard Shortcuts](#11-keyboard-shortcuts)
|
|
109
|
+
12. [Directory Structure](#12-directory-structure)
|
|
110
|
+
13. [Troubleshooting](#13-troubleshooting)
|
|
111
|
+
|
|
112
|
+
---
|
|
113
|
+
|
|
114
|
+
## 1. What You Need Before Starting
|
|
115
|
+
|
|
116
|
+
| Requirement | Notes |
|
|
117
|
+
|---|---|
|
|
118
|
+
| **Operating System** | Linux (Ubuntu 20.04 or later recommended) |
|
|
119
|
+
| **GPU** | NVIDIA GPU with a CUDA 12-compatible driver **required**. 8 GB VRAM minimum; 12 GB+ comfortable for training. |
|
|
120
|
+
| **Python** | 3.10 or later |
|
|
121
|
+
| **Disk space** | ~10 GB free (SAM2 weights: ~3.5 GB, VideoPrism backbone: ~1 GB downloaded on first run, plus your experiment data) |
|
|
122
|
+
| **Internet** | Required on first launch only (to auto-download the VideoPrism backbone). After that the app works offline. |
|
|
123
|
+
|
|
124
|
+
To check your CUDA version:
|
|
125
|
+
```bash
|
|
126
|
+
nvidia-smi
|
|
127
|
+
```
|
|
128
|
+
Look for `CUDA Version: XX.X` in the top-right corner.
|
|
129
|
+
|
|
130
|
+
---
|
|
131
|
+
|
|
132
|
+
## 2. Installation
|
|
133
|
+
|
|
134
|
+
Install into a fresh virtual environment:
|
|
135
|
+
|
|
136
|
+
```bash
|
|
137
|
+
python -m venv sbl_env
|
|
138
|
+
source sbl_env/bin/activate
|
|
139
|
+
pip install singlebehaviorlab
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
This pulls in PyTorch (CUDA 12 build), JAX/Flax (CUDA 12), the vendored SAM2 fork, VideoPrism, and all other dependencies from PyPI in a single step. Installation takes 5–15 minutes and requires an NVIDIA GPU with a CUDA 12-compatible driver.
|
|
143
|
+
|
|
144
|
+
### Development install
|
|
145
|
+
|
|
146
|
+
Contributors working from a source checkout can use an editable install instead:
|
|
147
|
+
|
|
148
|
+
```bash
|
|
149
|
+
git clone https://github.com/alms93/SingleBehaviorLab
|
|
150
|
+
cd SingleBehaviorLab
|
|
151
|
+
pip install -e .
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
---
|
|
155
|
+
|
|
156
|
+
## 3. Launching the App
|
|
157
|
+
|
|
158
|
+
Activate the environment and run:
|
|
159
|
+
|
|
160
|
+
```bash
|
|
161
|
+
source sbl_env/bin/activate
|
|
162
|
+
singlebehaviorlab
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
Equivalent module form: `python -m singlebehaviorlab`.
|
|
166
|
+
|
|
167
|
+
---
|
|
168
|
+
|
|
169
|
+
## 4. First Launch
|
|
170
|
+
|
|
171
|
+
On first launch, two things happen automatically:
|
|
172
|
+
|
|
173
|
+
1. **The VideoPrism backbone downloads** (~1 GB from Google DeepMind). This happens once and is cached for all future sessions. You need an internet connection for this step.
|
|
174
|
+
|
|
175
|
+
2. **A startup dialog appears** asking you to choose:
|
|
176
|
+
- **Create New Experiment** — opens a dialog to name your experiment and choose a folder. The app creates a clean project directory with all required subfolders.
|
|
177
|
+
- **Load Existing Experiment** — opens a file browser to load a `config.yaml` from a previous experiment.
|
|
178
|
+
|
|
179
|
+
Each experiment stores everything in one self-contained folder:
|
|
180
|
+
```
|
|
181
|
+
your_experiment/
|
|
182
|
+
├── config.yaml # Experiment settings
|
|
183
|
+
├── data/
|
|
184
|
+
│ ├── raw_videos/ # Your input videos go here
|
|
185
|
+
│ ├── clips/ # Auto-extracted short clips
|
|
186
|
+
│ └── annotations/ # Label files (annotations.json)
|
|
187
|
+
└── models/
|
|
188
|
+
└── behavior_heads/ # Your trained model files (.pt)
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
---
|
|
192
|
+
|
|
193
|
+
## 5. How to Use
|
|
194
|
+
|
|
195
|
+
Start with raw videos, extract short clips, label behaviors, train a model, run inference on new recordings, and iteratively refine with active learning.
|
|
196
|
+
|
|
197
|
+
> **▶ Full user guide:** [**HOWTOUSE.md**](HOWTOUSE.md) — step-by-step walkthrough (mouse-in-cage example), every tab explained in detail, unbiased discovery path, and practical tips.
|
|
198
|
+
|
|
199
|
+
---
|
|
200
|
+
|
|
201
|
+
## 6. Workflow Overview
|
|
202
|
+
|
|
203
|
+
SingleBehaviorLab has two complementary pipelines: a supervised pipeline that takes you from raw video to a trained behavior classifier, and an unsupervised discovery pipeline that uses segmentation, registration, and clustering to surface structure in your data. The two pipelines can be used independently, or clustering results can be fed back into labeling and training to refine the supervised model. Each step corresponds to a tab in the app.
|
|
204
|
+
|
|
205
|
+
### Supervised pipeline
|
|
206
|
+
|
|
207
|
+
```
|
|
208
|
+
Your raw video(s)
|
|
209
|
+
│
|
|
210
|
+
▼
|
|
211
|
+
┌─────────────────────────────┐
|
|
212
|
+
│ 1. Labeling │ ← Assign behavior labels to clips
|
|
213
|
+
│ → outputs: annotations │ (keyboard shortcuts 1–9)
|
|
214
|
+
└────────────┬────────────────┘
|
|
215
|
+
│
|
|
216
|
+
▼
|
|
217
|
+
┌─────────────────────────────┐
|
|
218
|
+
│ 2. Training │ ← Train a classifier on your labeled clips
|
|
219
|
+
│ → outputs: model (.pt) │
|
|
220
|
+
└────────────┬────────────────┘
|
|
221
|
+
│
|
|
222
|
+
▼
|
|
223
|
+
┌─────────────────────────────┐
|
|
224
|
+
│ 3. Inference │ ← Run your model on new videos
|
|
225
|
+
│ → outputs: behavior │ Generates the per-frame behavior sequence
|
|
226
|
+
│ sequence / timeline │ (ethogram)
|
|
227
|
+
└────────────┬────────────────┘
|
|
228
|
+
│
|
|
229
|
+
▼
|
|
230
|
+
┌─────────────────────────────┐
|
|
231
|
+
│ 4. Refinement │ ← Review uncertain predictions, correct
|
|
232
|
+
│ → outputs: more labels │ mistakes, and retrain for better accuracy
|
|
233
|
+
└────────────┬────────────────┘
|
|
234
|
+
│
|
|
235
|
+
▼
|
|
236
|
+
┌─────────────────────────────┐
|
|
237
|
+
│ 5. Analysis │ ← Ethograms, statistics, export videos
|
|
238
|
+
└─────────────────────────────┘
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
### Unsupervised discovery pipeline
|
|
242
|
+
|
|
243
|
+
Use this pipeline when you don't yet know what behaviors to label, or when you want to surface rare or unknown behaviors before training. It can be run stand-alone, or its outputs can be fed back into **Labeling → Training** to refine the supervised model.
|
|
244
|
+
|
|
245
|
+
```
|
|
246
|
+
Your raw video(s)
|
|
247
|
+
│
|
|
248
|
+
▼
|
|
249
|
+
┌─────────────────────────────┐
|
|
250
|
+
│ A. Segmentation & Tracking │ ← SAM2 segments and tracks animals
|
|
251
|
+
│ → outputs: mask files │
|
|
252
|
+
└────────────┬────────────────┘
|
|
253
|
+
│
|
|
254
|
+
▼
|
|
255
|
+
┌─────────────────────────────┐
|
|
256
|
+
│ B. Registration │ ← Crops around the animal, normalizes,
|
|
257
|
+
│ → outputs: embeddings │ extracts VideoPrism features
|
|
258
|
+
└────────────┬────────────────┘
|
|
259
|
+
│
|
|
260
|
+
▼
|
|
261
|
+
┌─────────────────────────────┐
|
|
262
|
+
│ C. Clustering │ ← Groups embeddings by similarity
|
|
263
|
+
│ → outputs: clip groups │ (UMAP + Leiden/HDBSCAN)
|
|
264
|
+
└────────────┬────────────────┘
|
|
265
|
+
│
|
|
266
|
+
│ feedback loop: pick representative clips from each
|
|
267
|
+
│ cluster and feed them into Labeling → Training
|
|
268
|
+
▼
|
|
269
|
+
Labeling (step 1 of the supervised pipeline)
|
|
270
|
+
```
|
|
271
|
+
|
|
272
|
+
---
|
|
273
|
+
|
|
274
|
+
## 7. Tabs Reference
|
|
275
|
+
|
|
276
|
+
*Supervised pipeline:*
|
|
277
|
+
|
|
278
|
+
### Labeling
|
|
279
|
+
- Browse clips in the left panel; click to preview in the video player
|
|
280
|
+
- Press **1–9** to assign a behavior class, or use the class buttons
|
|
281
|
+
- Press **Ctrl+S** to save; move to the next clip
|
|
282
|
+
- Frame-level labels can be drawn on the timeline for clips with mixed behaviors
|
|
283
|
+
|
|
284
|
+
### Training
|
|
285
|
+
- Select the behavior classes to train
|
|
286
|
+
- Choose a preset training profile or configure hyperparameters manually
|
|
287
|
+
- Click **Start Training** and monitor loss/accuracy in real time
|
|
288
|
+
- The best model checkpoint is saved automatically to `models/behavior_heads/`
|
|
289
|
+
|
|
290
|
+
### Inference
|
|
291
|
+
- Load a trained model (`.pt` file)
|
|
292
|
+
- Select a video to run predictions on
|
|
293
|
+
- The app outputs a color-coded per-frame behavior sequence (ethogram)
|
|
294
|
+
- Clips are ranked by prediction uncertainty for efficient review
|
|
295
|
+
|
|
296
|
+
### Refinement
|
|
297
|
+
- Review clips flagged as uncertain by the model
|
|
298
|
+
- Accept correct predictions or reassign labels
|
|
299
|
+
- Corrected clips are added to your annotation set
|
|
300
|
+
- Retrain from the Training tab to improve accuracy
|
|
301
|
+
|
|
302
|
+
### Analysis
|
|
303
|
+
- Generate ethograms (behavior-over-time plots)
|
|
304
|
+
- Compute per-class duration, frequency, and transitions
|
|
305
|
+
- Export annotated video overlays
|
|
306
|
+
- Export data tables (CSV) for statistical analysis
|
|
307
|
+
|
|
308
|
+
*Unsupervised discovery pipeline:*
|
|
309
|
+
|
|
310
|
+
### Segmentation & Tracking
|
|
311
|
+
- Load a raw video
|
|
312
|
+
- Click points on the animal(s) you want to track — SAM2 segments them automatically
|
|
313
|
+
- Adjust the segmentation mask if needed, then click **Track** to propagate across all frames
|
|
314
|
+
- Exports `.h5` mask files used by the Registration tab
|
|
315
|
+
|
|
316
|
+
### Registration
|
|
317
|
+
- Load your video + its mask file
|
|
318
|
+
- Configure cropping (box size) and normalization (CLAHE recommended)
|
|
319
|
+
- Click **Extract Embeddings** — VideoPrism processes each frame and saves feature vectors
|
|
320
|
+
- These embeddings feed directly into Clustering
|
|
321
|
+
|
|
322
|
+
### Clustering
|
|
323
|
+
- Load embeddings from Registration
|
|
324
|
+
- Run UMAP to reduce to 2D, then cluster with Leiden or HDBSCAN
|
|
325
|
+
- Visualize clusters interactively — each point is a short clip
|
|
326
|
+
- Select representative clips from each cluster and send them back to **Labeling → Training** to bootstrap or refine your supervised model
|
|
327
|
+
|
|
328
|
+
---
|
|
329
|
+
|
|
330
|
+
## 8. SAM2 Models
|
|
331
|
+
|
|
332
|
+
SingleBehaviorLab ships a locally modified build of [SAM2](https://github.com/facebookresearch/sam2) vendored inside the package. The modifications adjust the video predictor's memory handling so that long recordings (thousands of frames) can be segmented without exhausting GPU memory; the model weights and architecture are unchanged. All credit for the underlying model and checkpoints goes to the original SAM2 authors at Meta AI — see the upstream repository for the model cards and license.
|
|
333
|
+
|
|
334
|
+
SAM2 checkpoints download automatically on first use.
|
|
335
|
+
|
|
336
|
+
| Model | Size | Speed | Quality | Recommended for |
|
|
337
|
+
|---|---|---|---|---|
|
|
338
|
+
| SAM2.1 Tiny | 39M | Fastest | Good | Quick exploration, limited GPU memory |
|
|
339
|
+
| SAM2.1 Small | 46M | Fast | Better | General use |
|
|
340
|
+
| SAM2.1 Base+ | 80M | Moderate | High | Standard choice |
|
|
341
|
+
| SAM2.1 Large | 224M | Slowest | Best | High-quality tracking, multi-animal |
|
|
342
|
+
|
|
343
|
+
---
|
|
344
|
+
|
|
345
|
+
## 9. VideoPrism Backbone
|
|
346
|
+
|
|
347
|
+
[VideoPrism](https://github.com/google-deepmind/videoprism) (`videoprism_public_v1_base`) is the frozen video feature extractor at the core of SingleBehaviorLab.
|
|
348
|
+
|
|
349
|
+
- **Downloaded automatically on first launch** from Google DeepMind (~1 GB). Internet required once.
|
|
350
|
+
- Cached in `models/videoprism_backbone/` after first download.
|
|
351
|
+
- You do **not** need to interact with it directly — the app handles loading and inference.
|
|
352
|
+
|
|
353
|
+
---
|
|
354
|
+
|
|
355
|
+
## 10. GPU Memory Notes
|
|
356
|
+
|
|
357
|
+
SingleBehaviorLab runs two GPU frameworks simultaneously:
|
|
358
|
+
|
|
359
|
+
| Framework | Use | Memory allocation |
|
|
360
|
+
|---|---|---|
|
|
361
|
+
| JAX | VideoPrism backbone (feature extraction) | Capped at 45% of GPU VRAM |
|
|
362
|
+
| PyTorch | Classification head training and inference | Uses remaining ~55% |
|
|
363
|
+
|
|
364
|
+
- JAX grows memory on demand (no pre-allocation) to coexist with PyTorch.
|
|
365
|
+
- For a GPU with 8 GB VRAM: extraction and inference work comfortably. Training with large batch sizes may need batch size reduction.
|
|
366
|
+
- For 12 GB+ VRAM: all operations including batch training run without issues.
|
|
367
|
+
- If you get out-of-memory errors during training, reduce the batch size in the Training tab.
|
|
368
|
+
|
|
369
|
+
---
|
|
370
|
+
|
|
371
|
+
## 11. Keyboard Shortcuts
|
|
372
|
+
|
|
373
|
+
| Key | Action |
|
|
374
|
+
|---|---|
|
|
375
|
+
| `1` – `9` | Assign behavior class (by position in class list) |
|
|
376
|
+
| `Space` | Play / pause video |
|
|
377
|
+
| `Ctrl+S` | Save current label |
|
|
378
|
+
| `Ctrl+O` | Open video file |
|
|
379
|
+
| `Ctrl+Q` | Quit application |
|
|
380
|
+
|
|
381
|
+
---
|
|
382
|
+
|
|
383
|
+
## 12. Directory Structure
|
|
384
|
+
|
|
385
|
+
After `pip install`, the application code lives in your Python environment and experiments are created in a folder of your choice. The source repository layout (useful for contributors):
|
|
386
|
+
|
|
387
|
+
```
|
|
388
|
+
SingleBehaviorLab/
|
|
389
|
+
├── pyproject.toml # Package metadata and dependencies
|
|
390
|
+
├── README.md
|
|
391
|
+
├── HOWTOUSE.md
|
|
392
|
+
│
|
|
393
|
+
├── singlebehaviorlab/ # Main package (all app code)
|
|
394
|
+
│ ├── __main__.py # Entry point for `singlebehaviorlab`
|
|
395
|
+
│ ├── backend/ # Core ML and data processing
|
|
396
|
+
│ │ ├── model.py # VideoPrism + BehaviorClassifier head
|
|
397
|
+
│ │ ├── train.py # Training loop
|
|
398
|
+
│ │ ├── data_store.py # Annotation file manager
|
|
399
|
+
│ │ ├── video_processor.py # Mask-based video processing
|
|
400
|
+
│ │ ├── augmentations.py # Data augmentation
|
|
401
|
+
│ │ ├── uncertainty.py # Active-learning uncertainty scoring
|
|
402
|
+
│ │ └── video_utils.py # Video I/O helpers
|
|
403
|
+
│ ├── gui/ # PyQt6 interface
|
|
404
|
+
│ │ ├── main_window.py # Main tabbed window
|
|
405
|
+
│ │ ├── labeling_widget.py # Clip labeling
|
|
406
|
+
│ │ ├── training_widget.py # Training UI
|
|
407
|
+
│ │ ├── inference_widget.py # Inference and timeline
|
|
408
|
+
│ │ ├── review_widget.py # Active-learning review
|
|
409
|
+
│ │ ├── analysis_widget.py # Analysis and export
|
|
410
|
+
│ │ ├── segmentation_tracking_widget.py
|
|
411
|
+
│ │ ├── registration_widget.py
|
|
412
|
+
│ │ ├── clustering_widget.py
|
|
413
|
+
│ │ └── ... # Supporting widgets and helpers
|
|
414
|
+
│ ├── data/ # Bundled config template and presets
|
|
415
|
+
│ └── licenses/ # Third-party license notices (SAM2, VideoPrism)
|
|
416
|
+
│
|
|
417
|
+
├── third_party/ # Vendored upstream code
|
|
418
|
+
│ ├── sam2_backend/ # Memory-optimized SAM2 fork
|
|
419
|
+
│ │ └── sam2/ # Shipped as the `sam2` package
|
|
420
|
+
│ │ # (upstream: facebookresearch/sam2)
|
|
421
|
+
│ └── videoprism_backend/
|
|
422
|
+
│ └── videoprism/ # Shipped as the `videoprism` package
|
|
423
|
+
│ # (upstream: google-deepmind/videoprism)
|
|
424
|
+
│
|
|
425
|
+
└── tests/
|
|
426
|
+
```
|
|
427
|
+
|
|
428
|
+
A typical experiment directory (created and managed by the app):
|
|
429
|
+
```
|
|
430
|
+
my_experiment/
|
|
431
|
+
├── config.yaml
|
|
432
|
+
├── data/
|
|
433
|
+
│ ├── raw_videos/ # Your input videos
|
|
434
|
+
│ ├── clips/ # Auto-extracted short clips
|
|
435
|
+
│ └── annotations/
|
|
436
|
+
└── models/
|
|
437
|
+
└── behavior_heads/ # Trained classifier checkpoints
|
|
438
|
+
```
|
|
439
|
+
|
|
440
|
+
---
|
|
441
|
+
|
|
442
|
+
## 13. Troubleshooting
|
|
443
|
+
|
|
444
|
+
- **Out of memory during training.** Reduce **Batch Size** in the Training tab (try 8 or 4), or reduce **Clip Length**.
|
|
445
|
+
- **App window doesn't open (no display).** SingleBehaviorLab requires a graphical desktop and cannot run headless. For remote servers, use X11 forwarding: `ssh -X user@host` then `singlebehaviorlab`.
|
|
446
|
+
- **`nvidia-smi` shows a CUDA 11 driver.** The application requires a CUDA 12-compatible driver. Update the NVIDIA driver before installing.
|
|
447
|
+
- **PyPI resolution conflict between `torch` and `jax`.** The wheel pins `torch>=2.8` so its bundled cuDNN matches JAX's ABI. If an older `torch` is already installed in the environment, upgrade it: `pip install -U "torch>=2.8"`.
|